Dataset columns and string-length ranges:
    hip_filename    string, length 5–84
    hip_content     string, length 79–9.69M
    cuda_filename   string, length 4–83
    cuda_content    string, length 19–9.69M
f38111d6d890aed73d9241cb966ab3dfcd30f192.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ //Based on http://www.iti.fh-flensburg.de/lang/algorithmen/sortieren/bitonic/bitonicen.htm #include <assert.h> //#include <helper_cuda.h> #include "sortingNetworks_common.h" #include "sortingNetworks_common.cuh" //////////////////////////////////////////////////////////////////////////////// // Monolithic bitonic sort kernel for short arrays fitting into shared memory //////////////////////////////////////////////////////////////////////////////// __global__ void bitonicSortShared( float *d_DstKey, uint *d_DstVal, float *d_SrcKey, uint *d_SrcVal, uint arrayLength, uint dir ) { //Shared memory storage for one or more short vectors __shared__ float s_key[SHARED_SIZE_LIMIT]; __shared__ uint s_val[SHARED_SIZE_LIMIT]; //Offset to the beginning of subbatch and load data d_SrcKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; d_SrcVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; d_DstKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; d_DstVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; s_key[threadIdx.x + 0] = d_SrcKey[ 0]; s_val[threadIdx.x + 0] = d_SrcVal[ 0]; s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)]; s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcVal[(SHARED_SIZE_LIMIT / 2)]; for (uint size = 2; size < arrayLength; size <<= 1) { //Bitonic merge uint ddd = dir ^ ((threadIdx.x & (size / 2)) != 0); for (uint stride = size / 2; stride > 0; stride >>= 1) { __syncthreads(); uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1)); Comparator( s_key[pos + 0], s_val[pos + 0], s_key[pos + stride], s_val[pos + stride], ddd ); } } //ddd == dir for the last bitonic merge step { for (uint stride = arrayLength / 2; stride > 0; stride >>= 1) { __syncthreads(); uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1)); Comparator( s_key[pos + 0], s_val[pos + 0], s_key[pos + stride], s_val[pos + stride], dir ); } } __syncthreads(); d_DstKey[ 0] = s_key[threadIdx.x + 0]; d_DstVal[ 0] = s_val[threadIdx.x + 0]; d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)]; d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)]; } //////////////////////////////////////////////////////////////////////////////// // Bitonic sort kernel for large arrays (not fitting into shared memory) //////////////////////////////////////////////////////////////////////////////// //Bottom-level bitonic sort //Almost the same as bitonicSortShared with the exception of //even / odd subarrays being sorted in opposite directions //Bitonic merge accepts both //Ascending | descending or descending | ascending sorted pairs __global__ void bitonicSortShared1( float *d_DstKey, uint *d_DstVal, float *d_SrcKey, uint *d_SrcVal ) { //Shared memory storage for current subarray __shared__ float s_key[SHARED_SIZE_LIMIT]; __shared__ uint s_val[SHARED_SIZE_LIMIT]; //Offset to the beginning of subarray and load data d_SrcKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; d_SrcVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; d_DstKey += blockIdx.x * SHARED_SIZE_LIMIT + 
threadIdx.x; d_DstVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; s_key[threadIdx.x + 0] = d_SrcKey[ 0]; s_val[threadIdx.x + 0] = d_SrcVal[ 0]; s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)]; s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcVal[(SHARED_SIZE_LIMIT / 2)]; for (uint size = 2; size < SHARED_SIZE_LIMIT; size <<= 1) { //Bitonic merge uint ddd = (threadIdx.x & (size / 2)) != 0; for (uint stride = size / 2; stride > 0; stride >>= 1) { __syncthreads(); uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1)); Comparator( s_key[pos + 0], s_val[pos + 0], s_key[pos + stride], s_val[pos + stride], ddd ); } } //Odd / even arrays of SHARED_SIZE_LIMIT elements //sorted in opposite directions uint ddd = blockIdx.x & 1; { for (uint stride = SHARED_SIZE_LIMIT / 2; stride > 0; stride >>= 1) { __syncthreads(); uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1)); Comparator( s_key[pos + 0], s_val[pos + 0], s_key[pos + stride], s_val[pos + stride], ddd ); } } __syncthreads(); d_DstKey[ 0] = s_key[threadIdx.x + 0]; d_DstVal[ 0] = s_val[threadIdx.x + 0]; d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)]; d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)]; } //Bitonic merge iteration for stride >= SHARED_SIZE_LIMIT __global__ void bitonicMergeGlobal( float *d_DstKey, uint *d_DstVal, float *d_SrcKey, uint *d_SrcVal, uint arrayLength, uint size, uint stride, uint dir ) { uint global_comparatorI = blockIdx.x * blockDim.x + threadIdx.x; uint comparatorI = global_comparatorI & (arrayLength / 2 - 1); //Bitonic merge uint ddd = dir ^ ((comparatorI & (size / 2)) != 0); uint pos = 2 * global_comparatorI - (global_comparatorI & (stride - 1)); float keyA = d_SrcKey[pos + 0]; uint valA = d_SrcVal[pos + 0]; float keyB = d_SrcKey[pos + stride]; uint valB = d_SrcVal[pos + stride]; Comparator( keyA, valA, keyB, valB, ddd ); d_DstKey[pos + 0] = keyA; d_DstVal[pos + 0] = valA; d_DstKey[pos + stride] = keyB; d_DstVal[pos + stride] = valB; } //Combined bitonic merge steps for //size > SHARED_SIZE_LIMIT and stride = [1 .. 
SHARED_SIZE_LIMIT / 2] __global__ void bitonicMergeShared( float *d_DstKey, uint *d_DstVal, float *d_SrcKey, uint *d_SrcVal, uint arrayLength, uint size, uint dir ) { //Shared memory storage for current subarray __shared__ float s_key[SHARED_SIZE_LIMIT]; __shared__ uint s_val[SHARED_SIZE_LIMIT]; d_SrcKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; d_SrcVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; d_DstKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; d_DstVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; s_key[threadIdx.x + 0] = d_SrcKey[ 0]; s_val[threadIdx.x + 0] = d_SrcVal[ 0]; s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)]; s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcVal[(SHARED_SIZE_LIMIT / 2)]; //Bitonic merge uint comparatorI = UMAD(blockIdx.x, blockDim.x, threadIdx.x) & ((arrayLength / 2) - 1); uint ddd = dir ^ ((comparatorI & (size / 2)) != 0); for (uint stride = SHARED_SIZE_LIMIT / 2; stride > 0; stride >>= 1) { __syncthreads(); uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1)); Comparator( s_key[pos + 0], s_val[pos + 0], s_key[pos + stride], s_val[pos + stride], ddd ); } __syncthreads(); d_DstKey[ 0] = s_key[threadIdx.x + 0]; d_DstVal[ 0] = s_val[threadIdx.x + 0]; d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)]; d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)]; } //////////////////////////////////////////////////////////////////////////////// // Interface function //////////////////////////////////////////////////////////////////////////////// //Helper function (also used by odd-even merge sort) extern "C" uint factorRadix2(uint *log2L, uint L) { if (!L) { *log2L = 0; return 0; } else { for (*log2L = 0; (L & 1) == 0; L >>= 1, *log2L++); return L; } } extern "C" uint bitonicSortShared( float *d_DstKey, uint *d_DstVal, float *d_SrcKey, uint *d_SrcVal, uint batchSize, uint arrayLength, uint dir ) { uint blockCount = 1; uint threadCount = arrayLength; bitonicSortShared << <blockCount, threadCount >> >(d_DstKey, d_DstVal, d_SrcKey, d_SrcVal, arrayLength, dir); return threadCount; } extern "C" uint bitonicSort( float *d_DstKey, uint *d_DstVal, float *d_SrcKey, uint *d_SrcVal, uint batchSize, uint arrayLength, uint dir ) { //Nothing to sort if (arrayLength < 2) return 0; //Only power-of-two array lengths are supported by this implementation uint log2L; uint factorizationRemainder = factorRadix2(&log2L, arrayLength); assert(factorizationRemainder == 1); dir = (dir != 0); uint blockCount = batchSize * arrayLength / SHARED_SIZE_LIMIT; uint threadCount = SHARED_SIZE_LIMIT / 2; if (arrayLength <= SHARED_SIZE_LIMIT) { assert((batchSize * arrayLength) % SHARED_SIZE_LIMIT == 0); hipLaunchKernelGGL(( bitonicSortShared), dim3(blockCount), dim3(threadCount), 0, 0, d_DstKey, d_DstVal, d_SrcKey, d_SrcVal, arrayLength, dir); } else { hipLaunchKernelGGL(( bitonicSortShared1), dim3(blockCount), dim3(threadCount), 0, 0, d_DstKey, d_DstVal, d_SrcKey, d_SrcVal); for (uint size = 2 * SHARED_SIZE_LIMIT; size <= arrayLength; size <<= 1) for (unsigned stride = size / 2; stride > 0; stride >>= 1) if (stride >= SHARED_SIZE_LIMIT) { hipLaunchKernelGGL(( bitonicMergeGlobal), dim3((batchSize * arrayLength) / 512), dim3(256), 0, 0, d_DstKey, d_DstVal, d_DstKey, d_DstVal, arrayLength, size, stride, dir); } else { hipLaunchKernelGGL(( bitonicMergeShared), dim3(blockCount), dim3(threadCount), 0, 0, d_DstKey, d_DstVal, d_DstKey, d_DstVal, arrayLength, size, dir); break; } } return 
threadCount; }
f38111d6d890aed73d9241cb966ab3dfcd30f192.cu
/* * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ //Based on http://www.iti.fh-flensburg.de/lang/algorithmen/sortieren/bitonic/bitonicen.htm #include <assert.h> //#include <helper_cuda.h> #include "sortingNetworks_common.h" #include "sortingNetworks_common.cuh" //////////////////////////////////////////////////////////////////////////////// // Monolithic bitonic sort kernel for short arrays fitting into shared memory //////////////////////////////////////////////////////////////////////////////// __global__ void bitonicSortShared( float *d_DstKey, uint *d_DstVal, float *d_SrcKey, uint *d_SrcVal, uint arrayLength, uint dir ) { //Shared memory storage for one or more short vectors __shared__ float s_key[SHARED_SIZE_LIMIT]; __shared__ uint s_val[SHARED_SIZE_LIMIT]; //Offset to the beginning of subbatch and load data d_SrcKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; d_SrcVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; d_DstKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; d_DstVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; s_key[threadIdx.x + 0] = d_SrcKey[ 0]; s_val[threadIdx.x + 0] = d_SrcVal[ 0]; s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)]; s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcVal[(SHARED_SIZE_LIMIT / 2)]; for (uint size = 2; size < arrayLength; size <<= 1) { //Bitonic merge uint ddd = dir ^ ((threadIdx.x & (size / 2)) != 0); for (uint stride = size / 2; stride > 0; stride >>= 1) { __syncthreads(); uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1)); Comparator( s_key[pos + 0], s_val[pos + 0], s_key[pos + stride], s_val[pos + stride], ddd ); } } //ddd == dir for the last bitonic merge step { for (uint stride = arrayLength / 2; stride > 0; stride >>= 1) { __syncthreads(); uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1)); Comparator( s_key[pos + 0], s_val[pos + 0], s_key[pos + stride], s_val[pos + stride], dir ); } } __syncthreads(); d_DstKey[ 0] = s_key[threadIdx.x + 0]; d_DstVal[ 0] = s_val[threadIdx.x + 0]; d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)]; d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)]; } //////////////////////////////////////////////////////////////////////////////// // Bitonic sort kernel for large arrays (not fitting into shared memory) //////////////////////////////////////////////////////////////////////////////// //Bottom-level bitonic sort //Almost the same as bitonicSortShared with the exception of //even / odd subarrays being sorted in opposite directions //Bitonic merge accepts both //Ascending | descending or descending | ascending sorted pairs __global__ void bitonicSortShared1( float *d_DstKey, uint *d_DstVal, float *d_SrcKey, uint *d_SrcVal ) { //Shared memory storage for current subarray __shared__ float s_key[SHARED_SIZE_LIMIT]; __shared__ uint s_val[SHARED_SIZE_LIMIT]; //Offset to the beginning of subarray and load data d_SrcKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; d_SrcVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; d_DstKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; d_DstVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; s_key[threadIdx.x + 0] = 
d_SrcKey[ 0]; s_val[threadIdx.x + 0] = d_SrcVal[ 0]; s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)]; s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcVal[(SHARED_SIZE_LIMIT / 2)]; for (uint size = 2; size < SHARED_SIZE_LIMIT; size <<= 1) { //Bitonic merge uint ddd = (threadIdx.x & (size / 2)) != 0; for (uint stride = size / 2; stride > 0; stride >>= 1) { __syncthreads(); uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1)); Comparator( s_key[pos + 0], s_val[pos + 0], s_key[pos + stride], s_val[pos + stride], ddd ); } } //Odd / even arrays of SHARED_SIZE_LIMIT elements //sorted in opposite directions uint ddd = blockIdx.x & 1; { for (uint stride = SHARED_SIZE_LIMIT / 2; stride > 0; stride >>= 1) { __syncthreads(); uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1)); Comparator( s_key[pos + 0], s_val[pos + 0], s_key[pos + stride], s_val[pos + stride], ddd ); } } __syncthreads(); d_DstKey[ 0] = s_key[threadIdx.x + 0]; d_DstVal[ 0] = s_val[threadIdx.x + 0]; d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)]; d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)]; } //Bitonic merge iteration for stride >= SHARED_SIZE_LIMIT __global__ void bitonicMergeGlobal( float *d_DstKey, uint *d_DstVal, float *d_SrcKey, uint *d_SrcVal, uint arrayLength, uint size, uint stride, uint dir ) { uint global_comparatorI = blockIdx.x * blockDim.x + threadIdx.x; uint comparatorI = global_comparatorI & (arrayLength / 2 - 1); //Bitonic merge uint ddd = dir ^ ((comparatorI & (size / 2)) != 0); uint pos = 2 * global_comparatorI - (global_comparatorI & (stride - 1)); float keyA = d_SrcKey[pos + 0]; uint valA = d_SrcVal[pos + 0]; float keyB = d_SrcKey[pos + stride]; uint valB = d_SrcVal[pos + stride]; Comparator( keyA, valA, keyB, valB, ddd ); d_DstKey[pos + 0] = keyA; d_DstVal[pos + 0] = valA; d_DstKey[pos + stride] = keyB; d_DstVal[pos + stride] = valB; } //Combined bitonic merge steps for //size > SHARED_SIZE_LIMIT and stride = [1 .. 
SHARED_SIZE_LIMIT / 2] __global__ void bitonicMergeShared( float *d_DstKey, uint *d_DstVal, float *d_SrcKey, uint *d_SrcVal, uint arrayLength, uint size, uint dir ) { //Shared memory storage for current subarray __shared__ float s_key[SHARED_SIZE_LIMIT]; __shared__ uint s_val[SHARED_SIZE_LIMIT]; d_SrcKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; d_SrcVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; d_DstKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; d_DstVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; s_key[threadIdx.x + 0] = d_SrcKey[ 0]; s_val[threadIdx.x + 0] = d_SrcVal[ 0]; s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)]; s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcVal[(SHARED_SIZE_LIMIT / 2)]; //Bitonic merge uint comparatorI = UMAD(blockIdx.x, blockDim.x, threadIdx.x) & ((arrayLength / 2) - 1); uint ddd = dir ^ ((comparatorI & (size / 2)) != 0); for (uint stride = SHARED_SIZE_LIMIT / 2; stride > 0; stride >>= 1) { __syncthreads(); uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1)); Comparator( s_key[pos + 0], s_val[pos + 0], s_key[pos + stride], s_val[pos + stride], ddd ); } __syncthreads(); d_DstKey[ 0] = s_key[threadIdx.x + 0]; d_DstVal[ 0] = s_val[threadIdx.x + 0]; d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)]; d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)]; } //////////////////////////////////////////////////////////////////////////////// // Interface function //////////////////////////////////////////////////////////////////////////////// //Helper function (also used by odd-even merge sort) extern "C" uint factorRadix2(uint *log2L, uint L) { if (!L) { *log2L = 0; return 0; } else { for (*log2L = 0; (L & 1) == 0; L >>= 1, *log2L++); return L; } } extern "C" uint bitonicSortShared( float *d_DstKey, uint *d_DstVal, float *d_SrcKey, uint *d_SrcVal, uint batchSize, uint arrayLength, uint dir ) { uint blockCount = 1; uint threadCount = arrayLength; bitonicSortShared << <blockCount, threadCount >> >(d_DstKey, d_DstVal, d_SrcKey, d_SrcVal, arrayLength, dir); return threadCount; } extern "C" uint bitonicSort( float *d_DstKey, uint *d_DstVal, float *d_SrcKey, uint *d_SrcVal, uint batchSize, uint arrayLength, uint dir ) { //Nothing to sort if (arrayLength < 2) return 0; //Only power-of-two array lengths are supported by this implementation uint log2L; uint factorizationRemainder = factorRadix2(&log2L, arrayLength); assert(factorizationRemainder == 1); dir = (dir != 0); uint blockCount = batchSize * arrayLength / SHARED_SIZE_LIMIT; uint threadCount = SHARED_SIZE_LIMIT / 2; if (arrayLength <= SHARED_SIZE_LIMIT) { assert((batchSize * arrayLength) % SHARED_SIZE_LIMIT == 0); bitonicSortShared<<<blockCount, threadCount>>>(d_DstKey, d_DstVal, d_SrcKey, d_SrcVal, arrayLength, dir); } else { bitonicSortShared1<<<blockCount, threadCount>>>(d_DstKey, d_DstVal, d_SrcKey, d_SrcVal); for (uint size = 2 * SHARED_SIZE_LIMIT; size <= arrayLength; size <<= 1) for (unsigned stride = size / 2; stride > 0; stride >>= 1) if (stride >= SHARED_SIZE_LIMIT) { bitonicMergeGlobal<<<(batchSize * arrayLength) / 512, 256>>>(d_DstKey, d_DstVal, d_DstKey, d_DstVal, arrayLength, size, stride, dir); } else { bitonicMergeShared<<<blockCount, threadCount>>>(d_DstKey, d_DstVal, d_DstKey, d_DstVal, arrayLength, size, dir); break; } } return threadCount; }
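The pair above shows the core host-side rewrite in this dataset: hipify replaces the CUDA triple-chevron launch with hipLaunchKernelGGL, passing the grid and block as explicit dim3 values followed by the dynamic shared-memory size and stream. A minimal sketch of that mapping, with an illustrative kernel and launch configuration that are not taken from the dataset:

// Minimal sketch of the launch-syntax rewrite seen in the pair above.
// The kernel name and launch sizes are illustrative only.
#include "hip/hip_runtime.h"

__global__ void scaleKernel(float *data, float factor)
{
    data[blockIdx.x * blockDim.x + threadIdx.x] *= factor;
}

void launchScale(float *d_data, float factor)
{
    // CUDA source form (as in the .cu file):
    //     scaleKernel<<<4, 256>>>(d_data, factor);
    // Hipified form (as in the .hip file): kernel, grid, block,
    // dynamic shared-memory bytes, stream, then the kernel arguments.
    hipLaunchKernelGGL(scaleKernel, dim3(4), dim3(256), 0, 0, d_data, factor);
}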
d75b9f675459f18d6529ea670ed1e0c49d01b4c4.hip
// !!! This is a file automatically generated by hipify!!! #include <cmath> #include <cstdlib> #include <cstdio> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "LatticeBoltzmann.cuh" __device__ const double C_S = 0.57735; //__device__ const double TAU2 = 0.505556; //__device__ const double RHO_0 = 1.; __device__ const double c_l[9 * 2] = //VELOCITY COMPONENTS { 0.,0. , 1.,0. , 0.,1. , -1.,0. , 0.,-1. , 1.,1. , -1.,1. , -1.,-1. , 1.,-1. }; __device__ const double t[9] = //WEIGHT VALUES { 4. / 9, 1. / 9, 1. / 9, 1. / 9, 1. / 9, 1. / 36, 1. / 36, 1. / 36, 1. / 36 }; __global__ void equilibrium(const double * u, const double * rho, double * f0, const double * force, double * F, int XDIM, int YDIM, double TAU) { unsigned int i(0), j(0); int threadnum = blockIdx.x*blockDim.x + threadIdx.x; double vec[2] = { 0,0 }; { j = threadnum; for (i = 0; i < 9; i++) { f0[9 * j + i] = rho[j] * t[i] * (1 + (u[2 * j + 0] * c_l[2 * i + 0] + u[2 * j + 1] * c_l[2 * i + 1]) / (C_S*C_S) + (u[2 * j + 0] * c_l[2 * i + 0] + u[2 * j + 1] * c_l[2 * i + 1])*(u[2 * j + 0] * c_l[2 * i + 0] + u[2 * j + 1] * c_l[2 * i + 1]) / (2 * C_S*C_S*C_S*C_S) - (u[2 * j + 0] * u[2 * j + 0] + u[2 * j + 1] * u[2 * j + 1]) / (2 * C_S*C_S)); vec[0] = (c_l[i * 2 + 0] - u[i * 2 + 0]) / (C_S*C_S) + (c_l[i * 2 + 0] * u[i * 2 + 0] + c_l[i * 2 + 1] * u[i * 2 + 1]) / (C_S*C_S*C_S*C_S) * c_l[i * 2 + 0]; vec[1] = (c_l[i * 2 + 1] - u[i * 2 + 1]) / (C_S*C_S) + (c_l[i * 2 + 0] * u[i * 2 + 0] + c_l[i * 2 + 1] * u[i * 2 + 1]) / (C_S*C_S*C_S*C_S) * c_l[i * 2 + 1]; F[9 * j + i] = (1. - 1. / (2. * TAU))*t[i] * (vec[0] * force[j * 2 + 0] + vec[1] * force[j * 2 + 1]); } } } __global__ void collision(const double * f0, const double * f, double * f1, const double * F, double TAU, double TAU2, int XDIM, int YDIM, int it) { unsigned int j(0); //double rho_set = 1.; //double u_set[2] = { 0.00004,0. }; //double u_s[2] = { 0.,0. 
}; double omega_plus = 1 / TAU; double omega_minus = 1 / TAU2; double f_plus(0.), f_minus(0.), f0_plus(0.), f0_minus(0.); int threadnum = blockIdx.x*blockDim.x + threadIdx.x; { j = threadnum; //for (i = 0; i < 9; i++) { //f1[9 * j + i] = (1 - (1 / TAU[0]))*f[9 * j + i] + (1 / TAU[0])*f0[9 * j + i] + F[j * 9 + i]; f1[9 * j + 0] = f[9 * j + 0] - omega_plus*(f[9 * j + 0] - f0[9 * j + 0]) + F[j * 9 + 0]; f_plus = (f[9 * j + 1] + f[9 * j + 3]) / 2.; f_minus = (f[9 * j + 1] - f[9 * j + 3]) / 2.; f0_plus = (f0[9 * j + 1] + f0[9 * j + 3]) / 2.; f0_minus = (f0[9 * j + 1] - f0[9 * j + 3]) / 2.; f1[9 * j + 1] = f[9 * j + 1] - omega_plus*(f_plus - f0_plus) - omega_minus*(f_minus - f0_minus); f_minus *= -1.; f0_minus *= -1.; f1[9 * j + 3] = f[9 * j + 3] - omega_plus*(f_plus - f0_plus) - omega_minus*(f_minus - f0_minus); f_plus = (f[9 * j + 2] + f[9 * j + 4]) / 2.; f_minus = (f[9 * j + 2] - f[9 * j + 4]) / 2.; f0_plus = (f0[9 * j + 2] + f0[9 * j + 4]) / 2.; f0_minus = (f0[9 * j + 2] - f0[9 * j + 4]) / 2.; f1[9 * j + 2] = f[9 * j + 2] - omega_plus*(f_plus - f0_plus) - omega_minus*(f_minus - f0_minus); f_minus *= -1.; f0_minus *= -1.; f1[9 * j + 4] = f[9 * j + 4] - omega_plus*(f_plus - f0_plus) - omega_minus*(f_minus - f0_minus); f_plus = (f[9 * j + 5] + f[9 * j + 7]) / 2.; f_minus = (f[9 * j + 5] - f[9 * j + 7]) / 2.; f0_plus = (f0[9 * j + 5] + f0[9 * j + 7]) / 2.; f0_minus = (f0[9 * j + 5] - f0[9 * j + 7]) / 2.; f1[9 * j + 5] = f[9 * j + 5] - omega_plus*(f_plus - f0_plus) - omega_minus*(f_minus - f0_minus); f_minus *= -1.; f0_minus *= -1.; f1[9 * j + 7] = f[9 * j + 7] - omega_plus*(f_plus - f0_plus) - omega_minus*(f_minus - f0_minus); f_plus = (f[9 * j + 6] + f[9 * j + 8]) / 2.; f_minus = (f[9 * j + 6] - f[9 * j + 8]) / 2.; f0_plus = (f0[9 * j + 6] + f0[9 * j + 8]) / 2.; f0_minus = (f0[9 * j + 6] - f0[9 * j + 8]) / 2.; f1[9 * j + 6] = f[9 * j + 6] - omega_plus*(f_plus - f0_plus) - omega_minus*(f_minus - f0_minus); f_minus *= -1.; f0_minus *= -1.; f1[9 * j + 8] = f[9 * j + 8] - omega_plus*(f_plus - f0_plus) - omega_minus*(f_minus - f0_minus); } //--------------------------------ZOU-HE VELOCITY BOUNDARY------------------------- /* if (j%XDIM[0] == 0) //LEFT { //rho_set = 1 / (1 - u_set[0])*(f[9 * j + 0] + f[9 * j + 2] + f[9 * j + 4] + 2 * (f[9 * j + 3] + f[9 * j + 6] + f[9 * j + 7])); rho_set = RHO_0; f1[9 * j + 1] = f[9 * j + 3] + (2./3.)*rho_set*u_set[0]; f1[9 * j + 5] = f[9 * j + 7] - 0.5*(f[9 * j + 2] - f[9 * j + 4]) + 0.5*rho_set*u_set[1] + (1. / 6.)*rho_set*u_set[0]; f1[9 * j + 8] = f[9 * j + 6] + 0.5*(f[9 * j + 2] - f[9 * j + 4]) - 0.5*rho_set*u_set[1] + (1. / 6.)*rho_set*u_set[0]; } */ /* if (j % XDIM[0] == XDIM[0]-1 ) //RIGHT { rho_set = RHO_0; u_s[0] = 1. - (f[9 * j + 0] + f[9 * j + 2] + f[9 * j + 4] + 2. * (f[9 * j + 1] + f[9 * j + 5] + f[9 * j + 8]))/rho_set; u_s[1] = 0.; f1[9 * j + 3] = f[9 * j + 1] + (2. / 3.)*rho_set*u_s[0]; f1[9 * j + 7] = f[9 * j + 5] - 0.5*(f[9 * j + 4] - f[9 * j + 2]) + 0.5*rho_set*u_s[1] + (1. / 6.)*rho_set*u_s[0]; f1[9 * j + 6] = f[9 * j + 8] + 0.5*(f[9 * j + 4] - f[9 * j + 2]) - 0.5*rho_set*u_s[1] + (1. 
/ 6.)*rho_set*u_s[0]; } */ } } __global__ void streaming(const double * f1, double * f, int XDIM, int YDIM) { int threadnum = blockIdx.x*blockDim.x + threadIdx.x; unsigned int i(0), j(0), k(0); unsigned int jstream(0); bool back(0), thru(0), done(0), slip(0); bool up(0), down(0), left(0), right(0); int x(0), y(0); { j = threadnum; x = j%XDIM; y = (j - j%XDIM) / XDIM; //------------------------------------WALL CONDITIONS------------------------------------------------ up = 0; down = 0; left = 0; right = 0; if (y == YDIM - 1) up = 1; if (y == 0) down = 1; if (x == 0) left = 1; if (x == XDIM - 1) right = 1; for (i = 0; i < 9; i++) { //cout << i << endl; back = 0; thru = 0; done = 0; slip = 0; //---------------------------------------------------MID GRID NON-SLIP BOUNDARY------------------------------ if (down || up || left || right) { switch (i) { case 0: break; case 1: if (right) { thru = 1; break; } break; case 2: if (up) { slip = 1; break; } break; case 3: if (left) { thru = 1; break; } break; case 4: if (down) { back = 1; break; } break; case 5: /* if (up && left) { jstream = j - (XDIM[0] - 1)*c_l[7 * 2 + 0] + XDIM[0] * c_l[7 * 2 + 1]; //THROUGH STREAM 7 k = 7; done = 1; break; } */ if (up) { slip = 1; break; } else if (right) { thru = 1; break; } break; case 6: /* if (up && right) { jstream = j - (XDIM[0] - 1)*c_l[8 * 2 + 0] + XDIM[0] * c_l[8 * 2 + 1]; //THROUGH STREAM 8 k = 8; done = 1; break; } */ if (up) { slip = 1; break; } else if (left) { thru = 1; break; } break; case 7: /* if (down && right) { jstream = j - (XDIM[0] - 1)*c_l[5 * 2 + 0] + XDIM[0] * c_l[5 * 2 + 1]; //THROUGH STREAM 5 k = 5; done = 1; break; } */ if (down) { back = 1; break; } else if (left) { thru = 1; break; } break; case 8: /* if (down && left) { jstream = j - (XDIM[0] - 1)*c_l[6 * 2 + 0] + XDIM[0] * c_l[6 * 2 + 1]; //THROUGH STREAM 6 k = 6; done = 1; break; } */ if (down) { back = 1; break; } else if (right) { thru = 1; break; } break; } } //--------------------------------------------------STREAMING CALCULATIONS------------------------------- if (back && !done) { jstream = j; //BACK STREAM if (i == 1) k = 3; if (i == 2) k = 4; if (i == 3) k = 1; if (i == 4) k = 2; if (i == 5) k = 7; if (i == 6) k = 8; if (i == 7) k = 5; if (i == 8) k = 6; } else if (slip && !done) { jstream = j; //SLIP STREAM if (i == 1) k = 3; if (i == 2) k = 4; if (i == 3) k = 1; if (i == 4) k = 2; if (i == 5) k = 8; if (i == 6) k = 7; if (i == 7) k = 6; if (i == 8) k = 5; } else if (thru && !done) { jstream = j - (XDIM-1)*c_l[i * 2 + 0] + XDIM*c_l[i * 2 + 1]; //THROUGH STREAM k = i; } else if (!done) { jstream = j + c_l[i * 2 + 0] + XDIM*c_l[i * 2 + 1]; //NORMAL STREAM k = i; } f[9 * jstream + k] = f1[9 * j + i]; //STREAM TO ADJACENT CELL IN DIRECTION OF MOVEMENT } } } __global__ void macro(const double * f, double * u, double * rho, int XDIM, int YDIM) { int threadnum = blockIdx.x*blockDim.x + threadIdx.x; unsigned int i(0), j(0); double momentum[2] = { 0,0 }; { j = threadnum; rho[j] = 0; u[2 * j + 0] = 0; u[2 * j + 1] = 0; momentum[0] = 0; momentum[1] = 0; for (i = 0; i < 9; i++) { rho[j] += f[9 * j + i]; momentum[0] += c_l[i * 2 + 0] * f[9 * j + i]; momentum[1] += c_l[i * 2 + 1] * f[9 * j + i]; } u[2 * j + 0] = momentum[0] / rho[j]; u[2 * j + 1] = momentum[1] / rho[j]; } }
d75b9f675459f18d6529ea670ed1e0c49d01b4c4.cu
#include <cmath> #include <cstdlib> #include <cstdio> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "LatticeBoltzmann.cuh" __device__ const double C_S = 0.57735; //__device__ const double TAU2 = 0.505556; //__device__ const double RHO_0 = 1.; __device__ const double c_l[9 * 2] = //VELOCITY COMPONENTS { 0.,0. , 1.,0. , 0.,1. , -1.,0. , 0.,-1. , 1.,1. , -1.,1. , -1.,-1. , 1.,-1. }; __device__ const double t[9] = //WEIGHT VALUES { 4. / 9, 1. / 9, 1. / 9, 1. / 9, 1. / 9, 1. / 36, 1. / 36, 1. / 36, 1. / 36 }; __global__ void equilibrium(const double * u, const double * rho, double * f0, const double * force, double * F, int XDIM, int YDIM, double TAU) { unsigned int i(0), j(0); int threadnum = blockIdx.x*blockDim.x + threadIdx.x; double vec[2] = { 0,0 }; { j = threadnum; for (i = 0; i < 9; i++) { f0[9 * j + i] = rho[j] * t[i] * (1 + (u[2 * j + 0] * c_l[2 * i + 0] + u[2 * j + 1] * c_l[2 * i + 1]) / (C_S*C_S) + (u[2 * j + 0] * c_l[2 * i + 0] + u[2 * j + 1] * c_l[2 * i + 1])*(u[2 * j + 0] * c_l[2 * i + 0] + u[2 * j + 1] * c_l[2 * i + 1]) / (2 * C_S*C_S*C_S*C_S) - (u[2 * j + 0] * u[2 * j + 0] + u[2 * j + 1] * u[2 * j + 1]) / (2 * C_S*C_S)); vec[0] = (c_l[i * 2 + 0] - u[i * 2 + 0]) / (C_S*C_S) + (c_l[i * 2 + 0] * u[i * 2 + 0] + c_l[i * 2 + 1] * u[i * 2 + 1]) / (C_S*C_S*C_S*C_S) * c_l[i * 2 + 0]; vec[1] = (c_l[i * 2 + 1] - u[i * 2 + 1]) / (C_S*C_S) + (c_l[i * 2 + 0] * u[i * 2 + 0] + c_l[i * 2 + 1] * u[i * 2 + 1]) / (C_S*C_S*C_S*C_S) * c_l[i * 2 + 1]; F[9 * j + i] = (1. - 1. / (2. * TAU))*t[i] * (vec[0] * force[j * 2 + 0] + vec[1] * force[j * 2 + 1]); } } } __global__ void collision(const double * f0, const double * f, double * f1, const double * F, double TAU, double TAU2, int XDIM, int YDIM, int it) { unsigned int j(0); //double rho_set = 1.; //double u_set[2] = { 0.00004,0. }; //double u_s[2] = { 0.,0. 
}; double omega_plus = 1 / TAU; double omega_minus = 1 / TAU2; double f_plus(0.), f_minus(0.), f0_plus(0.), f0_minus(0.); int threadnum = blockIdx.x*blockDim.x + threadIdx.x; { j = threadnum; //for (i = 0; i < 9; i++) { //f1[9 * j + i] = (1 - (1 / TAU[0]))*f[9 * j + i] + (1 / TAU[0])*f0[9 * j + i] + F[j * 9 + i]; f1[9 * j + 0] = f[9 * j + 0] - omega_plus*(f[9 * j + 0] - f0[9 * j + 0]) + F[j * 9 + 0]; f_plus = (f[9 * j + 1] + f[9 * j + 3]) / 2.; f_minus = (f[9 * j + 1] - f[9 * j + 3]) / 2.; f0_plus = (f0[9 * j + 1] + f0[9 * j + 3]) / 2.; f0_minus = (f0[9 * j + 1] - f0[9 * j + 3]) / 2.; f1[9 * j + 1] = f[9 * j + 1] - omega_plus*(f_plus - f0_plus) - omega_minus*(f_minus - f0_minus); f_minus *= -1.; f0_minus *= -1.; f1[9 * j + 3] = f[9 * j + 3] - omega_plus*(f_plus - f0_plus) - omega_minus*(f_minus - f0_minus); f_plus = (f[9 * j + 2] + f[9 * j + 4]) / 2.; f_minus = (f[9 * j + 2] - f[9 * j + 4]) / 2.; f0_plus = (f0[9 * j + 2] + f0[9 * j + 4]) / 2.; f0_minus = (f0[9 * j + 2] - f0[9 * j + 4]) / 2.; f1[9 * j + 2] = f[9 * j + 2] - omega_plus*(f_plus - f0_plus) - omega_minus*(f_minus - f0_minus); f_minus *= -1.; f0_minus *= -1.; f1[9 * j + 4] = f[9 * j + 4] - omega_plus*(f_plus - f0_plus) - omega_minus*(f_minus - f0_minus); f_plus = (f[9 * j + 5] + f[9 * j + 7]) / 2.; f_minus = (f[9 * j + 5] - f[9 * j + 7]) / 2.; f0_plus = (f0[9 * j + 5] + f0[9 * j + 7]) / 2.; f0_minus = (f0[9 * j + 5] - f0[9 * j + 7]) / 2.; f1[9 * j + 5] = f[9 * j + 5] - omega_plus*(f_plus - f0_plus) - omega_minus*(f_minus - f0_minus); f_minus *= -1.; f0_minus *= -1.; f1[9 * j + 7] = f[9 * j + 7] - omega_plus*(f_plus - f0_plus) - omega_minus*(f_minus - f0_minus); f_plus = (f[9 * j + 6] + f[9 * j + 8]) / 2.; f_minus = (f[9 * j + 6] - f[9 * j + 8]) / 2.; f0_plus = (f0[9 * j + 6] + f0[9 * j + 8]) / 2.; f0_minus = (f0[9 * j + 6] - f0[9 * j + 8]) / 2.; f1[9 * j + 6] = f[9 * j + 6] - omega_plus*(f_plus - f0_plus) - omega_minus*(f_minus - f0_minus); f_minus *= -1.; f0_minus *= -1.; f1[9 * j + 8] = f[9 * j + 8] - omega_plus*(f_plus - f0_plus) - omega_minus*(f_minus - f0_minus); } //--------------------------------ZOU-HE VELOCITY BOUNDARY------------------------- /* if (j%XDIM[0] == 0) //LEFT { //rho_set = 1 / (1 - u_set[0])*(f[9 * j + 0] + f[9 * j + 2] + f[9 * j + 4] + 2 * (f[9 * j + 3] + f[9 * j + 6] + f[9 * j + 7])); rho_set = RHO_0; f1[9 * j + 1] = f[9 * j + 3] + (2./3.)*rho_set*u_set[0]; f1[9 * j + 5] = f[9 * j + 7] - 0.5*(f[9 * j + 2] - f[9 * j + 4]) + 0.5*rho_set*u_set[1] + (1. / 6.)*rho_set*u_set[0]; f1[9 * j + 8] = f[9 * j + 6] + 0.5*(f[9 * j + 2] - f[9 * j + 4]) - 0.5*rho_set*u_set[1] + (1. / 6.)*rho_set*u_set[0]; } */ /* if (j % XDIM[0] == XDIM[0]-1 ) //RIGHT { rho_set = RHO_0; u_s[0] = 1. - (f[9 * j + 0] + f[9 * j + 2] + f[9 * j + 4] + 2. * (f[9 * j + 1] + f[9 * j + 5] + f[9 * j + 8]))/rho_set; u_s[1] = 0.; f1[9 * j + 3] = f[9 * j + 1] + (2. / 3.)*rho_set*u_s[0]; f1[9 * j + 7] = f[9 * j + 5] - 0.5*(f[9 * j + 4] - f[9 * j + 2]) + 0.5*rho_set*u_s[1] + (1. / 6.)*rho_set*u_s[0]; f1[9 * j + 6] = f[9 * j + 8] + 0.5*(f[9 * j + 4] - f[9 * j + 2]) - 0.5*rho_set*u_s[1] + (1. 
/ 6.)*rho_set*u_s[0]; } */ } } __global__ void streaming(const double * f1, double * f, int XDIM, int YDIM) { int threadnum = blockIdx.x*blockDim.x + threadIdx.x; unsigned int i(0), j(0), k(0); unsigned int jstream(0); bool back(0), thru(0), done(0), slip(0); bool up(0), down(0), left(0), right(0); int x(0), y(0); { j = threadnum; x = j%XDIM; y = (j - j%XDIM) / XDIM; //------------------------------------WALL CONDITIONS------------------------------------------------ up = 0; down = 0; left = 0; right = 0; if (y == YDIM - 1) up = 1; if (y == 0) down = 1; if (x == 0) left = 1; if (x == XDIM - 1) right = 1; for (i = 0; i < 9; i++) { //cout << i << endl; back = 0; thru = 0; done = 0; slip = 0; //---------------------------------------------------MID GRID NON-SLIP BOUNDARY------------------------------ if (down || up || left || right) { switch (i) { case 0: break; case 1: if (right) { thru = 1; break; } break; case 2: if (up) { slip = 1; break; } break; case 3: if (left) { thru = 1; break; } break; case 4: if (down) { back = 1; break; } break; case 5: /* if (up && left) { jstream = j - (XDIM[0] - 1)*c_l[7 * 2 + 0] + XDIM[0] * c_l[7 * 2 + 1]; //THROUGH STREAM 7 k = 7; done = 1; break; } */ if (up) { slip = 1; break; } else if (right) { thru = 1; break; } break; case 6: /* if (up && right) { jstream = j - (XDIM[0] - 1)*c_l[8 * 2 + 0] + XDIM[0] * c_l[8 * 2 + 1]; //THROUGH STREAM 8 k = 8; done = 1; break; } */ if (up) { slip = 1; break; } else if (left) { thru = 1; break; } break; case 7: /* if (down && right) { jstream = j - (XDIM[0] - 1)*c_l[5 * 2 + 0] + XDIM[0] * c_l[5 * 2 + 1]; //THROUGH STREAM 5 k = 5; done = 1; break; } */ if (down) { back = 1; break; } else if (left) { thru = 1; break; } break; case 8: /* if (down && left) { jstream = j - (XDIM[0] - 1)*c_l[6 * 2 + 0] + XDIM[0] * c_l[6 * 2 + 1]; //THROUGH STREAM 6 k = 6; done = 1; break; } */ if (down) { back = 1; break; } else if (right) { thru = 1; break; } break; } } //--------------------------------------------------STREAMING CALCULATIONS------------------------------- if (back && !done) { jstream = j; //BACK STREAM if (i == 1) k = 3; if (i == 2) k = 4; if (i == 3) k = 1; if (i == 4) k = 2; if (i == 5) k = 7; if (i == 6) k = 8; if (i == 7) k = 5; if (i == 8) k = 6; } else if (slip && !done) { jstream = j; //SLIP STREAM if (i == 1) k = 3; if (i == 2) k = 4; if (i == 3) k = 1; if (i == 4) k = 2; if (i == 5) k = 8; if (i == 6) k = 7; if (i == 7) k = 6; if (i == 8) k = 5; } else if (thru && !done) { jstream = j - (XDIM-1)*c_l[i * 2 + 0] + XDIM*c_l[i * 2 + 1]; //THROUGH STREAM k = i; } else if (!done) { jstream = j + c_l[i * 2 + 0] + XDIM*c_l[i * 2 + 1]; //NORMAL STREAM k = i; } f[9 * jstream + k] = f1[9 * j + i]; //STREAM TO ADJACENT CELL IN DIRECTION OF MOVEMENT } } } __global__ void macro(const double * f, double * u, double * rho, int XDIM, int YDIM) { int threadnum = blockIdx.x*blockDim.x + threadIdx.x; unsigned int i(0), j(0); double momentum[2] = { 0,0 }; { j = threadnum; rho[j] = 0; u[2 * j + 0] = 0; u[2 * j + 1] = 0; momentum[0] = 0; momentum[1] = 0; for (i = 0; i < 9; i++) { rho[j] += f[9 * j + i]; momentum[0] += c_l[i * 2 + 0] * f[9 * j + i]; momentum[1] += c_l[i * 2 + 1] * f[9 * j + i]; } u[2 * j + 0] = momentum[0] / rho[j]; u[2 * j + 1] = momentum[1] / rho[j]; } }
3f303d1577a27f7bb22d77bf9233b6e0ab91610a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // RUN: %clang_cc1 %s -triple nvptx-unknown-unknown -fcuda-is-device -emit-llvm -o - | FileCheck %s #include "Inputs/cuda.h" #define MAX_THREADS_PER_BLOCK 256 #define MIN_BLOCKS_PER_MP 2 // Test both max threads per block and Min cta per sm. extern "C" { __global__ void __launch_bounds__( MAX_THREADS_PER_BLOCK, MIN_BLOCKS_PER_MP ) Kernel1() { } } // CHECK: !{{[0-9]+}} = !{void ()* @Kernel1, !"maxntidx", i32 256} // CHECK: !{{[0-9]+}} = !{void ()* @Kernel1, !"minctasm", i32 2} // Test only max threads per block. Min cta per sm defaults to 0, and // CodeGen doesn't output a zero value for minctasm. extern "C" { __global__ void __launch_bounds__( MAX_THREADS_PER_BLOCK ) Kernel2() { } } // CHECK: !{{[0-9]+}} = !{void ()* @Kernel2, !"maxntidx", i32 256} template <int max_threads_per_block> __global__ void __launch_bounds__(max_threads_per_block) Kernel3() { } template void Kernel3<MAX_THREADS_PER_BLOCK>(); // CHECK: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel3{{.*}}, !"maxntidx", i32 256} template <int max_threads_per_block, int min_blocks_per_mp> __global__ void __launch_bounds__(max_threads_per_block, min_blocks_per_mp) Kernel4() { } template void Kernel4<MAX_THREADS_PER_BLOCK, MIN_BLOCKS_PER_MP>(); // CHECK: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel4{{.*}}, !"maxntidx", i32 256} // CHECK: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel4{{.*}}, !"minctasm", i32 2} const int constint = 100; template <int max_threads_per_block, int min_blocks_per_mp> __global__ void __launch_bounds__(max_threads_per_block + constint, min_blocks_per_mp + max_threads_per_block) Kernel5() { } template void Kernel5<MAX_THREADS_PER_BLOCK, MIN_BLOCKS_PER_MP>(); // CHECK: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel5{{.*}}, !"maxntidx", i32 356} // CHECK: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel5{{.*}}, !"minctasm", i32 258} // Make sure we don't emit negative launch bounds values. __global__ void __launch_bounds__( -MAX_THREADS_PER_BLOCK, MIN_BLOCKS_PER_MP ) Kernel6() { } // CHECK-NOT: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel6{{.*}}, !"maxntidx", // CHECK: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel6{{.*}}, !"minctasm", __global__ void __launch_bounds__( MAX_THREADS_PER_BLOCK, -MIN_BLOCKS_PER_MP ) Kernel7() { } // CHECK: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel7{{.*}}, !"maxntidx", // CHECK-NOT: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel7{{.*}}, !"minctasm",
3f303d1577a27f7bb22d77bf9233b6e0ab91610a.cu
// RUN: %clang_cc1 %s -triple nvptx-unknown-unknown -fcuda-is-device -emit-llvm -o - | FileCheck %s #include "Inputs/cuda.h" #define MAX_THREADS_PER_BLOCK 256 #define MIN_BLOCKS_PER_MP 2 // Test both max threads per block and Min cta per sm. extern "C" { __global__ void __launch_bounds__( MAX_THREADS_PER_BLOCK, MIN_BLOCKS_PER_MP ) Kernel1() { } } // CHECK: !{{[0-9]+}} = !{void ()* @Kernel1, !"maxntidx", i32 256} // CHECK: !{{[0-9]+}} = !{void ()* @Kernel1, !"minctasm", i32 2} // Test only max threads per block. Min cta per sm defaults to 0, and // CodeGen doesn't output a zero value for minctasm. extern "C" { __global__ void __launch_bounds__( MAX_THREADS_PER_BLOCK ) Kernel2() { } } // CHECK: !{{[0-9]+}} = !{void ()* @Kernel2, !"maxntidx", i32 256} template <int max_threads_per_block> __global__ void __launch_bounds__(max_threads_per_block) Kernel3() { } template void Kernel3<MAX_THREADS_PER_BLOCK>(); // CHECK: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel3{{.*}}, !"maxntidx", i32 256} template <int max_threads_per_block, int min_blocks_per_mp> __global__ void __launch_bounds__(max_threads_per_block, min_blocks_per_mp) Kernel4() { } template void Kernel4<MAX_THREADS_PER_BLOCK, MIN_BLOCKS_PER_MP>(); // CHECK: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel4{{.*}}, !"maxntidx", i32 256} // CHECK: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel4{{.*}}, !"minctasm", i32 2} const int constint = 100; template <int max_threads_per_block, int min_blocks_per_mp> __global__ void __launch_bounds__(max_threads_per_block + constint, min_blocks_per_mp + max_threads_per_block) Kernel5() { } template void Kernel5<MAX_THREADS_PER_BLOCK, MIN_BLOCKS_PER_MP>(); // CHECK: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel5{{.*}}, !"maxntidx", i32 356} // CHECK: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel5{{.*}}, !"minctasm", i32 258} // Make sure we don't emit negative launch bounds values. __global__ void __launch_bounds__( -MAX_THREADS_PER_BLOCK, MIN_BLOCKS_PER_MP ) Kernel6() { } // CHECK-NOT: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel6{{.*}}, !"maxntidx", // CHECK: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel6{{.*}}, !"minctasm", __global__ void __launch_bounds__( MAX_THREADS_PER_BLOCK, -MIN_BLOCKS_PER_MP ) Kernel7() { } // CHECK: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel7{{.*}}, !"maxntidx", // CHECK-NOT: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel7{{.*}}, !"minctasm",
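In this pair, a clang CodeGen test, hipify only prepends its banner comment and the hip_runtime header; __launch_bounds__ is spelled the same in HIP and CUDA, so the kernels and CHECK lines are untouched. A minimal sketch of the attribute as it appears on the HIP side, with an illustrative kernel body:

// Sketch: __launch_bounds__ carries over unchanged from CUDA to HIP;
// only the runtime include is added by hipify. Kernel body is illustrative.
#include "hip/hip_runtime.h"

#define MAX_THREADS_PER_BLOCK 256
#define MIN_BLOCKS_PER_MP 2

__global__ void
__launch_bounds__(MAX_THREADS_PER_BLOCK, MIN_BLOCKS_PER_MP)
boundedKernel(float *out)
{
    out[blockIdx.x * blockDim.x + threadIdx.x] = 1.0f;
}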
1a4e73a25e73f847f4811a119f19d5d5f8f08573.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <hiprand/hiprand_kernel.h> #include <stdio.h> #include "params.h" __device__ int getIndex(int t_x, int t_y) { // calculate full index from a grid position int indx = __mul24(blockIdx.x,blockDim.x) + t_x; return __mul24(t_y, __mul24(gridDim.x, blockDim.x)) + indx; } __global__ void d_initRands(hiprandState_t *state, int seed) { int id = getIndex(threadIdx.x, threadIdx.y); /* Each thread gets same seed, a different sequence * number, no offset */ hiprand_init(seed, id, 0, &state[id]); } __global__ void d_generateOU(hiprandState_t *state, float* ou_process, float* wg) { int id = getIndex(threadIdx.x, threadIdx.y); float last = ou_process[id]; float nm = hiprand_normal(&state[id]); float dt = 1.0f; float wgi = wg[id]; float lambda = 1.0f; float sigma = 1.0f; ou_process[id] = (last*exp(-wgi * dt)) + (lambda * (1 - exp(-wgi*dt))) + ( nm * sigma * sqrtf((1.0f-exp(-2.0f*wgi*dt))/(2.0f*wgi)) ); } __global__ void d_updateStates(int* states, float* ou_process, float* wg, int N_x, hiprandState_t* d_rands, float sw) { int id = getIndex(threadIdx.x, threadIdx.y); int edges=8; int neigh[8][2] = { { 1, 1 }, { 1, 0 }, { 1, -1 } , { 0, 1 }, { 0, -1 }, { -1, -1 } , { -1, 0 }, { -1, 1 } }; int deltan = 0; for (int e=0;e<edges;e++) { if (hiprand_uniform(&d_rands[id])<sw) { int n2 = hiprand_uniform(&d_rands[id])*8; if (n2==8) n2 = 0; int x_n = (((threadIdx.x + neigh[n2][0]) % N_x) + N_x) % N_x; int y_n = (((threadIdx.y + neigh[n2][1]) % N_x) + N_x) % N_x; int n2_id = getIndex(x_n, y_n); if (states[n2_id]>0.5) deltan++; } else { int x_n = hiprand_uniform(&d_rands[id])*N_x; int y_n = hiprand_uniform(&d_rands[id])*N_x; if (x_n==N_x) x_n = 0; if (y_n==N_x) y_n = 0; int n2_id = getIndex(x_n, y_n); if (states[n2_id]>0.5) deltan++; } } // deltan is N+ right now but we want (N+ - N-) deltan*=2; deltan-=edges; float pup = exp(-4.0f*wg[id]*ou_process[id]); float pall = pup*powf((1.0f - ws)/ws,deltan); int newState; if (pall<1.0f) newState = 1; else newState = 0; __syncthreads(); states[id] = newState; } __global__ void d_recordData(int* states, int* net, int N_x, float* d_up, float* d_down, int* d_upcount, int* d_downcount, int t) { int index_x = blockIdx.x * blockDim.x + threadIdx.x; int index_y = blockIdx.y * blockDim.y + threadIdx.y; int grid_width = gridDim.x * blockDim.x; int group_id = threadIdx.y * N_x + threadIdx.x; int N = N_x*N_x; if (group_id==0) { { int totalUp = 0; for (int i=0;i<N;i++) if (states[blockIdx.y * N + i] > 0) totalUp++; int nowDown = 0; float pcDown = 0.0f; float pcUp = 0.0f; for (int i=0;i<N;i++) { int pop_id = blockIdx.y*N + i; int up =0; for (int j=0;j<N;j++) if (net[pop_id * N + j]>0) if (states[blockIdx.y * N + j]>0) up++; if (states[blockIdx.y * N + i]>0) { if ((float)up/3.0f>0.5) pcUp += 1.0f; } else { if ((float)up/3.0f<0.5) pcDown += 1.0f ; } } pcUp /= totalUp; pcDown /= (N-totalUp); d_upcount[totalUp]+=1; int c = d_upcount[totalUp]; if (c<1000) { d_down[totalUp] = (pcDown)/(float)c + (c-1)*d_down[totalUp]/(float)c; d_up[totalUp] = (pcUp)/(float)c + (c-1)*d_up[totalUp]/(float)c; } // res[blockIdx.y] = counter/float(t+1) + t*res[blockIdx.y]/float(t+1); } //res[t * gridDim.y + blockIdx.y] = counter; // if (t==0) // res[blockIdx.y] = counter; // else // res[blockIdx.y] = counter/float(t+1) + t*res[blockIdx.y]/float(t+1); } } __global__ void block_sum(const int *input, int *per_block_results, const size_t n) { extern __shared__ int sdata[]; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; 
// load input into __shared__ memory int x = 0; if(i < n) { x = input[i]; } sdata[threadIdx.x] = x; __syncthreads(); // contiguous range pattern for(int offset = blockDim.x / 2; offset > 0; offset >>= 1) { if(threadIdx.x < offset) { // add a partial sum upstream to our own sdata[threadIdx.x] += sdata[threadIdx.x + offset]; } // wait until all threads in the block hav // updated their partial sums __syncthreads(); } // thread 0 writes the final result if(threadIdx.x == 0) { per_block_results[blockIdx.x] = sdata[0]; } } void initRands(dim3 threadGrid, int numBlocks, hiprandState_t *state, int seed) { hipLaunchKernelGGL(( d_initRands), dim3(numBlocks), dim3(threadGrid) , 0, 0, state, seed); if (hipSuccess != hipGetLastError()) printf( "cuda error!\n" ); } void advanceTimestep(dim3 threadGrid, int numBlocks, hiprandState_t *rands, float* OU, float* wg, int* states, int N_x, float sw) { hipLaunchKernelGGL(( d_generateOU), dim3(numBlocks), dim3(threadGrid) , 0, 0, rands, OU, wg); if (hipSuccess != hipGetLastError()) printf( "cuda error!\n" ); hipLaunchKernelGGL(( d_updateStates), dim3(numBlocks), dim3(threadGrid) , 0, 0, states, OU, wg, N_x, rands, sw); if (hipSuccess != hipGetLastError()) printf( "cuda error!\n" ); } void countStates(int numThreads, int numBlocks, int* states, int* blockTotals, int N_ALL) { hipLaunchKernelGGL(( block_sum), dim3(numBlocks), dim3(numThreads), numThreads * sizeof(int) , 0, states, blockTotals, N_ALL); if (hipSuccess != hipGetLastError()) printf( "cuda error!\n" ); } void recordData(dim3 threadGrid, int numBlocks, int* states, int* net, int N_x, float* d_up, float* d_down, int* d_upcount, int* d_downcount, int t) { hipLaunchKernelGGL(( d_recordData), dim3(numBlocks), dim3(threadGrid) , 0, 0, states, net, N_x, d_up, d_down, d_upcount, d_downcount, t); if (hipSuccess != hipGetLastError()) printf( "cuda error!\n" ); }
1a4e73a25e73f847f4811a119f19d5d5f8f08573.cu
#include <curand_kernel.h> #include <stdio.h> #include "params.h" __device__ int getIndex(int t_x, int t_y) { // calculate full index from a grid position int indx = __mul24(blockIdx.x,blockDim.x) + t_x; return __mul24(t_y, __mul24(gridDim.x, blockDim.x)) + indx; } __global__ void d_initRands(curandState *state, int seed) { int id = getIndex(threadIdx.x, threadIdx.y); /* Each thread gets same seed, a different sequence * number, no offset */ curand_init(seed, id, 0, &state[id]); } __global__ void d_generateOU(curandState *state, float* ou_process, float* wg) { int id = getIndex(threadIdx.x, threadIdx.y); float last = ou_process[id]; float nm = curand_normal(&state[id]); float dt = 1.0f; float wgi = wg[id]; float lambda = 1.0f; float sigma = 1.0f; ou_process[id] = (last*exp(-wgi * dt)) + (lambda * (1 - exp(-wgi*dt))) + ( nm * sigma * sqrtf((1.0f-exp(-2.0f*wgi*dt))/(2.0f*wgi)) ); } __global__ void d_updateStates(int* states, float* ou_process, float* wg, int N_x, curandState* d_rands, float sw) { int id = getIndex(threadIdx.x, threadIdx.y); int edges=8; int neigh[8][2] = { { 1, 1 }, { 1, 0 }, { 1, -1 } , { 0, 1 }, { 0, -1 }, { -1, -1 } , { -1, 0 }, { -1, 1 } }; int deltan = 0; for (int e=0;e<edges;e++) { if (curand_uniform(&d_rands[id])<sw) { int n2 = curand_uniform(&d_rands[id])*8; if (n2==8) n2 = 0; int x_n = (((threadIdx.x + neigh[n2][0]) % N_x) + N_x) % N_x; int y_n = (((threadIdx.y + neigh[n2][1]) % N_x) + N_x) % N_x; int n2_id = getIndex(x_n, y_n); if (states[n2_id]>0.5) deltan++; } else { int x_n = curand_uniform(&d_rands[id])*N_x; int y_n = curand_uniform(&d_rands[id])*N_x; if (x_n==N_x) x_n = 0; if (y_n==N_x) y_n = 0; int n2_id = getIndex(x_n, y_n); if (states[n2_id]>0.5) deltan++; } } // deltan is N+ right now but we want (N+ - N-) deltan*=2; deltan-=edges; float pup = exp(-4.0f*wg[id]*ou_process[id]); float pall = pup*powf((1.0f - ws)/ws,deltan); int newState; if (pall<1.0f) newState = 1; else newState = 0; __syncthreads(); states[id] = newState; } __global__ void d_recordData(int* states, int* net, int N_x, float* d_up, float* d_down, int* d_upcount, int* d_downcount, int t) { int index_x = blockIdx.x * blockDim.x + threadIdx.x; int index_y = blockIdx.y * blockDim.y + threadIdx.y; int grid_width = gridDim.x * blockDim.x; int group_id = threadIdx.y * N_x + threadIdx.x; int N = N_x*N_x; if (group_id==0) { { int totalUp = 0; for (int i=0;i<N;i++) if (states[blockIdx.y * N + i] > 0) totalUp++; int nowDown = 0; float pcDown = 0.0f; float pcUp = 0.0f; for (int i=0;i<N;i++) { int pop_id = blockIdx.y*N + i; int up =0; for (int j=0;j<N;j++) if (net[pop_id * N + j]>0) if (states[blockIdx.y * N + j]>0) up++; if (states[blockIdx.y * N + i]>0) { if ((float)up/3.0f>0.5) pcUp += 1.0f; } else { if ((float)up/3.0f<0.5) pcDown += 1.0f ; } } pcUp /= totalUp; pcDown /= (N-totalUp); d_upcount[totalUp]+=1; int c = d_upcount[totalUp]; if (c<1000) { d_down[totalUp] = (pcDown)/(float)c + (c-1)*d_down[totalUp]/(float)c; d_up[totalUp] = (pcUp)/(float)c + (c-1)*d_up[totalUp]/(float)c; } // res[blockIdx.y] = counter/float(t+1) + t*res[blockIdx.y]/float(t+1); } //res[t * gridDim.y + blockIdx.y] = counter; // if (t==0) // res[blockIdx.y] = counter; // else // res[blockIdx.y] = counter/float(t+1) + t*res[blockIdx.y]/float(t+1); } } __global__ void block_sum(const int *input, int *per_block_results, const size_t n) { extern __shared__ int sdata[]; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; // load input into __shared__ memory int x = 0; if(i < n) { x = input[i]; } sdata[threadIdx.x] = x; 
__syncthreads(); // contiguous range pattern for(int offset = blockDim.x / 2; offset > 0; offset >>= 1) { if(threadIdx.x < offset) { // add a partial sum upstream to our own sdata[threadIdx.x] += sdata[threadIdx.x + offset]; } // wait until all threads in the block hav // updated their partial sums __syncthreads(); } // thread 0 writes the final result if(threadIdx.x == 0) { per_block_results[blockIdx.x] = sdata[0]; } } void initRands(dim3 threadGrid, int numBlocks, curandState *state, int seed) { d_initRands<<< numBlocks, threadGrid >>>(state, seed); if (cudaSuccess != cudaGetLastError()) printf( "cuda error!\n" ); } void advanceTimestep(dim3 threadGrid, int numBlocks, curandState *rands, float* OU, float* wg, int* states, int N_x, float sw) { d_generateOU<<< numBlocks, threadGrid >>>(rands, OU, wg); if (cudaSuccess != cudaGetLastError()) printf( "cuda error!\n" ); d_updateStates<<< numBlocks, threadGrid >>>(states, OU, wg, N_x, rands, sw); if (cudaSuccess != cudaGetLastError()) printf( "cuda error!\n" ); } void countStates(int numThreads, int numBlocks, int* states, int* blockTotals, int N_ALL) { block_sum<<< numBlocks, numThreads, numThreads * sizeof(int) >>>(states, blockTotals, N_ALL); if (cudaSuccess != cudaGetLastError()) printf( "cuda error!\n" ); } void recordData(dim3 threadGrid, int numBlocks, int* states, int* net, int N_x, float* d_up, float* d_down, int* d_upcount, int* d_downcount, int t) { d_recordData<<< numBlocks, threadGrid >>>(states, net, N_x, d_up, d_down, d_upcount, d_downcount, t); if (cudaSuccess != cudaGetLastError()) printf( "cuda error!\n" ); }
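This pair shows the device-side RNG mapping: curandState, curand_init, curand_normal, and curand_uniform become hiprandState_t, hiprand_init, hiprand_normal, and hiprand_uniform (pulled in through hiprand/hiprand_kernel.h), while the host error checks move from cudaSuccess/cudaGetLastError to hipSuccess/hipGetLastError. A minimal sketch of those mappings with illustrative kernels:

// Sketch of the cuRAND-to-hipRAND mapping visible in the pair above.
// Kernels and host bookkeeping are illustrative only.
#include "hip/hip_runtime.h"
#include <hiprand/hiprand_kernel.h>
#include <cstdio>

__global__ void initStates(hiprandState_t *state, int seed)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    // CUDA original: curand_init(seed, id, 0, &state[id]);
    hiprand_init(seed, id, 0, &state[id]);
}

__global__ void drawNormals(hiprandState_t *state, float *out)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    // CUDA original: out[id] = curand_normal(&state[id]);
    out[id] = hiprand_normal(&state[id]);
}

void checkLastLaunch()
{
    // CUDA original: if (cudaSuccess != cudaGetLastError()) printf(...);
    if (hipSuccess != hipGetLastError())
        printf("hip error!\n");
}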
3043e2166aa41a11005e85056cffba61ee4296c8.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include "hip/hip_runtime.h" #include <assert.h> #define N 2//64 __device__ int bar(float* A) { if(threadIdx.x != 0) { return 0; } return 1; } __global__ void foo(float* A) { int y = bar(A); A[threadIdx.x]=y; } int main(void){ int i; float *A; float *dev_A; float size= N*sizeof(float); A=(float*)malloc(size); for(i=0;i<N;i++) A[i]=2; hipMalloc((void**)&dev_A,size); hipMemcpy(dev_A, A,size, hipMemcpyHostToDevice); //foo<<<1,N>>>(dev_A); ESBMC_verify_kernel_f(foo,1,N,dev_A); hipMemcpy(A,dev_A,size,hipMemcpyDeviceToHost); for(i=0;i<N;i++){ if (i!=0) assert(A[i]==0); else{ assert(A[i]==1); } } free(A); hipFree(dev_A); return 0; }
3043e2166aa41a11005e85056cffba61ee4296c8.cu
#include <stdio.h> #include <stdlib.h> #include "cuda.h" #include <assert.h> #define N 2//64 __device__ int bar(float* A) { if(threadIdx.x != 0) { return 0; } return 1; } __global__ void foo(float* A) { int y = bar(A); A[threadIdx.x]=y; } int main(void){ int i; float *A; float *dev_A; float size= N*sizeof(float); A=(float*)malloc(size); for(i=0;i<N;i++) A[i]=2; cudaMalloc((void**)&dev_A,size); cudaMemcpy(dev_A, A,size, cudaMemcpyHostToDevice); //foo<<<1,N>>>(dev_A); ESBMC_verify_kernel_f(foo,1,N,dev_A); cudaMemcpy(A,dev_A,size,cudaMemcpyDeviceToHost); for(i=0;i<N;i++){ if (i!=0) assert(A[i]==0); else{ assert(A[i]==1); } } free(A); cudaFree(dev_A); return 0; }
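In this pair the kernels are untouched; hipify swaps cuda.h for hip/hip_runtime.h and maps the host memory API one-to-one: cudaMalloc, cudaMemcpy (with cudaMemcpyHostToDevice / cudaMemcpyDeviceToHost), and cudaFree become hipMalloc, hipMemcpy, and hipFree with identical signatures. A minimal sketch of that host-side mapping with an illustrative buffer round trip:

// Sketch of the host memory-API mapping shown in the pair above.
// Buffer size and the omitted kernel work are illustrative only.
#include "hip/hip_runtime.h"

void roundTrip(float *hostBuf, int n)
{
    float *devBuf = nullptr;
    size_t bytes = n * sizeof(float);

    // CUDA: cudaMalloc((void**)&devBuf, bytes);
    hipMalloc((void**)&devBuf, bytes);

    // CUDA: cudaMemcpy(devBuf, hostBuf, bytes, cudaMemcpyHostToDevice);
    hipMemcpy(devBuf, hostBuf, bytes, hipMemcpyHostToDevice);

    // ... a kernel launch would go here ...

    // CUDA: cudaMemcpy(hostBuf, devBuf, bytes, cudaMemcpyDeviceToHost);
    hipMemcpy(hostBuf, devBuf, bytes, hipMemcpyDeviceToHost);

    // CUDA: cudaFree(devBuf);
    hipFree(devBuf);
}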
02b60d9200c74c3c160ea7eb7561822ec0d13d42.hip
// !!! This is a file automatically generated by hipify!!! #include <bm4d-gpu/fft_bm4d_tools.cuh> // int fft_bm4d_tools::N = 4; // int fft_bm4d_tools::xm = 2; // int fft_bm4d_tools::ym = 2; // int fft_bm4d_tools::zm = 2; // int fft_bm4d_tools::B = 8; // __device__ void init_patch_n(int N_) { // N = N_; // xm = N_ / 2; // ym = N_ / 2; // zm = N_ / 2; // return; // } // __device__ void init_bandwidth(int B_) { // B = B_; // } __device__ __constant__ int fftshift[64] = {42, 43, 40, 41, 46, 47, 44, 45, 34, 35, 32, 33, 38, 39, 36, 37, 58, 59, 56, 57, 62, 63, 60, 61, 50, 51, 48, 49, 54, 55, 52, 53, 10, 11, 8, 9, 14, 15, 12, 13, 2, 3, 0, 1, 6, 7, 4, 5, 26, 27, 24, 25, 30, 31, 28, 29, 18, 19, 16, 17, 22, 23, 20, 21}; //In-place 3-d fftshift __device__ void fft_bm4d_tools::fftshift_3d_in(float* data_re, float* data_im) { float tmp_re, tmp_im; for (int i = 0; i < N * N * N / 2; i++) { tmp_re = data_re[i]; tmp_im = data_im[i]; data_re[i] = data_re[fftshift[i]]; data_re[i] = data_re[fftshift[i]]; data_re[fftshift[i]] = tmp_re; data_im[fftshift[i]] = tmp_im; } return; // int target_x, target_y, target_z; // float temp_re, temp_im; // //(*NEED TO ADD*) hipMalloc needed? // for (int x = 0; x < N; x++) // for (int y = 0; y < N; y++) // for (int z = 0; z < N; z++) { // temp_re = data_re[x * N * N + y * N + z]; // temp_im = data_im[x * N * N + y * N + z]; // if (x < xm) { // if (y < ym) { // if (z < zm) { // // Section 1 // target_x = x + xm; // target_y = y + ym; // target_z = z + zm; // // Equivalent to: // // data[x][y][z] = data[target_x][target_y][target_z]; // // data[target_x][target_y][target_z] = temp; // data_re[x * N * N + y * N + z] = // data_re[target_x * N * N + target_y * N + target_z]; // data_re[target_x * N * N + target_y * N + target_z] = temp_re; // data_im[x * N * N + y * N + z] = // data_im[target_x * N * N + target_y * N + target_z]; // data_im[target_x * N * N + target_y * N + target_z] = temp_im; // } // else { // //section 2 // target_x = x + xm; // target_y = y + ym; // target_z = z - zm; // data_re[x * N * N + y * N + z] = // data_re[target_x * N * N + target_y * N + target_z]; // data_re[target_x * N * N + target_y * N + target_z] = temp_re; // data_im[x * N * N + y * N + z] = // data_im[target_x * N * N + target_y * N + target_z]; // data_im[target_x * N * N + target_y * N + target_z] = temp_im; // } // } // else { // if (z < zm) { // // Section 3 // target_x = x + xm; // target_y = y + ym; // target_z = z + zm; // data_re[x * N * N + y * N + z] = // data_re[target_x * N * N + target_y * N + target_z]; // data_re[target_x * N * N + target_y * N + target_z] = temp_re; // data_im[x * N * N + y * N + z] = // data_im[target_x * N * N + target_y * N + target_z]; // data_im[target_x * N * N + target_y * N + target_z] = temp_im; // } // else { // //section 4 // target_x = x + xm; // target_y = y - ym; // target_z = z - zm; // data_re[x * N * N + y * N + z] = // data_re[target_x * N * N + target_y * N + target_z]; // data_re[target_x * N * N + target_y * N + target_z] = temp_re; // data_im[x * N * N + y * N + z] = // data_im[target_x * N * N + target_y * N + target_z]; // data_im[target_x * N * N + target_y * N + target_z] = temp_im; // } // } // } // } // //(*NEED TO ADD*) Free temp if hipMalloc is required above // return; } //In-place 3-d ifftshift __device__ void fft_bm4d_tools::ifftshift_3d_in(float* data_re, float* data_im) { if (N % 2 != 0 || N % 2 != 0 || N % 2 != 0) printf("Error: UNIMPLEMENTED\n"); fftshift_3d_in(data_re, data_im); } // __device__ void stockham(float x_re[], 
float x_im[], int n, int flag, int n2, float y_re[], float y_im[]) __device__ void stockham(float x_re[], float x_im[], int n, int flag, int n2, float y_re[], float y_im[]) { float *y_orig_re, *y_orig_im, *tmp_re, *tmp_im; int i, j, k, k2, Ls, r, jrs; int half, m, m2; float wr, wi, tr, ti; y_orig_re = y_re; y_orig_im = y_im; r = half = n >> 1; Ls = 1; /* Ls=L/2 is the L star */ while(r >= n2) { /* loops log2(n/n2) times */ tmp_re = x_re; /* swap pointers, y is always old */ tmp_im = x_im; x_re = y_re; /* x is always for new data */ x_im = y_im; y_re = tmp_re; y_im = tmp_im; m = 0; /* m runs over first half of the array */ m2 = half; /* m2 for second half, n2=n/2 */ for(j = 0; j < Ls; ++j) { wr = cos(M_PI*j/Ls); /* real and imaginary part */ wi = -flag * sin(M_PI*j/Ls); /* of the omega */ jrs = j*(r+r); for(k = jrs; k < jrs+r; ++k) { /* "butterfly" operation */ k2 = k + r; tr = wr*y_re[k2] - wi*y_im[k2]; /* complex multiply, w*y */ ti = wr*y_im[k2] + wi*y_re[k2]; x_re[m] = y_re[k] + tr; x_im[m] = y_im[k] + ti; x_re[m2] = y_re[k] - tr; x_im[m2] = y_im[k] - ti; ++m; ++m2; } } r >>= 1; Ls <<= 1; }; if (y_re != y_orig_re) { /* copy back to permanent memory */ for(i = 0; i < n; ++i) { /* if it is not already there */ y_re[i] = x_re[i]; /* performed only if log2(n/n2) is odd */ } } if (y_im != y_orig_im) { /* copy back to permanent memory */ for(i = 0; i < n; ++i) { /* if it is not already there */ y_im[i] = x_im[i]; /* performed only if log2(n/n2) is odd */ } } //assert(Ls == n/n2); /* ensure n is a power of 2 */ //assert(1 == n || m2 == n); /* check array index within bound */ } /* The Cooley-Tukey multiple column algorithm, see page 124 of Loan. x[] is input data, overwritten by output, viewed as n/n2 by n2 array. flag = 1 for forward and -1 for backward transform. */ __device__ void cooley_tukey(float x_re[], float x_im[], int n, int flag, int n2) { float c_re, c_im; int i, j, k, m, p, n1; int Ls, ks, ms, jm, dk; float wr, wi, tr, ti; n1 = n/n2; /* do bit reversal permutation */ for(k = 0; k < n1; ++k) { /* This is algorithms 1.5.1 and 1.5.2. 
*/ j = 0; m = k; p = 1; /* p = 2^q, q used in the book */ while(p < n1) { j = 2*j + (m&1); m >>= 1; p <<= 1; } //assert(p == n1); /* make sure n1 is a power of two */ if(j > k) { for(i = 0; i < n2; ++i) { /* swap k <-> j row */ // c = x[k*n2+i]; /* for all columns */ // x[k*n2+i] = x[j*n2+i]; // x[j*n2+i] = c; c_re = x_re[k * n2 + i]; c_im = x_im[k * n2 + i]; x_re[k * n2 + i] = x_re[j * n2 + i]; x_im[k * n2 + i] = x_im[j * n2 + i]; x_re[j * n2 + i] = c_re; x_im[j * n2 + i] = c_im; } } } /* This is (3.1.7), page 124 */ p = 1; while(p < n1) { Ls = p; p <<= 1; jm = 0; /* jm is j*n2 */ dk = p*n2; for(j = 0; j < Ls; ++j) { wr = cos(M_PI*j/Ls); /* real and imaginary part */ wi = -flag * sin(M_PI*j/Ls); /* of the omega */ for(k = jm; k < n; k += dk) { /* "butterfly" */ ks = k + Ls*n2; for(i = 0; i < n2; ++i) { /* for each row */ m = k + i; ms = ks + i; // tr = wr*x[ms].Re - wi*x[ms].Im; // ti = wr*x[ms].Im + wi*x[ms].Re; // x[ms].Re = x[m].Re - tr; // x[ms].Im = x[m].Im - ti; // x[m].Re += tr; // x[m].Im += ti; tr = wr * x_re[ms] - wi * x_im[ms]; ti = wr * x_im[ms] + wi * x_re[ms]; x_re[ms] = x_re[m] - tr; x_im[ms] = x_im[m] - ti; x_re[m] += tr; x_im[m] += ti; } } jm += n2; } } } __device__ void clear_buffer(float* buf, int size) { for (int i = 0 ; i < size; i++) { buf[i] = 0.0; } } __device__ void fft3D_helper(float x_re[], float x_im[], float y_re[], float y_im[], int n1, int n2, int n3, int flag) { // float *y_re, *y_im; int i, n, n23; //assert(1 == flag || -1 == flag); n23 = n2*n3; n = n1*n23; // y_re = (float*) malloc( n23*sizeof(float) ); // y_im = (float*) malloc( n23*sizeof(float) ); //assert(NULL != y_re); //assert(NULL != y_im); for(i=0; i < n; i += n3) { /* FFT in z */ stockham(x_re+i, x_im+i, n3, flag, 1, y_re, y_im); } for(i=0; i < n; i += n23) { /* FFT in y */ stockham(x_re+i, x_im+i, n23, flag, n3, y_re, y_im); } clear_buffer(y_re, n23); clear_buffer(y_im, n23); cooley_tukey(x_re, x_im, n, flag, n23); /* FFT in x */ } //Apply 3-d fft on hipfftComplex data in place //buf_re and buf_im are used to buffer data in stockam: each of size n23 = N^2 __device__ void fft_bm4d_tools::fft_3d(float* data_re, float* data_im, float* buf_re, float* buf_im) { fft3D_helper(data_re, data_im, buf_re, buf_im, N, N, N, FORWARD); } //Calculate the absolute value of complex array //Data_im will not be needed after this function __device__ void fft_bm4d_tools::complex_abs(float* data_re, float* data_im) { for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) for (int k = 0; k < N; k++) { data_re[N * N * i + N * j + k] = sqrt( pow(data_re[N * N * i + N * j + k], 2) + pow(data_im[N * N * i + N * j + k], 2) ); data_im[N * N * i + N * j + k] = 0.0; } } // calculate x from theta, phi __device__ float x_spherical(float theta, float phi) { return cos(phi) * cos(theta); } // calculate y from theta, phi __device__ float y_spherical(float theta, float phi) { return sin(phi) * sin(phi); } // calculate z from theta, phi __device__ float z_spherical(float theta) { return cos(theta); } // calculate the trilinear interpolation at coordinate x, y, z // the complex part of data should all be zero __device__ float trilinear_interpolation(float* data_re, float x, float y, float z, int N) { int x0 = int(x); int y0 = int(y); int z0 = int(z); float xd = x - floor(x); float yd = y - floor(y); float zd = z - floor(z); int x1 = int(ceil(x)); int y1 = int(ceil(y)); int z1 = int(ceil(z)); // c000, c001 float c00 = data_re[x0 * N * N + y0 * N + z0] * (1 - xd) + data_re[x1 * N * N + y0 * N + z0] * xd; // c001, c101 float c01 = 
data_re[x0 * N * N + y0 * N + z1] * (1 - xd) + data_re[x1 * N * N + y0 * N + z1] * xd; // c010, c110 float c10 = data_re[x0 * N * N + y1 * N + z0] * (1 - xd) + data_re[x1 * N * N + y1 * N + z0] * xd; // c011, c111 float c11 = data_re[x0 * N * N + y1 * N + z1] * (1 - xd) + data_re[x1 * N * N + y1 * N + z1] * xd; float c0 = c00 * (1 - yd) + c10 * yd; float c1 = c01 * (1 - yd) + c11 * yd; // result of trilinear interpolation float c = c0 * (1 - zd) + c1 * zd; return c; } //Map volume data to spherical coordinates and do the integration __device__ void fft_bm4d_tools::spherical_mapping(double* maga_re, double* maga_im, float* data_re) { float theta, phi; float x, y, z; float sum; int maga_idx; float interpol_val; for (int theta_idx = 0; theta_idx < 2 * B; theta_idx++) { for (int phi_idx = 0; phi_idx < 2 * B; phi_idx++) { theta = M_PI * (2 * theta_idx + 1) / (4 * B); phi = M_PI * phi_idx / B; maga_idx = theta_idx * 2 * B + phi_idx; x = x_spherical(theta, phi); y = y_spherical(theta, phi); z = z_spherical(theta); sum = 0; for (float rho = 0.5; rho < 2; rho+=1.0) { interpol_val = trilinear_interpolation(data_re, x * rho + xm, y * rho + ym, z * rho + zm, N ); sum += fabs(interpol_val); } maga_re[maga_idx] = sum; maga_im[maga_idx] = 0.0; } } }
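// Editor's note (hedged observation on fftshift_3d_in above): inside the swap loop the line
//   data_re[i] = data_re[fftshift[i]];
// appears twice and data_im[i] is never overwritten, so after the loop the real part is
// correctly shifted but the imaginary samples end up duplicated (second half overwritten
// with the old first half) rather than swapped. A minimal sketch of the loop as a full
// complex swap, assuming the same N and the same fftshift[] permutation table as the file:
__device__ void fftshift_3d_in_sketch(float* data_re, float* data_im,
                                      const int* shift_idx, int N)
{
    // shift_idx[i] pairs index i in the first half of the N^3 cube with its shifted
    // partner in the second half, so one pass over N*N*N/2 entries swaps every element once.
    for (int i = 0; i < N * N * N / 2; ++i) {
        int j = shift_idx[i];
        float tmp_re = data_re[i];
        float tmp_im = data_im[i];
        data_re[i] = data_re[j];
        data_im[i] = data_im[j];
        data_re[j] = tmp_re;
        data_im[j] = tmp_im;
    }
}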
02b60d9200c74c3c160ea7eb7561822ec0d13d42.cu
#include <bm4d-gpu/fft_bm4d_tools.cuh> // int fft_bm4d_tools::N = 4; // int fft_bm4d_tools::xm = 2; // int fft_bm4d_tools::ym = 2; // int fft_bm4d_tools::zm = 2; // int fft_bm4d_tools::B = 8; // __device__ void init_patch_n(int N_) { // N = N_; // xm = N_ / 2; // ym = N_ / 2; // zm = N_ / 2; // return; // } // __device__ void init_bandwidth(int B_) { // B = B_; // } __device__ __constant__ int fftshift[64] = {42, 43, 40, 41, 46, 47, 44, 45, 34, 35, 32, 33, 38, 39, 36, 37, 58, 59, 56, 57, 62, 63, 60, 61, 50, 51, 48, 49, 54, 55, 52, 53, 10, 11, 8, 9, 14, 15, 12, 13, 2, 3, 0, 1, 6, 7, 4, 5, 26, 27, 24, 25, 30, 31, 28, 29, 18, 19, 16, 17, 22, 23, 20, 21}; //In-place 3-d fftshift __device__ void fft_bm4d_tools::fftshift_3d_in(float* data_re, float* data_im) { float tmp_re, tmp_im; for (int i = 0; i < N * N * N / 2; i++) { tmp_re = data_re[i]; tmp_im = data_im[i]; data_re[i] = data_re[fftshift[i]]; data_re[i] = data_re[fftshift[i]]; data_re[fftshift[i]] = tmp_re; data_im[fftshift[i]] = tmp_im; } return; // int target_x, target_y, target_z; // float temp_re, temp_im; // //(*NEED TO ADD*) cudaMalloc needed? // for (int x = 0; x < N; x++) // for (int y = 0; y < N; y++) // for (int z = 0; z < N; z++) { // temp_re = data_re[x * N * N + y * N + z]; // temp_im = data_im[x * N * N + y * N + z]; // if (x < xm) { // if (y < ym) { // if (z < zm) { // // Section 1 // target_x = x + xm; // target_y = y + ym; // target_z = z + zm; // // Equivalent to: // // data[x][y][z] = data[target_x][target_y][target_z]; // // data[target_x][target_y][target_z] = temp; // data_re[x * N * N + y * N + z] = // data_re[target_x * N * N + target_y * N + target_z]; // data_re[target_x * N * N + target_y * N + target_z] = temp_re; // data_im[x * N * N + y * N + z] = // data_im[target_x * N * N + target_y * N + target_z]; // data_im[target_x * N * N + target_y * N + target_z] = temp_im; // } // else { // //section 2 // target_x = x + xm; // target_y = y + ym; // target_z = z - zm; // data_re[x * N * N + y * N + z] = // data_re[target_x * N * N + target_y * N + target_z]; // data_re[target_x * N * N + target_y * N + target_z] = temp_re; // data_im[x * N * N + y * N + z] = // data_im[target_x * N * N + target_y * N + target_z]; // data_im[target_x * N * N + target_y * N + target_z] = temp_im; // } // } // else { // if (z < zm) { // // Section 3 // target_x = x + xm; // target_y = y + ym; // target_z = z + zm; // data_re[x * N * N + y * N + z] = // data_re[target_x * N * N + target_y * N + target_z]; // data_re[target_x * N * N + target_y * N + target_z] = temp_re; // data_im[x * N * N + y * N + z] = // data_im[target_x * N * N + target_y * N + target_z]; // data_im[target_x * N * N + target_y * N + target_z] = temp_im; // } // else { // //section 4 // target_x = x + xm; // target_y = y - ym; // target_z = z - zm; // data_re[x * N * N + y * N + z] = // data_re[target_x * N * N + target_y * N + target_z]; // data_re[target_x * N * N + target_y * N + target_z] = temp_re; // data_im[x * N * N + y * N + z] = // data_im[target_x * N * N + target_y * N + target_z]; // data_im[target_x * N * N + target_y * N + target_z] = temp_im; // } // } // } // } // //(*NEED TO ADD*) Free temp if cudaMalloc is required above // return; } //In-place 3-d ifftshift __device__ void fft_bm4d_tools::ifftshift_3d_in(float* data_re, float* data_im) { if (N % 2 != 0 || N % 2 != 0 || N % 2 != 0) printf("Error: UNIMPLEMENTED\n"); fftshift_3d_in(data_re, data_im); } // __device__ void stockham(float x_re[], float x_im[], int n, int flag, int n2, float y_re[], 
float y_im[]) __device__ void stockham(float x_re[], float x_im[], int n, int flag, int n2, float y_re[], float y_im[]) { float *y_orig_re, *y_orig_im, *tmp_re, *tmp_im; int i, j, k, k2, Ls, r, jrs; int half, m, m2; float wr, wi, tr, ti; y_orig_re = y_re; y_orig_im = y_im; r = half = n >> 1; Ls = 1; /* Ls=L/2 is the L star */ while(r >= n2) { /* loops log2(n/n2) times */ tmp_re = x_re; /* swap pointers, y is always old */ tmp_im = x_im; x_re = y_re; /* x is always for new data */ x_im = y_im; y_re = tmp_re; y_im = tmp_im; m = 0; /* m runs over first half of the array */ m2 = half; /* m2 for second half, n2=n/2 */ for(j = 0; j < Ls; ++j) { wr = cos(M_PI*j/Ls); /* real and imaginary part */ wi = -flag * sin(M_PI*j/Ls); /* of the omega */ jrs = j*(r+r); for(k = jrs; k < jrs+r; ++k) { /* "butterfly" operation */ k2 = k + r; tr = wr*y_re[k2] - wi*y_im[k2]; /* complex multiply, w*y */ ti = wr*y_im[k2] + wi*y_re[k2]; x_re[m] = y_re[k] + tr; x_im[m] = y_im[k] + ti; x_re[m2] = y_re[k] - tr; x_im[m2] = y_im[k] - ti; ++m; ++m2; } } r >>= 1; Ls <<= 1; }; if (y_re != y_orig_re) { /* copy back to permanent memory */ for(i = 0; i < n; ++i) { /* if it is not already there */ y_re[i] = x_re[i]; /* performed only if log2(n/n2) is odd */ } } if (y_im != y_orig_im) { /* copy back to permanent memory */ for(i = 0; i < n; ++i) { /* if it is not already there */ y_im[i] = x_im[i]; /* performed only if log2(n/n2) is odd */ } } //assert(Ls == n/n2); /* ensure n is a power of 2 */ //assert(1 == n || m2 == n); /* check array index within bound */ } /* The Cooley-Tukey multiple column algorithm, see page 124 of Loan. x[] is input data, overwritten by output, viewed as n/n2 by n2 array. flag = 1 for forward and -1 for backward transform. */ __device__ void cooley_tukey(float x_re[], float x_im[], int n, int flag, int n2) { float c_re, c_im; int i, j, k, m, p, n1; int Ls, ks, ms, jm, dk; float wr, wi, tr, ti; n1 = n/n2; /* do bit reversal permutation */ for(k = 0; k < n1; ++k) { /* This is algorithms 1.5.1 and 1.5.2. 
*/ j = 0; m = k; p = 1; /* p = 2^q, q used in the book */ while(p < n1) { j = 2*j + (m&1); m >>= 1; p <<= 1; } //assert(p == n1); /* make sure n1 is a power of two */ if(j > k) { for(i = 0; i < n2; ++i) { /* swap k <-> j row */ // c = x[k*n2+i]; /* for all columns */ // x[k*n2+i] = x[j*n2+i]; // x[j*n2+i] = c; c_re = x_re[k * n2 + i]; c_im = x_im[k * n2 + i]; x_re[k * n2 + i] = x_re[j * n2 + i]; x_im[k * n2 + i] = x_im[j * n2 + i]; x_re[j * n2 + i] = c_re; x_im[j * n2 + i] = c_im; } } } /* This is (3.1.7), page 124 */ p = 1; while(p < n1) { Ls = p; p <<= 1; jm = 0; /* jm is j*n2 */ dk = p*n2; for(j = 0; j < Ls; ++j) { wr = cos(M_PI*j/Ls); /* real and imaginary part */ wi = -flag * sin(M_PI*j/Ls); /* of the omega */ for(k = jm; k < n; k += dk) { /* "butterfly" */ ks = k + Ls*n2; for(i = 0; i < n2; ++i) { /* for each row */ m = k + i; ms = ks + i; // tr = wr*x[ms].Re - wi*x[ms].Im; // ti = wr*x[ms].Im + wi*x[ms].Re; // x[ms].Re = x[m].Re - tr; // x[ms].Im = x[m].Im - ti; // x[m].Re += tr; // x[m].Im += ti; tr = wr * x_re[ms] - wi * x_im[ms]; ti = wr * x_im[ms] + wi * x_re[ms]; x_re[ms] = x_re[m] - tr; x_im[ms] = x_im[m] - ti; x_re[m] += tr; x_im[m] += ti; } } jm += n2; } } } __device__ void clear_buffer(float* buf, int size) { for (int i = 0 ; i < size; i++) { buf[i] = 0.0; } } __device__ void fft3D_helper(float x_re[], float x_im[], float y_re[], float y_im[], int n1, int n2, int n3, int flag) { // float *y_re, *y_im; int i, n, n23; //assert(1 == flag || -1 == flag); n23 = n2*n3; n = n1*n23; // y_re = (float*) malloc( n23*sizeof(float) ); // y_im = (float*) malloc( n23*sizeof(float) ); //assert(NULL != y_re); //assert(NULL != y_im); for(i=0; i < n; i += n3) { /* FFT in z */ stockham(x_re+i, x_im+i, n3, flag, 1, y_re, y_im); } for(i=0; i < n; i += n23) { /* FFT in y */ stockham(x_re+i, x_im+i, n23, flag, n3, y_re, y_im); } clear_buffer(y_re, n23); clear_buffer(y_im, n23); cooley_tukey(x_re, x_im, n, flag, n23); /* FFT in x */ } //Apply 3-d fft on cufftComplex data in place //buf_re and buf_im are used to buffer data in stockam: each of size n23 = N^2 __device__ void fft_bm4d_tools::fft_3d(float* data_re, float* data_im, float* buf_re, float* buf_im) { fft3D_helper(data_re, data_im, buf_re, buf_im, N, N, N, FORWARD); } //Calculate the absolute value of complex array //Data_im will not be needed after this function __device__ void fft_bm4d_tools::complex_abs(float* data_re, float* data_im) { for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) for (int k = 0; k < N; k++) { data_re[N * N * i + N * j + k] = sqrt( pow(data_re[N * N * i + N * j + k], 2) + pow(data_im[N * N * i + N * j + k], 2) ); data_im[N * N * i + N * j + k] = 0.0; } } // calculate x from theta, phi __device__ float x_spherical(float theta, float phi) { return cos(phi) * cos(theta); } // calculate y from theta, phi __device__ float y_spherical(float theta, float phi) { return sin(phi) * sin(phi); } // calculate z from theta, phi __device__ float z_spherical(float theta) { return cos(theta); } // calculate the trilinear interpolation at coordinate x, y, z // the complex part of data should all be zero __device__ float trilinear_interpolation(float* data_re, float x, float y, float z, int N) { int x0 = int(x); int y0 = int(y); int z0 = int(z); float xd = x - floor(x); float yd = y - floor(y); float zd = z - floor(z); int x1 = int(ceil(x)); int y1 = int(ceil(y)); int z1 = int(ceil(z)); // c000, c001 float c00 = data_re[x0 * N * N + y0 * N + z0] * (1 - xd) + data_re[x1 * N * N + y0 * N + z0] * xd; // c001, c101 float c01 = 
data_re[x0 * N * N + y0 * N + z1] * (1 - xd) + data_re[x1 * N * N + y0 * N + z1] * xd; // c010, c110 float c10 = data_re[x0 * N * N + y1 * N + z0] * (1 - xd) + data_re[x1 * N * N + y1 * N + z0] * xd; // c011, c111 float c11 = data_re[x0 * N * N + y1 * N + z1] * (1 - xd) + data_re[x1 * N * N + y1 * N + z1] * xd; float c0 = c00 * (1 - yd) + c10 * yd; float c1 = c01 * (1 - yd) + c11 * yd; // result of trilinear interpolation float c = c0 * (1 - zd) + c1 * zd; return c; } //Map volume data to spherical coordinates and do the integration __device__ void fft_bm4d_tools::spherical_mapping(double* maga_re, double* maga_im, float* data_re) { float theta, phi; float x, y, z; float sum; int maga_idx; float interpol_val; for (int theta_idx = 0; theta_idx < 2 * B; theta_idx++) { for (int phi_idx = 0; phi_idx < 2 * B; phi_idx++) { theta = M_PI * (2 * theta_idx + 1) / (4 * B); phi = M_PI * phi_idx / B; maga_idx = theta_idx * 2 * B + phi_idx; x = x_spherical(theta, phi); y = y_spherical(theta, phi); z = z_spherical(theta); sum = 0; for (float rho = 0.5; rho < 2; rho+=1.0) { interpol_val = trilinear_interpolation(data_re, x * rho + xm, y * rho + ym, z * rho + zm, N ); sum += fabs(interpol_val); } maga_re[maga_idx] = sum; maga_im[maga_idx] = 0.0; } } }
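// The hard-coded __constant__ fftshift[64] table above corresponds to a half-period
// circular shift of a 4x4x4 volume stored row-major as x*N*N + y*N + z, the same
// indexing the rest of the file uses. A small host-side sketch that regenerates the
// table (useful as a check if N ever changes); names and layout here are illustrative.
#include <cstdio>

int main() {
    const int N = 4, h = N / 2;
    for (int x = 0; x < N; ++x)
        for (int y = 0; y < N; ++y)
            for (int z = 0; z < N; ++z) {
                // linear index of the element that lands at (x, y, z) after the shift
                int src = ((x + h) % N) * N * N + ((y + h) % N) * N + (z + h) % N;
                printf("%d, ", src);   // prints 42, 43, 40, 41, 46, 47, 44, 45, ...
            }
    printf("\n");
    return 0;
}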
731d95c0aad081bba0e30f5c0c524818a8c9bfe8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/copy_kernel.h" #include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/linspace_kernel.h" namespace phi { template <typename T> __global__ void LinspaceKernelInner( T start, T stop, double step, int64_t size, T* out) { int64_t index = blockIdx.x * blockDim.x + threadIdx.x; for (; index < size; index += blockDim.x * gridDim.x) { if (index < size / 2) { out[index] = static_cast<T>(start + step * index); } else { out[index] = static_cast<T>(stop - step * (size - index - 1)); } } } template <typename T> __global__ void LinspaceSpecialKernel(T start, T* out) { out[0] = static_cast<T>(start); } template <typename T, typename Context> T GetValue(const Context& ctx, const DenseTensor& x) { T value = static_cast<T>(0); if (x.place() != CPUPlace()) { DenseTensor cpu_x; Copy(ctx, x, CPUPlace(), true, &cpu_x); value = cpu_x.data<T>()[0]; } else { value = x.data<T>()[0]; } return value; } template <typename T, typename Context> T GetValueOfExpectedType(const Context& ctx, const DenseTensor& x) { switch (x.dtype()) { case DataType::FLOAT32: return static_cast<T>(GetValue<float, Context>(ctx, x)); case DataType::FLOAT64: return static_cast<T>(GetValue<double, Context>(ctx, x)); case DataType::INT32: return static_cast<T>(GetValue<int32_t, Context>(ctx, x)); case DataType::INT64: return static_cast<T>(GetValue<int64_t, Context>(ctx, x)); case DataType::FLOAT16: return static_cast<T>(GetValue<phi::dtype::float16, Context>(ctx, x)); case DataType::BFLOAT16: return static_cast<T>(GetValue<phi::dtype::bfloat16, Context>(ctx, x)); case DataType::BOOL: return static_cast<T>(GetValue<bool, Context>(ctx, x)); case DataType::INT16: return static_cast<T>(GetValue<int16_t, Context>(ctx, x)); case DataType::UINT8: return static_cast<T>(GetValue<uint8_t, Context>(ctx, x)); default: PADDLE_THROW(phi::errors::Unimplemented( "Data type (%s) is not supported when casting data type.", x.dtype())); } } template <typename T, typename Context> void LinspaceKernel(const Context& ctx, const DenseTensor& start, const DenseTensor& stop, const DenseTensor& number, DataType dtype, DenseTensor* out) { T start_value = GetValueOfExpectedType<T, Context>(ctx, start); T stop_value = GetValueOfExpectedType<T, Context>(ctx, stop); int64_t num = GetValueOfExpectedType<int64_t, Context>(ctx, number); PADDLE_ENFORCE_GT( num, 0, phi::errors::InvalidArgument("The num of linspace op should be larger " "than 0, but received num is %d", num)); out->Resize(phi::make_ddim({num})); T* out_data = ctx.template Alloc<T>(out); auto stream = ctx.stream(); if (num != 1) { int block = 512; int grid = (num + block - 1) / block; double step = 
(static_cast<double>(stop_value - start_value)) / (num - 1); hipLaunchKernelGGL(( LinspaceKernelInner<T>), dim3(grid), dim3(block), 0, stream, start_value, stop_value, step, num, out_data); } else { hipLaunchKernelGGL(( LinspaceSpecialKernel<T>), dim3(1), dim3(1), 0, stream, start_value, out_data); } } } // namespace phi PD_REGISTER_KERNEL(linspace, GPU, ALL_LAYOUT, phi::LinspaceKernel, float, int32_t, int64_t, double) { kernel->InputAt(0).SetBackend(phi::Backend::ALL_BACKEND); kernel->InputAt(1).SetBackend(phi::Backend::ALL_BACKEND); kernel->InputAt(2).SetBackend(phi::Backend::ALL_BACKEND); }
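// LinspaceKernelInner above fills the first half of the output as start + step*i and the
// second half as stop - step*(size - i - 1), so index 0 and index size-1 land exactly on
// start and stop regardless of rounding in step. A host-side reference with the same
// two-sided indexing, handy for checking the kernel; the name is hypothetical, this is
// not part of the Paddle kernel itself.
#include <vector>

std::vector<float> linspace_ref(float start, float stop, long long size) {
    std::vector<float> out(size);
    if (size == 1) { out[0] = start; return out; }
    double step = (static_cast<double>(stop) - static_cast<double>(start)) / (size - 1);
    for (long long i = 0; i < size; ++i) {
        out[i] = (i < size / 2)
                     ? static_cast<float>(start + step * i)
                     : static_cast<float>(stop - step * (size - i - 1));
    }
    return out;
}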
731d95c0aad081bba0e30f5c0c524818a8c9bfe8.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/copy_kernel.h" #include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/linspace_kernel.h" namespace phi { template <typename T> __global__ void LinspaceKernelInner( T start, T stop, double step, int64_t size, T* out) { int64_t index = blockIdx.x * blockDim.x + threadIdx.x; for (; index < size; index += blockDim.x * gridDim.x) { if (index < size / 2) { out[index] = static_cast<T>(start + step * index); } else { out[index] = static_cast<T>(stop - step * (size - index - 1)); } } } template <typename T> __global__ void LinspaceSpecialKernel(T start, T* out) { out[0] = static_cast<T>(start); } template <typename T, typename Context> T GetValue(const Context& ctx, const DenseTensor& x) { T value = static_cast<T>(0); if (x.place() != CPUPlace()) { DenseTensor cpu_x; Copy(ctx, x, CPUPlace(), true, &cpu_x); value = cpu_x.data<T>()[0]; } else { value = x.data<T>()[0]; } return value; } template <typename T, typename Context> T GetValueOfExpectedType(const Context& ctx, const DenseTensor& x) { switch (x.dtype()) { case DataType::FLOAT32: return static_cast<T>(GetValue<float, Context>(ctx, x)); case DataType::FLOAT64: return static_cast<T>(GetValue<double, Context>(ctx, x)); case DataType::INT32: return static_cast<T>(GetValue<int32_t, Context>(ctx, x)); case DataType::INT64: return static_cast<T>(GetValue<int64_t, Context>(ctx, x)); case DataType::FLOAT16: return static_cast<T>(GetValue<phi::dtype::float16, Context>(ctx, x)); case DataType::BFLOAT16: return static_cast<T>(GetValue<phi::dtype::bfloat16, Context>(ctx, x)); case DataType::BOOL: return static_cast<T>(GetValue<bool, Context>(ctx, x)); case DataType::INT16: return static_cast<T>(GetValue<int16_t, Context>(ctx, x)); case DataType::UINT8: return static_cast<T>(GetValue<uint8_t, Context>(ctx, x)); default: PADDLE_THROW(phi::errors::Unimplemented( "Data type (%s) is not supported when casting data type.", x.dtype())); } } template <typename T, typename Context> void LinspaceKernel(const Context& ctx, const DenseTensor& start, const DenseTensor& stop, const DenseTensor& number, DataType dtype, DenseTensor* out) { T start_value = GetValueOfExpectedType<T, Context>(ctx, start); T stop_value = GetValueOfExpectedType<T, Context>(ctx, stop); int64_t num = GetValueOfExpectedType<int64_t, Context>(ctx, number); PADDLE_ENFORCE_GT( num, 0, phi::errors::InvalidArgument("The num of linspace op should be larger " "than 0, but received num is %d", num)); out->Resize(phi::make_ddim({num})); T* out_data = ctx.template Alloc<T>(out); auto stream = ctx.stream(); if (num != 1) { int block = 512; int grid = (num + block - 1) / block; double step = (static_cast<double>(stop_value - start_value)) / (num - 1); LinspaceKernelInner<T><<<grid, block, 0, 
stream>>>( start_value, stop_value, step, num, out_data); } else { LinspaceSpecialKernel<T><<<1, 1, 0, stream>>>(start_value, out_data); } } } // namespace phi PD_REGISTER_KERNEL(linspace, GPU, ALL_LAYOUT, phi::LinspaceKernel, float, int32_t, int64_t, double) { kernel->InputAt(0).SetBackend(phi::Backend::ALL_BACKEND); kernel->InputAt(1).SetBackend(phi::Backend::ALL_BACKEND); kernel->InputAt(2).SetBackend(phi::Backend::ALL_BACKEND); }
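// Apart from the hipify banner and the hip_runtime include, the main transformation
// between this file and its .hip twin above is the kernel-launch syntax: the CUDA
// triple-chevron launch becomes hipLaunchKernelGGL(kernel, grid, block, sharedMem,
// stream, args...). A compilable CUDA sketch of the correspondence, using a
// hypothetical FillKernel rather than the Paddle kernel:
#include <cuda_runtime.h>

template <typename T>
__global__ void FillKernel(T value, T* out, int n) {   // illustrative helper kernel
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = value;
}

template <typename T>
void LaunchFill(T value, T* out, int n, cudaStream_t stream) {
    int block = 512;
    int grid = (n + block - 1) / block;
    // CUDA form, as in this file:
    FillKernel<T><<<grid, block, 0, stream>>>(value, out, n);
    // HIP form produced by hipify, as in the .hip twin:
    // hipLaunchKernelGGL((FillKernel<T>), dim3(grid), dim3(block), 0, stream,
    //                    value, out, n);
}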
56d5bc1192ba3fcb92620a0ee0119309c80251f6.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Vector addition: C = A + B. * * This sample is a very basic sample that implements element by element * vector addition. It is the same as the sample illustrating Chapter 2 * of the programming guide with some additions like error checking. */ #include <stdio.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <hip/hip_runtime.h> #include <helper_cuda.h> /** * CUDA Kernel Device code * * Computes the vector addition of A and B into C. The 3 vectors have the same * number of elements numElements. */ __global__ void vectorAdd(const float *A, const float *B, float *C, int numElements) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElements) { C[i] = A[i] + B[i]; } } /** * Host main routine */ int main(void) { int devid = 0; hipSetDevice(devid); hipDeviceProp_t prop; hipGetDeviceProperties(&prop, devid); printf("Device Number: %d\n", devid); printf(" Device name: %s\n", prop.name); printf(" Concurrent copy and kernel execution: %s with %d copy engine(s)\n", (prop.deviceOverlap ? "Yes" : "No"), prop.asyncEngineCount); // Error code to check return values for CUDA calls hipError_t err = hipSuccess; // Print the vector length to be used, and compute its size int numElements = 50000; size_t size = numElements * sizeof(float); printf("[Vector addition of %d elements]\n", numElements); // Allocate the host input vector A float *h_A = (float *)malloc(size); // Allocate the host input vector B float *h_B = (float *)malloc(size); // Allocate the host output vector C float *h_C = (float *)malloc(size); // Verify that allocations succeeded if (h_A == NULL || h_B == NULL || h_C == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } // Initialize the host input vectors for (int i = 0; i < numElements; ++i) { h_A[i] = rand()/(float)RAND_MAX; h_B[i] = rand()/(float)RAND_MAX; } // Allocate the device input vector A float *d_A = NULL; err = hipMalloc((void **)&d_A, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device input vector B float *d_B = NULL; err = hipMalloc((void **)&d_B, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device output vector C float *d_C = NULL; err = hipMalloc((void **)&d_C, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the host input vectors A and B in host memory to the device input vectors in // device memory printf("Copy input data from the host memory to the CUDA device\n"); err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector B from host to device 
(error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Launch the Vector Add CUDA Kernel int threadsPerBlock = 512; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, numElements); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the device result vector in device memory to the host result vector // in host memory. printf("Copy output data from the CUDA device to the host memory\n"); err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Verify that the result vector is correct for (int i = 0; i < numElements; ++i) { if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5) { fprintf(stderr, "Result verification failed at element %d!\n", i); exit(EXIT_FAILURE); } } printf("Test PASSED\n"); // Free device global memory err = hipFree(d_A); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_B); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_C); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Free host memory free(h_A); free(h_B); free(h_C); printf("Done\n"); return 0; }
56d5bc1192ba3fcb92620a0ee0119309c80251f6.cu
/** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Vector addition: C = A + B. * * This sample is a very basic sample that implements element by element * vector addition. It is the same as the sample illustrating Chapter 2 * of the programming guide with some additions like error checking. */ #include <stdio.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <cuda_runtime.h> #include <helper_cuda.h> /** * CUDA Kernel Device code * * Computes the vector addition of A and B into C. The 3 vectors have the same * number of elements numElements. */ __global__ void vectorAdd(const float *A, const float *B, float *C, int numElements) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElements) { C[i] = A[i] + B[i]; } } /** * Host main routine */ int main(void) { int devid = 0; cudaSetDevice(devid); cudaDeviceProp prop; cudaGetDeviceProperties(&prop, devid); printf("Device Number: %d\n", devid); printf(" Device name: %s\n", prop.name); printf(" Concurrent copy and kernel execution: %s with %d copy engine(s)\n", (prop.deviceOverlap ? "Yes" : "No"), prop.asyncEngineCount); // Error code to check return values for CUDA calls cudaError_t err = cudaSuccess; // Print the vector length to be used, and compute its size int numElements = 50000; size_t size = numElements * sizeof(float); printf("[Vector addition of %d elements]\n", numElements); // Allocate the host input vector A float *h_A = (float *)malloc(size); // Allocate the host input vector B float *h_B = (float *)malloc(size); // Allocate the host output vector C float *h_C = (float *)malloc(size); // Verify that allocations succeeded if (h_A == NULL || h_B == NULL || h_C == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } // Initialize the host input vectors for (int i = 0; i < numElements; ++i) { h_A[i] = rand()/(float)RAND_MAX; h_B[i] = rand()/(float)RAND_MAX; } // Allocate the device input vector A float *d_A = NULL; err = cudaMalloc((void **)&d_A, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device input vector B float *d_B = NULL; err = cudaMalloc((void **)&d_B, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device output vector C float *d_C = NULL; err = cudaMalloc((void **)&d_C, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the host input vectors A and B in host memory to the device input vectors in // device memory printf("Copy input data from the host memory to the CUDA device\n"); err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err)); 
exit(EXIT_FAILURE); } // Launch the Vector Add CUDA Kernel int threadsPerBlock = 512; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the device result vector in device memory to the host result vector // in host memory. printf("Copy output data from the CUDA device to the host memory\n"); err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Verify that the result vector is correct for (int i = 0; i < numElements; ++i) { if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5) { fprintf(stderr, "Result verification failed at element %d!\n", i); exit(EXIT_FAILURE); } } printf("Test PASSED\n"); // Free device global memory err = cudaFree(d_A); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_B); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_C); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Free host memory free(h_A); free(h_B); free(h_C); printf("Done\n"); return 0; }
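// Worked numbers for the launch configuration above: the ceiling division
// (numElements + threadsPerBlock - 1) / threadsPerBlock with numElements = 50000 and
// threadsPerBlock = 512 gives (50000 + 511) / 512 = 50511 / 512 = 98 blocks, i.e.
// 98 * 512 = 50176 threads, of which the last 176 are masked off by the
// `i < numElements` guard inside vectorAdd. The same computation as a reusable helper:
__host__ __device__ constexpr int ceil_div(int a, int b) {
    return (a + b - 1) / b;
}
// e.g. ceil_div(50000, 512) == 98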
9a9a9634d541e80f3a96f31d24c12c5943ecaaf2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define DEBUG #ifdef DEBUG #include <stdio.h> #endif #include <time.h> // for rand() #define BLOCKDIM 8 #define WIDTH 10 #define HEIGHT 10 #define RADIUS BLOCKDIM-2 __global__ void shared1D(int * in, int * out, int radius, int n) { extern __shared__ int smem[]; int tx = threadIdx.x; int bx = blockIdx.x; int dx = blockDim.x; int xIndex = tx + bx*dx; int index = xIndex; #define SMEM(txOffset) smem[tx + txOffset] if (xIndex < n) SMEM(radius) = in[index]; else SMEM(radius) = in[n - 1]; if (tx < radius) { if (xIndex - radius >= 0) { SMEM(0) = in[index - radius]; } else { SMEM(0) = in[0]; } if (xIndex + dx < n) { SMEM(dx + radius) = in[index + dx]; } else { SMEM(dx + radius) = in[n - 1]; } } __syncthreads(); if (xIndex >= n) return ; tx += radius; out[index] = SMEM(RADIUS-1); #undef SMEM } __global__ void horizShared2D(int * in, int * out, int radius, int width, int height) { extern __shared__ int smem[]; int tx = threadIdx.x, ty = threadIdx.y; int bx = blockIdx.x, by = blockIdx.y; int dx = blockDim.x, dy = blockDim.y; int xIndex = tx + bx*dx; int yIndex = ty + by*dy; int index = xIndex + yIndex*width; if (yIndex >= height) return ; #define SMEM(txOffset, tyOffset) smem[tx + txOffset + (ty+(tyOffset))*(dx+2*radius)] if (xIndex < width) SMEM(radius, 0) = in[index]; else SMEM(radius, 0) = in[(yIndex+1)*width - 1]; if (tx < radius) { if (xIndex - radius >= 0) { SMEM(0, 0) = in[index - radius]; } else { SMEM(0, 0) = in[yIndex*width]; } if (xIndex + dx < width) { SMEM(dx + radius, 0) = in[index + dx]; } else { SMEM(dx + radius, 0) = in[(yIndex + 1)*width - 1]; } } __syncthreads(); if (xIndex >= width) return ; tx += radius; out[index] = SMEM(RADIUS-1, 0); #undef SMEM } __global__ void vertShared2D(int * in, int * out, int radius, int width, int height) { extern __shared__ int smem[]; int tx = threadIdx.x, ty = threadIdx.y; int bx = blockIdx.x, by = blockIdx.y; int dx = blockDim.x, dy = blockDim.y; int xIndex = tx + bx*dx; int yIndex = ty + by*dy; int index = xIndex + yIndex*width; if (xIndex >= width) return ; #define SMEM(txOffset, tyOffset) smem[tx + txOffset + (ty+(tyOffset))*dx] if (yIndex < height) SMEM(0, radius) = in[index]; else SMEM(0, radius) = in[(height-1)*width + xIndex]; if (ty < radius) { if (yIndex - radius >= 0) { SMEM(0, 0) = in[index - radius*width]; } else { SMEM(0, 0) = in[xIndex]; } if (yIndex + dy < height) { SMEM(0, dy + radius) = in[index + dy*width]; } else { SMEM(0, dy + radius) = in[(height - 1)*width + xIndex]; } } __syncthreads(); if (yIndex >= height) return ; ty += radius; out[index] = SMEM(0, RADIUS-1); #undef SMEM } __global__ void shared2D(int * in, int * out, int radius, int width, int height) { extern __shared__ int smem[]; int tx = threadIdx.x, ty = threadIdx.y; int bx = blockIdx.x, by = blockIdx.y; int dx = blockDim.x, dy = blockDim.y; int xIndex = tx + bx*dx; int yIndex = ty + by*dy; int index = xIndex + yIndex*width; #define SMEM(txOffset, tyOffset) smem[tx + txOffset + (ty+(tyOffset))*(dx+2*radius)] if (xIndex < width && yIndex < height) { SMEM(radius, radius) = in[index]; } else if (xIndex < width) { SMEM(radius, radius) = in[(height - 1)*width + xIndex]; } else if (yIndex < height) { SMEM(radius, radius) = in[(yIndex + 1)*width - 1]; } else { SMEM(radius, radius) = in[height*width - 1]; } if (tx < radius) { if (xIndex - radius >= 0) { if (yIndex < height) { SMEM(0, radius) = in[index - radius]; } else { SMEM(0, radius) = in[(height - 1)*width + xIndex - radius]; } } else 
{ if (yIndex < height) { SMEM(0, radius) = in[yIndex*width]; } else { SMEM(0, radius) = in[(height - 1)*width]; } } if (xIndex + dx < width) { if (yIndex < height) { SMEM(dx+radius, radius) = in[index + dx]; } else { SMEM(dx+radius, radius) = in[(height - 1)*width + xIndex + dx]; } } else { if (yIndex < height) { SMEM(dx+radius, radius) = in[(yIndex + 1)*width - 1]; } else { SMEM(dx+radius, radius) = in[height*width - 1]; } } } if (ty < radius) { if (yIndex - radius >= 0) { if (xIndex < width) { SMEM(radius, 0) = in[index - width*radius]; } else { SMEM(radius, 0) = in[(yIndex - radius)*width]; } } else { if (xIndex < width) { SMEM(radius, 0) = in[xIndex]; } else { SMEM(radius, 0) = in[0]; } } if (yIndex + dy < width) { if (xIndex < width) { SMEM(radius, dy + radius) = in[index + dy*width]; } else { SMEM(radius, dy + radius) = in[(yIndex + dy + 1)*width - 1]; } } else { if (xIndex < width) { SMEM(radius, dy + radius) = in[(height - 1)*width + xIndex]; } else { SMEM(radius, dy + radius) = in[height*width - 1]; } } } if (tx < radius && ty < radius) { // top left corner if (xIndex - radius >= 0) { if (yIndex - radius >= 0) SMEM(0, 0) = in[index - radius*(width + 1)]; else SMEM(0, 0) = in[xIndex - radius]; } else { if (yIndex - radius >= 0) SMEM(0, 0) = in[(yIndex - radius)*width]; else SMEM(0, 0) = in[0]; } // bottom left corner if (xIndex - radius >= 0) { if (yIndex + dy < height) SMEM(0, dy + radius) = in[index - radius + dx*width]; else SMEM(0, dy + radius) = in[xIndex - radius + (height - 1)*width]; } else { if (yIndex + dy < height) SMEM(0, dy + radius) = in[(yIndex + dy)*width]; else SMEM(0, dy + radius) = in[(height - 1)*width]; } // top right corner if (xIndex + dx < width) { if (yIndex - radius >= 0) SMEM(dx + radius, 0) = in[index + dx - radius*width]; else SMEM(dx + radius, 0) = in[xIndex + dx]; } else { if (yIndex - radius >= 0) SMEM(dx + radius, 0) = in[(yIndex - radius + 1)*width - 1]; else SMEM(dx + radius, 0) = in[width - 1]; } // bottom right corner if (xIndex + dx < width) { if (yIndex + dy < height) SMEM(dx + radius, dy + radius) = in[index + dy*(width + 1)]; else SMEM(dx + radius, dy + radius) = in[xIndex + dx + (height - 1)*width]; } else { if (yIndex + dy < height) SMEM(dx + radius, dy + radius) = in[(yIndex + dy + 1)*width - 1]; else SMEM(dx + radius, dy + radius) = in[height*width - 1]; } SMEM(0, 0) = 11; SMEM(dx + radius, 0) = 33; SMEM(0, dy + radius) = 88; SMEM(dx + radius, dy + radius) = 99; } if (xIndex >= width || yIndex >= height) return ; tx += radius; ty += radius; out[index] = SMEM(4, 4); #undef SMEM } __host__ void init(int * list, int width, int height) { int ii, jj; for (ii = 0; ii < height; ii++) { for (jj = 0; jj < width; jj++) { list[ii*width + jj] = ii; } } } int main( ) { int * in, * d_in; int * out, * d_out; int width, height, radius; int sharedMemSize; #ifdef DEBUG int ii, jj; #endif width = WIDTH; height = HEIGHT; radius = RADIUS; in = (int *) malloc(width*height*sizeof(int)); out = (int *) malloc(width*height*sizeof(int)); init(in, width, height); hipMalloc((void **) &d_in, width*height*sizeof(int)); hipMalloc((void **) &d_out, width*height*sizeof(int)); hipMemcpy(d_in, in, width*height*sizeof(int), hipMemcpyHostToDevice); dim3 blockDim(BLOCKDIM,BLOCKDIM); dim3 gridDim((width + BLOCKDIM - 1)/BLOCKDIM, (height + BLOCKDIM -1)/BLOCKDIM); sharedMemSize = BLOCKDIM*(BLOCKDIM+2*RADIUS)*sizeof(int)*sizeof(int); hipLaunchKernelGGL(( shared2D), dim3(gridDim), dim3(blockDim), sharedMemSize, 0, d_in, d_out, radius, width, height); hipMemcpy(out, d_out, 
width*height*sizeof(int), hipMemcpyDeviceToHost); #ifdef NDEBUG for (jj = 0; jj < height; jj++) { for (ii = 0; ii < width; ii++) { if (out[jj*width + ii] != in[jj*width + (ii + RADIUS)%WIDTH]) { printf("foo"); } } } #endif #ifdef DEBUG for (jj = 0; jj < height; jj++) { for (ii = 0; ii < width; ii++) { printf("%d ", out[jj*width + ii]); } printf("\n"); } #endif free(in); free(out); hipFree(d_in); hipFree(d_out); return 0; }
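// Two small sharp edges in the host setup above, noted as observations rather than
// fixes. First, RADIUS is defined without parentheses, so in the sharedMemSize
// expression `2*RADIUS` expands to `2*8-2 = 14` rather than `2*(8-2) = 12` (the runtime
// variable `radius` still becomes 6 because `radius = RADIUS;` parses as `8-2`).
// Second, sizeof(int) is multiplied in twice. Both happen to leave sharedMemSize larger
// than the kernel needs, so the launch still works. A sketch of the parenthesized macro
// and the sizing that matches how SMEM(...) indexes the tile (row stride dx + 2*radius):
#include <cstddef>

#define BLOCKDIM_SKETCH 8
#define RADIUS_SKETCH (BLOCKDIM_SKETCH - 2)   // parenthesized, so 2*RADIUS_SKETCH == 12

// bytes for a (blockDim + 2*radius)^2 tile of int
size_t haloTileBytes(int blockDim, int radius) {
    size_t side = static_cast<size_t>(blockDim) + 2 * static_cast<size_t>(radius);
    return side * side * sizeof(int);   // haloTileBytes(8, 6) == 20*20*4 == 1600 bytes
}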
9a9a9634d541e80f3a96f31d24c12c5943ecaaf2.cu
#define DEBUG #ifdef DEBUG #include <stdio.h> #endif #include <time.h> // for rand() #define BLOCKDIM 8 #define WIDTH 10 #define HEIGHT 10 #define RADIUS BLOCKDIM-2 __global__ void shared1D(int * in, int * out, int radius, int n) { extern __shared__ int smem[]; int tx = threadIdx.x; int bx = blockIdx.x; int dx = blockDim.x; int xIndex = tx + bx*dx; int index = xIndex; #define SMEM(txOffset) smem[tx + txOffset] if (xIndex < n) SMEM(radius) = in[index]; else SMEM(radius) = in[n - 1]; if (tx < radius) { if (xIndex - radius >= 0) { SMEM(0) = in[index - radius]; } else { SMEM(0) = in[0]; } if (xIndex + dx < n) { SMEM(dx + radius) = in[index + dx]; } else { SMEM(dx + radius) = in[n - 1]; } } __syncthreads(); if (xIndex >= n) return ; tx += radius; out[index] = SMEM(RADIUS-1); #undef SMEM } __global__ void horizShared2D(int * in, int * out, int radius, int width, int height) { extern __shared__ int smem[]; int tx = threadIdx.x, ty = threadIdx.y; int bx = blockIdx.x, by = blockIdx.y; int dx = blockDim.x, dy = blockDim.y; int xIndex = tx + bx*dx; int yIndex = ty + by*dy; int index = xIndex + yIndex*width; if (yIndex >= height) return ; #define SMEM(txOffset, tyOffset) smem[tx + txOffset + (ty+(tyOffset))*(dx+2*radius)] if (xIndex < width) SMEM(radius, 0) = in[index]; else SMEM(radius, 0) = in[(yIndex+1)*width - 1]; if (tx < radius) { if (xIndex - radius >= 0) { SMEM(0, 0) = in[index - radius]; } else { SMEM(0, 0) = in[yIndex*width]; } if (xIndex + dx < width) { SMEM(dx + radius, 0) = in[index + dx]; } else { SMEM(dx + radius, 0) = in[(yIndex + 1)*width - 1]; } } __syncthreads(); if (xIndex >= width) return ; tx += radius; out[index] = SMEM(RADIUS-1, 0); #undef SMEM } __global__ void vertShared2D(int * in, int * out, int radius, int width, int height) { extern __shared__ int smem[]; int tx = threadIdx.x, ty = threadIdx.y; int bx = blockIdx.x, by = blockIdx.y; int dx = blockDim.x, dy = blockDim.y; int xIndex = tx + bx*dx; int yIndex = ty + by*dy; int index = xIndex + yIndex*width; if (xIndex >= width) return ; #define SMEM(txOffset, tyOffset) smem[tx + txOffset + (ty+(tyOffset))*dx] if (yIndex < height) SMEM(0, radius) = in[index]; else SMEM(0, radius) = in[(height-1)*width + xIndex]; if (ty < radius) { if (yIndex - radius >= 0) { SMEM(0, 0) = in[index - radius*width]; } else { SMEM(0, 0) = in[xIndex]; } if (yIndex + dy < height) { SMEM(0, dy + radius) = in[index + dy*width]; } else { SMEM(0, dy + radius) = in[(height - 1)*width + xIndex]; } } __syncthreads(); if (yIndex >= height) return ; ty += radius; out[index] = SMEM(0, RADIUS-1); #undef SMEM } __global__ void shared2D(int * in, int * out, int radius, int width, int height) { extern __shared__ int smem[]; int tx = threadIdx.x, ty = threadIdx.y; int bx = blockIdx.x, by = blockIdx.y; int dx = blockDim.x, dy = blockDim.y; int xIndex = tx + bx*dx; int yIndex = ty + by*dy; int index = xIndex + yIndex*width; #define SMEM(txOffset, tyOffset) smem[tx + txOffset + (ty+(tyOffset))*(dx+2*radius)] if (xIndex < width && yIndex < height) { SMEM(radius, radius) = in[index]; } else if (xIndex < width) { SMEM(radius, radius) = in[(height - 1)*width + xIndex]; } else if (yIndex < height) { SMEM(radius, radius) = in[(yIndex + 1)*width - 1]; } else { SMEM(radius, radius) = in[height*width - 1]; } if (tx < radius) { if (xIndex - radius >= 0) { if (yIndex < height) { SMEM(0, radius) = in[index - radius]; } else { SMEM(0, radius) = in[(height - 1)*width + xIndex - radius]; } } else { if (yIndex < height) { SMEM(0, radius) = in[yIndex*width]; } else { SMEM(0, radius) = 
in[(height - 1)*width]; } } if (xIndex + dx < width) { if (yIndex < height) { SMEM(dx+radius, radius) = in[index + dx]; } else { SMEM(dx+radius, radius) = in[(height - 1)*width + xIndex + dx]; } } else { if (yIndex < height) { SMEM(dx+radius, radius) = in[(yIndex + 1)*width - 1]; } else { SMEM(dx+radius, radius) = in[height*width - 1]; } } } if (ty < radius) { if (yIndex - radius >= 0) { if (xIndex < width) { SMEM(radius, 0) = in[index - width*radius]; } else { SMEM(radius, 0) = in[(yIndex - radius)*width]; } } else { if (xIndex < width) { SMEM(radius, 0) = in[xIndex]; } else { SMEM(radius, 0) = in[0]; } } if (yIndex + dy < width) { if (xIndex < width) { SMEM(radius, dy + radius) = in[index + dy*width]; } else { SMEM(radius, dy + radius) = in[(yIndex + dy + 1)*width - 1]; } } else { if (xIndex < width) { SMEM(radius, dy + radius) = in[(height - 1)*width + xIndex]; } else { SMEM(radius, dy + radius) = in[height*width - 1]; } } } if (tx < radius && ty < radius) { // top left corner if (xIndex - radius >= 0) { if (yIndex - radius >= 0) SMEM(0, 0) = in[index - radius*(width + 1)]; else SMEM(0, 0) = in[xIndex - radius]; } else { if (yIndex - radius >= 0) SMEM(0, 0) = in[(yIndex - radius)*width]; else SMEM(0, 0) = in[0]; } // bottom left corner if (xIndex - radius >= 0) { if (yIndex + dy < height) SMEM(0, dy + radius) = in[index - radius + dx*width]; else SMEM(0, dy + radius) = in[xIndex - radius + (height - 1)*width]; } else { if (yIndex + dy < height) SMEM(0, dy + radius) = in[(yIndex + dy)*width]; else SMEM(0, dy + radius) = in[(height - 1)*width]; } // top right corner if (xIndex + dx < width) { if (yIndex - radius >= 0) SMEM(dx + radius, 0) = in[index + dx - radius*width]; else SMEM(dx + radius, 0) = in[xIndex + dx]; } else { if (yIndex - radius >= 0) SMEM(dx + radius, 0) = in[(yIndex - radius + 1)*width - 1]; else SMEM(dx + radius, 0) = in[width - 1]; } // bottom right corner if (xIndex + dx < width) { if (yIndex + dy < height) SMEM(dx + radius, dy + radius) = in[index + dy*(width + 1)]; else SMEM(dx + radius, dy + radius) = in[xIndex + dx + (height - 1)*width]; } else { if (yIndex + dy < height) SMEM(dx + radius, dy + radius) = in[(yIndex + dy + 1)*width - 1]; else SMEM(dx + radius, dy + radius) = in[height*width - 1]; } SMEM(0, 0) = 11; SMEM(dx + radius, 0) = 33; SMEM(0, dy + radius) = 88; SMEM(dx + radius, dy + radius) = 99; } if (xIndex >= width || yIndex >= height) return ; tx += radius; ty += radius; out[index] = SMEM(4, 4); #undef SMEM } __host__ void init(int * list, int width, int height) { int ii, jj; for (ii = 0; ii < height; ii++) { for (jj = 0; jj < width; jj++) { list[ii*width + jj] = ii; } } } int main( ) { int * in, * d_in; int * out, * d_out; int width, height, radius; int sharedMemSize; #ifdef DEBUG int ii, jj; #endif width = WIDTH; height = HEIGHT; radius = RADIUS; in = (int *) malloc(width*height*sizeof(int)); out = (int *) malloc(width*height*sizeof(int)); init(in, width, height); cudaMalloc((void **) &d_in, width*height*sizeof(int)); cudaMalloc((void **) &d_out, width*height*sizeof(int)); cudaMemcpy(d_in, in, width*height*sizeof(int), cudaMemcpyHostToDevice); dim3 blockDim(BLOCKDIM,BLOCKDIM); dim3 gridDim((width + BLOCKDIM - 1)/BLOCKDIM, (height + BLOCKDIM -1)/BLOCKDIM); sharedMemSize = BLOCKDIM*(BLOCKDIM+2*RADIUS)*sizeof(int)*sizeof(int); shared2D<<<gridDim, blockDim, sharedMemSize>>>(d_in, d_out, radius, width, height); cudaMemcpy(out, d_out, width*height*sizeof(int), cudaMemcpyDeviceToHost); #ifdef NDEBUG for (jj = 0; jj < height; jj++) { for (ii = 0; ii < width; 
ii++) { if (out[jj*width + ii] != in[jj*width + (ii + RADIUS)%WIDTH]) { printf("foo"); } } } #endif #ifdef DEBUG for (jj = 0; jj < height; jj++) { for (ii = 0; ii < width; ii++) { printf("%d ", out[jj*width + ii]); } printf("\n"); } #endif free(in); free(out); cudaFree(d_in); cudaFree(d_out); return 0; }
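// shared1D, horizShared2D and vertShared2D above all follow the same halo-loading
// recipe: every thread stages its own element at smem[tx + radius], and the first
// `radius` threads of the block additionally stage one left-halo and one right-halo
// element, clamping to the first/last valid index at the array ends. A minimal 1D
// distillation of that recipe (names are illustrative); launch it with
// sharedMem = (blockDim.x + 2*radius) * sizeof(int).
__global__ void haloLoad1D(const int* in, int* out, int radius, int n) {
    extern __shared__ int tile[];
    int tx = threadIdx.x;
    int gx = blockIdx.x * blockDim.x + tx;

    // interior element (clamped if this thread runs past the end of the array)
    tile[tx + radius] = in[min(gx, n - 1)];

    if (tx < radius) {
        // left halo, clamped at index 0
        tile[tx] = in[max(gx - radius, 0)];
        // right halo, clamped at index n-1
        tile[tx + radius + blockDim.x] = in[min(gx + blockDim.x, n - 1)];
    }
    __syncthreads();

    // consume the tile; here we just copy the centre element back out
    if (gx < n) out[gx] = tile[tx + radius];
}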
e321214e86c57966822fa0abce07dc209ff9d651.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 zsymv_upper.cu is nearly identical to zhemv_upper.cu, just change names and drop cuConj. zsymv_kernel_U (upper) in zsymv_upper.cu is very similar to zsymv_kernel_L (lower) in zsymv.cu; diff the two files to compare. Note: [ds] precisions generated from zhemv_upper.cu @precisions normal z -> c @author Mark Gates */ #include "common_magma.h" #include "commonblas_z.h" #define PRECISION_z #define NB_X 64 #define NB_Y 4 #define bank_shift 33 #define quarter_NB_X 16 #define half_NB_X 32 /******************************************************************************* Upper case, compute block multiply, work = A*x, for any size n: [ (A11*x1 + A12*x2 + A13*x3) --- --- ] [ A11 A12 A13 ] [ x1 ] work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] = [ A12^H A22 A23 ] * [ x2 ] [ (A13^H*x1) (A23^H*x2) (A33*x3) ] [ A13^H A23^H A33 ] [ x3 ] The order is different from the lower case, because the upper case processes a block row from the diagonal to the right, whereas the lower case processes a block row from the diagonal to the left. Uses a 64x4 thread block. For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed). For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles. In both cases, each thread multiplies 4 elements. For rows past the bottom of the matrix, the A pointer is adjusted to be the last valid row of A, which multiple threads will read. Extra rows are ignored when saving results to work. Columns past the right edge are explicitly ignored when loading. x values past the bottom are set to zero, thus, extra columns are zeroed when multiplying. ********************************************************************/ __global__ void zsymv_kernel_U( int n, magmaDoubleComplex const * __restrict__ A, int lda, magmaDoubleComplex const * __restrict__ x, int incx, magmaDoubleComplex * __restrict__ work) { #if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200) // treats sA as 16x64 block #define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ] // treats sA as 32x32 block #define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)]) // 64x4 thread block const int tx = threadIdx.x; const int ty = threadIdx.y; const int blk = blockIdx.x; const int blk_ind = NB_X * blk; const int td = NB_X * ty + tx; // 32x8 thread block const int tx2 = td % half_NB_X; const int ty2 = td / half_NB_X; // If this blk has fewer than NB_X rows, partial is the number of valid rows, // so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid. // Else, partial == 0. int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0); magmaDoubleComplex psum, psum_t; magmaDoubleComplex total = MAGMA_Z_ZERO; // sA is used as a 32x32 block, sA32(i,j), // and as a 16x64 block, sA16(i,j), in different parts of the code. // sA must be at least half_NB_X*bank_shift = 32x33 = 1056; // quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056 __shared__ magmaDoubleComplex sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? 
*/ __shared__ magmaDoubleComplex sx_blk[NB_X]; // for x[ blk ] __shared__ magmaDoubleComplex sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks right of diag magmaDoubleComplex rA[4]; magmaDoubleComplex psums_t[4]; // -------------------- // load 64x1 block x(blk_ind + 0:63) into sx_blk x += (blk_ind + tx)*incx; // x is x(blk_ind + tx) if ( ty == 0 ) { if ( (partial == 0 || tx < partial) ) { sx_blk[tx] = x[0]; } else { sx_blk[tx] = MAGMA_Z_ZERO; } } // -------------------- // move to block row work += blk*lda; // work is work(0, blk) A += blk_ind; // A is A(blk_ind, 0) A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2) // move to 32x32 diag block A += blk_ind*lda; // A is A(blk_ind + tx2, blk_ind + ty2) // load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA, // as four 32x8 sections one after another: // columns 0:7, then 8:15, then 16:23, then 24:31 if ( partial ) { if ( tx2 >= partial ) { A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row } #pragma unroll for(int j=0; j < half_NB_X; j += 8) { if ( ty2+j < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } } if ( tx2 >= partial ) { A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2) } } else { #pragma unroll for(int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // symmetrize 32x32 diag block, copying upper to lower triangle, // as four 32x8 sections in parallel: // columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31 #pragma unroll for(int j=ty2*4; j < ty2*4 + 4; j++) { if ( j > tx2 ) { sA32(j, tx2) = ( sA32(tx2, j) ); } } __syncthreads(); // multiply 32x32 diag block * x // each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3) psum = MAGMA_Z_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j]; } __syncthreads(); // store partial row sums sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2) if ( ty2 == 0 ) { total = sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to next 32x32 diag block, then repeat steps from first diag block A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2) // load 32x32 diag block A[block + 0:31, block + 0:31] into sA if ( partial ) { if ( tx2 + half_NB_X >= partial ) { A = A - (tx2 + half_NB_X) + (partial - 1); } #pragma unroll for(int j=0; j < half_NB_X; j += 8) { if ( ty2+j + half_NB_X < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } } if ( tx2 + half_NB_X >= partial ) { A = A + (tx2 + half_NB_X) - (partial - 1); } } else { #pragma unroll for(int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // symmetrize 32x32 diag block, copying upper to lower triangle #pragma unroll for(int j=ty2*4; j < ty2*4 + 4; j++) { if ( j > tx2 ) { sA32(j, tx2) = ( sA32(tx2, j) ); } } __syncthreads(); // multiply 32x32 diag block * x psum = MAGMA_Z_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j]; } __syncthreads(); // store partial row sums sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2) if ( ty2 == 1 ) { total = sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // 
-------------------- // move to off-diag 32x32 block A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + NB/2 + ty2) // load 32x32 block of A into sA, // as four 32x8 sections one after another: // columns 0:7, then 8:15, then 16:23, then 24:31 if ( partial ) { if ( tx2 >= partial ) { A = A - (tx2) + (partial - 1); } #pragma unroll for(int j=0; j < half_NB_X; j += 8) { if ( ty2+j + half_NB_X < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } } if ( tx2 >= partial ) { A = A + (tx2) - (partial - 1); } } else { #pragma unroll for(int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // multiply 32x32 block (below diag) psum = MAGMA_Z_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum += ( sA32(ty2 + j*8, tx2) ) * sx_blk[j*8 + ty2]; } //__syncthreads(); // no sync needed here // multiply transposed 32x32 block (above diag) psum_t = MAGMA_Z_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum_t += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j]; } __syncthreads(); // store partial sums for non-transposed 32x32 block sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2) if ( ty2 == 1 ) { total = total + sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // store partial sums for transposed 32x32 block sA32(ty2, tx2) = psum_t; __syncthreads(); // sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2) if ( ty2 == 0 ) { total = total + sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to next 64x64 block right of diag in block row, and // switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block A += half_NB_X*lda; // A is A(blk_ind + tx2, blk_ind + NB_X + ty2 ) A -= ty2*lda + tx2; // A is A(blk_ind, blk_ind + NB_X ) A += 4*ty*lda + tx; // A is A(blk_ind + tx, blk_ind + 4*ty) // Unlike lower case, don't adjust A here for partial # of rows. // Since block is right of diagonal, it must have all NB rows, // but can have < NB columns, dealt with when loading below. x -= blk_ind*incx; // x is x(tx) // 16x16 thread block const int tx4 = td % quarter_NB_X; const int ty4 = td / quarter_NB_X; // cycle over blocks jj right of diagonal, in block row blk for(int jj=blk+1; jj < gridDim.x; ++jj) { partial = (jj == gridDim.x - 1 ? 
(n % NB_X) : 0); // load 64x1 block x(jj_ind + 0:63) into sx_jj if ( ty == 0 ) { if ( partial == 0 || tx < partial ) { sx_jj[tx] = x[jj*NB_X*incx]; } else { sx_jj[tx] = MAGMA_Z_ZERO; } } __syncthreads(); for( int k=0; k < 4; k++ ) { // load 64x16 block of A into rA, 4 elements per thread, // as four 64x4 sections in parallel: // columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15 if ( partial ) { #pragma unroll for(int j=0; j < 4; j++) { if ( 4*ty + j + k*quarter_NB_X < partial ) { rA[j] = A[j*lda]; } else { rA[j] = MAGMA_Z_ZERO; } } } else { #pragma unroll for(int j=0; j < 4; j++) { rA[j] = A[j*lda]; } } // 1) multiply 64x16 block A_{blk,jj} * x_jj // each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k) // 2) multiply 16x64 block A_{blk,jj} * x_blk, // storing each product Aji*xi to sA(j,i) #pragma unroll for(int j=0; j < 4; j++) { total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj sA16(ty*4 + j, tx) = ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk } __syncthreads(); // do partial row sums for transposed 16x64 result // use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty) // sum sixteen 16x4 sections in parallel: // columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63 psum_t = MAGMA_Z_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum_t += sA16(tx4, ty4*4 + j); } __syncthreads(); // store partial row sums of transposed result, y_jj (locally) psums_t[k] = psum_t; // move right to next 64x16 block A += lda * quarter_NB_X; // A is A(blk_ind + tx, jj*NB_X + (k+1)*NB_X/4 + 4*ty) } // already at next 64x64 block // A is A(blk_ind + tx, (jj+1)*NB_x + 4*ty) // store partial row sums of transposed result, y_jj #pragma unroll for(int k=0; k < 4; k++) { sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k]; } __syncthreads(); // sum up partial row sums of transposed result, y_jj, and store final total to workspace // thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16 if ( ty4 < 4 && (partial == 0 || tx4 + ty4*quarter_NB_X < partial) ) { int ty4_nb4 = ty4*quarter_NB_X; psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4) + sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4) + sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4) + sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4) + sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4) + sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4) + sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4) + sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4); work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk ) } __syncthreads(); } // store row sums sA16(ty, tx) = total; __syncthreads(); partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0); // sum up final total, y_blk, for row tx if ( ty == 0 && (partial == 0 || tx < partial) ) { total = sA16(0, tx) + sA16(1, tx) + sA16(2, tx) + sA16(3, tx); work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk ) } #endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */ } // end zsymv_kernel_U /************************************************************** Upper case, sum up final results Each block sums one block row; each thread sums one row. 
On input (for 3 blocks): [ (A11*x1 + A12*x2 + A13*x3) --- --- ] work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] [ (A13^H*x1) (A23^H*x2) (A33*x3) ] On output: [ (A11*x1 + A12*x2 + A13*x3) ] y = alpha*[ (A12^H*x1) + (A22*x2 + A23*x3) ] + beta*y [ (A13^H*x1) + (A23^H*x2) + (A33*x3) ] ********************************************************************/ __global__ void zsymv_kernel_U_sum( int n, magmaDoubleComplex alpha, int lda, magmaDoubleComplex beta, magmaDoubleComplex * __restrict__ y, int incy, magmaDoubleComplex const * __restrict__ work ) { int tx = threadIdx.x; int blk = blockIdx.x; int blk_ind = blk * NB_X; int ind = blk_ind + tx; // Don't write outside [0, ..., n) if ( ind < n ) { work += ind; magmaDoubleComplex Ax = MAGMA_Z_ZERO; for(int j = 0; j <= blk; ++j) { Ax += work[0]; work += lda; } y[ind * incy] = beta*y[ind * incy] + alpha*Ax; } }
e321214e86c57966822fa0abce07dc209ff9d651.cu
/* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 zsymv_upper.cu is nearly identical to zhemv_upper.cu, just change names and drop cuConj. zsymv_kernel_U (upper) in zsymv_upper.cu is very similar to zsymv_kernel_L (lower) in zsymv.cu; diff the two files to compare. Note: [ds] precisions generated from zhemv_upper.cu @precisions normal z -> c @author Mark Gates */ #include "common_magma.h" #include "commonblas_z.h" #define PRECISION_z #define NB_X 64 #define NB_Y 4 #define bank_shift 33 #define quarter_NB_X 16 #define half_NB_X 32 /******************************************************************************* Upper case, compute block multiply, work = A*x, for any size n: [ (A11*x1 + A12*x2 + A13*x3) --- --- ] [ A11 A12 A13 ] [ x1 ] work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] = [ A12^H A22 A23 ] * [ x2 ] [ (A13^H*x1) (A23^H*x2) (A33*x3) ] [ A13^H A23^H A33 ] [ x3 ] The order is different from the lower case, because the upper case processes a block row from the diagonal to the right, whereas the lower case processes a block row from the diagonal to the left. Uses a 64x4 thread block. For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed). For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles. In both cases, each thread multiplies 4 elements. For rows past the bottom of the matrix, the A pointer is adjusted to be the last valid row of A, which multiple threads will read. Extra rows are ignored when saving results to work. Columns past the right edge are explicitly ignored when loading. x values past the bottom are set to zero, thus, extra columns are zeroed when multiplying. ********************************************************************/ __global__ void zsymv_kernel_U( int n, magmaDoubleComplex const * __restrict__ A, int lda, magmaDoubleComplex const * __restrict__ x, int incx, magmaDoubleComplex * __restrict__ work) { #if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200) // treats sA as 16x64 block #define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ] // treats sA as 32x32 block #define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)]) // 64x4 thread block const int tx = threadIdx.x; const int ty = threadIdx.y; const int blk = blockIdx.x; const int blk_ind = NB_X * blk; const int td = NB_X * ty + tx; // 32x8 thread block const int tx2 = td % half_NB_X; const int ty2 = td / half_NB_X; // If this blk has fewer than NB_X rows, partial is the number of valid rows, // so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid. // Else, partial == 0. int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0); magmaDoubleComplex psum, psum_t; magmaDoubleComplex total = MAGMA_Z_ZERO; // sA is used as a 32x32 block, sA32(i,j), // and as a 16x64 block, sA16(i,j), in different parts of the code. // sA must be at least half_NB_X*bank_shift = 32x33 = 1056; // quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056 __shared__ magmaDoubleComplex sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? 
*/ __shared__ magmaDoubleComplex sx_blk[NB_X]; // for x[ blk ] __shared__ magmaDoubleComplex sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks right of diag magmaDoubleComplex rA[4]; magmaDoubleComplex psums_t[4]; // -------------------- // load 64x1 block x(blk_ind + 0:63) into sx_blk x += (blk_ind + tx)*incx; // x is x(blk_ind + tx) if ( ty == 0 ) { if ( (partial == 0 || tx < partial) ) { sx_blk[tx] = x[0]; } else { sx_blk[tx] = MAGMA_Z_ZERO; } } // -------------------- // move to block row work += blk*lda; // work is work(0, blk) A += blk_ind; // A is A(blk_ind, 0) A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2) // move to 32x32 diag block A += blk_ind*lda; // A is A(blk_ind + tx2, blk_ind + ty2) // load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA, // as four 32x8 sections one after another: // columns 0:7, then 8:15, then 16:23, then 24:31 if ( partial ) { if ( tx2 >= partial ) { A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row } #pragma unroll for(int j=0; j < half_NB_X; j += 8) { if ( ty2+j < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } } if ( tx2 >= partial ) { A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2) } } else { #pragma unroll for(int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // symmetrize 32x32 diag block, copying upper to lower triangle, // as four 32x8 sections in parallel: // columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31 #pragma unroll for(int j=ty2*4; j < ty2*4 + 4; j++) { if ( j > tx2 ) { sA32(j, tx2) = ( sA32(tx2, j) ); } } __syncthreads(); // multiply 32x32 diag block * x // each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3) psum = MAGMA_Z_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j]; } __syncthreads(); // store partial row sums sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2) if ( ty2 == 0 ) { total = sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to next 32x32 diag block, then repeat steps from first diag block A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2) // load 32x32 diag block A[block + 0:31, block + 0:31] into sA if ( partial ) { if ( tx2 + half_NB_X >= partial ) { A = A - (tx2 + half_NB_X) + (partial - 1); } #pragma unroll for(int j=0; j < half_NB_X; j += 8) { if ( ty2+j + half_NB_X < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } } if ( tx2 + half_NB_X >= partial ) { A = A + (tx2 + half_NB_X) - (partial - 1); } } else { #pragma unroll for(int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // symmetrize 32x32 diag block, copying upper to lower triangle #pragma unroll for(int j=ty2*4; j < ty2*4 + 4; j++) { if ( j > tx2 ) { sA32(j, tx2) = ( sA32(tx2, j) ); } } __syncthreads(); // multiply 32x32 diag block * x psum = MAGMA_Z_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j]; } __syncthreads(); // store partial row sums sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2) if ( ty2 == 1 ) { total = sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // 
-------------------- // move to off-diag 32x32 block A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + NB/2 + ty2) // load 32x32 block of A into sA, // as four 32x8 sections one after another: // columns 0:7, then 8:15, then 16:23, then 24:31 if ( partial ) { if ( tx2 >= partial ) { A = A - (tx2) + (partial - 1); } #pragma unroll for(int j=0; j < half_NB_X; j += 8) { if ( ty2+j + half_NB_X < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } } if ( tx2 >= partial ) { A = A + (tx2) - (partial - 1); } } else { #pragma unroll for(int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // multiply 32x32 block (below diag) psum = MAGMA_Z_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum += ( sA32(ty2 + j*8, tx2) ) * sx_blk[j*8 + ty2]; } //__syncthreads(); // no sync needed here // multiply transposed 32x32 block (above diag) psum_t = MAGMA_Z_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum_t += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j]; } __syncthreads(); // store partial sums for non-transposed 32x32 block sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2) if ( ty2 == 1 ) { total = total + sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // store partial sums for transposed 32x32 block sA32(ty2, tx2) = psum_t; __syncthreads(); // sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2) if ( ty2 == 0 ) { total = total + sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to next 64x64 block right of diag in block row, and // switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block A += half_NB_X*lda; // A is A(blk_ind + tx2, blk_ind + NB_X + ty2 ) A -= ty2*lda + tx2; // A is A(blk_ind, blk_ind + NB_X ) A += 4*ty*lda + tx; // A is A(blk_ind + tx, blk_ind + 4*ty) // Unlike lower case, don't adjust A here for partial # of rows. // Since block is right of diagonal, it must have all NB rows, // but can have < NB columns, dealt with when loading below. x -= blk_ind*incx; // x is x(tx) // 16x16 thread block const int tx4 = td % quarter_NB_X; const int ty4 = td / quarter_NB_X; // cycle over blocks jj right of diagonal, in block row blk for(int jj=blk+1; jj < gridDim.x; ++jj) { partial = (jj == gridDim.x - 1 ? 
(n % NB_X) : 0); // load 64x1 block x(jj_ind + 0:63) into sx_jj if ( ty == 0 ) { if ( partial == 0 || tx < partial ) { sx_jj[tx] = x[jj*NB_X*incx]; } else { sx_jj[tx] = MAGMA_Z_ZERO; } } __syncthreads(); for( int k=0; k < 4; k++ ) { // load 64x16 block of A into rA, 4 elements per thread, // as four 64x4 sections in parallel: // columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15 if ( partial ) { #pragma unroll for(int j=0; j < 4; j++) { if ( 4*ty + j + k*quarter_NB_X < partial ) { rA[j] = A[j*lda]; } else { rA[j] = MAGMA_Z_ZERO; } } } else { #pragma unroll for(int j=0; j < 4; j++) { rA[j] = A[j*lda]; } } // 1) multiply 64x16 block A_{blk,jj} * x_jj // each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k) // 2) multiply 16x64 block A_{blk,jj} * x_blk, // storing each product Aji*xi to sA(j,i) #pragma unroll for(int j=0; j < 4; j++) { total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj sA16(ty*4 + j, tx) = ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk } __syncthreads(); // do partial row sums for transposed 16x64 result // use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty) // sum sixteen 16x4 sections in parallel: // columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63 psum_t = MAGMA_Z_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum_t += sA16(tx4, ty4*4 + j); } __syncthreads(); // store partial row sums of transposed result, y_jj (locally) psums_t[k] = psum_t; // move right to next 64x16 block A += lda * quarter_NB_X; // A is A(blk_ind + tx, jj*NB_X + (k+1)*NB_X/4 + 4*ty) } // already at next 64x64 block // A is A(blk_ind + tx, (jj+1)*NB_x + 4*ty) // store partial row sums of transposed result, y_jj #pragma unroll for(int k=0; k < 4; k++) { sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k]; } __syncthreads(); // sum up partial row sums of transposed result, y_jj, and store final total to workspace // thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16 if ( ty4 < 4 && (partial == 0 || tx4 + ty4*quarter_NB_X < partial) ) { int ty4_nb4 = ty4*quarter_NB_X; psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4) + sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4) + sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4) + sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4) + sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4) + sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4) + sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4) + sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4); work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk ) } __syncthreads(); } // store row sums sA16(ty, tx) = total; __syncthreads(); partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0); // sum up final total, y_blk, for row tx if ( ty == 0 && (partial == 0 || tx < partial) ) { total = sA16(0, tx) + sA16(1, tx) + sA16(2, tx) + sA16(3, tx); work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk ) } #endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */ } // end zsymv_kernel_U /************************************************************** Upper case, sum up final results Each block sums one block row; each thread sums one row. 
On input (for 3 blocks): [ (A11*x1 + A12*x2 + A13*x3) --- --- ] work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] [ (A13^H*x1) (A23^H*x2) (A33*x3) ] On output: [ (A11*x1 + A12*x2 + A13*x3) ] y = alpha*[ (A12^H*x1) + (A22*x2 + A23*x3) ] + beta*y [ (A13^H*x1) + (A23^H*x2) + (A33*x3) ] ********************************************************************/ __global__ void zsymv_kernel_U_sum( int n, magmaDoubleComplex alpha, int lda, magmaDoubleComplex beta, magmaDoubleComplex * __restrict__ y, int incy, magmaDoubleComplex const * __restrict__ work ) { int tx = threadIdx.x; int blk = blockIdx.x; int blk_ind = blk * NB_X; int ind = blk_ind + tx; // Don't write outside [0, ..., n) if ( ind < n ) { work += ind; magmaDoubleComplex Ax = MAGMA_Z_ZERO; for(int j = 0; j <= blk; ++j) { Ax += work[0]; work += lda; } y[ind * incy] = beta*y[ind * incy] + alpha*Ax; } }
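/***************************************************************************
    Illustrative host-side sketch, not MAGMA code: taken together, the two
    kernels above compute y = alpha*A*x + beta*y for a symmetric matrix A of
    which only the upper triangle is stored (symv, so no conjugation), assuming
    unit increments (incx = incy = 1). The plain O(n^2) reference below
    expresses that overall operation and can serve as a correctness oracle when
    reasoning about the blocked work array described in the comments above. It
    uses std::complex<double> instead of magmaDoubleComplex so that it stays
    self-contained.
****************************************************************************/
#include <complex>

static void zsymv_upper_host_reference(
    int n,
    std::complex<double> alpha,
    const std::complex<double> *A, int lda,   // column-major; only the upper triangle is read
    const std::complex<double> *x,
    std::complex<double> beta,
    std::complex<double> *y )
{
    for (int i = 0; i < n; ++i) {
        std::complex<double> sum(0.0, 0.0);
        for (int j = 0; j < n; ++j) {
            // symmetric (not Hermitian): A(j,i) = A(i,j), so always read the stored upper triangle
            const std::complex<double> aij = (j >= i) ? A[i + j*lda] : A[j + i*lda];
            sum += aij * x[j];
        }
        y[i] = alpha*sum + beta*y[i];
    }
}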
bfdca71b1b89cf9657c6fb2d45a7d9242b4e9be1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "L1TagCuda.h" #include "newrandom.h" __device__ void ReportError ( const L1TagParams& params, const L1TagError& error ) { unsigned64* pErrorCount = GetPtr<unsigned64*>(params.errorCountPtr); L1TagError* pErrorLog = GetPtr<L1TagError*>(params.errorLogPtr); // Increment the error counter unsigned64 errorIdx = atomicAdd(pErrorCount, 1); // Dump the failure if there is room in the error buffer if (errorIdx < params.errorLogLen) { pErrorLog[errorIdx] = error; } } // Given a 16bit offset and 16bit pseudorandom number, encode a 32 bit value // from which we can easily extract the offset. This is done by storing the random value // in the upper bits, then XOR-ing this value with the offset for the lower bits. // // This is superior to only storing the offset since the random data increases the likelihood // of catching noise-dependent failures. __device__ __forceinline__ uint32_t EncodeOffset(uint16_t offset, uint16_t rnd) { return static_cast<uint32_t>(rnd << 16) | static_cast<uint32_t>(rnd ^ offset); } // In order to extract the offset from an encoded value, simply XOR the lower 16 bits with // the upper 16 bits. 
__device__ __forceinline__ uint16_t DecodeOffset(uint32_t value) { return static_cast<uint16_t>(value >> 16) ^ static_cast<uint16_t>(value); } extern "C" __global__ void InitL1Data(const L1TagParams params) { // Get resident SM ID uint32_t smid; asm volatile ("mov.u32 %0, %%smid;" : "=r"(smid)); // Each SM has its own data region const uint32_t smidDataBytes = params.sizeBytes / gridDim.x; uint32_t* buf = GetPtr<uint32_t*>(params.data + smid * smidDataBytes); // Init RNG (each SM data region will have the same data) unsigned64 s[2]; InitRand<2>(s, params.randSeed + threadIdx.x); for (uint32_t i = threadIdx.x; i < smidDataBytes / sizeof(*buf); i += blockDim.x) { const uint16_t rnd = static_cast<uint16_t>(FastRand(s) >> 48); buf[i] = EncodeOffset(i, rnd); } } extern "C" __global__ void L1TagTest(const L1TagParams params) { // Get SMID and thread info uint32_t smid; uint32_t warpid; uint32_t laneid; asm volatile ("mov.u32 %0, %%smid;" : "=r"(smid)); asm volatile ("mov.u32 %0, %%warpid;" : "=r"(warpid)); asm volatile ("mov.u32 %0, %%laneid;" : "=r"(laneid)); const uint32_t hwtid = laneid + warpid * warpSize; // Each SM has its own data region const uint32_t smidDataBytes = params.sizeBytes / gridDim.x; uint32_t* buf = GetPtr<uint32_t*>(params.data + smid * smidDataBytes); // Init RNG (each SM will use the same seed, for equivalent data accesses) unsigned64 s[2]; InitRand<2>(s, params.randSeed + hwtid); uint32_t rnd = static_cast<uint32_t>(FastRand(s)); // Run the test for the specified iterations for (uint64_t iter = 0; iter < params.iterations; iter++) { // We run the inner loop once for each offset into a cache line constexpr uint32_t lineNumElem = L1_LINE_SIZE_BYTES / sizeof(*buf); for (uint32_t lineOff = 0; lineOff < lineNumElem; lineOff++) { const uint16_t preLoadOff = lineOff + (hwtid * lineNumElem); const uint16_t randOff = rnd % (smidDataBytes / sizeof(*buf)); uint32_t preLoadVal = 0; uint32_t randVal = 0; // Fill up the L1 Cache __syncthreads(); asm volatile("ld.global.ca.u32 %0, [%1];":"=r"(preLoadVal):"l"(buf + preLoadOff)); #if (SM_VER == 82) const bool doSecondRead = (hwtid + blockDim.x) < (smidDataBytes / L1_LINE_SIZE_BYTES); const uint16_t altPreLoadOff = preLoadOff + (blockDim.x * lineNumElem); uint32_t altPreLoadVal = 0; if (doSecondRead) { asm volatile("ld.global.ca.u32 %0, [%1];":"=r"(altPreLoadVal):"l"(buf + altPreLoadOff)); } #endif __syncthreads(); // With the L1 cache fully loaded, randomly read data (RandomLoad) asm volatile("ld.global.ca.u32 %0, [%1];":"=r"(randVal):"l"(buf + randOff)); // Check the values after all reads are complete. Since latency matters in this test // we don't want to waste any cycles that could instead be used on random L1 data loads. // // Of course, the compiler will still reorder non-memory instructions, // but this is better than nothing. 
__syncthreads(); const uint16_t decodedPreLoad = DecodeOffset(preLoadVal); if (decodedPreLoad != preLoadOff) { const L1TagError err = { TestStage::PreLoad, decodedPreLoad, preLoadOff, iter, lineOff, smid, warpid, laneid }; ReportError(params, err); } #if (SM_VER == 82) if (doSecondRead) { const uint16_t altDecodedPreLoad = DecodeOffset(altPreLoadVal); if (altDecodedPreLoad != altPreLoadOff) { const L1TagError err = { TestStage::PreLoad, altDecodedPreLoad, altPreLoadOff, iter, lineOff, smid, warpid, laneid }; ReportError(params, err); } } #endif const uint16_t decodedRand = DecodeOffset(randVal); if (decodedRand != randOff) { const L1TagError err = { TestStage::RandomLoad, decodedRand, randOff, iter, lineOff, smid, warpid, laneid }; ReportError(params, err); } // Always use a new random offset rnd = static_cast<uint32_t>(FastRand(s)); } } }
bfdca71b1b89cf9657c6fb2d45a7d9242b4e9be1.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "L1TagCuda.h" #include "newrandom.h" __device__ void ReportError ( const L1TagParams& params, const L1TagError& error ) { unsigned64* pErrorCount = GetPtr<unsigned64*>(params.errorCountPtr); L1TagError* pErrorLog = GetPtr<L1TagError*>(params.errorLogPtr); // Increment the error counter unsigned64 errorIdx = atomicAdd(pErrorCount, 1); // Dump the failure if there is room in the error buffer if (errorIdx < params.errorLogLen) { pErrorLog[errorIdx] = error; } } // Given a 16bit offset and 16bit pseudorandom number, encode a 32 bit value // from which we can easily extract the offset. This is done by storing the random value // in the upper bits, then XOR-ing this value with the offset for the lower bits. // // This is superior to only storing the offset since the random data increases the likelihood // of catching noise-dependent failures. __device__ __forceinline__ uint32_t EncodeOffset(uint16_t offset, uint16_t rnd) { return static_cast<uint32_t>(rnd << 16) | static_cast<uint32_t>(rnd ^ offset); } // In order to extract the offset from an encoded value, simply XOR the lower 16 bits with // the upper 16 bits. __device__ __forceinline__ uint16_t DecodeOffset(uint32_t value) { return static_cast<uint16_t>(value >> 16) ^ static_cast<uint16_t>(value); } extern "C" __global__ void InitL1Data(const L1TagParams params) { // Get resident SM ID uint32_t smid; asm volatile ("mov.u32 %0, %%smid;" : "=r"(smid)); // Each SM has its own data region const uint32_t smidDataBytes = params.sizeBytes / gridDim.x; uint32_t* buf = GetPtr<uint32_t*>(params.data + smid * smidDataBytes); // Init RNG (each SM data region will have the same data) unsigned64 s[2]; InitRand<2>(s, params.randSeed + threadIdx.x); for (uint32_t i = threadIdx.x; i < smidDataBytes / sizeof(*buf); i += blockDim.x) { const uint16_t rnd = static_cast<uint16_t>(FastRand(s) >> 48); buf[i] = EncodeOffset(i, rnd); } } extern "C" __global__ void L1TagTest(const L1TagParams params) { // Get SMID and thread info uint32_t smid; uint32_t warpid; uint32_t laneid; asm volatile ("mov.u32 %0, %%smid;" : "=r"(smid)); asm volatile ("mov.u32 %0, %%warpid;" : "=r"(warpid)); asm volatile ("mov.u32 %0, %%laneid;" : "=r"(laneid)); const uint32_t hwtid = laneid + warpid * warpSize; // Each SM has its own data region const uint32_t smidDataBytes = params.sizeBytes / gridDim.x; uint32_t* buf = GetPtr<uint32_t*>(params.data + smid * smidDataBytes); // Init RNG (each SM will use the same seed, for equivalent data accesses) unsigned64 s[2]; InitRand<2>(s, params.randSeed + hwtid); uint32_t rnd = static_cast<uint32_t>(FastRand(s)); // Run the test for the specified iterations for (uint64_t iter = 0; iter < params.iterations; iter++) { // We run the inner loop once for each offset into a cache line constexpr uint32_t lineNumElem = L1_LINE_SIZE_BYTES / sizeof(*buf); for (uint32_t lineOff = 0; lineOff < lineNumElem; lineOff++) { const uint16_t preLoadOff = lineOff 
+ (hwtid * lineNumElem); const uint16_t randOff = rnd % (smidDataBytes / sizeof(*buf)); uint32_t preLoadVal = 0; uint32_t randVal = 0; // Fill up the L1 Cache __syncthreads(); asm volatile("ld.global.ca.u32 %0, [%1];":"=r"(preLoadVal):"l"(buf + preLoadOff)); #if (SM_VER == 82) const bool doSecondRead = (hwtid + blockDim.x) < (smidDataBytes / L1_LINE_SIZE_BYTES); const uint16_t altPreLoadOff = preLoadOff + (blockDim.x * lineNumElem); uint32_t altPreLoadVal = 0; if (doSecondRead) { asm volatile("ld.global.ca.u32 %0, [%1];":"=r"(altPreLoadVal):"l"(buf + altPreLoadOff)); } #endif __syncthreads(); // With the L1 cache fully loaded, randomly read data (RandomLoad) asm volatile("ld.global.ca.u32 %0, [%1];":"=r"(randVal):"l"(buf + randOff)); // Check the values after all reads are complete. Since latency matters in this test // we don't want to waste any cycles that could instead be used on random L1 data loads. // // Of course, the compiler will still reorder non-memory instructions, // but this is better than nothing. __syncthreads(); const uint16_t decodedPreLoad = DecodeOffset(preLoadVal); if (decodedPreLoad != preLoadOff) { const L1TagError err = { TestStage::PreLoad, decodedPreLoad, preLoadOff, iter, lineOff, smid, warpid, laneid }; ReportError(params, err); } #if (SM_VER == 82) if (doSecondRead) { const uint16_t altDecodedPreLoad = DecodeOffset(altPreLoadVal); if (altDecodedPreLoad != altPreLoadOff) { const L1TagError err = { TestStage::PreLoad, altDecodedPreLoad, altPreLoadOff, iter, lineOff, smid, warpid, laneid }; ReportError(params, err); } } #endif const uint16_t decodedRand = DecodeOffset(randVal); if (decodedRand != randOff) { const L1TagError err = { TestStage::RandomLoad, decodedRand, randOff, iter, lineOff, smid, warpid, laneid }; ReportError(params, err); } // Always use a new random offset rnd = static_cast<uint32_t>(FastRand(s)); } } }
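// Illustrative host-side sketch, not part of the original test: the
// EncodeOffset/DecodeOffset scheme described above round-trips any 16-bit
// offset for any 16-bit random value, because the upper half of the encoded
// word holds rnd and the lower half holds rnd ^ offset, so XOR-ing the two
// halves (rnd ^ (rnd ^ offset)) recovers offset. The self-test below
// spot-checks that property on the host using local copies of the two helpers.
#include <cassert>
#include <cstdint>

static inline uint32_t EncodeOffsetHost(uint16_t offset, uint16_t rnd)
{
    return (static_cast<uint32_t>(rnd) << 16) | static_cast<uint32_t>(rnd ^ offset);
}

static inline uint16_t DecodeOffsetHost(uint32_t value)
{
    return static_cast<uint16_t>(value >> 16) ^ static_cast<uint16_t>(value);
}

static void EncodeDecodeSelfTest()
{
    for (uint32_t offset = 0; offset < 0x10000u; offset += 251) {
        for (uint32_t rnd = 0; rnd < 0x10000u; rnd += 509) {
            const uint32_t encoded = EncodeOffsetHost(static_cast<uint16_t>(offset),
                                                      static_cast<uint16_t>(rnd));
            assert(DecodeOffsetHost(encoded) == static_cast<uint16_t>(offset));
        }
    }
}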
b5c2853d86066174f2866ccfa79673ff9184a31b.hip
// !!! This is a file automatically generated by hipify!!! // This file is part of OpenCV project. // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include "functors.hpp" #include "vector_traits.hpp" #include "grid_stride_range.hpp" #include "execution.hpp" #include "../cuda4dnn/csl/stream.hpp" #include "../cuda4dnn/csl/span.hpp" using namespace cv::dnn::cuda4dnn::csl; using namespace cv::dnn::cuda4dnn::csl::device; namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels { namespace raw { template <class T, class ActivationOp, class EltwiseOp, std::size_t N> __global__ void generic_op_eltwise_op_inplace_vec(Span<T> inplace_output, View<T> eltwise, const typename ActivationOp::Params act_params, const typename EltwiseOp::Params eltwise_params) { using vector_type = get_vector_type_t<T, N>; auto inplace_output_vPtr = vector_type::get_pointer(inplace_output.data()); auto eltwise_vPtr = vector_type::get_pointer(eltwise.data()); ActivationOp activation_op(act_params); EltwiseOp eltwise_op(eltwise_params); for (auto i : grid_stride_range(inplace_output.size() / vector_type::size())) { vector_type output_vec, eltwise_vec; v_load(output_vec, inplace_output_vPtr[i]); v_load(eltwise_vec, eltwise_vPtr[i]); for(int j = 0; j < output_vec.size(); j++) output_vec.data[j] = eltwise_op(activation_op(output_vec.data[j]), eltwise_vec.data[j]); v_store(inplace_output_vPtr[i], output_vec); } } } template <class T, class ActivationOp, class EltwiseOp, std::size_t N> static void launch_vectorized_generic_op_eltwise_op_inplace(const Stream& stream, Span<T> inplace_output, View<T> eltwise, const typename ActivationOp::Params& act_params, const typename EltwiseOp::Params& eltwise_params) { CV_Assert(is_fully_aligned<T>(inplace_output, N)); CV_Assert(is_fully_aligned<T>(eltwise, N)); auto kernel = raw::generic_op_eltwise_op_inplace_vec<T, ActivationOp, EltwiseOp, N>; auto policy = make_policy(kernel, inplace_output.size() / N, 0, stream); launch_kernel(kernel, policy, inplace_output, eltwise, act_params, eltwise_params); } template <class T, class ActivationOp, class EltwiseOp> static void generic_op_eltwise_op_inplace(const Stream& stream, Span<T> inplace_output, View<T> eltwise, const typename ActivationOp::Params& act_params = {}, const typename EltwiseOp::Params& eltwise_params = {}) { CV_Assert(inplace_output.size() == eltwise.size()); if (is_fully_aligned<T>(inplace_output, 4) && is_fully_aligned<T>(eltwise, 4)) { launch_vectorized_generic_op_eltwise_op_inplace<T, ActivationOp, EltwiseOp, 4>(stream, inplace_output, eltwise, act_params, eltwise_params); } else if (is_fully_aligned<T>(inplace_output, 2) && is_fully_aligned<T>(eltwise, 2)) { launch_vectorized_generic_op_eltwise_op_inplace<T, ActivationOp, EltwiseOp, 2>(stream, inplace_output, eltwise, act_params, eltwise_params); } else { launch_vectorized_generic_op_eltwise_op_inplace<T, ActivationOp, EltwiseOp, 1>(stream, inplace_output, eltwise, act_params, eltwise_params); } } template <class T> void relu_eltwise_sum_2_inplace(const Stream& stream, Span<T> inplace_output, View<T> eltwise, T slope) { generic_op_eltwise_op_inplace<T, ReLUFunctor<T>, SumFunctor<T>>(stream, inplace_output, eltwise, {slope}); } template <class T> void clipped_relu_eltwise_sum_2_inplace(const Stream& stream, Span<T> inplace_output, View<T> eltwise, T floor, T ceiling) { CV_Assert(static_cast<double>(floor) <= 
static_cast<double>(ceiling)); generic_op_eltwise_op_inplace<T, ClippedReLUFunctor<T>, SumFunctor<T>>(stream, inplace_output, eltwise, {floor, ceiling}); } template <class T> void tanh_eltwise_sum_2_inplace(const Stream& stream, Span<T> inplace_output, View<T> eltwise) { generic_op_eltwise_op_inplace<T, TanHFunctor<T>, SumFunctor<T>>(stream, inplace_output, eltwise); } template <class T> void swish_eltwise_sum_2_inplace(const Stream& stream, Span<T> inplace_output, View<T> eltwise) { generic_op_eltwise_op_inplace<T, SwishFunctor<T>, SumFunctor<T>>(stream, inplace_output, eltwise); } template <class T> void mish_eltwise_sum_2_inplace(const Stream& stream, Span<T> inplace_output, View<T> eltwise) { generic_op_eltwise_op_inplace<T, MishFunctor<T>, SumFunctor<T>>(stream, inplace_output, eltwise); } template <class T> void sigmoid_eltwise_sum_2_inplace(const Stream& stream, Span<T> inplace_output, View<T> eltwise) { generic_op_eltwise_op_inplace<T, SigmoidFunctor<T>, SumFunctor<T>>(stream, inplace_output, eltwise); } template <class T> void power_eltwise_sum_2_inplace(const Stream& stream, Span<T> inplace_output, View<T> eltwise, T exp, T scale, T shift) { generic_op_eltwise_op_inplace<T, PowerFunctor<T>, SumFunctor<T>>(stream, inplace_output, eltwise, {exp, scale, shift}); } #if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) template void relu_eltwise_sum_2_inplace<__half>(const Stream&, Span<__half>, View<__half>, __half); template void clipped_relu_eltwise_sum_2_inplace<__half>(const Stream&, Span<__half>, View<__half>, __half, __half); template void tanh_eltwise_sum_2_inplace<__half>(const Stream&, Span<__half>, View<__half>); template void swish_eltwise_sum_2_inplace<__half>(const Stream&, Span<__half>, View<__half>); template void mish_eltwise_sum_2_inplace<__half>(const Stream&, Span<__half>, View<__half>); template void sigmoid_eltwise_sum_2_inplace<__half>(const Stream&, Span<__half>, View<__half>); template void power_eltwise_sum_2_inplace<__half>(const Stream&, Span<__half>, View<__half>, __half, __half, __half); #endif template void relu_eltwise_sum_2_inplace<float>(const Stream&, Span<float>, View<float>, float); template void clipped_relu_eltwise_sum_2_inplace<float>(const Stream&, Span<float>, View<float>, float, float); template void tanh_eltwise_sum_2_inplace<float>(const Stream&, Span<float>, View<float>); template void swish_eltwise_sum_2_inplace<float>(const Stream&, Span<float>, View<float>); template void mish_eltwise_sum_2_inplace<float>(const Stream&, Span<float>, View<float>); template void sigmoid_eltwise_sum_2_inplace<float>(const Stream&, Span<float>, View<float>); template void power_eltwise_sum_2_inplace<float>(const Stream&, Span<float>, View<float>, float, float, float); }}}} /* namespace cv::dnn::cuda4dnn::kernels */
b5c2853d86066174f2866ccfa79673ff9184a31b.cu
// This file is part of OpenCV project. // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. #include <cuda_runtime.h> #include <cuda_fp16.h> #include "functors.hpp" #include "vector_traits.hpp" #include "grid_stride_range.hpp" #include "execution.hpp" #include "../cuda4dnn/csl/stream.hpp" #include "../cuda4dnn/csl/span.hpp" using namespace cv::dnn::cuda4dnn::csl; using namespace cv::dnn::cuda4dnn::csl::device; namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels { namespace raw { template <class T, class ActivationOp, class EltwiseOp, std::size_t N> __global__ void generic_op_eltwise_op_inplace_vec(Span<T> inplace_output, View<T> eltwise, const typename ActivationOp::Params act_params, const typename EltwiseOp::Params eltwise_params) { using vector_type = get_vector_type_t<T, N>; auto inplace_output_vPtr = vector_type::get_pointer(inplace_output.data()); auto eltwise_vPtr = vector_type::get_pointer(eltwise.data()); ActivationOp activation_op(act_params); EltwiseOp eltwise_op(eltwise_params); for (auto i : grid_stride_range(inplace_output.size() / vector_type::size())) { vector_type output_vec, eltwise_vec; v_load(output_vec, inplace_output_vPtr[i]); v_load(eltwise_vec, eltwise_vPtr[i]); for(int j = 0; j < output_vec.size(); j++) output_vec.data[j] = eltwise_op(activation_op(output_vec.data[j]), eltwise_vec.data[j]); v_store(inplace_output_vPtr[i], output_vec); } } } template <class T, class ActivationOp, class EltwiseOp, std::size_t N> static void launch_vectorized_generic_op_eltwise_op_inplace(const Stream& stream, Span<T> inplace_output, View<T> eltwise, const typename ActivationOp::Params& act_params, const typename EltwiseOp::Params& eltwise_params) { CV_Assert(is_fully_aligned<T>(inplace_output, N)); CV_Assert(is_fully_aligned<T>(eltwise, N)); auto kernel = raw::generic_op_eltwise_op_inplace_vec<T, ActivationOp, EltwiseOp, N>; auto policy = make_policy(kernel, inplace_output.size() / N, 0, stream); launch_kernel(kernel, policy, inplace_output, eltwise, act_params, eltwise_params); } template <class T, class ActivationOp, class EltwiseOp> static void generic_op_eltwise_op_inplace(const Stream& stream, Span<T> inplace_output, View<T> eltwise, const typename ActivationOp::Params& act_params = {}, const typename EltwiseOp::Params& eltwise_params = {}) { CV_Assert(inplace_output.size() == eltwise.size()); if (is_fully_aligned<T>(inplace_output, 4) && is_fully_aligned<T>(eltwise, 4)) { launch_vectorized_generic_op_eltwise_op_inplace<T, ActivationOp, EltwiseOp, 4>(stream, inplace_output, eltwise, act_params, eltwise_params); } else if (is_fully_aligned<T>(inplace_output, 2) && is_fully_aligned<T>(eltwise, 2)) { launch_vectorized_generic_op_eltwise_op_inplace<T, ActivationOp, EltwiseOp, 2>(stream, inplace_output, eltwise, act_params, eltwise_params); } else { launch_vectorized_generic_op_eltwise_op_inplace<T, ActivationOp, EltwiseOp, 1>(stream, inplace_output, eltwise, act_params, eltwise_params); } } template <class T> void relu_eltwise_sum_2_inplace(const Stream& stream, Span<T> inplace_output, View<T> eltwise, T slope) { generic_op_eltwise_op_inplace<T, ReLUFunctor<T>, SumFunctor<T>>(stream, inplace_output, eltwise, {slope}); } template <class T> void clipped_relu_eltwise_sum_2_inplace(const Stream& stream, Span<T> inplace_output, View<T> eltwise, T floor, T ceiling) { CV_Assert(static_cast<double>(floor) <= static_cast<double>(ceiling)); generic_op_eltwise_op_inplace<T, 
ClippedReLUFunctor<T>, SumFunctor<T>>(stream, inplace_output, eltwise, {floor, ceiling}); } template <class T> void tanh_eltwise_sum_2_inplace(const Stream& stream, Span<T> inplace_output, View<T> eltwise) { generic_op_eltwise_op_inplace<T, TanHFunctor<T>, SumFunctor<T>>(stream, inplace_output, eltwise); } template <class T> void swish_eltwise_sum_2_inplace(const Stream& stream, Span<T> inplace_output, View<T> eltwise) { generic_op_eltwise_op_inplace<T, SwishFunctor<T>, SumFunctor<T>>(stream, inplace_output, eltwise); } template <class T> void mish_eltwise_sum_2_inplace(const Stream& stream, Span<T> inplace_output, View<T> eltwise) { generic_op_eltwise_op_inplace<T, MishFunctor<T>, SumFunctor<T>>(stream, inplace_output, eltwise); } template <class T> void sigmoid_eltwise_sum_2_inplace(const Stream& stream, Span<T> inplace_output, View<T> eltwise) { generic_op_eltwise_op_inplace<T, SigmoidFunctor<T>, SumFunctor<T>>(stream, inplace_output, eltwise); } template <class T> void power_eltwise_sum_2_inplace(const Stream& stream, Span<T> inplace_output, View<T> eltwise, T exp, T scale, T shift) { generic_op_eltwise_op_inplace<T, PowerFunctor<T>, SumFunctor<T>>(stream, inplace_output, eltwise, {exp, scale, shift}); } #if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) template void relu_eltwise_sum_2_inplace<__half>(const Stream&, Span<__half>, View<__half>, __half); template void clipped_relu_eltwise_sum_2_inplace<__half>(const Stream&, Span<__half>, View<__half>, __half, __half); template void tanh_eltwise_sum_2_inplace<__half>(const Stream&, Span<__half>, View<__half>); template void swish_eltwise_sum_2_inplace<__half>(const Stream&, Span<__half>, View<__half>); template void mish_eltwise_sum_2_inplace<__half>(const Stream&, Span<__half>, View<__half>); template void sigmoid_eltwise_sum_2_inplace<__half>(const Stream&, Span<__half>, View<__half>); template void power_eltwise_sum_2_inplace<__half>(const Stream&, Span<__half>, View<__half>, __half, __half, __half); #endif template void relu_eltwise_sum_2_inplace<float>(const Stream&, Span<float>, View<float>, float); template void clipped_relu_eltwise_sum_2_inplace<float>(const Stream&, Span<float>, View<float>, float, float); template void tanh_eltwise_sum_2_inplace<float>(const Stream&, Span<float>, View<float>); template void swish_eltwise_sum_2_inplace<float>(const Stream&, Span<float>, View<float>); template void mish_eltwise_sum_2_inplace<float>(const Stream&, Span<float>, View<float>); template void sigmoid_eltwise_sum_2_inplace<float>(const Stream&, Span<float>, View<float>); template void power_eltwise_sum_2_inplace<float>(const Stream&, Span<float>, View<float>, float, float, float); }}}} /* namespace cv::dnn::cuda4dnn::kernels */
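/* Illustrative scalar sketch, not part of OpenCV: generic_op_eltwise_op_inplace
   above applies an activation to each in-place output element and then combines
   it with the matching eltwise element, i.e.
   inplace_output[i] = eltwise_op(activation_op(inplace_output[i]), eltwise[i]).
   The host reference below shows the ReLU + sum pairing, assuming ReLUFunctor
   implements a leaky ReLU with the given slope (an assumption about
   functors.hpp, which is not defined in this file). */
#include <cstddef>
#include <vector>

static void relu_eltwise_sum_2_inplace_host_reference(std::vector<float>& inplace_output,
                                                      const std::vector<float>& eltwise,
                                                      float slope)
{
    for (std::size_t i = 0; i < inplace_output.size(); ++i) {
        // activation first (leaky ReLU with the given slope), then the elementwise sum
        const float activated = inplace_output[i] >= 0.f ? inplace_output[i]
                                                         : slope * inplace_output[i];
        inplace_output[i] = activated + eltwise[i];
    }
}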
7d6022f0ae3552321ccf733ae85bce228481502e.hip
// !!! This is a file automatically generated by hipify!!!
//ulimit -s unlimited
//nvcc Cuda.cu -arch sm_20 && ./a.out
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <stdint.h>
#include <hip/hip_runtime.h>
#include "hip/device_functions.h"
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <hip/hip_runtime.h>

void checkCUDAError(const char *msg);

int main(void)
{
    return 0;
}

void checkCUDAError(const char *msg)
{
    hipError_t err = hipGetLastError();
    if (hipSuccess != err)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
        exit(-1);
    }
}
7d6022f0ae3552321ccf733ae85bce228481502e.cu
//ulimit -s unlimited
//nvcc Cuda.cu -arch sm_20 && ./a.out
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <stdint.h>
#include <cuda.h>
#include "device_functions.h"
#include <curand.h>
#include <curand_kernel.h>
#include <cuda_runtime.h>

void checkCUDAError(const char *msg);

int main(void)
{
    return 0;
}

void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess != err)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
        exit(-1);
    }
}
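// Illustrative usage sketch, not part of the original file: checkCUDAError()
// is typically called right after a kernel launch (to catch launch and
// configuration errors) and again after a synchronization point (to surface
// asynchronous execution errors). The kernel below is a hypothetical
// placeholder added only for this example.
__global__ void dummyKernel() { }

void launchWithErrorCheck()
{
    dummyKernel<<<1, 1>>>();
    checkCUDAError("dummyKernel launch");

    cudaDeviceSynchronize();
    checkCUDAError("dummyKernel execution");
}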
db32e499072edc994872d875b6b7c4f4fa243552.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <math.h> #include <time.h> #include "hip/hip_runtime.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "cudaErrorCheck.h" #include "tools.h" #include "textures.h" // Kernel. __global__ void boundary_kernel(double *d_u, const size_t pitch, const int NrInterior, const int NzInterior, const double h, const double u_inf); /******************************************************************** *** BOUNDARY UPDATE *** ********************************************************************/ __global__ void boundary_kernel(double *d_u, const size_t pitch, const int NrInterior, const int NzInterior, const double h, const double u_inf) { int tid = threadIdx.x; int bid = blockIdx.x; // Global offset goes from 0, 1, ..., NzInterior-1, NzInterior, ..., // NzInterior + NrInterior - 1. // As always, possibly overextends but there will be an out-of-bounds check. int offset = tid + bid * blockDim.x; // Variables to utilize. int r_coord, z_coord; double u1, u2, u3, r, z, rr2, temp; // Out-of bounds check. // Check if we are doing r or z boundary. if (offset < (NrInterior + NzInterior)) { // R boundary. if (offset < NzInterior) { // Axis boundary: parity condition. z_coord = offset + 1; // Fetch value. u1 = doubleTex2D(tex_u, 1, z_coord); // Write to global memory. write2D(d_u, 0, z_coord, pitch, u1); // Now do Robin boundary. // Fetch values. u1 = doubleTex2D(tex_u, NrInterior, z_coord); u2 = doubleTex2D(tex_u, NrInterior-1, z_coord); u3 = doubleTex2D(tex_u, NrInterior-2, z_coord); r = ((double)NrInterior - 0.5) * h; z = ((double)z_coord - 0.5) * h; rr2 = r * r + z * z; // Calculate using fourth-order Robin. temp = 3.0 * (r * h/rr2) * (u_inf - u1) - 1.5 * u1 + 3.0 * u2 - 0.5 * u3; // Write to global memory. write2D(d_u, NrInterior+1, z_coord, pitch, temp); } // Z boundary. else { offset -= NzInterior; r_coord = offset + 1; // Fetch value. u1 = doubleTex2D(tex_u, r_coord, 1); // Write to global memory. write2D(d_u, r_coord, 0, pitch, u1); // On the opposite edge we always have Robin. // Fetch values. u1 = doubleTex2D(tex_u, r_coord, NzInterior); u2 = doubleTex2D(tex_u, r_coord, NzInterior-1); u3 = doubleTex2D(tex_u, r_coord, NzInterior-2); r = ((double)r_coord - 0.5) * h; z = ((double)NzInterior - 0.5) * h; rr2 = r * r + z * z; // Calculate using Robin. temp = 3.0 * (z * h/rr2) * (u_inf - u1) - 1.5 * u1 + 3.0 * u2 - 0.5 * u3; // Write to global memory. write2D(d_u, r_coord, NzInterior+1, pitch, temp); } } } void boundary_update(double *d_u, const size_t pitch, const int NrInterior, const int NzInterior, const double h, const double u_inf) { // We have to update a total of 2 * NrInterior + 2 * NzIterior points // (the corners are unphyisical). // All boundary updates are idependent of each other, so they can be easily overlapped. // Launch therefore NrInterior + NzInterior total threads distributed as wished. dim3 threadBlock(256); dim3 gridBlock((NrInterior + NzInterior + threadBlock.x - 1)/threadBlock.x); hipLaunchKernelGGL(( boundary_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, d_u, pitch, NrInterior, NzInterior, h, u_inf); cudaCheckError(); // Finished. }
db32e499072edc994872d875b6b7c4f4fa243552.cu
#include <stdio.h> #include <stdlib.h> #include <assert.h> #include <math.h> #include <time.h> #include "cuda.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "cudaErrorCheck.h" #include "tools.h" #include "textures.h" // Kernel. __global__ void boundary_kernel(double *d_u, const size_t pitch, const int NrInterior, const int NzInterior, const double h, const double u_inf); /******************************************************************** *** BOUNDARY UPDATE *** ********************************************************************/ __global__ void boundary_kernel(double *d_u, const size_t pitch, const int NrInterior, const int NzInterior, const double h, const double u_inf) { int tid = threadIdx.x; int bid = blockIdx.x; // Global offset goes from 0, 1, ..., NzInterior-1, NzInterior, ..., // NzInterior + NrInterior - 1. // As always, possibly overextends but there will be an out-of-bounds check. int offset = tid + bid * blockDim.x; // Variables to utilize. int r_coord, z_coord; double u1, u2, u3, r, z, rr2, temp; // Out-of bounds check. // Check if we are doing r or z boundary. if (offset < (NrInterior + NzInterior)) { // R boundary. if (offset < NzInterior) { // Axis boundary: parity condition. z_coord = offset + 1; // Fetch value. u1 = doubleTex2D(tex_u, 1, z_coord); // Write to global memory. write2D(d_u, 0, z_coord, pitch, u1); // Now do Robin boundary. // Fetch values. u1 = doubleTex2D(tex_u, NrInterior, z_coord); u2 = doubleTex2D(tex_u, NrInterior-1, z_coord); u3 = doubleTex2D(tex_u, NrInterior-2, z_coord); r = ((double)NrInterior - 0.5) * h; z = ((double)z_coord - 0.5) * h; rr2 = r * r + z * z; // Calculate using fourth-order Robin. temp = 3.0 * (r * h/rr2) * (u_inf - u1) - 1.5 * u1 + 3.0 * u2 - 0.5 * u3; // Write to global memory. write2D(d_u, NrInterior+1, z_coord, pitch, temp); } // Z boundary. else { offset -= NzInterior; r_coord = offset + 1; // Fetch value. u1 = doubleTex2D(tex_u, r_coord, 1); // Write to global memory. write2D(d_u, r_coord, 0, pitch, u1); // On the opposite edge we always have Robin. // Fetch values. u1 = doubleTex2D(tex_u, r_coord, NzInterior); u2 = doubleTex2D(tex_u, r_coord, NzInterior-1); u3 = doubleTex2D(tex_u, r_coord, NzInterior-2); r = ((double)r_coord - 0.5) * h; z = ((double)NzInterior - 0.5) * h; rr2 = r * r + z * z; // Calculate using Robin. temp = 3.0 * (z * h/rr2) * (u_inf - u1) - 1.5 * u1 + 3.0 * u2 - 0.5 * u3; // Write to global memory. write2D(d_u, r_coord, NzInterior+1, pitch, temp); } } } void boundary_update(double *d_u, const size_t pitch, const int NrInterior, const int NzInterior, const double h, const double u_inf) { // We have to update a total of 2 * NrInterior + 2 * NzIterior points // (the corners are unphyisical). // All boundary updates are idependent of each other, so they can be easily overlapped. // Launch therefore NrInterior + NzInterior total threads distributed as wished. dim3 threadBlock(256); dim3 gridBlock((NrInterior + NzInterior + threadBlock.x - 1)/threadBlock.x); boundary_kernel<<<gridBlock, threadBlock>>>(d_u, pitch, NrInterior, NzInterior, h, u_inf); cudaCheckError(); // Finished. }
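// Illustrative host-side sketch, not part of the original solver: the Robin
// update in boundary_kernel above fills the ghost cell just outside the last
// interior point from the three innermost values u1, u2, u3 (u1 nearest the
// boundary) and the asymptotic value u_inf. It is factored out below purely so
// the stencil is easier to read in isolation; the arithmetic is copied from
// the kernel.
static inline double robinGhostValue(double u1, double u2, double u3,
                                     double coord,   // r (or z) of the last interior cell
                                     double h,       // grid spacing
                                     double rr2,     // r*r + z*z at that cell
                                     double u_inf)   // boundary value at infinity
{
    return 3.0 * (coord * h / rr2) * (u_inf - u1) - 1.5 * u1 + 3.0 * u2 - 0.5 * u3;
}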
bb5ee97e258a6db029a1085697f815dca1ab8f20.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "eigen_cuda/eigen_cuda.hpp" #include <vector> __device__ int eigenJacobiMethod(float *a, float *v, int n, float eps = 1e-8, int iter_max = 100) { float *bim, *bjm; float bii, bij, bjj, bji; bim = new float[n]; bjm = new float[n]; for(int i = 0; i < n; ++i){ for(int j = 0; j < n; ++j){ v[i*n+j] = (i == j) ? 1.0 : 0.0; } } int cnt = 0; for(;;){ int i, j; float x = 0.0; for(int ia = 0; ia < n; ++ia){ for(int ja = 0; ja < n; ++ja){ int idx = ia*n+ja; if(ia != ja && fabs(a[idx]) > x){ i = ia; j = ja; x = fabs(a[idx]); } } } float aii = a[i*n+i]; float ajj = a[j*n+j]; float aij = a[i*n+j]; float alpha, beta; alpha = (aii-ajj)/2.0; beta = sqrt(alpha*alpha+aij*aij); float st, ct; ct = sqrt((1.0+fabs(alpha)/beta)/2.0); // sin st = (((aii-ajj) >= 0.0) ? 1.0 : -1.0)*aij/(2.0*beta*ct); // cos // A = PAP for(int m = 0; m < n; ++m){ if(m == i || m == j) continue; float aim = a[i*n+m]; float ajm = a[j*n+m]; bim[m] = aim*ct+ajm*st; bjm[m] = -aim*st+ajm*ct; } bii = aii*ct*ct+2.0*aij*ct*st+ajj*st*st; bij = 0.0; bjj = aii*st*st-2.0*aij*ct*st+ajj*ct*ct; bji = 0.0; for(int m = 0; m < n; ++m){ a[i*n+m] = a[m*n+i] = bim[m]; a[j*n+m] = a[m*n+j] = bjm[m]; } a[i*n+i] = bii; a[i*n+j] = bij; a[j*n+j] = bjj; a[j*n+i] = bji; // V = PV for(int m = 0; m < n; ++m){ float vmi = v[m*n+i]; float vmj = v[m*n+j]; bim[m] = vmi*ct+vmj*st; bjm[m] = -vmi*st+vmj*ct; } for(int m = 0; m < n; ++m){ v[m*n+i] = bim[m]; v[m*n+j] = bjm[m]; } float e = 0.0; for(int ja = 0; ja < n; ++ja){ for(int ia = 0; ia < n; ++ia){ if(ia != ja){ e += fabs(a[ja*n+ia]); } } } if(e < eps) break; cnt++; if(cnt > iter_max) break; } delete [] bim; delete [] bjm; return cnt; } __global__ void covarianceGPU(float* neighbor_points,float* matrix,int point_size) { // float x_average=0,y_average=0,z_average=0; for(int i=0;i<point_size*3;i+=3){ x_average+=neighbor_points[i]; y_average+=neighbor_points[i+1]; z_average+=neighbor_points[i+2]; } x_average/=point_size; y_average/=point_size; z_average/=point_size; // float sxx=0,sxy=0,sxz=0,syy=0,syz=0,szz=0; for(int i=0;i<point_size*3;i+=3){ sxx+=(neighbor_points[i]-x_average)*(neighbor_points[i]-x_average); syy+=(neighbor_points[i+1]-y_average)*(neighbor_points[i+1]-y_average); szz+=(neighbor_points[i+2]-z_average)*(neighbor_points[i+2]-z_average); sxy+=(neighbor_points[i]-x_average)*(neighbor_points[i+1]-y_average); sxz+=(neighbor_points[i]-x_average)*(neighbor_points[i+2]-z_average); syz+=(neighbor_points[i+1]-y_average)*(neighbor_points[i+2]-z_average); } sxx/=point_size; syy/=point_size; szz/=point_size; sxy/=point_size; sxz/=point_size; syz/=point_size; // matrix[0]=sxx;matrix[1]=sxy;matrix[2]=sxz; matrix[3]=sxy;matrix[4]=syy;matrix[5]=syz; matrix[6]=sxz;matrix[7]=syz;matrix[8]=szz; } __global__ void eigenGPU(float* neighbor_points,float* eigen_vector,float* eigen_value,int point_size) { // float x_average=0,y_average=0,z_average=0; for(int i=0;i<point_size*3;i+=3){ x_average+=neighbor_points[i]; y_average+=neighbor_points[i+1]; z_average+=neighbor_points[i+2]; } x_average/=point_size; y_average/=point_size; z_average/=point_size; // float sxx=0,sxy=0,sxz=0,syy=0,syz=0,szz=0; for(int i=0;i<point_size*3;i+=3){ sxx+=(neighbor_points[i]-x_average)*(neighbor_points[i]-x_average); syy+=(neighbor_points[i+1]-y_average)*(neighbor_points[i+1]-y_average); szz+=(neighbor_points[i+2]-z_average)*(neighbor_points[i+2]-z_average); sxy+=(neighbor_points[i]-x_average)*(neighbor_points[i+1]-y_average); 
sxz+=(neighbor_points[i]-x_average)*(neighbor_points[i+2]-z_average); syz+=(neighbor_points[i+1]-y_average)*(neighbor_points[i+2]-z_average); } sxx/=point_size; syy/=point_size; szz/=point_size; sxy/=point_size; sxz/=point_size; syz/=point_size; // float a[3*3]={ sxx,sxy,sxz, sxy,syy,syz, sxz,syz,szz, }; eigenJacobiMethod(a, eigen_vector, 3); eigen_value[0]=a[0]; eigen_value[1]=a[4]; eigen_value[2]=a[8]; } extern void covariance(std::vector<std::vector<float>> neighbor_points,float matrix[3][3]){ // std::vector<float> h_neighbor_points(neighbor_points.size() * 3); std::vector<float> h_matrix(3 * 3); float *d_neighbor_points, *d_matrix; // hipMalloc((void **)&d_neighbor_points, neighbor_points.size() * 3 * sizeof(float)); hipMalloc((void **)&d_matrix, 3 * 3 * sizeof(float)); // int k=0; for(int i=0;i<neighbor_points.size();i++){ for(int j=0;j<3;j++){ h_neighbor_points[k]=neighbor_points[i][j]; k++; } } // hipMemcpy(d_neighbor_points, &h_neighbor_points[0], neighbor_points.size() * 3 * sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( covarianceGPU), dim3(1), dim3(1), 0, 0, d_neighbor_points,d_matrix,neighbor_points.size()); // hipMemcpy(&h_matrix[0], d_matrix, 3 * 3 * sizeof(float), hipMemcpyDeviceToHost); // k=0; for(int i=0;i<3;i++){ for(int j=0;j<3;j++){ matrix[i][j]=h_matrix[k]; k++; } } // hipFree(d_neighbor_points); hipFree(d_matrix); } extern void eigen(std::vector<std::vector<float>> neighbor_points,float eigen_vector[3][3],float eigen_value[3]){ // std::vector<float> h_neighbor_points(neighbor_points.size() * 3); std::vector<float> h_eigen_vector(3 * 3); std::vector<float> h_eigen_value(3); float *d_neighbor_points, *d_eigen_vector, *d_eigen_value; // hipMalloc((void **)&d_neighbor_points, neighbor_points.size() * 3 * sizeof(float)); hipMalloc((void **)&d_eigen_vector, 3 * 3 * sizeof(float)); hipMalloc((void **)&d_eigen_value, 3 * sizeof(float)); // int k=0; for(int i=0;i<neighbor_points.size();i++){ for(int j=0;j<3;j++){ h_neighbor_points[k]=neighbor_points[i][j]; k++; } } // hipMemcpy(d_neighbor_points, &h_neighbor_points[0], neighbor_points.size() * 3 * sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( eigenGPU), dim3(1), dim3(1), 0, 0, d_neighbor_points,d_eigen_vector,d_eigen_value,neighbor_points.size()); // hipMemcpy(&h_eigen_vector[0], d_eigen_vector, 3 * 3 * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(&h_eigen_value[0], d_eigen_value, 3 * sizeof(float), hipMemcpyDeviceToHost); // k=0; for(int i=0;i<3;i++){ for(int j=0;j<3;j++){ eigen_vector[i][j]=h_eigen_vector[k]; eigen_value[i]=h_eigen_value[i]; k++; } } // hipFree(d_neighbor_points); hipFree(d_eigen_vector); hipFree(d_eigen_value); }
bb5ee97e258a6db029a1085697f815dca1ab8f20.cu
#include <stdio.h> #include "eigen_cuda/eigen_cuda.hpp" #include <vector> __device__ int eigenJacobiMethod(float *a, float *v, int n, float eps = 1e-8, int iter_max = 100) { float *bim, *bjm; float bii, bij, bjj, bji; bim = new float[n]; bjm = new float[n]; for(int i = 0; i < n; ++i){ for(int j = 0; j < n; ++j){ v[i*n+j] = (i == j) ? 1.0 : 0.0; } } int cnt = 0; for(;;){ int i, j; float x = 0.0; for(int ia = 0; ia < n; ++ia){ for(int ja = 0; ja < n; ++ja){ int idx = ia*n+ja; if(ia != ja && fabs(a[idx]) > x){ i = ia; j = ja; x = fabs(a[idx]); } } } float aii = a[i*n+i]; float ajj = a[j*n+j]; float aij = a[i*n+j]; float alpha, beta; alpha = (aii-ajj)/2.0; beta = sqrt(alpha*alpha+aij*aij); float st, ct; ct = sqrt((1.0+fabs(alpha)/beta)/2.0); // sinθ st = (((aii-ajj) >= 0.0) ? 1.0 : -1.0)*aij/(2.0*beta*ct); // cosθ // A = PAPの計算 for(int m = 0; m < n; ++m){ if(m == i || m == j) continue; float aim = a[i*n+m]; float ajm = a[j*n+m]; bim[m] = aim*ct+ajm*st; bjm[m] = -aim*st+ajm*ct; } bii = aii*ct*ct+2.0*aij*ct*st+ajj*st*st; bij = 0.0; bjj = aii*st*st-2.0*aij*ct*st+ajj*ct*ct; bji = 0.0; for(int m = 0; m < n; ++m){ a[i*n+m] = a[m*n+i] = bim[m]; a[j*n+m] = a[m*n+j] = bjm[m]; } a[i*n+i] = bii; a[i*n+j] = bij; a[j*n+j] = bjj; a[j*n+i] = bji; // V = PVの計算 for(int m = 0; m < n; ++m){ float vmi = v[m*n+i]; float vmj = v[m*n+j]; bim[m] = vmi*ct+vmj*st; bjm[m] = -vmi*st+vmj*ct; } for(int m = 0; m < n; ++m){ v[m*n+i] = bim[m]; v[m*n+j] = bjm[m]; } float e = 0.0; for(int ja = 0; ja < n; ++ja){ for(int ia = 0; ia < n; ++ia){ if(ia != ja){ e += fabs(a[ja*n+ia]); } } } if(e < eps) break; cnt++; if(cnt > iter_max) break; } delete [] bim; delete [] bjm; return cnt; } __global__ void covarianceGPU(float* neighbor_points,float* matrix,int point_size) { //平均計算 float x_average=0,y_average=0,z_average=0; for(int i=0;i<point_size*3;i+=3){ x_average+=neighbor_points[i]; y_average+=neighbor_points[i+1]; z_average+=neighbor_points[i+2]; } x_average/=point_size; y_average/=point_size; z_average/=point_size; //要素計算 float sxx=0,sxy=0,sxz=0,syy=0,syz=0,szz=0; for(int i=0;i<point_size*3;i+=3){ sxx+=(neighbor_points[i]-x_average)*(neighbor_points[i]-x_average); syy+=(neighbor_points[i+1]-y_average)*(neighbor_points[i+1]-y_average); szz+=(neighbor_points[i+2]-z_average)*(neighbor_points[i+2]-z_average); sxy+=(neighbor_points[i]-x_average)*(neighbor_points[i+1]-y_average); sxz+=(neighbor_points[i]-x_average)*(neighbor_points[i+2]-z_average); syz+=(neighbor_points[i+1]-y_average)*(neighbor_points[i+2]-z_average); } sxx/=point_size; syy/=point_size; szz/=point_size; sxy/=point_size; sxz/=point_size; syz/=point_size; //出力 matrix[0]=sxx;matrix[1]=sxy;matrix[2]=sxz; matrix[3]=sxy;matrix[4]=syy;matrix[5]=syz; matrix[6]=sxz;matrix[7]=syz;matrix[8]=szz; } __global__ void eigenGPU(float* neighbor_points,float* eigen_vector,float* eigen_value,int point_size) { //平均計算 float x_average=0,y_average=0,z_average=0; for(int i=0;i<point_size*3;i+=3){ x_average+=neighbor_points[i]; y_average+=neighbor_points[i+1]; z_average+=neighbor_points[i+2]; } x_average/=point_size; y_average/=point_size; z_average/=point_size; //要素計算 float sxx=0,sxy=0,sxz=0,syy=0,syz=0,szz=0; for(int i=0;i<point_size*3;i+=3){ sxx+=(neighbor_points[i]-x_average)*(neighbor_points[i]-x_average); syy+=(neighbor_points[i+1]-y_average)*(neighbor_points[i+1]-y_average); szz+=(neighbor_points[i+2]-z_average)*(neighbor_points[i+2]-z_average); sxy+=(neighbor_points[i]-x_average)*(neighbor_points[i+1]-y_average); sxz+=(neighbor_points[i]-x_average)*(neighbor_points[i+2]-z_average); 
syz+=(neighbor_points[i+1]-y_average)*(neighbor_points[i+2]-z_average); } sxx/=point_size; syy/=point_size; szz/=point_size; sxy/=point_size; sxz/=point_size; syz/=point_size; //共分散行列 float a[3*3]={ sxx,sxy,sxz, sxy,syy,syz, sxz,syz,szz, }; eigenJacobiMethod(a, eigen_vector, 3); eigen_value[0]=a[0]; eigen_value[1]=a[4]; eigen_value[2]=a[8]; } extern void covariance(std::vector<std::vector<float>> neighbor_points,float matrix[3][3]){ //変数宣言 std::vector<float> h_neighbor_points(neighbor_points.size() * 3); std::vector<float> h_matrix(3 * 3); float *d_neighbor_points, *d_matrix; //メモリ確保 cudaMalloc((void **)&d_neighbor_points, neighbor_points.size() * 3 * sizeof(float)); cudaMalloc((void **)&d_matrix, 3 * 3 * sizeof(float)); //配列化 int k=0; for(int i=0;i<neighbor_points.size();i++){ for(int j=0;j<3;j++){ h_neighbor_points[k]=neighbor_points[i][j]; k++; } } //コピー cudaMemcpy(d_neighbor_points, &h_neighbor_points[0], neighbor_points.size() * 3 * sizeof(float), cudaMemcpyHostToDevice); covarianceGPU<<<1, 1>>>(d_neighbor_points,d_matrix,neighbor_points.size()); //配列にコピー cudaMemcpy(&h_matrix[0], d_matrix, 3 * 3 * sizeof(float), cudaMemcpyDeviceToHost); //行列化 k=0; for(int i=0;i<3;i++){ for(int j=0;j<3;j++){ matrix[i][j]=h_matrix[k]; k++; } } //メモリバラシ cudaFree(d_neighbor_points); cudaFree(d_matrix); } extern void eigen(std::vector<std::vector<float>> neighbor_points,float eigen_vector[3][3],float eigen_value[3]){ //変数宣言 std::vector<float> h_neighbor_points(neighbor_points.size() * 3); std::vector<float> h_eigen_vector(3 * 3); std::vector<float> h_eigen_value(3); float *d_neighbor_points, *d_eigen_vector, *d_eigen_value; //メモリ確保 cudaMalloc((void **)&d_neighbor_points, neighbor_points.size() * 3 * sizeof(float)); cudaMalloc((void **)&d_eigen_vector, 3 * 3 * sizeof(float)); cudaMalloc((void **)&d_eigen_value, 3 * sizeof(float)); //配列化 int k=0; for(int i=0;i<neighbor_points.size();i++){ for(int j=0;j<3;j++){ h_neighbor_points[k]=neighbor_points[i][j]; k++; } } //コピー cudaMemcpy(d_neighbor_points, &h_neighbor_points[0], neighbor_points.size() * 3 * sizeof(float), cudaMemcpyHostToDevice); eigenGPU<<<1, 1>>>(d_neighbor_points,d_eigen_vector,d_eigen_value,neighbor_points.size()); //配列にコピー cudaMemcpy(&h_eigen_vector[0], d_eigen_vector, 3 * 3 * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(&h_eigen_value[0], d_eigen_value, 3 * sizeof(float), cudaMemcpyDeviceToHost); //行列化 k=0; for(int i=0;i<3;i++){ for(int j=0;j<3;j++){ eigen_vector[i][j]=h_eigen_vector[k]; eigen_value[i]=h_eigen_value[i]; k++; } } //メモリバラシ cudaFree(d_neighbor_points); cudaFree(d_eigen_vector); cudaFree(d_eigen_value); }
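A minimal host-side usage sketch for the covariance() and eigen() wrappers defined above, assuming this translation unit (and eigen_cuda/eigen_cuda.hpp) is linked into the build; the four-point neighborhood is made up purely for illustration.

#include <cstdio>
#include <vector>

// Declarations matching the definitions above (normally provided by eigen_cuda/eigen_cuda.hpp).
extern void covariance(std::vector<std::vector<float>> neighbor_points, float matrix[3][3]);
extern void eigen(std::vector<std::vector<float>> neighbor_points, float eigen_vector[3][3], float eigen_value[3]);

int main()
{
    // Hypothetical 4-point neighborhood, one {x, y, z} triple per point.
    std::vector<std::vector<float>> pts = {
        {0.f, 0.f, 0.f}, {1.f, 0.f, 0.1f}, {0.f, 1.f, -0.1f}, {1.f, 1.f, 0.f}
    };
    float cov[3][3], evec[3][3], eval[3];
    covariance(pts, cov);   // 3x3 covariance of the neighborhood
    eigen(pts, evec, eval); // eigenvectors / eigenvalues via the Jacobi device routine
    std::printf("eigenvalues: %f %f %f\n", eval[0], eval[1], eval[2]);
    return 0;
}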
05887ddd44bb5a9784ba2a8353d46687a3015d9f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define DefineDevice #include "Define.h" #include <texture_fetch_functions.h> //To do: Construct again... extern "C" { __constant__ Parameters paras; } extern "C" __global__ void __raygen__RayAllocator() { uint2 index = make_uint2(optixGetLaunchIndex()); if (!paras.trans->eye) { hiprandState_t* statePtr = paras.randState + paras.size.x * index.y + index.x; curandStateMini state; getCurandState(&state, statePtr); RayData* rtData = (RayData*)optixGetSbtDataPointer(); RayTraceData answer{ {0.f, 0.f, 0.f}, 0, {0.f, 0.f, 0.f}, {0.f, 0.f, 0.f}, {0.f, 0.f, 0.f}, 0, {0.f, 0.f, 0.f}, -1 }; unsigned int pd0, pd1; pP(&answer, pd0, pd1); //left eye float2 ahh = /*random(index, paras.size, 0) +*/ make_float2(index) - make_float2(paras.size) / 2.0f + make_float2(paras.trans->offset0Left, paras.trans->offset1Left); float3 d = normalize(make_float3(ahh, paras.trans->z0)); float3 rayDir = make_float3( dot(paras.trans->row0, d), dot(paras.trans->row1, d), dot(paras.trans->row2, d)); optixTrace(paras.handle, paras.trans->r0Left, rayDir, 0.0001f, 1e16f, 0.0f, OptixVisibilityMask(1), //OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT, OPTIX_RAY_FLAG_NONE, RayRadiance, // SBT offset RayCount, // SBT stride RayRadiance, // missSBTIndex pd0, pd1, state.d, state.v[0], state.v[1], state.v[2], state.v[3], state.v[4]); paras.imageLeft[index.y * paras.size.x + index.x] = make_float4(answer.answer, 0.f); paras.albedoLeft[index.y * paras.size.x + index.x] = make_float4(answer.albedo, 0.f); paras.normalLeft[index.y * paras.size.x + index.x] = make_float4(answer.normal, 0.f); if (answer.firstHitIdx != -1) { float3 dirRight = answer.firstHitPos - paras.trans->r0Right; float rMax(sqrtf(dot(dirRight, dirRight))); float3 dirRightEyeSpace(transposeMult(paras.trans->row0, paras.trans->row1, paras.trans->row2, dirRight)); float2 dirRightScreenSpace{ dot(paras.trans->proj0Right,dirRightEyeSpace) / dirRightEyeSpace.z, dot(paras.trans->proj1Right,dirRightEyeSpace) / dirRightEyeSpace.z }; dirRightScreenSpace = (1 - dirRightScreenSpace) / 2; uint2 idxRight{ dirRightScreenSpace.x * paras.size.x, dirRightScreenSpace.y * paras.size.y }; if (dirRightScreenSpace.x <= 1 && dirRightScreenSpace.x >= -1 && dirRightScreenSpace.y <= 1 && dirRightScreenSpace.y >= -1) { dirRight /= rMax; float cosRight(dot(dirRight, answer.firstHitNorm)); if (cosRight * answer.firstHitAngle > 0) { if (!atomicCAS((unsigned int*)&paras.imageRight[idxRight.y * paras.size.x + idxRight.x].w, 0, 0)) { pd0 = 0; optixTrace(paras.handle, paras.trans->r0Right, dirRight, 0.0001f, rMax, 0.0f, OptixVisibilityMask(1), OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT, RayOcclusion, RayCount, RayOcclusion, pd0); if (!pd0) { if (!atomicCAS((unsigned int*)&paras.imageRight[idxRight.y * paras.size.x + idxRight.x].w, 0, 1065353216)) { //to do: //float coeff(answer.firstHitAngle * cosRight); paras.imageRight[idxRight.y * paras.size.x + idxRight.x] = make_float4(answer.answer, 1.f); paras.albedoRight[idxRight.y * paras.size.x + idxRight.x] = make_float4(answer.albedo, 0.f); paras.normalRight[idxRight.y * paras.size.x + idxRight.x] = make_float4(answer.normal, 0.f); } } } } } } setCurandState(statePtr, &state); } else { if (paras.imageRight[index.y * paras.size.x + index.x].w == 1.f) paras.imageRight[index.y * paras.size.x + index.x].w = 0; else { hiprandState_t* statePtr = paras.randState + paras.size.x * index.y + index.x; curandStateMini state; getCurandState(&state, statePtr); RayData* rtData = 
(RayData*)optixGetSbtDataPointer(); RayTraceData answer{ {0.f, 0.f, 0.f}, 0, {0.f, 0.f, 0.f}, {0.f, 0.f, 0.f}, {0.f, 0.f, 0.f}, 0, {0.f, 0.f, 0.f}, -1 }; unsigned int pd0, pd1; pP(&answer, pd0, pd1); //left eye float2 ahh = /*random(index, paras.size, 0) +*/ make_float2(index) - make_float2(paras.size) / 2.0f + make_float2(paras.trans->offset0Right, paras.trans->offset1Right); float3 d = normalize(make_float3(ahh, paras.trans->z0)); float3 rayDir = make_float3( dot(paras.trans->row0, d), dot(paras.trans->row1, d), dot(paras.trans->row2, d)); optixTrace(paras.handle, paras.trans->r0Right, rayDir, 0.0001f, 1e16f, 0.0f, OptixVisibilityMask(1), OPTIX_RAY_FLAG_NONE, RayRadiance, RayCount, RayRadiance, pd0, pd1, state.d, state.v[0], state.v[1], state.v[2], state.v[3], state.v[4]); paras.imageRight[index.y * paras.size.x + index.x] = make_float4(answer.answer, 0.f); paras.albedoRight[index.y * paras.size.x + index.x] = make_float4(answer.albedo, 0.f); paras.normalRight[index.y * paras.size.x + index.x] = make_float4(answer.normal, 0.f); setCurandState(statePtr, &state); } } } extern "C" __global__ void __closesthit__Radiance() { unsigned int pd0, pd1; pd0 = optixGetPayload_0(); pd1 = optixGetPayload_1(); RayTraceData* ray((RayTraceData*)uP(pd0, pd1)); if (ray->depth < paras.depthMax) { curandStateMini state(getCurandStateFromPayload()); CloseHitData* closeHitData = (CloseHitData*)optixGetSbtDataPointer(); int primIdx = optixGetPrimitiveIndex(); float3 n = closeHitData->normals[primIdx]; float3 answer{ 0 }; float3 color{ 0.1f, 0.9f, 0.9f }; float3 rayDir(optixGetWorldRayDirection()); float3 hitPoint(optixGetWorldRayOrigin() + rayDir * optixGetRayTmax()); float cosi1 = dot(rayDir, n); if (ray->depth == 0) { ray->albedo = color; ray->normal = transposeMult(paras.trans->row0, paras.trans->row1, paras.trans->row2, n); ray->firstHitPos = hitPoint; ray->firstHitAngle = cosi1; ray->firstHitNorm = n; ray->firstHitIdx = primIdx; } //if (rayData.depth > russian) //{ // if (random(seed) < 0.2f) { rayData.color = answer; return; } // else k /= 0.8f; //} if (cosi1 > 0) n = -n; unsigned int numRays(1); for (int c0(0); c0 < numRays; ++c0) { ray->depth += 1; optixTrace(paras.handle, hitPoint, randomDirectionCosN(n, 1.0f, &state), 0.0001f, 1e16f, 0.0f, OptixVisibilityMask(1), OPTIX_RAY_FLAG_NONE, RayRadiance, // SBT offset RayCount, // SBT stride RayRadiance, // missSBTIndex pd0, pd1, state.d, state.v[0], state.v[1], state.v[2], state.v[3], state.v[4]); answer += ray->answer * color; } ray->answer = answer / numRays; setCurandStateToPayload(&state); } ray->depth -= 1; } extern "C" __global__ void __closesthit__Occlusion() { optixSetPayload_0(1); } extern "C" __global__ void __miss__Radiance() { unsigned int pd0, pd1; pd0 = optixGetPayload_0(); pd1 = optixGetPayload_1(); RayTraceData* ray((RayTraceData*)uP(pd0, pd1)); float3 dir(optixGetWorldRayDirection()); float3 r(make_float3(texCubemap<float4>(paras.cubeTexture, dir.x, dir.y, dir.z))); if (ray->depth == 0) { ray->albedo = r; } ray->answer = r; ray->depth -= 1; }
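The raygen program above packs a pointer to its per-ray RayTraceData into two 32-bit payload slots through pP/uP, which come from Define.h and are not shown in this file. Below is a sketch of what such helpers conventionally look like in OptiX 7 device code; packPointer/unpackPointer are illustrative names, not the project's.

static __forceinline__ __device__ void packPointer(void* ptr, unsigned int& p0, unsigned int& p1)
{
    const unsigned long long v = reinterpret_cast<unsigned long long>(ptr);
    p0 = static_cast<unsigned int>(v >> 32);           // high 32 bits
    p1 = static_cast<unsigned int>(v & 0xffffffffull); // low 32 bits
}

static __forceinline__ __device__ void* unpackPointer(unsigned int p0, unsigned int p1)
{
    const unsigned long long v = (static_cast<unsigned long long>(p0) << 32) | p1;
    return reinterpret_cast<void*>(v);
}

// The closest-hit / miss programs then recover the per-ray struct roughly as:
//   RayTraceData* ray = (RayTraceData*)unpackPointer(optixGetPayload_0(), optixGetPayload_1());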
05887ddd44bb5a9784ba2a8353d46687a3015d9f.cu
#define DefineDevice #include "Define.h" #include <texture_fetch_functions.h> //To do: Construct again... extern "C" { __constant__ Parameters paras; } extern "C" __global__ void __raygen__RayAllocator() { uint2 index = make_uint2(optixGetLaunchIndex()); if (!paras.trans->eye) { curandState* statePtr = paras.randState + paras.size.x * index.y + index.x; curandStateMini state; getCurandState(&state, statePtr); RayData* rtData = (RayData*)optixGetSbtDataPointer(); RayTraceData answer{ {0.f, 0.f, 0.f}, 0, {0.f, 0.f, 0.f}, {0.f, 0.f, 0.f}, {0.f, 0.f, 0.f}, 0, {0.f, 0.f, 0.f}, -1 }; unsigned int pd0, pd1; pP(&answer, pd0, pd1); //left eye float2 ahh = /*random(index, paras.size, 0) +*/ make_float2(index) - make_float2(paras.size) / 2.0f + make_float2(paras.trans->offset0Left, paras.trans->offset1Left); float3 d = normalize(make_float3(ahh, paras.trans->z0)); float3 rayDir = make_float3( dot(paras.trans->row0, d), dot(paras.trans->row1, d), dot(paras.trans->row2, d)); optixTrace(paras.handle, paras.trans->r0Left, rayDir, 0.0001f, 1e16f, 0.0f, OptixVisibilityMask(1), //OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT, OPTIX_RAY_FLAG_NONE, RayRadiance, // SBT offset RayCount, // SBT stride RayRadiance, // missSBTIndex pd0, pd1, state.d, state.v[0], state.v[1], state.v[2], state.v[3], state.v[4]); paras.imageLeft[index.y * paras.size.x + index.x] = make_float4(answer.answer, 0.f); paras.albedoLeft[index.y * paras.size.x + index.x] = make_float4(answer.albedo, 0.f); paras.normalLeft[index.y * paras.size.x + index.x] = make_float4(answer.normal, 0.f); if (answer.firstHitIdx != -1) { float3 dirRight = answer.firstHitPos - paras.trans->r0Right; float rMax(sqrtf(dot(dirRight, dirRight))); float3 dirRightEyeSpace(transposeMult(paras.trans->row0, paras.trans->row1, paras.trans->row2, dirRight)); float2 dirRightScreenSpace{ dot(paras.trans->proj0Right,dirRightEyeSpace) / dirRightEyeSpace.z, dot(paras.trans->proj1Right,dirRightEyeSpace) / dirRightEyeSpace.z }; dirRightScreenSpace = (1 - dirRightScreenSpace) / 2; uint2 idxRight{ dirRightScreenSpace.x * paras.size.x, dirRightScreenSpace.y * paras.size.y }; if (dirRightScreenSpace.x <= 1 && dirRightScreenSpace.x >= -1 && dirRightScreenSpace.y <= 1 && dirRightScreenSpace.y >= -1) { dirRight /= rMax; float cosRight(dot(dirRight, answer.firstHitNorm)); if (cosRight * answer.firstHitAngle > 0) { if (!atomicCAS((unsigned int*)&paras.imageRight[idxRight.y * paras.size.x + idxRight.x].w, 0, 0)) { pd0 = 0; optixTrace(paras.handle, paras.trans->r0Right, dirRight, 0.0001f, rMax, 0.0f, OptixVisibilityMask(1), OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT, RayOcclusion, RayCount, RayOcclusion, pd0); if (!pd0) { if (!atomicCAS((unsigned int*)&paras.imageRight[idxRight.y * paras.size.x + idxRight.x].w, 0, 1065353216)) { //to do: //float coeff(answer.firstHitAngle * cosRight); paras.imageRight[idxRight.y * paras.size.x + idxRight.x] = make_float4(answer.answer, 1.f); paras.albedoRight[idxRight.y * paras.size.x + idxRight.x] = make_float4(answer.albedo, 0.f); paras.normalRight[idxRight.y * paras.size.x + idxRight.x] = make_float4(answer.normal, 0.f); } } } } } } setCurandState(statePtr, &state); } else { if (paras.imageRight[index.y * paras.size.x + index.x].w == 1.f) paras.imageRight[index.y * paras.size.x + index.x].w = 0; else { curandState* statePtr = paras.randState + paras.size.x * index.y + index.x; curandStateMini state; getCurandState(&state, statePtr); RayData* rtData = (RayData*)optixGetSbtDataPointer(); RayTraceData answer{ {0.f, 0.f, 0.f}, 0, {0.f, 0.f, 0.f}, {0.f, 0.f, 0.f}, {0.f, 
0.f, 0.f}, 0, {0.f, 0.f, 0.f}, -1 }; unsigned int pd0, pd1; pP(&answer, pd0, pd1); //left eye float2 ahh = /*random(index, paras.size, 0) +*/ make_float2(index) - make_float2(paras.size) / 2.0f + make_float2(paras.trans->offset0Right, paras.trans->offset1Right); float3 d = normalize(make_float3(ahh, paras.trans->z0)); float3 rayDir = make_float3( dot(paras.trans->row0, d), dot(paras.trans->row1, d), dot(paras.trans->row2, d)); optixTrace(paras.handle, paras.trans->r0Right, rayDir, 0.0001f, 1e16f, 0.0f, OptixVisibilityMask(1), OPTIX_RAY_FLAG_NONE, RayRadiance, RayCount, RayRadiance, pd0, pd1, state.d, state.v[0], state.v[1], state.v[2], state.v[3], state.v[4]); paras.imageRight[index.y * paras.size.x + index.x] = make_float4(answer.answer, 0.f); paras.albedoRight[index.y * paras.size.x + index.x] = make_float4(answer.albedo, 0.f); paras.normalRight[index.y * paras.size.x + index.x] = make_float4(answer.normal, 0.f); setCurandState(statePtr, &state); } } } extern "C" __global__ void __closesthit__Radiance() { unsigned int pd0, pd1; pd0 = optixGetPayload_0(); pd1 = optixGetPayload_1(); RayTraceData* ray((RayTraceData*)uP(pd0, pd1)); if (ray->depth < paras.depthMax) { curandStateMini state(getCurandStateFromPayload()); CloseHitData* closeHitData = (CloseHitData*)optixGetSbtDataPointer(); int primIdx = optixGetPrimitiveIndex(); float3 n = closeHitData->normals[primIdx]; float3 answer{ 0 }; float3 color{ 0.1f, 0.9f, 0.9f }; float3 rayDir(optixGetWorldRayDirection()); float3 hitPoint(optixGetWorldRayOrigin() + rayDir * optixGetRayTmax()); float cosi1 = dot(rayDir, n); if (ray->depth == 0) { ray->albedo = color; ray->normal = transposeMult(paras.trans->row0, paras.trans->row1, paras.trans->row2, n); ray->firstHitPos = hitPoint; ray->firstHitAngle = cosi1; ray->firstHitNorm = n; ray->firstHitIdx = primIdx; } //if (rayData.depth > russian) //{ // if (random(seed) < 0.2f) { rayData.color = answer; return; } // else k /= 0.8f; //} if (cosi1 > 0) n = -n; unsigned int numRays(1); for (int c0(0); c0 < numRays; ++c0) { ray->depth += 1; optixTrace(paras.handle, hitPoint, randomDirectionCosN(n, 1.0f, &state), 0.0001f, 1e16f, 0.0f, OptixVisibilityMask(1), OPTIX_RAY_FLAG_NONE, RayRadiance, // SBT offset RayCount, // SBT stride RayRadiance, // missSBTIndex pd0, pd1, state.d, state.v[0], state.v[1], state.v[2], state.v[3], state.v[4]); answer += ray->answer * color; } ray->answer = answer / numRays; setCurandStateToPayload(&state); } ray->depth -= 1; } extern "C" __global__ void __closesthit__Occlusion() { optixSetPayload_0(1); } extern "C" __global__ void __miss__Radiance() { unsigned int pd0, pd1; pd0 = optixGetPayload_0(); pd1 = optixGetPayload_1(); RayTraceData* ray((RayTraceData*)uP(pd0, pd1)); float3 dir(optixGetWorldRayDirection()); float3 r(make_float3(texCubemap<float4>(paras.cubeTexture, dir.x, dir.y, dir.z))); if (ray->depth == 0) { ray->albedo = r; } ray->answer = r; ray->depth -= 1; }
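The atomicCAS calls in both the HIP and CUDA versions compare the w channel of imageRight against the raw integer 1065353216, i.e. 0x3F800000, which is the IEEE-754 bit pattern of 1.0f; the CAS therefore tests and sets imageRight[...].w == 1.f without a floating-point compare. A tiny host-side check of that constant:

#include <cstdio>
#include <cstring>

int main()
{
    float one = 1.0f;
    unsigned int bits;
    std::memcpy(&bits, &one, sizeof bits);     // reinterpret the float's bytes
    std::printf("%u (0x%08X)\n", bits, bits);  // prints 1065353216 (0x3F800000)
    return 0;
}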
6ac26c46552857fa2777a352379344c9213dd6c2.hip
// !!! This is a file automatically generated by hipify!!! // Simple 1-Node Many-GPU-Core Monte-Carlo pi generator // On each core, generate a random x- and y- coordinate in [0,1) // Sum-square them, determine if <1 // Reduce on mean // Result is pi #include "hiprand/hiprand_kernel.h" #include <iostream> #include <stdio.h> #include <mpi.h> #include <thrust/device_vector.h> #include <thrust/transform_reduce.h> #include <thrust/functional.h> #include <thrust/tuple.h> #include <thrust/random.h> const int thread_count=32; const int cycle_count=2; // Pseudo-object sample --------------- class Sample { public: __device__ Sample() : id(0),x(0),y(0) { } __device__ void seed(unsigned long seed, int index) { id=index; engine.seed(seed+index); engine(); } __device__ void generate() { x= uniform0to1(engine); y= uniform0to1(engine); } __host__ __device__ bool within() { return (pow(x,2)+pow(y,2))<1.0; } void __host__ __device__ display() { // Use C-Style io because CUDA 2.0 doesn't support C++ io on devices printf("%d : %f, %f (%d)\n",id,x,y,within()); } private: int id; thrust::default_random_engine engine; thrust::uniform_real_distribution<double> uniform0to1; double x; double y; }; //--------------- Pseudo-object CUDA sample array --- class Device_sampler { public: Device_sampler(int count,int rank) : samples(count), rank(rank) { // seed the samples from their thread number // make a zip of samples and their number thrust::for_each( thrust::make_zip_iterator(thrust::make_tuple(samples.begin(),thrust::make_counting_iterator(0))), thrust::make_zip_iterator(thrust::make_tuple(samples.end(),thrust::make_counting_iterator(count))), bind_seed(rank) ); } struct bind_seed { bind_seed(int rank):rank(rank){} __device__ void operator()(thrust::tuple<Sample&,int> t) { thrust::get<0>(t).seed(rank,thrust::get<1>(t)); } int rank; }; // std::mem_fun_ref does not bind as a device function // so we need to create our own lambda struct bind_generate { __device__ bool operator()(Sample &sample) const { sample.generate(); return sample.within(); } }; double result(){ return 4.0*thrust::transform_reduce( samples.begin(),samples.end(), bind_generate(), 0,thrust::plus<double>()) /static_cast<double>(samples.size()); } private: thrust::device_vector<Sample> samples; int rank; }; int main( int argc, char** argv) { MPI_Init(&argc, &argv); int rank, size; MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); double result_total=0.0; double result_mpi=0.0; Device_sampler device_sampler(thread_count,rank); for (int cycle=0;cycle<cycle_count;cycle++) { double result=device_sampler.result(); printf("Partial Result %i on rank %i: %f\n",cycle,rank,result); result_total+=result; } printf("Partial Result on rank %i: %f\n",rank,result_total/static_cast<double>(cycle_count)); // Reduce the MPI-Samples MPI_Reduce(&result_total,&result_mpi,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD); if (rank==0){ printf("Final Result over %i mpi processes: %f\n",size,result_mpi/static_cast<double>(cycle_count*size)); } MPI_Finalize(); return 0; }
6ac26c46552857fa2777a352379344c9213dd6c2.cu
// Simple 1-Node Many-GPU-Core Monte-Carlo pi generator // On each core, generate a random x- and y- coordinate in [0,1) // Sum-square them, determine if <1 // Reduce on mean // Result is pi #include "curand_kernel.h" #include <iostream> #include <stdio.h> #include <mpi.h> #include <thrust/device_vector.h> #include <thrust/transform_reduce.h> #include <thrust/functional.h> #include <thrust/tuple.h> #include <thrust/random.h> const int thread_count=32; const int cycle_count=2; // Pseudo-object sample --------------- class Sample { public: __device__ Sample() : id(0),x(0),y(0) { } __device__ void seed(unsigned long seed, int index) { id=index; engine.seed(seed+index); engine(); } __device__ void generate() { x= uniform0to1(engine); y= uniform0to1(engine); } __host__ __device__ bool within() { return (pow(x,2)+pow(y,2))<1.0; } void __host__ __device__ display() { // Use C-Style io because CUDA 2.0 doesn't support C++ io on devices printf("%d : %f, %f (%d)\n",id,x,y,within()); } private: int id; thrust::default_random_engine engine; thrust::uniform_real_distribution<double> uniform0to1; double x; double y; }; //--------------- Pseudo-object CUDA sample array --- class Device_sampler { public: Device_sampler(int count,int rank) : samples(count), rank(rank) { // seed the samples from their thread number // make a zip of samples and their number thrust::for_each( thrust::make_zip_iterator(thrust::make_tuple(samples.begin(),thrust::make_counting_iterator(0))), thrust::make_zip_iterator(thrust::make_tuple(samples.end(),thrust::make_counting_iterator(count))), bind_seed(rank) ); } struct bind_seed { bind_seed(int rank):rank(rank){} __device__ void operator()(thrust::tuple<Sample&,int> t) { thrust::get<0>(t).seed(rank,thrust::get<1>(t)); } int rank; }; // std::mem_fun_ref does not bind as a device function // so we need to create our own lambda struct bind_generate { __device__ bool operator()(Sample &sample) const { sample.generate(); return sample.within(); } }; double result(){ return 4.0*thrust::transform_reduce( samples.begin(),samples.end(), bind_generate(), 0,thrust::plus<double>()) /static_cast<double>(samples.size()); } private: thrust::device_vector<Sample> samples; int rank; }; int main( int argc, char** argv) { MPI_Init(&argc, &argv); int rank, size; MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); double result_total=0.0; double result_mpi=0.0; Device_sampler device_sampler(thread_count,rank); for (int cycle=0;cycle<cycle_count;cycle++) { double result=device_sampler.result(); printf("Partial Result %i on rank %i: %f\n",cycle,rank,result); result_total+=result; } printf("Partial Result on rank %i: %f\n",rank,result_total/static_cast<double>(cycle_count)); // Reduce the MPI-Samples MPI_Reduce(&result_total,&result_mpi,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD); if (rank==0){ printf("Final Result over %i mpi processes: %f\n",size,result_mpi/static_cast<double>(cycle_count*size)); } MPI_Finalize(); return 0; }
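For reference, a minimal single-process sketch of the same hit-counting estimator using Thrust's counting-iterator idiom, without the MPI reduction or the per-Sample engine objects of the file above; the fixed seed and the discard stride are assumptions made only for this illustration.

#include <thrust/transform_reduce.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/functional.h>
#include <thrust/random.h>
#include <cstdio>

// Returns 1.0 when sample i falls inside the unit quarter-circle, else 0.0.
struct in_circle
{
  __host__ __device__ double operator()(unsigned long long i) const
  {
    thrust::default_random_engine rng(1234);             // assumed fixed seed
    thrust::uniform_real_distribution<double> u01(0.0, 1.0);
    rng.discard(2 * i);                                   // one subsequence per sample
    double x = u01(rng);
    double y = u01(rng);
    return (x * x + y * y < 1.0) ? 1.0 : 0.0;
  }
};

int main()
{
  const unsigned long long n = 1 << 24;
  double hits = thrust::transform_reduce(
      thrust::counting_iterator<unsigned long long>(0),
      thrust::counting_iterator<unsigned long long>(n),
      in_circle(), 0.0, thrust::plus<double>());
  std::printf("pi ~= %f\n", 4.0 * hits / n);
  return 0;
}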
6438bbbd8e616c4d77880b148f737fffff9dc81b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ /* * CUDA Device code for particle simulation. */ #ifndef _PARTICLES_KERNEL_H_ #define _PARTICLES_KERNEL_H_ #include "cutil_math.h" #include "math_constants.h" #include "particles_kernel.cuh" texture<float4, 3, hipReadModeElementType> noiseTex; // simulation parameters __constant__ SimParams params; // look up in 3D noise texture __device__ float3 noise3D(float3 p) { float4 n = tex3D(noiseTex, p.x, p.y, p.z); return make_float3(n.x, n.y, n.z); } __device__ float3 fractalSum3D(float3 p, int octaves, float lacunarity, float gain) { float freq = 1.0f, amp = 0.5f; float3 sum = make_float3(0.0f); for(int i=0; i<octaves; i++) { sum += noise3D(p*freq)*amp; freq *= lacunarity; amp *= gain; } return sum; } __device__ float3 turbulence3D(float3 p, int octaves, float lacunarity, float gain) { float freq = 1.0f, amp = 0.5f; float3 sum = make_float3(0.0f); for(int i=0; i<octaves; i++) { sum += fabs(noise3D(p*freq))*amp; freq *= lacunarity; amp *= gain; } return sum; } // integrate particle attributes __global__ void integrateD(float4* newPos, float4* newVel, float4* oldPos, float4* oldVel, float deltaTime, int numParticles) { int index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x; if (index >= numParticles) return; volatile float4 posData = oldPos[index]; // ensure coalesced reads volatile float4 velData = oldVel[index]; float3 pos = make_float3(posData.x, posData.y, posData.z); float3 vel = make_float3(velData.x, velData.y, velData.z); // update particle age float age = posData.w; float lifetime = velData.w; if (age < lifetime) { age += deltaTime; } else { age = lifetime; } // apply accelerations //vel += params.gravity * deltaTime; // apply procedural noise //float3 noise = noise3D(pos*params.noiseFreq + params.time*params.noiseSpeed); //vel += noise * params.noiseAmp; // new position = old position + velocity * deltaTime pos += vel * deltaTime; //vel *= params.globalDamping; //if ((index%10)==1) //vel *=2; //else if((index%10)==2) //vel *=3; //else if((index%10)==3) //vel *=4; //else if((index%10)==4) //vel *=5; //else if((index%10)==5) //vel *=6; //else if((index%10)==6) //vel *=7; //else if((index%10)==7) //vel *=8; //else if((index%10)==8) //vel *=9; //else if((index%10)==9) //vel *=10; // store new position and velocity newPos[index] = make_float4(pos, age); newVel[index] = make_float4(vel, velData.w); } // calculate sort depth for each particle __global__ void calcDepthD(float4* pos, float* keys, uint *indices, float3 vector, int numParticles) { uint index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x; if (index >= numParticles) return; volatile float4 p = pos[index]; float key = -dot(make_float3(p.x, p.y, p.z), vector); // project onto sort vector keys[index] = key; indices[index] = index; } #endif
6438bbbd8e616c4d77880b148f737fffff9dc81b.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ /* * CUDA Device code for particle simulation. */ #ifndef _PARTICLES_KERNEL_H_ #define _PARTICLES_KERNEL_H_ #include "cutil_math.h" #include "math_constants.h" #include "particles_kernel.cuh" texture<float4, 3, cudaReadModeElementType> noiseTex; // simulation parameters __constant__ SimParams params; // look up in 3D noise texture __device__ float3 noise3D(float3 p) { float4 n = tex3D(noiseTex, p.x, p.y, p.z); return make_float3(n.x, n.y, n.z); } __device__ float3 fractalSum3D(float3 p, int octaves, float lacunarity, float gain) { float freq = 1.0f, amp = 0.5f; float3 sum = make_float3(0.0f); for(int i=0; i<octaves; i++) { sum += noise3D(p*freq)*amp; freq *= lacunarity; amp *= gain; } return sum; } __device__ float3 turbulence3D(float3 p, int octaves, float lacunarity, float gain) { float freq = 1.0f, amp = 0.5f; float3 sum = make_float3(0.0f); for(int i=0; i<octaves; i++) { sum += fabs(noise3D(p*freq))*amp; freq *= lacunarity; amp *= gain; } return sum; } // integrate particle attributes __global__ void integrateD(float4* newPos, float4* newVel, float4* oldPos, float4* oldVel, float deltaTime, int numParticles) { int index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x; if (index >= numParticles) return; volatile float4 posData = oldPos[index]; // ensure coalesced reads volatile float4 velData = oldVel[index]; float3 pos = make_float3(posData.x, posData.y, posData.z); float3 vel = make_float3(velData.x, velData.y, velData.z); // update particle age float age = posData.w; float lifetime = velData.w; if (age < lifetime) { age += deltaTime; } else { age = lifetime; } // apply accelerations //vel += params.gravity * deltaTime; // apply procedural noise //float3 noise = noise3D(pos*params.noiseFreq + params.time*params.noiseSpeed); //vel += noise * params.noiseAmp; // new position = old position + velocity * deltaTime pos += vel * deltaTime; //vel *= params.globalDamping; //if ((index%10)==1) //vel *=2; //else if((index%10)==2) //vel *=3; //else if((index%10)==3) //vel *=4; //else if((index%10)==4) //vel *=5; //else if((index%10)==5) //vel *=6; //else if((index%10)==6) //vel *=7; //else if((index%10)==7) //vel *=8; //else if((index%10)==8) //vel *=9; //else if((index%10)==9) //vel *=10; // store new position and velocity newPos[index] = make_float4(pos, age); newVel[index] = make_float4(vel, velData.w); } // calculate sort depth for each particle __global__ void calcDepthD(float4* pos, float* keys, uint *indices, float3 vector, int numParticles) { uint index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x; if (index >= numParticles) return; volatile float4 p = pos[index]; float key = -dot(make_float3(p.x, p.y, p.z), vector); // project onto sort vector keys[index] = key; indices[index] = index; } #endif
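The host-side launcher for integrateD lives elsewhere in the particles sample; below is a minimal sketch of how a kernel with this signature is typically driven. The wrapper name and the d_* buffer arguments are hypothetical.

// Hypothetical wrapper; d_newPos / d_newVel / d_oldPos / d_oldVel are assumed to be
// cudaMalloc'd arrays of numParticles float4 elements.
void integrateParticles(float4* d_newPos, float4* d_newVel,
                        float4* d_oldPos, float4* d_oldVel,
                        float deltaTime, int numParticles)
{
    const int blockSize = 256;
    const int numBlocks = (numParticles + blockSize - 1) / blockSize;   // round up
    // integrateD already guards against index >= numParticles, so the padded grid is safe.
    integrateD<<<numBlocks, blockSize>>>(d_newPos, d_newVel, d_oldPos, d_oldVel,
                                         deltaTime, numParticles);
}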
b3441c57269565f2894da829fcda821fb17d0b8f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "THHTensorMath.h" #include "THHGeneral.h" #include "THHTensorRandom.h" #include <thrust/fill.h> #include <thrust/functional.h> #include <thrust/reduce.h> #include <thrust/inner_product.h> #define NB_THREADS_PER_BLOCK 256 void THCudaTensor_fill(THCudaTensor *self_, float value) { THCudaTensor *self = THCudaTensor_newContiguous(self_); thrust::device_ptr<float> self_data(THCudaTensor_data(self)); thrust::fill(self_data, self_data+THCudaTensor_nElement(self), value); THCudaTensor_freeCopyTo(self, self_); } void THCudaTensor_zero(THCudaTensor *self_) { THCudaTensor *self = THCudaTensor_newContiguous(self_); hipMemset(THCudaTensor_data(self), 0, sizeof(float)*THCudaTensor_nElement(self)); THCudaTensor_freeCopyTo(self, self_); } struct addvalue_functor { const float value; addvalue_functor(float value_) : value(value_) {} __host__ __device__ float operator()(const float& x) const { return (x+value); } }; void THCudaTensor_add(THCudaTensor *self_, float value) { { THCudaTensor *self = THCudaTensor_newContiguous(self_); long size = THCudaTensor_nElement(self); thrust::device_ptr<float> self_data(THCudaTensor_data(self)); thrust::transform(self_data, self_data+size, self_data, addvalue_functor(value)); THCudaTensor_freeCopyTo(self, self_); } } void THCudaTensor_mul(THCudaTensor *self_, float value) { THCudaTensor *self = THCudaTensor_newContiguous(self_); hipblasSscal(THCudaTensor_nElement(self), value, THCudaTensor_data(self), 1); THCublasCheck(); THCudaTensor_freeCopyTo(self, self_); } void THCudaTensor_div(THCudaTensor *self_, float value) { THCudaTensor *self = THCudaTensor_newContiguous(self_); hipblasSscal(THCudaTensor_nElement(self), 1/value, THCudaTensor_data(self), 1); THCublasCheck(); THCudaTensor_freeCopyTo(self, self_); } void THCudaTensor_cadd(THCudaTensor *self_, float value, THCudaTensor *src) { THArgCheck(THCudaTensor_nElement(self_) == THCudaTensor_nElement(src), 3, "size do not match"); { THCudaTensor *self = THCudaTensor_newContiguous(self_); src = THCudaTensor_newContiguous(src); hipblasSaxpy(THCudaTensor_nElement(self), value, THCudaTensor_data(src), 1, THCudaTensor_data(self), 1); THCublasCheck(); THCudaTensor_free(src); THCudaTensor_freeCopyTo(self, self_); } } void THCudaTensor_cadd_tst(THCudaTensor *self_, THCudaTensor* src1, float value, THCudaTensor *src2) { THCudaTensor_resizeAs(self_, src1); THArgCheck(THCudaTensor_nElement(src1) == THCudaTensor_nElement(src2), 3, "size do not match"); { THCudaTensor *self = THCudaTensor_newContiguous(self_); src1 = THCudaTensor_newContiguous(src1); src2 = THCudaTensor_newContiguous(src2); THCudaTensor_copy(self, src1); hipblasSaxpy(THCudaTensor_nElement(self), value, THCudaTensor_data(src2), 1, THCudaTensor_data(self), 1); THCublasCheck(); THCudaTensor_free(src1); THCudaTensor_free(src2); THCudaTensor_freeCopyTo(self, self_); } } void THCudaTensor_cmul(THCudaTensor *self_, THCudaTensor *src1, THCudaTensor *src2) { THCudaTensor_resizeAs(self_, src1); THArgCheck(THCudaTensor_nElement(src1) == THCudaTensor_nElement(src2), 3, "size do not match"); { THCudaTensor *self = THCudaTensor_newContiguous(self_); long size = THCudaTensor_nElement(self); src1 = THCudaTensor_newContiguous(src1); src2 = THCudaTensor_newContiguous(src2); thrust::device_ptr<float> self_data(THCudaTensor_data(self)); thrust::device_ptr<float> src1_data(THCudaTensor_data(src1)); thrust::device_ptr<float> src2_data(THCudaTensor_data(src2)); thrust::transform(src2_data, 
src2_data+size, src1_data, self_data, thrust::multiplies<float>()); THCudaTensor_free(src1); THCudaTensor_free(src2); THCudaTensor_freeCopyTo(self, self_); } } void THCudaTensor_cdiv(THCudaTensor *self_, THCudaTensor *src1, THCudaTensor *src2) { THCudaTensor_resizeAs(self_, src1); THArgCheck(THCudaTensor_nElement(src1) == THCudaTensor_nElement(src2), 3, "size does not match"); { THCudaTensor *self = THCudaTensor_newContiguous(self_); long size = THCudaTensor_nElement(self); src1 = THCudaTensor_newContiguous(src1); src2 = THCudaTensor_newContiguous(src2); thrust::device_ptr<float> self_data(THCudaTensor_data(self)); thrust::device_ptr<float> src1_data(THCudaTensor_data(src1)); thrust::device_ptr<float> src2_data(THCudaTensor_data(src2)); thrust::transform(src1_data, src1_data+size, src2_data, self_data, thrust::divides<float>()); THCudaTensor_free(src1); THCudaTensor_free(src2); THCudaTensor_freeCopyTo(self, self_); } } __global__ void THCudaTensor_kernel_addcmul(float *data, float value, float *src1, float *src2, long size) { long k = (((blockIdx.y * gridDim.x) + blockIdx.x) * blockDim.x) + threadIdx.x; if(k < size) data[k] += value*src1[k]*src2[k]; } void THCudaTensor_addcmul(THCudaTensor *self_, float value, THCudaTensor *src1, THCudaTensor *src2) { THCudaTensor_resizeAs(self_, src1); THArgCheck(THCudaTensor_nElement(src1) == THCudaTensor_nElement(src2), 3, "size do not match"); { THCudaTensor *self = THCudaTensor_newContiguous(self_); long size = THCudaTensor_nElement(self); src1 = THCudaTensor_newContiguous(src1); src2 = THCudaTensor_newContiguous(src2); int nBlockPerRow, nBlockPerColumn, nThreadPerBlock; THCudaGetGridSize(&nBlockPerRow, &nBlockPerColumn, &nThreadPerBlock, size); dim3 threads(nThreadPerBlock); dim3 grid(nBlockPerRow, nBlockPerColumn); hipLaunchKernelGGL(( THCudaTensor_kernel_addcmul), dim3(grid), dim3(threads), 0, 0, THCudaTensor_data(self), value, THCudaTensor_data(src1), THCudaTensor_data(src2), size); hipError_t errcode = hipGetLastError(); if(errcode != hipSuccess) THError(hipGetErrorString(errcode)); THCudaTensor_free(src1); THCudaTensor_free(src2); THCudaTensor_freeCopyTo(self, self_); } } __global__ void THCudaTensor_kernel_addcdiv(float *data, float value, float *src1, float *src2, long size) { long k = (((blockIdx.y * gridDim.x) + blockIdx.x) * blockDim.x) + threadIdx.x; if(k < size) data[k] += value*src1[k]/src2[k]; } void THCudaTensor_addcdiv(THCudaTensor *self_, float value, THCudaTensor *src1, THCudaTensor *src2) { THCudaTensor_resizeAs(self_, src1); THArgCheck(THCudaTensor_nElement(src1) == THCudaTensor_nElement(src2), 3, "size do not match"); { THCudaTensor *self = THCudaTensor_newContiguous(self_); long size = THCudaTensor_nElement(self); src1 = THCudaTensor_newContiguous(src1); src2 = THCudaTensor_newContiguous(src2); int nBlockPerRow, nBlockPerColumn, nThreadPerBlock; THCudaGetGridSize(&nBlockPerRow, &nBlockPerColumn, &nThreadPerBlock, size); dim3 threads(nThreadPerBlock); dim3 grid(nBlockPerRow, nBlockPerColumn); hipLaunchKernelGGL(( THCudaTensor_kernel_addcdiv), dim3(grid), dim3(threads), 0, 0, THCudaTensor_data(self), value, THCudaTensor_data(src1), THCudaTensor_data(src2), size); hipError_t errcode = hipGetLastError(); if(errcode != hipSuccess) THError(hipGetErrorString(errcode)); THCudaTensor_free(src1); THCudaTensor_free(src2); THCudaTensor_freeCopyTo(self, self_); } } float THCudaTensor_dot(THCudaTensor *self, THCudaTensor *src) { THArgCheck(THCudaTensor_nElement(self) == THCudaTensor_nElement(src), 2, "size do not match"); { self = 
THCudaTensor_newContiguous(self); src = THCudaTensor_newContiguous(src); float result = hipblasSdot(THCudaTensor_nElement(self), THCudaTensor_data(self), 1, THCudaTensor_data(src), 1); THCublasCheck(); THCudaTensor_free(src); THCudaTensor_free(self); return result; } } float THCudaTensor_minall(THCudaTensor *self) { self = THCudaTensor_newContiguous(self); thrust::device_ptr<float> self_data(THCudaTensor_data(self)); float result = thrust::reduce(self_data, self_data+THCudaTensor_nElement(self), (float)(THInf), thrust::minimum<float>()); THCudaTensor_free(self); return result; } float THCudaTensor_maxall(THCudaTensor *self) { self = THCudaTensor_newContiguous(self); thrust::device_ptr<float> self_data(THCudaTensor_data(self)); float result = thrust::reduce(self_data, self_data+THCudaTensor_nElement(self), (float)(-THInf), thrust::maximum<float>()); THCudaTensor_free(self); return result; } float THCudaTensor_sumall(THCudaTensor *self) { self = THCudaTensor_newContiguous(self); thrust::device_ptr<float> self_data(THCudaTensor_data(self)); float result = thrust::reduce(self_data, self_data+THCudaTensor_nElement(self), (float)(0), thrust::plus<float>()); THCudaTensor_free(self); return result; } struct dim4 { unsigned arr[4]; __host__ dim4(unsigned init=0) { for(unsigned i=0; i<4; i++) { arr[i] = init; } } __host__ __device__ unsigned& operator[](const unsigned& idx) { return arr[idx]; } }; /* Reduce one of the outer dimensions of a tensor * * For an n-d tensor (n <= 4) where the reduction is *not* along the innermost * dimension: * * - block.x and grid.x make up the innermost dimension; * - The reduced dimension is looped over inside a block; and * - grid.y and grid.z are the remaining two dimensions (if any). * - block.y and block.z are not used as we're limited to 512 or 1024 threads * in the block. * * For sizes/strides, index 3 is the reduced dimension, while the remaining * indices are for the remaining dimensions with index 0 the innermost dimension. * * Reduction along the innermost dimension is handled in a separate kernel. */ template<class UnaryFunction, class BinaryFunction> __global__ void THCudaTensor_kernel_transformReduceOuterDim(float *tgt, float *src_, dim4 src_stride, dim4 tgt_stride, dim4 size, UnaryFunction unary_op, float init, BinaryFunction binary_op) { const size_t reduce = 3; for(unsigned z = blockIdx.z; z < size[2] ; z += gridDim.z) for(unsigned y = blockIdx.y; y < size[1] ; y += gridDim.y) for(unsigned col = blockIdx.x * blockDim.x + threadIdx.x; col < size[0]; col += blockDim.x * gridDim.x) { float *src = src_ + z * src_stride[2] + y * src_stride[1] + col; float acc = init; for(unsigned i=0; i < size[reduce]; i++) { acc = binary_op(acc, unary_op(*src)); src += src_stride[reduce]; } tgt[z * tgt_stride[2] + y * tgt_stride[1] + col] = float(acc); } } template<class UnaryFunction, class BinaryFunction> __host__ void THCudaTensor_transformReduceOuterDim(THCudaTensor *tgt, THCudaTensor *src, long rdim, UnaryFunction unary_op, float init, BinaryFunction binary_op) { const size_t reduce = 3; dim4 src_stride(0); dim4 tgt_stride(0); dim4 size(1); unsigned ndim = THCudaTensor_nDimension(src); for(unsigned idim=0, o=ndim-2; idim < ndim; idim++) { unsigned odim = idim == rdim ? 
reduce : o--; src_stride[odim] = THCudaTensor_stride(src, idim); tgt_stride[odim] = THCudaTensor_stride(tgt, idim); size[odim] = THCudaTensor_size(src, idim); } const unsigned nThreadPerBlock = 256; unsigned nBlockPerColumn = (size[0] + nThreadPerBlock - 1) / nThreadPerBlock; dim3 threads(nThreadPerBlock); unsigned maxGridDim = 1024; // anything < 64k is fine. The choice has no impact on performance. dim3 grid(min(maxGridDim, nBlockPerColumn), min(maxGridDim, size[1]), min(maxGridDim, size[2])); hipLaunchKernelGGL(( THCudaTensor_kernel_transformReduceOuterDim), dim3(grid), dim3(threads), 0, 0, THCudaTensor_data(tgt), THCudaTensor_data(src), src_stride, tgt_stride, size, unary_op, init, binary_op); hipError_t errcode = hipGetLastError(); if(errcode != hipSuccess) { THError(hipGetErrorString(errcode)); } } /* Reduce the innermost dimension of a tensor * * For an n-d tensor (n <= 4) where the reduction is along the innermost dimension: * * - block.x is the innermost dimension, i.e. dimension 0; * - block.y and grid.y make up dimension 1; and * - grid.x and grid z are the remaining two outer dimensions (if any) * * Reduction along other dimensions is handled in a separate kernel. */ template<class UnaryFunction, class BinaryFunction> __global__ void THCudaTensor_kernel_transformReduceInnermostDim(float *tgt, float *src_, dim4 src_stride, dim4 tgt_stride, dim4 size, UnaryFunction unary_op, float init, BinaryFunction binary_op) { __shared__ float sbuf[16][32]; // 8kB for(unsigned z = blockIdx.z; z < size[3] ; z += gridDim.z) for(unsigned x = blockIdx.x; x < size[2] ; x += gridDim.x) for(unsigned bRow = blockIdx.y * blockDim.y; bRow < size[1]; bRow += blockDim.y * gridDim.y) { float acc = init; unsigned row = bRow + threadIdx.y; float *src = src_ + z * src_stride[3] + x * src_stride[2] + row * src_stride[1]; bool reducing = threadIdx.x < blockDim.y && bRow + threadIdx.x < size[1] && threadIdx.y == 0; for(unsigned bCol=0; bCol < size[0]; bCol += blockDim.x) { sbuf[threadIdx.y][threadIdx.x] = init; unsigned col = bCol + threadIdx.x; if(row < size[1] && col < size[0]) { sbuf[threadIdx.y][threadIdx.x] = unary_op(src[col]); } __syncthreads(); float* line = &sbuf[threadIdx.y][0]; for(unsigned s = 16; s > 1; s >>= 1) { if(row < size[1] && threadIdx.x < s) { line[threadIdx.x] = binary_op(line[threadIdx.x], line[threadIdx.x + s]); } __syncthreads(); } if(reducing) { sbuf[threadIdx.x][0] = binary_op(sbuf[threadIdx.x][0], sbuf[threadIdx.x][1]); acc = binary_op(acc, sbuf[threadIdx.x][0]); } __syncthreads(); } if(reducing) { unsigned row = bRow + threadIdx.x; unsigned tgt_offset = z * tgt_stride[3] + x * tgt_stride[2]; tgt[tgt_offset + row] = acc; } } } template<class UnaryFunction, class BinaryFunction> __host__ void THCudaTensor_transformReduceInnermostDim(THCudaTensor *tgt, THCudaTensor *src, UnaryFunction unary_op, float init, BinaryFunction binary_op) { dim4 src_stride(0); dim4 tgt_stride(0); dim4 size(1); unsigned ndim = THCudaTensor_nDimension(src); for(unsigned dim=0; dim < ndim; dim++) { unsigned odim = ndim - 1 - dim; src_stride[odim] = THCudaTensor_stride(src, dim); tgt_stride[odim] = THCudaTensor_stride(tgt, dim); size[odim] = THCudaTensor_size(src, dim); } dim3 threads(32, 16); unsigned nBlockPerRow = (size[1] + threads.y - 1) / threads.y; unsigned maxGridDim = 1024; // anything < 64k is fine. The choice has no impact on performance. 
dim3 grid(min(maxGridDim, size[2]), min(maxGridDim, nBlockPerRow), min(maxGridDim, size[3])); hipLaunchKernelGGL(( THCudaTensor_kernel_transformReduceInnermostDim), dim3(grid), dim3(threads), 0, 0, THCudaTensor_data(tgt), THCudaTensor_data(src), src_stride, tgt_stride, size, unary_op, init, binary_op); hipError_t errcode = hipGetLastError(); if(errcode != hipSuccess) { THError(hipGetErrorString(errcode)); } } template<class UnaryFunction, class BinaryFunction> void THCudaTensor_transformReduceDim(THCudaTensor *self_, THCudaTensor *src, long dimension, UnaryFunction unary_op, float init, BinaryFunction binary_op) { THArgCheck(dimension >= 0 && dimension < THCudaTensor_nDimension(src), 3, "dimension out of range"); THArgCheck(THCudaTensor_nDimension(src) <= 4, 2, "too many dimensions (>4)"); THLongStorage *dim = THCudaTensor_newSizeOf(src); THLongStorage_set(dim, dimension, 1); THCudaTensor_resize(self_, dim, NULL); THLongStorage_free(dim); THCudaTensor *self = THCudaTensor_newContiguous(self_); src = THCudaTensor_newContiguous(src); if(dimension == THCudaTensor_nDimension(src)-1) { THCudaTensor_transformReduceInnermostDim(self, src, unary_op, init, binary_op); } else { THCudaTensor_transformReduceOuterDim(self, src, dimension, unary_op, init, binary_op); } THCudaTensor_free(src); THCudaTensor_freeCopyTo(self, self_); } template<class BinaryFunction> void THCudaTensor_reduceDim(THCudaTensor *self_, THCudaTensor *src, long dimension, float init, BinaryFunction binary_op) { THCudaTensor_transformReduceDim(self_, src, dimension, thrust::identity<float>(), init, binary_op); } void THCudaTensor_sum(THCudaTensor *self, THCudaTensor *src, long dimension) { return THCudaTensor_reduceDim(self, src, dimension, 0.0f, thrust::plus<float>()); } void THCudaTensor_max(THCudaTensor *self, THCudaTensor *src, long dimension) { const float minfloat32 = -3.402823466e+38f; return THCudaTensor_reduceDim(self, src, dimension, minfloat32, thrust::maximum<float>()); } void THCudaTensor_min(THCudaTensor *self, THCudaTensor *src, long dimension) { const float maxfloat32 = 3.402823466e+38f; return THCudaTensor_reduceDim(self, src, dimension, maxfloat32, thrust::minimum<float>()); } void THCudaTensor_addmv(THCudaTensor *self, float beta, float alpha, THCudaTensor *mat, THCudaTensor *vec) { if( (mat->nDimension != 2) || (vec->nDimension != 1) ) THError("matrix and vector expected"); if( mat->size[1] != vec->size[0] ) THError("size mismatch"); if(self->nDimension != 1) THError("size mismatch"); if( self->size[0] != mat->size[0] ) THError("size mismatch"); if(mat->stride[0] == 1) { hipblasSgemv('n', mat->size[0], mat->size[1], alpha, THCudaTensor_data(mat), mat->stride[1], THCudaTensor_data(vec), vec->stride[0], beta, THCudaTensor_data(self), self->stride[0]); } else if(mat->stride[1] == 1) { hipblasSgemv('t', mat->size[1], mat->size[0], alpha, THCudaTensor_data(mat), mat->stride[0], THCudaTensor_data(vec), vec->stride[0], beta, THCudaTensor_data(self), self->stride[0]); } else { mat = THCudaTensor_newContiguous(mat); hipblasSgemv('t', mat->size[1], mat->size[0], alpha, THCudaTensor_data(mat), mat->stride[0], THCudaTensor_data(vec), vec->stride[0], beta, THCudaTensor_data(self), self->stride[0]); THCudaTensor_free(mat); } THCublasCheck(); } void THCudaTensor_addmm(THCudaTensor *self, float beta, float alpha, THCudaTensor *m1, THCudaTensor *m2) { char transpose, transpose_m1, transpose_m2; THCudaTensor *self_, *m1_, *m2_; if( (m1->nDimension != 2) || (m2->nDimension != 2) ) THError("matrix and matrix expected"); 
if(self->nDimension != 2) THError("size mismatch"); if( (self->size[0] != m1->size[0]) || (self->size[1] != m2->size[1]) || (m1->size[1] != m2->size[0]) ) THError("size mismatch"); /* self */ if ((self->stride[0] == 1) && (self->stride[1] > 1)) { transpose = 'n'; self_ = self; } else if(self->stride[1] == 1) { THCudaTensor *swap = m2; m2 = m1; m1 = swap; THCudaTensor_transpose(self, NULL, 0, 1); THCudaTensor_transpose(m1, NULL, 0, 1); THCudaTensor_transpose(m2, NULL, 0, 1); transpose = 't'; self_ = self; } else { transpose = 'n'; THCudaTensor_transpose(self, NULL, 0, 1); self_ = THCudaTensor_newClone(self); THCudaTensor_transpose(self, NULL, 0, 1); THCudaTensor_transpose(self_, NULL, 0, 1); } /* m1 */ if(m1->stride[0] == 1) { transpose_m1 = 'n'; m1_ = m1; } else if(m1->stride[1] == 1) { transpose_m1 = 't'; m1_ = m1; } else { transpose_m1 = 't'; m1_ = THCudaTensor_newContiguous(m1); } /* m2 */ if(m2->stride[0] == 1) { transpose_m2 = 'n'; m2_ = m2; } else if(m2->stride[1] == 1) { transpose_m2 = 't'; m2_ = m2; } else { transpose_m2 = 't'; m2_ = THCudaTensor_newContiguous(m2); } /* do the operation */ hipblasSgemm(transpose_m1, transpose_m2, self_->size[0], self_->size[1], m1_->size[1], alpha, THCudaTensor_data(m1_), (transpose_m1 == 'n' ? m1_->stride[1] : m1_->stride[0]), THCudaTensor_data(m2_), (transpose_m2 == 'n' ? m2_->stride[1] : m2_->stride[0]), beta, THCudaTensor_data(self_), self_->stride[1]); THCublasCheck(); /* free intermediate variables */ if(m1_ != m1) THCudaTensor_free(m1_); if(m2_ != m2) THCudaTensor_free(m2_); if(self_ != self) THCudaTensor_freeCopyTo(self_, self); if(transpose == 't') { THCudaTensor_transpose(self, NULL, 0, 1); THCudaTensor_transpose(m1, NULL, 0, 1); THCudaTensor_transpose(m2, NULL, 0, 1); } } void THCudaTensor_addr(THCudaTensor *self, float alpha, THCudaTensor *vec1, THCudaTensor *vec2) { if( (vec1->nDimension != 1) || (vec2->nDimension != 1) ) THError("vector and vector expected"); if(self->nDimension != 2) THError("size mismatch"); if( (self->size[0] != vec1->size[0]) || (self->size[1] != vec2->size[0]) ) THError("size mismatch"); if(self->stride[0] == 1) { hipblasSger(vec1->size[0], vec2->size[0], alpha, THCudaTensor_data(vec1), vec1->stride[0], THCudaTensor_data(vec2), vec2->stride[0], THCudaTensor_data(self), self->stride[1]); } else if(self->stride[1] == 1) { hipblasSger(vec2->size[0], vec1->size[0], alpha, THCudaTensor_data(vec2), vec2->stride[0], THCudaTensor_data(vec1), vec1->stride[0], THCudaTensor_data(self), self->stride[0]); } else { THCudaTensor *cself = THCudaTensor_newClone(self); hipblasSger(vec2->size[0], vec1->size[0], alpha, THCudaTensor_data(vec2), vec2->stride[0], THCudaTensor_data(vec1), vec1->stride[0], THCudaTensor_data(cself), cself->stride[0]); THCudaTensor_freeCopyTo(cself, self); } THCublasCheck(); } #define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC) \ struct NAME##_functor \ { \ __host__ __device__ float operator()(const float& x) const \ { \ return CFUNC(x); \ } \ }; \ \ void THCudaTensor_##NAME(THCudaTensor *self_) \ { \ THCudaTensor *self = THCudaTensor_newContiguous(self_); \ long size = THCudaTensor_nElement(self); \ thrust::device_ptr<float> self_data(THCudaTensor_data(self)); \ \ thrust::transform(self_data, self_data+size, self_data, NAME##_functor()); \ \ THCudaTensor_freeCopyTo(self, self_); \ } IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log, log) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log1p, log1p) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(exp, exp) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(cos, cos) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(acos, acos) 
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(cosh, cosh) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(sin, sin) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(asin, asin) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(sinh, sinh) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(tan, tan) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(atan, atan) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(tanh, tanh) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(sqrt, sqrt) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(ceil, ceil) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(floor, floor) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(abs, fabs) struct pow_functor { const float value; pow_functor(float value_) : value(value_) {} __host__ __device__ float operator()(const float& x) const { return pow(x, value); } }; void THCudaTensor_pow(THCudaTensor *self_, THCudaTensor *src, float value) { THArgCheck(THCudaTensor_nElement(self_) == THCudaTensor_nElement(src), 2, "sizes do not match"); THCudaTensor *self = THCudaTensor_newContiguous(self_); src = THCudaTensor_newContiguous(src); long size = THCudaTensor_nElement(self); thrust::device_ptr<float> self_data(THCudaTensor_data(self)); thrust::device_ptr<float> src_data(THCudaTensor_data(src)); thrust::transform(src_data, src_data+size, self_data, pow_functor(value)); THCudaTensor_free(src); THCudaTensor_freeCopyTo(self, self_); } struct sign_functor { __device__ float operator()(const float &v) const { return (v > 0) - (v < 0); } }; void THCudaTensor_sign(THCudaTensor *self_, THCudaTensor *src) { THArgCheck(THCudaTensor_nElement(self_) == THCudaTensor_nElement(src), 2, "size do not match"); { THCudaTensor *self = THCudaTensor_newContiguous(self_); long size = THCudaTensor_nElement(self); src = THCudaTensor_newContiguous(src); thrust::device_ptr<float> self_data(THCudaTensor_data(self)); thrust::device_ptr<float> src_data(THCudaTensor_data(src)); thrust::transform(src_data, src_data+size, self_data, sign_functor()); THCudaTensor_free(src); THCudaTensor_freeCopyTo(self, self_); } } float THCudaTensor_meanall(THCudaTensor *self) { THArgCheck(self->nDimension > 0, 1, "empty Tensor"); return THCudaTensor_sumall(self)/THCudaTensor_nElement(self); } void THCudaTensor_mean(THCudaTensor *self, THCudaTensor *src, long dim) { THCudaTensor_sum(self, src, dim); THCudaTensor_div(self, THCudaTensor_size(src, dim)); } struct square_functor { const float mean; square_functor(float mean_) : mean(mean_) {} __host__ __device__ float operator()(const float& x) const { return (x-mean)*(x-mean); } }; float THCudaTensor_varall(THCudaTensor *self) { self = THCudaTensor_newContiguous(self); long size = THCudaTensor_nElement(self); thrust::device_ptr<float> self_data(THCudaTensor_data(self)); float mean = THCudaTensor_meanall(self); float result = thrust::transform_reduce(self_data, self_data+size, square_functor(mean), (float)0, thrust::plus<float>()); result = result/(THCudaTensor_nElement(self)-1); THCudaTensor_free(self); return result; } float THCudaTensor_stdall(THCudaTensor *self) { return sqrt(THCudaTensor_varall(self)); } template<class Op> void THCudaTensor_logicalValue(THCudaTensor *self_, THCudaTensor *src, Op op) { THCudaTensor_resizeAs(self_, src); THCudaTensor *self = THCudaTensor_newContiguous(self_); long size = THCudaTensor_nElement(self); src = THCudaTensor_newContiguous(src); thrust::device_ptr<float> self_data(THCudaTensor_data(self)); thrust::device_ptr<float> src_data(THCudaTensor_data(src)); thrust::transform(src_data, src_data+size, self_data, op); THCudaTensor_free(src); THCudaTensor_freeCopyTo(self, self_); } struct partial_less_functor { const float rhs; partial_less_functor(float rhs) : rhs(rhs) {} 
__host__ __device__ bool operator()(const float &lhs) const {return lhs < rhs;} }; void THCudaTensor_ltValue(THCudaTensor *self_, THCudaTensor *src, float value) { THCudaTensor_logicalValue(self_, src, partial_less_functor(value)); } struct partial_greater_functor { const float rhs; partial_greater_functor(float rhs) : rhs(rhs) {} __host__ __device__ bool operator()(const float &lhs) const {return lhs > rhs;} }; void THCudaTensor_gtValue(THCudaTensor *self_, THCudaTensor *src, float value) { THCudaTensor_logicalValue(self_, src, partial_greater_functor(value)); } struct partial_less_equal_functor { const float rhs; partial_less_equal_functor(float rhs) : rhs(rhs) {} __host__ __device__ bool operator()(const float &lhs) const {return lhs <= rhs;} }; void THCudaTensor_leValue(THCudaTensor *self_, THCudaTensor *src, float value) { THCudaTensor_logicalValue(self_, src, partial_less_equal_functor(value)); } struct partial_greater_equal_functor { const float rhs; partial_greater_equal_functor(float rhs) : rhs(rhs) {} __host__ __device__ bool operator()(const float &lhs) const {return lhs >= rhs;} }; void THCudaTensor_geValue(THCudaTensor *self_, THCudaTensor *src, float value) { THCudaTensor_logicalValue(self_, src, partial_greater_equal_functor(value)); } struct partial_equal_functor { const float rhs; partial_equal_functor(float rhs) : rhs(rhs) {} __host__ __device__ bool operator()(const float &lhs) const {return lhs == rhs;} }; void THCudaTensor_eqValue(THCudaTensor *self_, THCudaTensor *src, float value) { THCudaTensor_logicalValue(self_, src, partial_equal_functor(value)); } struct partial_not_equal_functor { const float rhs; partial_not_equal_functor(float rhs) : rhs(rhs) {} __host__ __device__ bool operator()(const float &lhs) const {return lhs != rhs;} }; void THCudaTensor_neValue(THCudaTensor *self_, THCudaTensor *src, float value) { THCudaTensor_logicalValue(self_, src, partial_not_equal_functor(value)); } template<class Op> void THCudaTensor_logicalTensor(THCudaTensor *self_, THCudaTensor *src1, THCudaTensor *src2, Op op) { THCudaTensor_resizeAs(self_, src1); THArgCheck(THCudaTensor_nElement(src1) == THCudaTensor_nElement(src2), 3, "size do not match"); THCudaTensor *self = THCudaTensor_newContiguous(self_); long size = THCudaTensor_nElement(self); src1 = THCudaTensor_newContiguous(src1); src2 = THCudaTensor_newContiguous(src2); thrust::device_ptr<float> self_data(THCudaTensor_data(self)); thrust::device_ptr<float> src1_data(THCudaTensor_data(src1)); thrust::device_ptr<float> src2_data(THCudaTensor_data(src2)); thrust::transform(src1_data, src1_data+size, src2_data, self_data, op); THCudaTensor_free(src1); THCudaTensor_free(src2); THCudaTensor_freeCopyTo(self, self_); } void THCudaTensor_ltTensor(THCudaTensor *self_, THCudaTensor *src1, THCudaTensor *src2) { THCudaTensor_logicalTensor(self_, src1, src2, thrust::less<float>()); } void THCudaTensor_gtTensor(THCudaTensor *self_, THCudaTensor *src1, THCudaTensor *src2) { THCudaTensor_logicalTensor(self_, src1, src2, thrust::greater<float>()); } void THCudaTensor_leTensor(THCudaTensor *self_, THCudaTensor *src1, THCudaTensor *src2) { THCudaTensor_logicalTensor(self_, src1, src2, thrust::less_equal<float>()); } void THCudaTensor_geTensor(THCudaTensor *self_, THCudaTensor *src1, THCudaTensor *src2) { THCudaTensor_logicalTensor(self_, src1, src2, thrust::greater_equal<float>()); } void THCudaTensor_eqTensor(THCudaTensor *self_, THCudaTensor *src1, THCudaTensor *src2) { THCudaTensor_logicalTensor(self_, src1, src2, thrust::equal_to<float>()); 
} void THCudaTensor_neTensor(THCudaTensor *self_, THCudaTensor *src1, THCudaTensor *src2) { THCudaTensor_logicalTensor(self_, src1, src2, thrust::not_equal_to<float>()); } struct norm_functor { const float exponent; norm_functor(float exponent_) : exponent(exponent_) {} __host__ __device__ float operator()(const float& x) const { return pow(fabs(x), exponent); } }; float THCudaTensor_normall(THCudaTensor *self, float value) { self = THCudaTensor_newContiguous(self); long size = THCudaTensor_nElement(self); thrust::device_ptr<float> self_data(THCudaTensor_data(self)); float result; if(value == 0.0f) { result = thrust::transform_reduce(self_data, self_data+size, partial_not_equal_functor(0.0f), (float)0, thrust::plus<float>()); } else { result = thrust::transform_reduce(self_data, self_data+size, norm_functor(value), (float)0, thrust::plus<float>()); result = pow(result, (float)1.0/value); } THCudaTensor_free(self); return result; } void THCudaTensor_norm(THCudaTensor* self, THCudaTensor* src, float value, long dimension) { if(value == 0.0f) { THCudaTensor_transformReduceDim(self, src, dimension, partial_not_equal_functor(0.0f), (float)0, thrust::plus<float>()); } else { THCudaTensor_transformReduceDim(self, src, dimension, norm_functor(value), (float)0, thrust::plus<float>()); THCudaTensor_pow(self, self, 1/value); } } __global__ void THCudaTensor_kernel_renorm(float *data, const float value, const long size, const float maxnorm) { __shared__ float buffer[32]; long tx = threadIdx.x; long bx = blockIdx.x; long step = blockDim.x; float *row = data + size*bx; buffer[tx] = 0; // get norm of axis for (long i=tx; i<size; i+=step) { buffer[tx] += pow(fabs(row[i]), value); } // add (reduce) for (unsigned int stride = blockDim.x >> 1; stride > 0; stride >>= 1) { __syncthreads(); if (tx < stride) buffer[tx] += buffer[tx+stride]; } // clip norms __syncthreads(); float norm = pow(buffer[0], 1/value); if (norm > maxnorm) { norm = maxnorm / (norm + 1e-7); // renormalize for (long i=tx; i<size; i+=step) { row[i] *= norm; } } } void THCudaTensor_renorm(THCudaTensor* self, THCudaTensor* src, float value, long dimension, float maxnorm) { THCudaTensor *self_; THCudaTensor *src_ = THCudaTensor_newTranspose(src, dimension, 0); THCudaTensor *data = THCudaTensor_newClone(src_); long size = THCudaTensor_nElement(data)/data->size[0]; THArgCheck(dimension >= 0 && dimension < THCudaTensor_nDimension(src), 3, "invalid dimension"); THArgCheck(value > 0, 2, "non-positive-norm not supported"); THArgCheck(THCudaTensor_nDimension(src) > 1, 1, "need at least 2 dimensions"); dim3 grid(data->size[0]); dim3 threads(32); hipLaunchKernelGGL(( THCudaTensor_kernel_renorm), dim3(grid), dim3(threads), 0, 0, THCudaTensor_data(data), value, size, maxnorm); hipError_t errcode = hipGetLastError(); if(errcode != hipSuccess) THError(hipGetErrorString(errcode)); THCudaTensor_free(src_); self_ = THCudaTensor_newTranspose(data, dimension, 0); THCudaTensor_resizeAs(self, self_); THCudaTensor_freeCopyTo(self_, self); THCudaTensor_free(data); } struct dist_functor { const float exponent; dist_functor(float exponent_) : exponent(exponent_) {} __host__ __device__ float operator()(const float& x, const float& y) const { return pow(fabs(x-y), exponent); } }; float THCudaTensor_dist(THCudaTensor *self, THCudaTensor *src, float value) { self = THCudaTensor_newContiguous(self); long size = THCudaTensor_nElement(self); src = THCudaTensor_newContiguous(src); thrust::device_ptr<float> self_data(THCudaTensor_data(self)); thrust::device_ptr<float> 
src_data(THCudaTensor_data(src)); float result = thrust::inner_product(self_data, self_data+size, src_data, (float) 0,thrust::plus<float>(), dist_functor(value)); THCudaTensor_free(src); THCudaTensor_free(self); return pow(result, (float)1.0/value); } void THCudaTensor_rand(THCudaTensor *r_, THLongStorage *size) { THCudaTensor_resize(r_, size, NULL); THCudaTensor_uniform(r_, 0, 1); } void THCudaTensor_randn(THCudaTensor *r_, THLongStorage *size) { THCudaTensor_resize(r_, size, NULL); THCudaTensor_normal(r_, 0, 1); } __global__ void THCudaTensor_kernel_indexFill( float *tensor, long* stride, float *index, long src_nDim, int dim, long idx_size, long tensor_size, long size_dim, float val ) { int thread_idx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; long flat_size = tensor_size / idx_size; if (thread_idx < flat_size) { long coeff = 0; for (int i=0; i<idx_size; i++) { int leftover = thread_idx; int srcIdx = 0; for (int d=0; d<src_nDim; d++) { if (d < dim) { coeff = leftover / (stride[d] / size_dim); leftover -= coeff * (stride[d] / size_dim); srcIdx += coeff * stride[d]; } else if (d > dim) { coeff = leftover / stride[d]; leftover -= coeff * stride[d]; srcIdx += coeff * stride[d]; } } tensor[srcIdx + (int)((index[i])-1)*stride[dim]] = val; } } } __global__ void THCudaTensor_kernel_indexCopy( float *res, float *src, long* res_stride, float *index, long res_nDim, int dim, long idx_size, long src_size, long size_dim ) { int thread_idx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; long flat_size = src_size / idx_size; if (thread_idx < flat_size) { long coeff = 0; for (int i=0; i<idx_size; i++) { int leftover = thread_idx; int targetIdx = 0; int resIdx = 0; for (int d=0; d<res_nDim; d++) { if (d < dim) { long stride_d = res_stride[d] / size_dim; coeff = leftover / stride_d; leftover -= coeff * stride_d; targetIdx += coeff * stride_d * idx_size; resIdx += coeff * res_stride[d]; } else if (d > dim) { coeff = leftover / res_stride[d]; leftover -= coeff * res_stride[d]; targetIdx += coeff * res_stride[d]; resIdx += coeff * res_stride[d]; } } res[resIdx + ((int)(index[i])-1)*res_stride[dim]] = src[targetIdx + i*res_stride[dim]]; } } } void THCudaTensor_indexCopy(THCudaTensor *res_, int dim, THLongTensor *indices, THCudaTensor *src) { THCudaTensor *indices_; long *stride_; long nIndex = indices->size[0]; long nRes; THArgCheck(indices->nDimension == 1, 3, "expecting vector of indices"); THArgCheck(dim < src->nDimension, 4, "Indexing dim is out of bounds"); THArgCheck(src->nDimension > 0, 2, "Source tensor is empty"); THArgCheck(nIndex == src->size[dim], 4, "length of src.size[dim] is not equal to length of indices"); indices_ = THCudaTensor_newWithSize1d(nIndex); THCudaTensor_copyLong(indices_, indices); nRes = THCudaTensor_nElement(res_); dim3 nthreads(16, 16); dim3 nblocks(ceil((float)nRes / nIndex / (16*16))); THCudaCheck(hipMalloc((void**)&stride_, res_->nDimension * sizeof(long))); THCudaCheck(hipMemcpy(stride_, res_->stride, res_->nDimension * sizeof(long), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( THCudaTensor_kernel_indexCopy), dim3(nblocks), dim3(nthreads), 0, 0, THCudaTensor_data(res_), THCudaTensor_data(src), stride_, THCudaTensor_data(indices_), res_->nDimension, dim, nIndex, THCudaTensor_nElement(src), res_->size[dim] ); THCudaCheck(hipFree(stride_)); THCudaTensor_free(indices_); } void THCudaTensor_indexFill(THCudaTensor *res_, int dim, THLongTensor *indices, float val) { THCudaTensor *indices_; long *stride_; long 
nIndex = indices->size[0]; long nRes; THArgCheck(indices->nDimension == 1, 3, "Index is supposed to be a vector"); THArgCheck(dim < res_->nDimension,4,"Indexing dim is out of bounds"); THArgCheck(res_->nDimension > 0, 2, "Source tensor is empty"); indices_ = THCudaTensor_newWithSize1d(nIndex); THCudaTensor_copyLong(indices_, indices); nRes = THCudaTensor_nElement(res_) / res_->size[dim] * nIndex; dim3 nthreads(16, 16); dim3 nblocks(ceil((float)nRes / nIndex / (16*16))); THCudaCheck(hipMalloc((void**)&stride_, res_->nDimension * sizeof(long))); THCudaCheck(hipMemcpy(stride_, res_->stride, res_->nDimension * sizeof(long), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( THCudaTensor_kernel_indexFill), dim3(nblocks), dim3(nthreads), 0, 0, THCudaTensor_data(res_), stride_, THCudaTensor_data(indices_), res_->nDimension, dim, nIndex, nRes, res_->size[dim], val ); THCudaCheck(hipFree(stride_)); THCudaTensor_free(indices_); } __global__ void THCudaTensor_kernel_indexSelect( float *tensor, float *src, long* src_stride, float *index, long src_nDim, int dim, long idx_size, long tensor_size, long size_dim ) { int thread_idx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; long flat_size = tensor_size / idx_size; if (thread_idx < flat_size) { long coeff = 0; for (int i=0; i<idx_size; i++) { int leftover = thread_idx; int targetIdx = 0; int srcIdx = 0; for (int d=0; d<src_nDim; d++) { if (d < dim) { long stride_d = src_stride[d] / size_dim; coeff = leftover / stride_d; leftover -= coeff * stride_d; targetIdx += coeff * stride_d * idx_size; srcIdx += coeff * src_stride[d]; } else if (d > dim) { coeff = leftover / src_stride[d]; leftover -= coeff * src_stride[d]; targetIdx += coeff * src_stride[d]; srcIdx += coeff * src_stride[d]; } } tensor[targetIdx + i*src_stride[dim]] = src[srcIdx + ((int)(index[i])-1)*src_stride[dim]]; } } } void THCudaTensor_indexSelect(THCudaTensor *res_, THCudaTensor *src, int dim, THLongTensor *indices) { THLongStorage *newSize; THCudaTensor *indices_; long *stride_; long nIndex = indices->size[0]; long nRes; THArgCheck(indices->nDimension == 1, 3, "expecting vector of indices"); THArgCheck(dim < src->nDimension, 4, "Indexing dim is out of bounds"); THArgCheck(src->nDimension > 0, 2, "Source tensor is empty"); newSize = THLongStorage_newWithSize(src->nDimension); THLongStorage_rawCopy(newSize, src->size); newSize->data[dim] = nIndex; THCudaTensor_resize(res_, newSize, NULL); THLongStorage_free(newSize); indices_ = THCudaTensor_newWithSize1d(nIndex); THCudaTensor_copyLong(indices_, indices); nRes = THCudaTensor_nElement(res_); dim3 nthreads(16, 16); dim3 nblocks(ceil((float)nRes / nIndex / (16*16))); THCudaCheck(hipMalloc((void**)&stride_, src->nDimension * sizeof(long))); THCudaCheck(hipMemcpy(stride_, src->stride, src->nDimension * sizeof(long), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( THCudaTensor_kernel_indexSelect), dim3(nblocks), dim3(nthreads), 0, 0, THCudaTensor_data(res_), THCudaTensor_data(src), stride_, THCudaTensor_data(indices_), src->nDimension, dim, indices->size[0], nRes, src->size[dim] ); THCudaCheck(hipFree(stride_)); THCudaTensor_free(indices_); }
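The hipified listing above launches every kernel through hipLaunchKernelGGL and then checks hipGetLastError. Below is a minimal standalone sketch of that launch-and-check pattern; the kernel, launcher, and the fprintf error report are hypothetical stand-ins and not part of either listing.

#include <cstdio>
#include <hip/hip_runtime.h>

__global__ void scale_kernel(float *data, float value, long size)
{
    long k = (long)blockIdx.x * blockDim.x + threadIdx.x;
    if (k < size)
        data[k] *= value;
}

void launch_scale(float *d_data, float value, long size)
{
    dim3 threads(256);
    dim3 grid((size + threads.x - 1) / threads.x);
    // CUDA form: scale_kernel<<<grid, threads>>>(d_data, value, size);
    hipLaunchKernelGGL(scale_kernel, grid, threads, 0 /*sharedMem*/, 0 /*stream*/,
                       d_data, value, size);
    hipError_t errcode = hipGetLastError();
    if (errcode != hipSuccess)
        fprintf(stderr, "kernel launch failed: %s\n", hipGetErrorString(errcode));
}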
b3441c57269565f2894da829fcda821fb17d0b8f.cu
#include "THCTensorMath.h" #include "THCGeneral.h" #include "THCTensorRandom.h" #include <thrust/fill.h> #include <thrust/functional.h> #include <thrust/reduce.h> #include <thrust/inner_product.h> #define NB_THREADS_PER_BLOCK 256 void THCudaTensor_fill(THCudaTensor *self_, float value) { THCudaTensor *self = THCudaTensor_newContiguous(self_); thrust::device_ptr<float> self_data(THCudaTensor_data(self)); thrust::fill(self_data, self_data+THCudaTensor_nElement(self), value); THCudaTensor_freeCopyTo(self, self_); } void THCudaTensor_zero(THCudaTensor *self_) { THCudaTensor *self = THCudaTensor_newContiguous(self_); cudaMemset(THCudaTensor_data(self), 0, sizeof(float)*THCudaTensor_nElement(self)); THCudaTensor_freeCopyTo(self, self_); } struct addvalue_functor { const float value; addvalue_functor(float value_) : value(value_) {} __host__ __device__ float operator()(const float& x) const { return (x+value); } }; void THCudaTensor_add(THCudaTensor *self_, float value) { { THCudaTensor *self = THCudaTensor_newContiguous(self_); long size = THCudaTensor_nElement(self); thrust::device_ptr<float> self_data(THCudaTensor_data(self)); thrust::transform(self_data, self_data+size, self_data, addvalue_functor(value)); THCudaTensor_freeCopyTo(self, self_); } } void THCudaTensor_mul(THCudaTensor *self_, float value) { THCudaTensor *self = THCudaTensor_newContiguous(self_); cublasSscal(THCudaTensor_nElement(self), value, THCudaTensor_data(self), 1); THCublasCheck(); THCudaTensor_freeCopyTo(self, self_); } void THCudaTensor_div(THCudaTensor *self_, float value) { THCudaTensor *self = THCudaTensor_newContiguous(self_); cublasSscal(THCudaTensor_nElement(self), 1/value, THCudaTensor_data(self), 1); THCublasCheck(); THCudaTensor_freeCopyTo(self, self_); } void THCudaTensor_cadd(THCudaTensor *self_, float value, THCudaTensor *src) { THArgCheck(THCudaTensor_nElement(self_) == THCudaTensor_nElement(src), 3, "size do not match"); { THCudaTensor *self = THCudaTensor_newContiguous(self_); src = THCudaTensor_newContiguous(src); cublasSaxpy(THCudaTensor_nElement(self), value, THCudaTensor_data(src), 1, THCudaTensor_data(self), 1); THCublasCheck(); THCudaTensor_free(src); THCudaTensor_freeCopyTo(self, self_); } } void THCudaTensor_cadd_tst(THCudaTensor *self_, THCudaTensor* src1, float value, THCudaTensor *src2) { THCudaTensor_resizeAs(self_, src1); THArgCheck(THCudaTensor_nElement(src1) == THCudaTensor_nElement(src2), 3, "size do not match"); { THCudaTensor *self = THCudaTensor_newContiguous(self_); src1 = THCudaTensor_newContiguous(src1); src2 = THCudaTensor_newContiguous(src2); THCudaTensor_copy(self, src1); cublasSaxpy(THCudaTensor_nElement(self), value, THCudaTensor_data(src2), 1, THCudaTensor_data(self), 1); THCublasCheck(); THCudaTensor_free(src1); THCudaTensor_free(src2); THCudaTensor_freeCopyTo(self, self_); } } void THCudaTensor_cmul(THCudaTensor *self_, THCudaTensor *src1, THCudaTensor *src2) { THCudaTensor_resizeAs(self_, src1); THArgCheck(THCudaTensor_nElement(src1) == THCudaTensor_nElement(src2), 3, "size do not match"); { THCudaTensor *self = THCudaTensor_newContiguous(self_); long size = THCudaTensor_nElement(self); src1 = THCudaTensor_newContiguous(src1); src2 = THCudaTensor_newContiguous(src2); thrust::device_ptr<float> self_data(THCudaTensor_data(self)); thrust::device_ptr<float> src1_data(THCudaTensor_data(src1)); thrust::device_ptr<float> src2_data(THCudaTensor_data(src2)); thrust::transform(src2_data, src2_data+size, src1_data, self_data, thrust::multiplies<float>()); THCudaTensor_free(src1); 
THCudaTensor_free(src2); THCudaTensor_freeCopyTo(self, self_); } } void THCudaTensor_cdiv(THCudaTensor *self_, THCudaTensor *src1, THCudaTensor *src2) { THCudaTensor_resizeAs(self_, src1); THArgCheck(THCudaTensor_nElement(src1) == THCudaTensor_nElement(src2), 3, "size does not match"); { THCudaTensor *self = THCudaTensor_newContiguous(self_); long size = THCudaTensor_nElement(self); src1 = THCudaTensor_newContiguous(src1); src2 = THCudaTensor_newContiguous(src2); thrust::device_ptr<float> self_data(THCudaTensor_data(self)); thrust::device_ptr<float> src1_data(THCudaTensor_data(src1)); thrust::device_ptr<float> src2_data(THCudaTensor_data(src2)); thrust::transform(src1_data, src1_data+size, src2_data, self_data, thrust::divides<float>()); THCudaTensor_free(src1); THCudaTensor_free(src2); THCudaTensor_freeCopyTo(self, self_); } } __global__ void THCudaTensor_kernel_addcmul(float *data, float value, float *src1, float *src2, long size) { long k = (((blockIdx.y * gridDim.x) + blockIdx.x) * blockDim.x) + threadIdx.x; if(k < size) data[k] += value*src1[k]*src2[k]; } void THCudaTensor_addcmul(THCudaTensor *self_, float value, THCudaTensor *src1, THCudaTensor *src2) { THCudaTensor_resizeAs(self_, src1); THArgCheck(THCudaTensor_nElement(src1) == THCudaTensor_nElement(src2), 3, "size do not match"); { THCudaTensor *self = THCudaTensor_newContiguous(self_); long size = THCudaTensor_nElement(self); src1 = THCudaTensor_newContiguous(src1); src2 = THCudaTensor_newContiguous(src2); int nBlockPerRow, nBlockPerColumn, nThreadPerBlock; THCudaGetGridSize(&nBlockPerRow, &nBlockPerColumn, &nThreadPerBlock, size); dim3 threads(nThreadPerBlock); dim3 grid(nBlockPerRow, nBlockPerColumn); THCudaTensor_kernel_addcmul<<<grid, threads>>>(THCudaTensor_data(self), value, THCudaTensor_data(src1), THCudaTensor_data(src2), size); cudaError errcode = cudaGetLastError(); if(errcode != cudaSuccess) THError(cudaGetErrorString(errcode)); THCudaTensor_free(src1); THCudaTensor_free(src2); THCudaTensor_freeCopyTo(self, self_); } } __global__ void THCudaTensor_kernel_addcdiv(float *data, float value, float *src1, float *src2, long size) { long k = (((blockIdx.y * gridDim.x) + blockIdx.x) * blockDim.x) + threadIdx.x; if(k < size) data[k] += value*src1[k]/src2[k]; } void THCudaTensor_addcdiv(THCudaTensor *self_, float value, THCudaTensor *src1, THCudaTensor *src2) { THCudaTensor_resizeAs(self_, src1); THArgCheck(THCudaTensor_nElement(src1) == THCudaTensor_nElement(src2), 3, "size do not match"); { THCudaTensor *self = THCudaTensor_newContiguous(self_); long size = THCudaTensor_nElement(self); src1 = THCudaTensor_newContiguous(src1); src2 = THCudaTensor_newContiguous(src2); int nBlockPerRow, nBlockPerColumn, nThreadPerBlock; THCudaGetGridSize(&nBlockPerRow, &nBlockPerColumn, &nThreadPerBlock, size); dim3 threads(nThreadPerBlock); dim3 grid(nBlockPerRow, nBlockPerColumn); THCudaTensor_kernel_addcdiv<<<grid, threads>>>(THCudaTensor_data(self), value, THCudaTensor_data(src1), THCudaTensor_data(src2), size); cudaError errcode = cudaGetLastError(); if(errcode != cudaSuccess) THError(cudaGetErrorString(errcode)); THCudaTensor_free(src1); THCudaTensor_free(src2); THCudaTensor_freeCopyTo(self, self_); } } float THCudaTensor_dot(THCudaTensor *self, THCudaTensor *src) { THArgCheck(THCudaTensor_nElement(self) == THCudaTensor_nElement(src), 2, "size do not match"); { self = THCudaTensor_newContiguous(self); src = THCudaTensor_newContiguous(src); float result = cublasSdot(THCudaTensor_nElement(self), THCudaTensor_data(self), 1, 
THCudaTensor_data(src), 1); THCublasCheck(); THCudaTensor_free(src); THCudaTensor_free(self); return result; } } float THCudaTensor_minall(THCudaTensor *self) { self = THCudaTensor_newContiguous(self); thrust::device_ptr<float> self_data(THCudaTensor_data(self)); float result = thrust::reduce(self_data, self_data+THCudaTensor_nElement(self), (float)(THInf), thrust::minimum<float>()); THCudaTensor_free(self); return result; } float THCudaTensor_maxall(THCudaTensor *self) { self = THCudaTensor_newContiguous(self); thrust::device_ptr<float> self_data(THCudaTensor_data(self)); float result = thrust::reduce(self_data, self_data+THCudaTensor_nElement(self), (float)(-THInf), thrust::maximum<float>()); THCudaTensor_free(self); return result; } float THCudaTensor_sumall(THCudaTensor *self) { self = THCudaTensor_newContiguous(self); thrust::device_ptr<float> self_data(THCudaTensor_data(self)); float result = thrust::reduce(self_data, self_data+THCudaTensor_nElement(self), (float)(0), thrust::plus<float>()); THCudaTensor_free(self); return result; } struct dim4 { unsigned arr[4]; __host__ dim4(unsigned init=0) { for(unsigned i=0; i<4; i++) { arr[i] = init; } } __host__ __device__ unsigned& operator[](const unsigned& idx) { return arr[idx]; } }; /* Reduce one of the outer dimensions of a tensor * * For an n-d tensor (n <= 4) where the reduction is *not* along the innermost * dimension: * * - block.x and grid.x make up the innermost dimension; * - The reduced dimension is looped over inside a block; and * - grid.y and grid.z are the remaining two dimensions (if any). * - block.y and block.z are not used as we're limited to 512 or 1024 threads * in the block. * * For sizes/strides, index 3 is the reduced dimension, while the remaining * indices are for the remaining dimensions with index 0 the innermost dimension. * * Reduction along the innermost dimension is handled in a separate kernel. */ template<class UnaryFunction, class BinaryFunction> __global__ void THCudaTensor_kernel_transformReduceOuterDim(float *tgt, float *src_, dim4 src_stride, dim4 tgt_stride, dim4 size, UnaryFunction unary_op, float init, BinaryFunction binary_op) { const size_t reduce = 3; for(unsigned z = blockIdx.z; z < size[2] ; z += gridDim.z) for(unsigned y = blockIdx.y; y < size[1] ; y += gridDim.y) for(unsigned col = blockIdx.x * blockDim.x + threadIdx.x; col < size[0]; col += blockDim.x * gridDim.x) { float *src = src_ + z * src_stride[2] + y * src_stride[1] + col; float acc = init; for(unsigned i=0; i < size[reduce]; i++) { acc = binary_op(acc, unary_op(*src)); src += src_stride[reduce]; } tgt[z * tgt_stride[2] + y * tgt_stride[1] + col] = float(acc); } } template<class UnaryFunction, class BinaryFunction> __host__ void THCudaTensor_transformReduceOuterDim(THCudaTensor *tgt, THCudaTensor *src, long rdim, UnaryFunction unary_op, float init, BinaryFunction binary_op) { const size_t reduce = 3; dim4 src_stride(0); dim4 tgt_stride(0); dim4 size(1); unsigned ndim = THCudaTensor_nDimension(src); for(unsigned idim=0, o=ndim-2; idim < ndim; idim++) { unsigned odim = idim == rdim ? reduce : o--; src_stride[odim] = THCudaTensor_stride(src, idim); tgt_stride[odim] = THCudaTensor_stride(tgt, idim); size[odim] = THCudaTensor_size(src, idim); } const unsigned nThreadPerBlock = 256; unsigned nBlockPerColumn = (size[0] + nThreadPerBlock - 1) / nThreadPerBlock; dim3 threads(nThreadPerBlock); unsigned maxGridDim = 1024; // anything < 64k is fine. The choice has no impact on performance. 
dim3 grid(min(maxGridDim, nBlockPerColumn), min(maxGridDim, size[1]), min(maxGridDim, size[2])); THCudaTensor_kernel_transformReduceOuterDim<<<grid, threads>>>(THCudaTensor_data(tgt), THCudaTensor_data(src), src_stride, tgt_stride, size, unary_op, init, binary_op); cudaError errcode = cudaGetLastError(); if(errcode != cudaSuccess) { THError(cudaGetErrorString(errcode)); } } /* Reduce the innermost dimension of a tensor * * For an n-d tensor (n <= 4) where the reduction is along the innermost dimension: * * - block.x is the innermost dimension, i.e. dimension 0; * - block.y and grid.y make up dimension 1; and * - grid.x and grid z are the remaining two outer dimensions (if any) * * Reduction along other dimensions is handled in a separate kernel. */ template<class UnaryFunction, class BinaryFunction> __global__ void THCudaTensor_kernel_transformReduceInnermostDim(float *tgt, float *src_, dim4 src_stride, dim4 tgt_stride, dim4 size, UnaryFunction unary_op, float init, BinaryFunction binary_op) { __shared__ float sbuf[16][32]; // 8kB for(unsigned z = blockIdx.z; z < size[3] ; z += gridDim.z) for(unsigned x = blockIdx.x; x < size[2] ; x += gridDim.x) for(unsigned bRow = blockIdx.y * blockDim.y; bRow < size[1]; bRow += blockDim.y * gridDim.y) { float acc = init; unsigned row = bRow + threadIdx.y; float *src = src_ + z * src_stride[3] + x * src_stride[2] + row * src_stride[1]; bool reducing = threadIdx.x < blockDim.y && bRow + threadIdx.x < size[1] && threadIdx.y == 0; for(unsigned bCol=0; bCol < size[0]; bCol += blockDim.x) { sbuf[threadIdx.y][threadIdx.x] = init; unsigned col = bCol + threadIdx.x; if(row < size[1] && col < size[0]) { sbuf[threadIdx.y][threadIdx.x] = unary_op(src[col]); } __syncthreads(); float* line = &sbuf[threadIdx.y][0]; for(unsigned s = 16; s > 1; s >>= 1) { if(row < size[1] && threadIdx.x < s) { line[threadIdx.x] = binary_op(line[threadIdx.x], line[threadIdx.x + s]); } __syncthreads(); } if(reducing) { sbuf[threadIdx.x][0] = binary_op(sbuf[threadIdx.x][0], sbuf[threadIdx.x][1]); acc = binary_op(acc, sbuf[threadIdx.x][0]); } __syncthreads(); } if(reducing) { unsigned row = bRow + threadIdx.x; unsigned tgt_offset = z * tgt_stride[3] + x * tgt_stride[2]; tgt[tgt_offset + row] = acc; } } } template<class UnaryFunction, class BinaryFunction> __host__ void THCudaTensor_transformReduceInnermostDim(THCudaTensor *tgt, THCudaTensor *src, UnaryFunction unary_op, float init, BinaryFunction binary_op) { dim4 src_stride(0); dim4 tgt_stride(0); dim4 size(1); unsigned ndim = THCudaTensor_nDimension(src); for(unsigned dim=0; dim < ndim; dim++) { unsigned odim = ndim - 1 - dim; src_stride[odim] = THCudaTensor_stride(src, dim); tgt_stride[odim] = THCudaTensor_stride(tgt, dim); size[odim] = THCudaTensor_size(src, dim); } dim3 threads(32, 16); unsigned nBlockPerRow = (size[1] + threads.y - 1) / threads.y; unsigned maxGridDim = 1024; // anything < 64k is fine. The choice has no impact on performance. 
dim3 grid(min(maxGridDim, size[2]), min(maxGridDim, nBlockPerRow), min(maxGridDim, size[3])); THCudaTensor_kernel_transformReduceInnermostDim<<<grid, threads>>>(THCudaTensor_data(tgt), THCudaTensor_data(src), src_stride, tgt_stride, size, unary_op, init, binary_op); cudaError errcode = cudaGetLastError(); if(errcode != cudaSuccess) { THError(cudaGetErrorString(errcode)); } } template<class UnaryFunction, class BinaryFunction> void THCudaTensor_transformReduceDim(THCudaTensor *self_, THCudaTensor *src, long dimension, UnaryFunction unary_op, float init, BinaryFunction binary_op) { THArgCheck(dimension >= 0 && dimension < THCudaTensor_nDimension(src), 3, "dimension out of range"); THArgCheck(THCudaTensor_nDimension(src) <= 4, 2, "too many dimensions (>4)"); THLongStorage *dim = THCudaTensor_newSizeOf(src); THLongStorage_set(dim, dimension, 1); THCudaTensor_resize(self_, dim, NULL); THLongStorage_free(dim); THCudaTensor *self = THCudaTensor_newContiguous(self_); src = THCudaTensor_newContiguous(src); if(dimension == THCudaTensor_nDimension(src)-1) { THCudaTensor_transformReduceInnermostDim(self, src, unary_op, init, binary_op); } else { THCudaTensor_transformReduceOuterDim(self, src, dimension, unary_op, init, binary_op); } THCudaTensor_free(src); THCudaTensor_freeCopyTo(self, self_); } template<class BinaryFunction> void THCudaTensor_reduceDim(THCudaTensor *self_, THCudaTensor *src, long dimension, float init, BinaryFunction binary_op) { THCudaTensor_transformReduceDim(self_, src, dimension, thrust::identity<float>(), init, binary_op); } void THCudaTensor_sum(THCudaTensor *self, THCudaTensor *src, long dimension) { return THCudaTensor_reduceDim(self, src, dimension, 0.0f, thrust::plus<float>()); } void THCudaTensor_max(THCudaTensor *self, THCudaTensor *src, long dimension) { const float minfloat32 = -3.402823466e+38f; return THCudaTensor_reduceDim(self, src, dimension, minfloat32, thrust::maximum<float>()); } void THCudaTensor_min(THCudaTensor *self, THCudaTensor *src, long dimension) { const float maxfloat32 = 3.402823466e+38f; return THCudaTensor_reduceDim(self, src, dimension, maxfloat32, thrust::minimum<float>()); } void THCudaTensor_addmv(THCudaTensor *self, float beta, float alpha, THCudaTensor *mat, THCudaTensor *vec) { if( (mat->nDimension != 2) || (vec->nDimension != 1) ) THError("matrix and vector expected"); if( mat->size[1] != vec->size[0] ) THError("size mismatch"); if(self->nDimension != 1) THError("size mismatch"); if( self->size[0] != mat->size[0] ) THError("size mismatch"); if(mat->stride[0] == 1) { cublasSgemv('n', mat->size[0], mat->size[1], alpha, THCudaTensor_data(mat), mat->stride[1], THCudaTensor_data(vec), vec->stride[0], beta, THCudaTensor_data(self), self->stride[0]); } else if(mat->stride[1] == 1) { cublasSgemv('t', mat->size[1], mat->size[0], alpha, THCudaTensor_data(mat), mat->stride[0], THCudaTensor_data(vec), vec->stride[0], beta, THCudaTensor_data(self), self->stride[0]); } else { mat = THCudaTensor_newContiguous(mat); cublasSgemv('t', mat->size[1], mat->size[0], alpha, THCudaTensor_data(mat), mat->stride[0], THCudaTensor_data(vec), vec->stride[0], beta, THCudaTensor_data(self), self->stride[0]); THCudaTensor_free(mat); } THCublasCheck(); } void THCudaTensor_addmm(THCudaTensor *self, float beta, float alpha, THCudaTensor *m1, THCudaTensor *m2) { char transpose, transpose_m1, transpose_m2; THCudaTensor *self_, *m1_, *m2_; if( (m1->nDimension != 2) || (m2->nDimension != 2) ) THError("matrix and matrix expected"); if(self->nDimension != 2) THError("size 
mismatch"); if( (self->size[0] != m1->size[0]) || (self->size[1] != m2->size[1]) || (m1->size[1] != m2->size[0]) ) THError("size mismatch"); /* self */ if ((self->stride[0] == 1) && (self->stride[1] > 1)) { transpose = 'n'; self_ = self; } else if(self->stride[1] == 1) { THCudaTensor *swap = m2; m2 = m1; m1 = swap; THCudaTensor_transpose(self, NULL, 0, 1); THCudaTensor_transpose(m1, NULL, 0, 1); THCudaTensor_transpose(m2, NULL, 0, 1); transpose = 't'; self_ = self; } else { transpose = 'n'; THCudaTensor_transpose(self, NULL, 0, 1); self_ = THCudaTensor_newClone(self); THCudaTensor_transpose(self, NULL, 0, 1); THCudaTensor_transpose(self_, NULL, 0, 1); } /* m1 */ if(m1->stride[0] == 1) { transpose_m1 = 'n'; m1_ = m1; } else if(m1->stride[1] == 1) { transpose_m1 = 't'; m1_ = m1; } else { transpose_m1 = 't'; m1_ = THCudaTensor_newContiguous(m1); } /* m2 */ if(m2->stride[0] == 1) { transpose_m2 = 'n'; m2_ = m2; } else if(m2->stride[1] == 1) { transpose_m2 = 't'; m2_ = m2; } else { transpose_m2 = 't'; m2_ = THCudaTensor_newContiguous(m2); } /* do the operation */ cublasSgemm(transpose_m1, transpose_m2, self_->size[0], self_->size[1], m1_->size[1], alpha, THCudaTensor_data(m1_), (transpose_m1 == 'n' ? m1_->stride[1] : m1_->stride[0]), THCudaTensor_data(m2_), (transpose_m2 == 'n' ? m2_->stride[1] : m2_->stride[0]), beta, THCudaTensor_data(self_), self_->stride[1]); THCublasCheck(); /* free intermediate variables */ if(m1_ != m1) THCudaTensor_free(m1_); if(m2_ != m2) THCudaTensor_free(m2_); if(self_ != self) THCudaTensor_freeCopyTo(self_, self); if(transpose == 't') { THCudaTensor_transpose(self, NULL, 0, 1); THCudaTensor_transpose(m1, NULL, 0, 1); THCudaTensor_transpose(m2, NULL, 0, 1); } } void THCudaTensor_addr(THCudaTensor *self, float alpha, THCudaTensor *vec1, THCudaTensor *vec2) { if( (vec1->nDimension != 1) || (vec2->nDimension != 1) ) THError("vector and vector expected"); if(self->nDimension != 2) THError("size mismatch"); if( (self->size[0] != vec1->size[0]) || (self->size[1] != vec2->size[0]) ) THError("size mismatch"); if(self->stride[0] == 1) { cublasSger(vec1->size[0], vec2->size[0], alpha, THCudaTensor_data(vec1), vec1->stride[0], THCudaTensor_data(vec2), vec2->stride[0], THCudaTensor_data(self), self->stride[1]); } else if(self->stride[1] == 1) { cublasSger(vec2->size[0], vec1->size[0], alpha, THCudaTensor_data(vec2), vec2->stride[0], THCudaTensor_data(vec1), vec1->stride[0], THCudaTensor_data(self), self->stride[0]); } else { THCudaTensor *cself = THCudaTensor_newClone(self); cublasSger(vec2->size[0], vec1->size[0], alpha, THCudaTensor_data(vec2), vec2->stride[0], THCudaTensor_data(vec1), vec1->stride[0], THCudaTensor_data(cself), cself->stride[0]); THCudaTensor_freeCopyTo(cself, self); } THCublasCheck(); } #define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC) \ struct NAME##_functor \ { \ __host__ __device__ float operator()(const float& x) const \ { \ return CFUNC(x); \ } \ }; \ \ void THCudaTensor_##NAME(THCudaTensor *self_) \ { \ THCudaTensor *self = THCudaTensor_newContiguous(self_); \ long size = THCudaTensor_nElement(self); \ thrust::device_ptr<float> self_data(THCudaTensor_data(self)); \ \ thrust::transform(self_data, self_data+size, self_data, NAME##_functor()); \ \ THCudaTensor_freeCopyTo(self, self_); \ } IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log, log) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log1p, log1p) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(exp, exp) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(cos, cos) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(acos, acos) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(cosh, cosh) 
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(sin, sin) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(asin, asin) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(sinh, sinh) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(tan, tan) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(atan, atan) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(tanh, tanh) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(sqrt, sqrt) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(ceil, ceil) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(floor, floor) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(abs, fabs) struct pow_functor { const float value; pow_functor(float value_) : value(value_) {} __host__ __device__ float operator()(const float& x) const { return pow(x, value); } }; void THCudaTensor_pow(THCudaTensor *self_, THCudaTensor *src, float value) { THArgCheck(THCudaTensor_nElement(self_) == THCudaTensor_nElement(src), 2, "sizes do not match"); THCudaTensor *self = THCudaTensor_newContiguous(self_); src = THCudaTensor_newContiguous(src); long size = THCudaTensor_nElement(self); thrust::device_ptr<float> self_data(THCudaTensor_data(self)); thrust::device_ptr<float> src_data(THCudaTensor_data(src)); thrust::transform(src_data, src_data+size, self_data, pow_functor(value)); THCudaTensor_free(src); THCudaTensor_freeCopyTo(self, self_); } struct sign_functor { __device__ float operator()(const float &v) const { return (v > 0) - (v < 0); } }; void THCudaTensor_sign(THCudaTensor *self_, THCudaTensor *src) { THArgCheck(THCudaTensor_nElement(self_) == THCudaTensor_nElement(src), 2, "size do not match"); { THCudaTensor *self = THCudaTensor_newContiguous(self_); long size = THCudaTensor_nElement(self); src = THCudaTensor_newContiguous(src); thrust::device_ptr<float> self_data(THCudaTensor_data(self)); thrust::device_ptr<float> src_data(THCudaTensor_data(src)); thrust::transform(src_data, src_data+size, self_data, sign_functor()); THCudaTensor_free(src); THCudaTensor_freeCopyTo(self, self_); } } float THCudaTensor_meanall(THCudaTensor *self) { THArgCheck(self->nDimension > 0, 1, "empty Tensor"); return THCudaTensor_sumall(self)/THCudaTensor_nElement(self); } void THCudaTensor_mean(THCudaTensor *self, THCudaTensor *src, long dim) { THCudaTensor_sum(self, src, dim); THCudaTensor_div(self, THCudaTensor_size(src, dim)); } struct square_functor { const float mean; square_functor(float mean_) : mean(mean_) {} __host__ __device__ float operator()(const float& x) const { return (x-mean)*(x-mean); } }; float THCudaTensor_varall(THCudaTensor *self) { self = THCudaTensor_newContiguous(self); long size = THCudaTensor_nElement(self); thrust::device_ptr<float> self_data(THCudaTensor_data(self)); float mean = THCudaTensor_meanall(self); float result = thrust::transform_reduce(self_data, self_data+size, square_functor(mean), (float)0, thrust::plus<float>()); result = result/(THCudaTensor_nElement(self)-1); THCudaTensor_free(self); return result; } float THCudaTensor_stdall(THCudaTensor *self) { return sqrt(THCudaTensor_varall(self)); } template<class Op> void THCudaTensor_logicalValue(THCudaTensor *self_, THCudaTensor *src, Op op) { THCudaTensor_resizeAs(self_, src); THCudaTensor *self = THCudaTensor_newContiguous(self_); long size = THCudaTensor_nElement(self); src = THCudaTensor_newContiguous(src); thrust::device_ptr<float> self_data(THCudaTensor_data(self)); thrust::device_ptr<float> src_data(THCudaTensor_data(src)); thrust::transform(src_data, src_data+size, self_data, op); THCudaTensor_free(src); THCudaTensor_freeCopyTo(self, self_); } struct partial_less_functor { const float rhs; partial_less_functor(float rhs) : rhs(rhs) {} __host__ __device__ bool operator()(const float 
&lhs) const {return lhs < rhs;} }; void THCudaTensor_ltValue(THCudaTensor *self_, THCudaTensor *src, float value) { THCudaTensor_logicalValue(self_, src, partial_less_functor(value)); } struct partial_greater_functor { const float rhs; partial_greater_functor(float rhs) : rhs(rhs) {} __host__ __device__ bool operator()(const float &lhs) const {return lhs > rhs;} }; void THCudaTensor_gtValue(THCudaTensor *self_, THCudaTensor *src, float value) { THCudaTensor_logicalValue(self_, src, partial_greater_functor(value)); } struct partial_less_equal_functor { const float rhs; partial_less_equal_functor(float rhs) : rhs(rhs) {} __host__ __device__ bool operator()(const float &lhs) const {return lhs <= rhs;} }; void THCudaTensor_leValue(THCudaTensor *self_, THCudaTensor *src, float value) { THCudaTensor_logicalValue(self_, src, partial_less_equal_functor(value)); } struct partial_greater_equal_functor { const float rhs; partial_greater_equal_functor(float rhs) : rhs(rhs) {} __host__ __device__ bool operator()(const float &lhs) const {return lhs >= rhs;} }; void THCudaTensor_geValue(THCudaTensor *self_, THCudaTensor *src, float value) { THCudaTensor_logicalValue(self_, src, partial_greater_equal_functor(value)); } struct partial_equal_functor { const float rhs; partial_equal_functor(float rhs) : rhs(rhs) {} __host__ __device__ bool operator()(const float &lhs) const {return lhs == rhs;} }; void THCudaTensor_eqValue(THCudaTensor *self_, THCudaTensor *src, float value) { THCudaTensor_logicalValue(self_, src, partial_equal_functor(value)); } struct partial_not_equal_functor { const float rhs; partial_not_equal_functor(float rhs) : rhs(rhs) {} __host__ __device__ bool operator()(const float &lhs) const {return lhs != rhs;} }; void THCudaTensor_neValue(THCudaTensor *self_, THCudaTensor *src, float value) { THCudaTensor_logicalValue(self_, src, partial_not_equal_functor(value)); } template<class Op> void THCudaTensor_logicalTensor(THCudaTensor *self_, THCudaTensor *src1, THCudaTensor *src2, Op op) { THCudaTensor_resizeAs(self_, src1); THArgCheck(THCudaTensor_nElement(src1) == THCudaTensor_nElement(src2), 3, "size do not match"); THCudaTensor *self = THCudaTensor_newContiguous(self_); long size = THCudaTensor_nElement(self); src1 = THCudaTensor_newContiguous(src1); src2 = THCudaTensor_newContiguous(src2); thrust::device_ptr<float> self_data(THCudaTensor_data(self)); thrust::device_ptr<float> src1_data(THCudaTensor_data(src1)); thrust::device_ptr<float> src2_data(THCudaTensor_data(src2)); thrust::transform(src1_data, src1_data+size, src2_data, self_data, op); THCudaTensor_free(src1); THCudaTensor_free(src2); THCudaTensor_freeCopyTo(self, self_); } void THCudaTensor_ltTensor(THCudaTensor *self_, THCudaTensor *src1, THCudaTensor *src2) { THCudaTensor_logicalTensor(self_, src1, src2, thrust::less<float>()); } void THCudaTensor_gtTensor(THCudaTensor *self_, THCudaTensor *src1, THCudaTensor *src2) { THCudaTensor_logicalTensor(self_, src1, src2, thrust::greater<float>()); } void THCudaTensor_leTensor(THCudaTensor *self_, THCudaTensor *src1, THCudaTensor *src2) { THCudaTensor_logicalTensor(self_, src1, src2, thrust::less_equal<float>()); } void THCudaTensor_geTensor(THCudaTensor *self_, THCudaTensor *src1, THCudaTensor *src2) { THCudaTensor_logicalTensor(self_, src1, src2, thrust::greater_equal<float>()); } void THCudaTensor_eqTensor(THCudaTensor *self_, THCudaTensor *src1, THCudaTensor *src2) { THCudaTensor_logicalTensor(self_, src1, src2, thrust::equal_to<float>()); } void THCudaTensor_neTensor(THCudaTensor 
*self_, THCudaTensor *src1, THCudaTensor *src2) { THCudaTensor_logicalTensor(self_, src1, src2, thrust::not_equal_to<float>()); } struct norm_functor { const float exponent; norm_functor(float exponent_) : exponent(exponent_) {} __host__ __device__ float operator()(const float& x) const { return pow(fabs(x), exponent); } }; float THCudaTensor_normall(THCudaTensor *self, float value) { self = THCudaTensor_newContiguous(self); long size = THCudaTensor_nElement(self); thrust::device_ptr<float> self_data(THCudaTensor_data(self)); float result; if(value == 0.0f) { result = thrust::transform_reduce(self_data, self_data+size, partial_not_equal_functor(0.0f), (float)0, thrust::plus<float>()); } else { result = thrust::transform_reduce(self_data, self_data+size, norm_functor(value), (float)0, thrust::plus<float>()); result = pow(result, (float)1.0/value); } THCudaTensor_free(self); return result; } void THCudaTensor_norm(THCudaTensor* self, THCudaTensor* src, float value, long dimension) { if(value == 0.0f) { THCudaTensor_transformReduceDim(self, src, dimension, partial_not_equal_functor(0.0f), (float)0, thrust::plus<float>()); } else { THCudaTensor_transformReduceDim(self, src, dimension, norm_functor(value), (float)0, thrust::plus<float>()); THCudaTensor_pow(self, self, 1/value); } } __global__ void THCudaTensor_kernel_renorm(float *data, const float value, const long size, const float maxnorm) { __shared__ float buffer[32]; long tx = threadIdx.x; long bx = blockIdx.x; long step = blockDim.x; float *row = data + size*bx; buffer[tx] = 0; // get norm of axis for (long i=tx; i<size; i+=step) { buffer[tx] += pow(fabs(row[i]), value); } // add (reduce) for (unsigned int stride = blockDim.x >> 1; stride > 0; stride >>= 1) { __syncthreads(); if (tx < stride) buffer[tx] += buffer[tx+stride]; } // clip norms __syncthreads(); float norm = pow(buffer[0], 1/value); if (norm > maxnorm) { norm = maxnorm / (norm + 1e-7); // renormalize for (long i=tx; i<size; i+=step) { row[i] *= norm; } } } void THCudaTensor_renorm(THCudaTensor* self, THCudaTensor* src, float value, long dimension, float maxnorm) { THCudaTensor *self_; THCudaTensor *src_ = THCudaTensor_newTranspose(src, dimension, 0); THCudaTensor *data = THCudaTensor_newClone(src_); long size = THCudaTensor_nElement(data)/data->size[0]; THArgCheck(dimension >= 0 && dimension < THCudaTensor_nDimension(src), 3, "invalid dimension"); THArgCheck(value > 0, 2, "non-positive-norm not supported"); THArgCheck(THCudaTensor_nDimension(src) > 1, 1, "need at least 2 dimensions"); dim3 grid(data->size[0]); dim3 threads(32); THCudaTensor_kernel_renorm<<<grid, threads>>>(THCudaTensor_data(data), value, size, maxnorm); cudaError errcode = cudaGetLastError(); if(errcode != cudaSuccess) THError(cudaGetErrorString(errcode)); THCudaTensor_free(src_); self_ = THCudaTensor_newTranspose(data, dimension, 0); THCudaTensor_resizeAs(self, self_); THCudaTensor_freeCopyTo(self_, self); THCudaTensor_free(data); } struct dist_functor { const float exponent; dist_functor(float exponent_) : exponent(exponent_) {} __host__ __device__ float operator()(const float& x, const float& y) const { return pow(fabs(x-y), exponent); } }; float THCudaTensor_dist(THCudaTensor *self, THCudaTensor *src, float value) { self = THCudaTensor_newContiguous(self); long size = THCudaTensor_nElement(self); src = THCudaTensor_newContiguous(src); thrust::device_ptr<float> self_data(THCudaTensor_data(self)); thrust::device_ptr<float> src_data(THCudaTensor_data(src)); float result = thrust::inner_product(self_data, 
self_data+size, src_data, (float) 0,thrust::plus<float>(), dist_functor(value)); THCudaTensor_free(src); THCudaTensor_free(self); return pow(result, (float)1.0/value); } void THCudaTensor_rand(THCudaTensor *r_, THLongStorage *size) { THCudaTensor_resize(r_, size, NULL); THCudaTensor_uniform(r_, 0, 1); } void THCudaTensor_randn(THCudaTensor *r_, THLongStorage *size) { THCudaTensor_resize(r_, size, NULL); THCudaTensor_normal(r_, 0, 1); } __global__ void THCudaTensor_kernel_indexFill( float *tensor, long* stride, float *index, long src_nDim, int dim, long idx_size, long tensor_size, long size_dim, float val ) { int thread_idx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; long flat_size = tensor_size / idx_size; if (thread_idx < flat_size) { long coeff = 0; for (int i=0; i<idx_size; i++) { int leftover = thread_idx; int srcIdx = 0; for (int d=0; d<src_nDim; d++) { if (d < dim) { coeff = leftover / (stride[d] / size_dim); leftover -= coeff * (stride[d] / size_dim); srcIdx += coeff * stride[d]; } else if (d > dim) { coeff = leftover / stride[d]; leftover -= coeff * stride[d]; srcIdx += coeff * stride[d]; } } tensor[srcIdx + (int)((index[i])-1)*stride[dim]] = val; } } } __global__ void THCudaTensor_kernel_indexCopy( float *res, float *src, long* res_stride, float *index, long res_nDim, int dim, long idx_size, long src_size, long size_dim ) { int thread_idx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; long flat_size = src_size / idx_size; if (thread_idx < flat_size) { long coeff = 0; for (int i=0; i<idx_size; i++) { int leftover = thread_idx; int targetIdx = 0; int resIdx = 0; for (int d=0; d<res_nDim; d++) { if (d < dim) { long stride_d = res_stride[d] / size_dim; coeff = leftover / stride_d; leftover -= coeff * stride_d; targetIdx += coeff * stride_d * idx_size; resIdx += coeff * res_stride[d]; } else if (d > dim) { coeff = leftover / res_stride[d]; leftover -= coeff * res_stride[d]; targetIdx += coeff * res_stride[d]; resIdx += coeff * res_stride[d]; } } res[resIdx + ((int)(index[i])-1)*res_stride[dim]] = src[targetIdx + i*res_stride[dim]]; } } } void THCudaTensor_indexCopy(THCudaTensor *res_, int dim, THLongTensor *indices, THCudaTensor *src) { THCudaTensor *indices_; long *stride_; long nIndex = indices->size[0]; long nRes; THArgCheck(indices->nDimension == 1, 3, "expecting vector of indices"); THArgCheck(dim < src->nDimension, 4, "Indexing dim is out of bounds"); THArgCheck(src->nDimension > 0, 2, "Source tensor is empty"); THArgCheck(nIndex == src->size[dim], 4, "length of src.size[dim] is not equal to length of indices"); indices_ = THCudaTensor_newWithSize1d(nIndex); THCudaTensor_copyLong(indices_, indices); nRes = THCudaTensor_nElement(res_); dim3 nthreads(16, 16); dim3 nblocks(ceil((float)nRes / nIndex / (16*16))); THCudaCheck(cudaMalloc((void**)&stride_, res_->nDimension * sizeof(long))); THCudaCheck(cudaMemcpy(stride_, res_->stride, res_->nDimension * sizeof(long), cudaMemcpyHostToDevice)); THCudaTensor_kernel_indexCopy<<<nblocks, nthreads>>>( THCudaTensor_data(res_), THCudaTensor_data(src), stride_, THCudaTensor_data(indices_), res_->nDimension, dim, nIndex, THCudaTensor_nElement(src), res_->size[dim] ); THCudaCheck(cudaFree(stride_)); THCudaTensor_free(indices_); } void THCudaTensor_indexFill(THCudaTensor *res_, int dim, THLongTensor *indices, float val) { THCudaTensor *indices_; long *stride_; long nIndex = indices->size[0]; long nRes; THArgCheck(indices->nDimension == 1, 3, "Index is supposed to be a vector"); 
THArgCheck(dim < res_->nDimension,4,"Indexing dim is out of bounds"); THArgCheck(res_->nDimension > 0, 2, "Source tensor is empty"); indices_ = THCudaTensor_newWithSize1d(nIndex); THCudaTensor_copyLong(indices_, indices); nRes = THCudaTensor_nElement(res_) / res_->size[dim] * nIndex; dim3 nthreads(16, 16); dim3 nblocks(ceil((float)nRes / nIndex / (16*16))); THCudaCheck(cudaMalloc((void**)&stride_, res_->nDimension * sizeof(long))); THCudaCheck(cudaMemcpy(stride_, res_->stride, res_->nDimension * sizeof(long), cudaMemcpyHostToDevice)); THCudaTensor_kernel_indexFill<<<nblocks, nthreads>>>( THCudaTensor_data(res_), stride_, THCudaTensor_data(indices_), res_->nDimension, dim, nIndex, nRes, res_->size[dim], val ); THCudaCheck(cudaFree(stride_)); THCudaTensor_free(indices_); } __global__ void THCudaTensor_kernel_indexSelect( float *tensor, float *src, long* src_stride, float *index, long src_nDim, int dim, long idx_size, long tensor_size, long size_dim ) { int thread_idx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; long flat_size = tensor_size / idx_size; if (thread_idx < flat_size) { long coeff = 0; for (int i=0; i<idx_size; i++) { int leftover = thread_idx; int targetIdx = 0; int srcIdx = 0; for (int d=0; d<src_nDim; d++) { if (d < dim) { long stride_d = src_stride[d] / size_dim; coeff = leftover / stride_d; leftover -= coeff * stride_d; targetIdx += coeff * stride_d * idx_size; srcIdx += coeff * src_stride[d]; } else if (d > dim) { coeff = leftover / src_stride[d]; leftover -= coeff * src_stride[d]; targetIdx += coeff * src_stride[d]; srcIdx += coeff * src_stride[d]; } } tensor[targetIdx + i*src_stride[dim]] = src[srcIdx + ((int)(index[i])-1)*src_stride[dim]]; } } } void THCudaTensor_indexSelect(THCudaTensor *res_, THCudaTensor *src, int dim, THLongTensor *indices) { THLongStorage *newSize; THCudaTensor *indices_; long *stride_; long nIndex = indices->size[0]; long nRes; THArgCheck(indices->nDimension == 1, 3, "expecting vector of indices"); THArgCheck(dim < src->nDimension, 4, "Indexing dim is out of bounds"); THArgCheck(src->nDimension > 0, 2, "Source tensor is empty"); newSize = THLongStorage_newWithSize(src->nDimension); THLongStorage_rawCopy(newSize, src->size); newSize->data[dim] = nIndex; THCudaTensor_resize(res_, newSize, NULL); THLongStorage_free(newSize); indices_ = THCudaTensor_newWithSize1d(nIndex); THCudaTensor_copyLong(indices_, indices); nRes = THCudaTensor_nElement(res_); dim3 nthreads(16, 16); dim3 nblocks(ceil((float)nRes / nIndex / (16*16))); THCudaCheck(cudaMalloc((void**)&stride_, src->nDimension * sizeof(long))); THCudaCheck(cudaMemcpy(stride_, src->stride, src->nDimension * sizeof(long), cudaMemcpyHostToDevice)); THCudaTensor_kernel_indexSelect<<<nblocks, nthreads>>>( THCudaTensor_data(res_), THCudaTensor_data(src), stride_, THCudaTensor_data(indices_), src->nDimension, dim, indices->size[0], nRes, src->size[dim] ); THCudaCheck(cudaFree(stride_)); THCudaTensor_free(indices_); }
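The norm and distance routines in the listing above wrap raw device pointers in thrust::device_ptr and reduce them with thrust::transform_reduce. A minimal sketch of that pattern on a bare device buffer follows; the helper name device_pnorm is hypothetical.

#include <cmath>
#include <thrust/device_ptr.h>
#include <thrust/functional.h>
#include <thrust/transform_reduce.h>

struct abs_pow_functor
{
    const float exponent;
    abs_pow_functor(float exponent_) : exponent(exponent_) {}
    __host__ __device__ float operator()(const float &x) const
    {
        return powf(fabsf(x), exponent);
    }
};

// Hypothetical helper: p-norm of a contiguous device buffer of length size.
float device_pnorm(float *d_data, long size, float p)
{
    thrust::device_ptr<float> data(d_data);
    // Sum of |x|^p, then the final 1/p power on the host.
    float sum = thrust::transform_reduce(data, data + size,
                                         abs_pow_functor(p),
                                         0.0f, thrust::plus<float>());
    return powf(sum, 1.0f / p);
}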
290d146705cb10d04b8722b8a87c64dd3dd4974e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/randperm_kernel.h" #ifdef __NVCC__ #include <hiprand/hiprand_kernel.h> #include "hipcub/hipcub.hpp" #endif #ifdef __HIPCC__ #include <hiprand_kernel.h> #include <hipcub/hipcub.hpp> namespace cub = hipcub; #endif #include "gflags/gflags.h" #include "paddle/phi/backends/gpu/gpu_launch_config.h" #include "paddle/phi/common/amp_type_traits.h" #include "paddle/phi/common/memory_utils.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/empty_kernel.h" #include "paddle/phi/kernels/funcs/for_range.h" #include "paddle/phi/kernels/randint_kernel.h" DECLARE_bool(use_curand); namespace phi { template <typename keyT, typename dataT> __global__ void SwapRepeatKernel(keyT* key_out_data, dataT* out_data, int n, uint64_t seed, uint64_t offset) { size_t idx = static_cast<size_t>(blockIdx.x * blockDim.x + threadIdx.x); if (idx >= n - 1) return; // out of range bool is_first_repeat = false; if (key_out_data[idx] == key_out_data[idx + 1]) { if (idx == 0) { is_first_repeat = true; } else if (key_out_data[idx] != key_out_data[idx - 1]) { is_first_repeat = true; } } if (!is_first_repeat) return; int repeat_size = 1; for (int i = idx; i < n; ++i) { if (key_out_data[i] == key_out_data[i + 1]) { ++repeat_size; } else { break; } } #ifdef __NVCC__ hiprandStatePhilox4_32_10_t state; hiprand_init(seed, idx, offset, &state); for (int i = repeat_size - 1; i > 0; i--) { uint32_t r = hiprand(&state) % (i + 1); #elif __HIPCC__ hiprandStatePhilox4_32_10_t state; hiprand_init(seed, idx, offset, &state); for (int i = repeat_size - 1; i > 0; i--) { uint32_t r = hiprand(&state) % (i + 1); #endif if (r != i) { dataT tmp = out_data[idx + i]; out_data[idx + i] = out_data[idx + r]; out_data[idx + r] = tmp; } } } template <typename T, typename Context> void RandpermKernel(const Context& dev_ctx, int n, DataType dtype, DenseTensor* out) { DenseTensor key; int seed = 0; RandintKernel<int, Context>(dev_ctx, std::numeric_limits<int>::min(), std::numeric_limits<int>::max(), IntArray({n}), phi::DataType::INT32, &key); DenseTensor key_out = Empty<int, Context>(dev_ctx, IntArray({n})); DenseTensor range = Empty<T, Context>(dev_ctx, IntArray({n})); T* range_data = range.data<T>(); funcs::ForRange<Context> for_range(dev_ctx, n); for_range([range_data] __device__(size_t idx) { range_data[idx] = static_cast<T>(idx); }); out->Resize(phi::make_ddim({n})); T* out_data = dev_ctx.template Alloc<T>(out); // Refer to [Algorithm of randperm] https://osf.io/af2hy/ to // improve performance of radix sort. 
double n_d = static_cast<double>(n); int begin_bit = 0; int end_bit = ::ceil(std::log2(n_d - (6 * n_d * n_d + 1) / (12 * ::log(0.9)))); size_t temp_storage_bytes = 0; hipcub::DeviceRadixSort::SortPairs<int, T>(nullptr, temp_storage_bytes, key.data<int>(), key_out.data<int>(), range.data<T>(), out_data, n, begin_bit, end_bit < 32 ? end_bit : 32, dev_ctx.stream()); auto d_temp_storage = phi::memory_utils::Alloc( dev_ctx.GetPlace(), temp_storage_bytes, phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream()))); hipcub::DeviceRadixSort::SortPairs<int, T>(d_temp_storage->ptr(), temp_storage_bytes, key.data<int>(), key_out.data<int>(), range.data<T>(), out_data, n, begin_bit, end_bit < 32 ? end_bit : 32, dev_ctx.stream()); auto gen_cuda = dev_ctx.GetGenerator(); auto seed_offset = gen_cuda->IncrementOffset(n); auto config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, n); hipLaunchKernelGGL(( SwapRepeatKernel), dim3(config.block_per_grid.x), dim3(config.thread_per_block.x), 0, dev_ctx.stream(), key_out.data<int>(), out_data, n, seed_offset.first, seed_offset.second); } } // namespace phi PD_REGISTER_KERNEL(randperm, GPU, ALL_LAYOUT, phi::RandpermKernel, float, double, int, int64_t, phi::dtype::float16, phi::dtype::bfloat16) {}
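SwapRepeatKernel above draws its random swaps from a Philox counter-based generator initialized with (seed, subsequence = thread index, offset), which gives each thread an independent, reproducible stream. A minimal sketch of that per-thread pattern follows; the kernel and the hiprand header path are assumptions for illustration, not part of the file.

#include <cstdint>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>

// Hypothetical kernel: each thread seeds its own Philox stream and writes one sample.
__global__ void fill_uniform(float *out, int n, uint64_t seed, uint64_t offset)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    hiprandStatePhilox4_32_10_t state;
    hiprand_init(seed, /*subsequence=*/idx, offset, &state);
    out[idx] = hiprand_uniform(&state); // uniform float in (0, 1]
}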
290d146705cb10d04b8722b8a87c64dd3dd4974e.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/randperm_kernel.h" #ifdef __NVCC__ #include <curand_kernel.h> #include "cub/cub.cuh" #endif #ifdef __HIPCC__ #include <hiprand_kernel.h> #include <hipcub/hipcub.hpp> namespace cub = hipcub; #endif #include "gflags/gflags.h" #include "paddle/phi/backends/gpu/gpu_launch_config.h" #include "paddle/phi/common/amp_type_traits.h" #include "paddle/phi/common/memory_utils.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/empty_kernel.h" #include "paddle/phi/kernels/funcs/for_range.h" #include "paddle/phi/kernels/randint_kernel.h" DECLARE_bool(use_curand); namespace phi { template <typename keyT, typename dataT> __global__ void SwapRepeatKernel(keyT* key_out_data, dataT* out_data, int n, uint64_t seed, uint64_t offset) { size_t idx = static_cast<size_t>(blockIdx.x * blockDim.x + threadIdx.x); if (idx >= n - 1) return; // out of range bool is_first_repeat = false; if (key_out_data[idx] == key_out_data[idx + 1]) { if (idx == 0) { is_first_repeat = true; } else if (key_out_data[idx] != key_out_data[idx - 1]) { is_first_repeat = true; } } if (!is_first_repeat) return; int repeat_size = 1; for (int i = idx; i < n; ++i) { if (key_out_data[i] == key_out_data[i + 1]) { ++repeat_size; } else { break; } } #ifdef __NVCC__ curandStatePhilox4_32_10_t state; curand_init(seed, idx, offset, &state); for (int i = repeat_size - 1; i > 0; i--) { uint32_t r = curand(&state) % (i + 1); #elif __HIPCC__ hiprandStatePhilox4_32_10_t state; hiprand_init(seed, idx, offset, &state); for (int i = repeat_size - 1; i > 0; i--) { uint32_t r = hiprand(&state) % (i + 1); #endif if (r != i) { dataT tmp = out_data[idx + i]; out_data[idx + i] = out_data[idx + r]; out_data[idx + r] = tmp; } } } template <typename T, typename Context> void RandpermKernel(const Context& dev_ctx, int n, DataType dtype, DenseTensor* out) { DenseTensor key; int seed = 0; RandintKernel<int, Context>(dev_ctx, std::numeric_limits<int>::min(), std::numeric_limits<int>::max(), IntArray({n}), phi::DataType::INT32, &key); DenseTensor key_out = Empty<int, Context>(dev_ctx, IntArray({n})); DenseTensor range = Empty<T, Context>(dev_ctx, IntArray({n})); T* range_data = range.data<T>(); funcs::ForRange<Context> for_range(dev_ctx, n); for_range([range_data] __device__(size_t idx) { range_data[idx] = static_cast<T>(idx); }); out->Resize(phi::make_ddim({n})); T* out_data = dev_ctx.template Alloc<T>(out); // Refer to [Algorithm of randperm] https://osf.io/af2hy/ to // improve performance of radix sort. double n_d = static_cast<double>(n); int begin_bit = 0; int end_bit = std::ceil(std::log2(n_d - (6 * n_d * n_d + 1) / (12 * std::log(0.9)))); size_t temp_storage_bytes = 0; cub::DeviceRadixSort::SortPairs<int, T>(nullptr, temp_storage_bytes, key.data<int>(), key_out.data<int>(), range.data<T>(), out_data, n, begin_bit, end_bit < 32 ? 
end_bit : 32, dev_ctx.stream()); auto d_temp_storage = phi::memory_utils::Alloc( dev_ctx.GetPlace(), temp_storage_bytes, phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream()))); cub::DeviceRadixSort::SortPairs<int, T>(d_temp_storage->ptr(), temp_storage_bytes, key.data<int>(), key_out.data<int>(), range.data<T>(), out_data, n, begin_bit, end_bit < 32 ? end_bit : 32, dev_ctx.stream()); auto gen_cuda = dev_ctx.GetGenerator(); auto seed_offset = gen_cuda->IncrementOffset(n); auto config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, n); SwapRepeatKernel<<<config.block_per_grid.x, config.thread_per_block.x, 0, dev_ctx.stream()>>>( key_out.data<int>(), out_data, n, seed_offset.first, seed_offset.second); } } // namespace phi PD_REGISTER_KERNEL(randperm, GPU, ALL_LAYOUT, phi::RandpermKernel, float, double, int, int64_t, phi::dtype::float16, phi::dtype::bfloat16) {}
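The kernel above orders (random key, index) pairs with cub::DeviceRadixSort::SortPairs, which is always invoked twice: a first call with a null workspace pointer only reports the required temporary-storage size, and the second call performs the sort. A minimal sketch of that two-phase pattern on bare buffers follows (the helper name is hypothetical, and it sorts the full 32-bit key width rather than the narrowed end_bit used above).

#include <cub/cub.cuh>
#include <cuda_runtime.h>

void sort_pairs_by_key(const int *d_keys_in, int *d_keys_out,
                       const float *d_values_in, float *d_values_out,
                       int n, cudaStream_t stream)
{
    void *d_temp_storage = nullptr;
    size_t temp_storage_bytes = 0;
    // First call: d_temp_storage == nullptr, only temp_storage_bytes is written.
    cub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes,
                                    d_keys_in, d_keys_out,
                                    d_values_in, d_values_out,
                                    n, 0, sizeof(int) * 8, stream);
    cudaMalloc(&d_temp_storage, temp_storage_bytes);
    // Second call: the actual radix sort of values by key.
    cub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes,
                                    d_keys_in, d_keys_out,
                                    d_values_in, d_values_out,
                                    n, 0, sizeof(int) * 8, stream);
    cudaFree(d_temp_storage);
}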
3873ea23bd4f4f8ff9a351587c75e6bd1dd718ee.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * GPUSolver.cpp
 *
 * Author: Artur Kucia
 */

#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <hip/hip_runtime.h>
#include <cstring>
#include <cfloat>

#include "GPUSolver.cuh"
#include "Matrix.h"
#include "Vector.h"

GPUSolver::GPUSolver() {
}

GPUSolver::~GPUSolver() {
}

__global__ void kernel_jj(float *d_a, float *d_i, int n, int j){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if( i < n){
        if ( d_a[j*n+j] == 0.0)
            for(int k=j+1; k<n; k++)
                if ( d_a[k*n+j] != 0.0){
                    d_a[j*n+i] += d_a[k*n+i];
                    d_i[j*n+i] += d_i[k*n+i];
                    break;
                }
    }
}

__global__ void normalize_row(float *d_a, float *d_i, int n, int j){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n){
        if (i !=j){
            d_i[j*n+i] /= d_a[j*n+j];
            d_a[j*n+i] /= d_a[j*n+j];
        }
        else{
            d_i[j*n+i] /= d_a[j*n+j];
        }
    }
}

__global__ void normalize_diagonal(float *d_a, float *d_i, int n, int j){
    d_a[j*n+j] /= d_a[j*n+j];
}

__global__ void reduce_row_i(float *d_a, float *d_i, int n, int j){
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if(x<n && y< n)
        if (y != j){
            d_i[y*n+x] -= d_i[j*n+x]*d_a[y*n+j];
        }
}

__global__ void reduce_row_a(float *d_a, float *d_i, int n, int j){
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if(x<n && y< n)
        if (y != j){
            if( x != j)
                d_a[y*n+x] -= d_a[j*n+x]*d_a[y*n+j];
        }
}

Matrix GPUSolver::solveGJ(Matrix A, Vector B) {
    Matrix A_copy(A);
    int n = A.getN();
    int m = B.getM();
    Matrix I(n);
    float *h_i = I.getArrayPointer();
    float *h_a = A.getArrayPointer();
    float *d_a;
    float *d_i;

    hipMalloc((void **)&d_a, n*n*sizeof(float));
    hipMalloc((void **)&d_i, n*n*sizeof(float));
    hipMemcpy(d_a, h_a, n*n*sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(d_i, h_i, n*n*sizeof(float), hipMemcpyHostToDevice);

    for(int i=0; i<n; i++){
        dim3 block(256,1);
        dim3 grid((n + block.x - 1) / block.x, 1);
        hipLaunchKernelGGL(kernel_jj, dim3(grid), dim3(block), 0, 0, d_a, d_i, n, i);

        dim3 block1(256,1);
        dim3 grid1((n + block1.x - 1) / block1.x,1);
        hipLaunchKernelGGL(normalize_row, dim3(grid1), dim3(block1), 0, 0, d_a, d_i, n, i);

        dim3 block2(16,16);
        dim3 grid2((n + block2.x - 1) / block2.x, (n + block2.y - 1) / block2.y);
        hipLaunchKernelGGL(reduce_row_i, dim3(grid2), dim3(block2), 0, 0, d_a, d_i, n, i);
        hipLaunchKernelGGL(reduce_row_a, dim3(grid2), dim3(block2), 0, 0, d_a, d_i, n, i);
    }

    hipMemcpy(h_a, d_a, n*n*sizeof(float), hipMemcpyDeviceToHost);
    hipMemcpy(h_i, d_i, n*n*sizeof(float), hipMemcpyDeviceToHost);

    hipFree(d_a);
    hipFree(d_i);

    return I.matMul(B);
}

__global__ void kernel_update_u(float *d_a, int n, int k){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    float sum = 0.0;
    if(i>k-1 && i<n){
        for(int p=0;p<k;++p){
            sum += d_a[i*n+p]*d_a[p*n+k];
        }
        d_a[i*n+k] -= sum;
    }
}

__global__ void kernel_update_l(float *d_a, int n, int k){
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    float sum=0.0;
    if(j>k && j<n){
        for(int p=0;p<k;++p){
            sum+=d_a[k*n+p]*d_a[p*n+j];
        }
        d_a[k*n+j]=(d_a[k*n+j]-sum)/d_a[k*n+k];
    }
}

__global__ void kerneljj(float *d_a, float *d_b, int n, int j){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i < n){
        if ( d_a[j*n+j] == 0.0){
            for(int k=j+1; k<n; k++){
                if (d_a[k*n+j] != 0.0){
                    d_a[j*n+i] += d_a[k*n+i];
                    if(i==0){
                        d_b[j] += d_b[k];
                    }
                    break;
                }
            }
        }
    }
}

Matrix GPUSolver::solveLUD(Matrix A, Vector B){
    int n = A.getN();
    int m = B.getM();
    Vector X(n);
    float *h_a = A.getArrayPointer();
    float *h_b = B.getArrayPointer();
    float *h_x = X.getArrayPointer();
    float *h_y = new float[n];
    for(int i=0; i<n; i++)
        h_y[i] = 0.0;
    float *d_a;
    float *d_b;

    hipMalloc((void **)&d_a, n*n*sizeof(float));
    hipMalloc((void **)&d_b, n*sizeof(float));
    hipMemcpy(d_a, h_a, n*n*sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(d_b, h_b, n*sizeof(float), hipMemcpyHostToDevice);

    for(int k=0;k<n;++k){
        dim3 block2(256,1);
        dim3 grid2((n + block2.x - 1) / block2.x,1);
        hipLaunchKernelGGL(kerneljj, dim3(grid2), dim3(block2), 0, 0, d_a, d_b, n, k);
        hipLaunchKernelGGL(kernel_update_u, dim3(grid2), dim3(block2), 0, 0, d_a, n, k);
        hipLaunchKernelGGL(kernel_update_l, dim3(grid2), dim3(block2), 0, 0, d_a, n, k);
    }

    hipMemcpy(h_a, d_a, n*n*sizeof(float), hipMemcpyDeviceToHost);
    hipMemcpy(h_b, d_b, n*sizeof(float), hipMemcpyDeviceToHost);

    hipFree(d_a);
    hipFree(d_b);

    for(int i=0;i<n;++i){
        float sum = 0.0;
        for(int k=0;k<i;++k){
            sum += h_a[i*n+k]*h_y[k];
        }
        h_y[i]=(h_b[i]-sum)/h_a[i*n+i];
    }

    for(int i=n-1; i>=0 ;--i){
        float sum = 0.0;
        for(int k=i+1; k<n; ++k){
            sum += h_a[i*n+k]*h_x[k];
        }
        h_x[i] = (h_y[i]-sum);
    }

    delete [] h_y;
    return X;
}
3873ea23bd4f4f8ff9a351587c75e6bd1dd718ee.cu
/* * GPUSolver.cpp * * Author: Artur Kucia */ #include <cstdio> #include <cstdlib> #include <iostream> #include <cuda.h> #include <cstring> #include <cfloat> #include "GPUSolver.cuh" #include "Matrix.h" #include "Vector.h" GPUSolver::GPUSolver() { } GPUSolver::~GPUSolver() { } __global__ void kernel_jj(float *d_a, float *d_i, int n, int j){ int i = blockIdx.x * blockDim.x + threadIdx.x; if( i < n){ if ( d_a[j*n+j] == 0.0) for(int k=j+1; k<n; k++) if ( d_a[k*n+j] != 0.0){ d_a[j*n+i] += d_a[k*n+i]; d_i[j*n+i] += d_i[k*n+i]; break; } } } __global__ void normalize_row(float *d_a, float *d_i, int n, int j){ int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n){ if (i !=j){ d_i[j*n+i] /= d_a[j*n+j]; d_a[j*n+i] /= d_a[j*n+j]; } else{ d_i[j*n+i] /= d_a[j*n+j]; } } } __global__ void normalize_diagonal(float *d_a, float *d_i, int n, int j){ d_a[j*n+j] /= d_a[j*n+j]; } __global__ void reduce_row_i(float *d_a, float *d_i, int n, int j){ int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if(x<n && y< n) if (y != j){ d_i[y*n+x] -= d_i[j*n+x]*d_a[y*n+j]; } } __global__ void reduce_row_a(float *d_a, float *d_i, int n, int j){ int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if(x<n && y< n) if (y != j){ if( x != j) d_a[y*n+x] -= d_a[j*n+x]*d_a[y*n+j]; } } Matrix GPUSolver::solveGJ(Matrix A, Vector B) { Matrix A_copy(A); int n = A.getN(); int m = B.getM(); Matrix I(n); float *h_i = I.getArrayPointer(); float *h_a = A.getArrayPointer(); float *d_a; float *d_i; cudaMalloc((void **)&d_a, n*n*sizeof(float)); cudaMalloc((void **)&d_i, n*n*sizeof(float)); cudaMemcpy(d_a, h_a, n*n*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_i, h_i, n*n*sizeof(float), cudaMemcpyHostToDevice); for(int i=0; i<n; i++){ dim3 block(256,1); dim3 grid((n + block.x - 1) / block.x, 1); kernel_jj <<<grid, block>>> (d_a, d_i, n, i); dim3 block1(256,1); dim3 grid1((n + block1.x - 1) / block1.x,1); normalize_row <<< grid1,block1 >>> (d_a, d_i, n, i); dim3 block2(16,16); dim3 grid2((n + block2.x - 1) / block2.x, (n + block2.y - 1) / block2.y); reduce_row_i <<< grid2, block2 >>> (d_a, d_i, n, i); reduce_row_a <<< grid2, block2 >>> (d_a, d_i, n, i); } cudaMemcpy(h_a, d_a, n*n*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(h_i, d_i, n*n*sizeof(float), cudaMemcpyDeviceToHost); cudaFree(d_a); cudaFree(d_i); return I.matMul(B); } __global__ void kernel_update_u(float *d_a, int n, int k){ int i = blockIdx.x * blockDim.x + threadIdx.x; float sum = 0.0; if(i>k-1 && i<n){ for(int p=0;p<k;++p){ sum += d_a[i*n+p]*d_a[p*n+k]; } d_a[i*n+k] -= sum; } } __global__ void kernel_update_l(float *d_a, int n, int k){ int j = blockIdx.x * blockDim.x + threadIdx.x; float sum=0.0; if(j>k && j<n){ for(int p=0;p<k;++p){ sum+=d_a[k*n+p]*d_a[p*n+j]; } d_a[k*n+j]=(d_a[k*n+j]-sum)/d_a[k*n+k]; } } __global__ void kerneljj(float *d_a, float *d_b, int n, int j){ int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < n){ if ( d_a[j*n+j] == 0.0){ for(int k=j+1; k<n; k++){ if (d_a[k*n+j] != 0.0){ d_a[j*n+i] += d_a[k*n+i]; if(i==0){ d_b[j] += d_b[k]; } break; } } } } } Matrix GPUSolver::solveLUD(Matrix A, Vector B){ int n = A.getN(); int m = B.getM(); Vector X(n); float *h_a = A.getArrayPointer(); float *h_b = B.getArrayPointer(); float *h_x = X.getArrayPointer(); float *h_y = new float[n]; for(int i=0; i<n; i++) h_y[i] = 0.0; float *d_a; float *d_b; cudaMalloc((void **)&d_a, n*n*sizeof(float)); cudaMalloc((void **)&d_b, n*sizeof(float)); cudaMemcpy(d_a, h_a, n*n*sizeof(float), 
cudaMemcpyHostToDevice); cudaMemcpy(d_b, h_b, n*sizeof(float), cudaMemcpyHostToDevice); for(int k=0;k<n;++k){ dim3 block2(256,1); dim3 grid2((n + block2.x - 1) / block2.x,1); kerneljj <<< grid2, block2 >>> (d_a, d_b, n, k); kernel_update_u <<< grid2, block2 >>> (d_a, n, k); kernel_update_l <<< grid2, block2 >>> (d_a, n, k); } cudaMemcpy(h_a, d_a, n*n*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(h_b, d_b, n*sizeof(float), cudaMemcpyDeviceToHost); cudaFree(d_a); cudaFree(d_b); for(int i=0;i<n;++i){ float sum = 0.0; for(int k=0;k<i;++k){ sum += h_a[i*n+k]*h_y[k]; } h_y[i]=(h_b[i]-sum)/h_a[i*n+i]; } for(int i=n-1; i>=0 ;--i){ float sum = 0.0; for(int k=i+1; k<n; ++k){ sum += h_a[i*n+k]*h_x[k]; } h_x[i] = (h_y[i]-sum); } delete [] h_y; return X; }
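Note on the pair above: solveGJ inverts A by Gauss-Jordan elimination, sweeping one pivot column per loop iteration; kernel_jj repairs a zero pivot by adding a lower row, normalize_row divides the pivot row by the pivot, and reduce_row_i / reduce_row_a eliminate the pivot column from every other row. Below is a minimal host-only reference of that same column sweep, handy for checking the kernels on small matrices. The function name gauss_jordan_inverse and the row-major a[r*n+c] layout are illustrative assumptions, not part of GPUSolver.

// gauss_jordan_ref.cu -- host-only reference for the column sweep used by solveGJ.
// Build: nvcc gauss_jordan_ref.cu -o gj_ref
#include <cstdio>
#include <vector>

// Invert an n x n row-major matrix into inv; returns false if a zero pivot cannot
// be repaired by adding a lower row (matrix treated as singular).
static bool gauss_jordan_inverse(std::vector<float> a, std::vector<float>& inv, int n) {
    inv.assign(n * n, 0.0f);
    for (int d = 0; d < n; ++d) inv[d * n + d] = 1.0f;      // start from the identity

    for (int j = 0; j < n; ++j) {
        // Pivot fix-up (kernel_jj): add the first lower row with a non-zero entry in column j.
        if (a[j * n + j] == 0.0f) {
            int k = j + 1;
            while (k < n && a[k * n + j] == 0.0f) ++k;
            if (k == n) return false;
            for (int c = 0; c < n; ++c) {
                a[j * n + c]   += a[k * n + c];
                inv[j * n + c] += inv[k * n + c];
            }
        }
        // Row normalization (normalize_row): divide row j by the pivot.
        float p = a[j * n + j];
        for (int c = 0; c < n; ++c) { a[j * n + c] /= p; inv[j * n + c] /= p; }
        // Elimination (reduce_row_i / reduce_row_a): clear column j in every other row.
        for (int r = 0; r < n; ++r) {
            if (r == j) continue;
            float f = a[r * n + j];                          // captured before row r changes
            for (int c = 0; c < n; ++c) {
                inv[r * n + c] -= inv[j * n + c] * f;
                a[r * n + c]   -= a[j * n + c] * f;
            }
        }
    }
    return true;
}

int main() {
    std::vector<float> a = { 4, 7, 2, 6 };                   // 2x2 example, det = 10
    std::vector<float> inv;
    if (gauss_jordan_inverse(a, inv, 2))
        printf("inv = [% .3f % .3f; % .3f % .3f]\n", inv[0], inv[1], inv[2], inv[3]);
    return 0;
}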
a9c9a1e11424042ac253ce627e3f2e20ab5b6584.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "CUDAKernel.h" texture<int4, hipTextureType2D, hipReadModeElementType> queryProfileTexture; #define CUDA_CHECK_RETURN(value) { \ hipError_t _m_cudaStat = value; \ if (_m_cudaStat != hipSuccess) { \ fprintf(stderr, "Error %s at line %d in file %s\n", \ hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \ exit(1); \ } } __device__ int globalCounter = -1; __device__ inline void ONE_CELL_COMP_SCALAR(int& f,int& e,int& h,int& n, const int sub, const unsigned gapoe, const unsigned gape,int& S) { asm("add.sat.s32 %0, %1, %2;" : "=r"(h): "r"(h), "r"(sub)); //h = h + sub; asm("max.s32 %0, %1, %2;" : "=r"(h): "r"(h), "r"(f)); //h = max(h, f); asm("max.s32 %0, %1, %2;" : "=r"(h): "r"(h), "r"(e)); //h = max(h, e); asm("max.s32 %0, %1, %2;" : "=r"(S): "r"(h), "r"(S)); //S = max(h, S); asm("mov.s32 %0, %1;" : "=r"(n): "r"(h)); //n = h; asm("sub.sat.s32 %0, %1, %2;" : "=r"(f): "r"(f), "r"(gape)); //f = f - gape; asm("sub.sat.s32 %0, %1, %2;" : "=r"(e): "r"(e), "r"(gape)); //e = e - gape; asm("sub.sat.s32 %0, %1, %2;" : "=r"(h): "r"(h), "r"(gapoe)); //h = h - gapoe; asm("max.s32 %0, %1, %2;" : "=r"(f): "r"(h), "r"(f)); //f = max(h, f); asm("max.s32 %0, %1, %2;" : "=r"(e): "r"(h), "r"(e)); //e = max(h, e); } __device__ inline void BLOCK_COMP_QP(int4* f, int4* h, int4* n, int& e, int& maxHH, int& bh, const int& sa, const int& sb, const int gapoe, const int gape){ int4 sub; sub = tex2D(queryProfileTexture, sb, sa); ONE_CELL_COMP_SCALAR(f[0].x, e, bh, n[0].x, sub.x, gapoe, gape, maxHH); ONE_CELL_COMP_SCALAR(f[0].y, e, h[0].x, n[0].y, sub.y, gapoe, gape, maxHH); ONE_CELL_COMP_SCALAR(f[0].w, e, h[0].y, n[0].w, sub.w, gapoe, gape, maxHH); ONE_CELL_COMP_SCALAR(f[0].z, e, h[0].w, n[0].z, sub.z, gapoe, gape, maxHH); sub = tex2D(queryProfileTexture, sb + 1, sa); ONE_CELL_COMP_SCALAR(f[1].x, e, h[0].z, n[1].x, sub.x, gapoe, gape, maxHH); ONE_CELL_COMP_SCALAR(f[1].y, e, h[1].x, n[1].y, sub.y, gapoe, gape, maxHH); ONE_CELL_COMP_SCALAR(f[1].w, e, h[1].y, n[1].w, sub.w, gapoe, gape, maxHH); ONE_CELL_COMP_SCALAR(f[1].z, e, h[1].w, n[1].z, sub.z, gapoe, gape, maxHH); sub = tex2D(queryProfileTexture, sb + 2, sa); ONE_CELL_COMP_SCALAR(f[2].x, e, h[1].z, n[2].x, sub.x, gapoe, gape, maxHH); ONE_CELL_COMP_SCALAR(f[2].y, e, h[2].x, n[2].y, sub.y, gapoe, gape, maxHH); ONE_CELL_COMP_SCALAR(f[2].w, e, h[2].y, n[2].w, sub.w, gapoe, gape, maxHH); ONE_CELL_COMP_SCALAR(f[2].z, e, h[2].w, n[2].z, sub.z, gapoe, gape, maxHH); sub = tex2D(queryProfileTexture, sb + 3, sa); ONE_CELL_COMP_SCALAR(f[3].x, e, h[2].z, n[3].x, sub.x, gapoe, gape, maxHH); ONE_CELL_COMP_SCALAR(f[3].y, e, h[3].x, n[3].y, sub.y, gapoe, gape, maxHH); ONE_CELL_COMP_SCALAR(f[3].w, e, h[3].y, n[3].w, sub.w, gapoe, gape, maxHH); ONE_CELL_COMP_SCALAR(f[3].z, e, h[3].w, n[3].z, sub.z, gapoe, gape, maxHH); bh = h[3].z; } #define cudaGapOE 12 #define cudaGapExtend 2 #define QUERY_LENGTH_ALIGNED 16 #define DEPTH 4 __device__ int computeScalarQP(const int qlen, const unsigned* dbseq, const unsigned dblen, short4 *global, size_t pitch ) { //if(dblen >= 3072) // return 0x0; int lane = threadIdx.x % 32; //thread index inside a warp int4 sa; int sb; /* int4 f[3], h[3], n[3], bh, be; */ int4 f[4], h[4], n[4], bh; int4 init4= {0x80000000, 0x80000000, 0x80000000, 0x80000000}; short4 szero4 = { 0, 0, 0, 0 }; unsigned int gapoe = cudaGapOE; //copy into register unsigned int gape = cudaGapExtend; int dblenquad = dblen >> 2; //initialize for (int i = 0; i < dblenquad * 2; i++) { global[i * 
pitch] = szero4; } int maxHH = 0x80000000; int4 e = init4; for (int i = 0; i < qlen; i += QUERY_LENGTH_ALIGNED) { h[0] = n[0] = f[0] = init4; h[1] = n[1] = f[1] = init4; h[2] = n[2] = f[2] = init4; h[3] = n[3] = f[3] = init4; int iquad = i >> 2; int seqIdx = lane - warpSize; for (int jquad = 0; jquad != dblenquad; jquad++) { unsigned pac4 = dbseq[seqIdx += warpSize]; sa.x = pac4 & 0xFF; pac4 >>= 8; sa.y = pac4 & 0xFF; pac4 >>= 8; sa.w = pac4 & 0xFF; pac4 >>= 8; sa.z = pac4 & 0xFF; sb = iquad; int gIdx = jquad * 2; short4 loadH = global[gIdx]; short4 loadE = global[gIdx + 1]; bh = make_int4(loadH.x | 0x80000000, loadH.y | 0x80000000, loadH.w | 0x80000000, loadH.z | 0x80000000); e = make_int4(loadE.x | 0x80000000, loadE.y | 0x80000000, loadE.w | 0x80000000, loadE.z | 0x80000000); //compute 4 columns in the matrix BLOCK_COMP_QP(f, h, n, e.x, maxHH, bh.x, sa.x, sb, gapoe, gape); BLOCK_COMP_QP(f, n, h, e.y, maxHH, bh.y, sa.y, sb, gapoe, gape); BLOCK_COMP_QP(f, h, n, e.w, maxHH, bh.w, sa.w, sb, gapoe, gape); BLOCK_COMP_QP(f, n, h, e.z, maxHH, bh.z, sa.z, sb, gapoe, gape); short4 saveH, saveE; saveH = make_short4(min(bh.x, 0x8000FFFF) & 0xFFFF, min(bh.y, 0x8000FFFF) & 0xFFFF, min(bh.w, 0x8000FFFF) & 0xFFFF, min(bh.z, 0x8000FFFF) & 0xFFFF); /*max score will never overflow if H is not overflow so we don't check E*/ saveE = make_short4(e.x & 0xFFFF, e.y & 0xFFFF, e.w & 0xFFFF, e.z & 0xFFFF); global[gIdx ] = saveH; global[gIdx + 1] = saveE; } } return maxHH ^ 0x80000000; } __global__ void Compute( unsigned* databaseSequence, unsigned* deviceMap, unsigned batchNum, unsigned queryLen, int *result, int4* globalArray, size_t globalPitch ) { int lane = threadIdx.x % warpSize; int thread_id = blockIdx.x * blockDim.x + threadIdx.x; int warp_id = thread_id >> 5; int curWarpIdx = warp_id; short4 global[3072]; /*atomic add should be deprecated*/ if (lane == 0) { atomicAdd(&globalCounter, 1); } while (curWarpIdx < batchNum) { unsigned pos = deviceMap[curWarpIdx * 2]; unsigned dblen = deviceMap[curWarpIdx * 2 + 1]; unsigned *dbseq = databaseSequence + pos; int score = 0; //score = computeScalarQP(queryLen, dbseq, dblen, globalArray + thread_id, globalPitch); score = computeScalarQP(queryLen, dbseq, dblen, global, 1); result[curWarpIdx * warpSize + lane] = score; //result[curWarpIdx * warpSize + lane] = score - 0x80000000; //update shared index in a warp if (lane == 0) { curWarpIdx = atomicAdd(&globalCounter, 1); } //update local index of each thread curWarpIdx = __shfl(curWarpIdx, 0); } globalCounter = -1; //reset global variables for next call } void kernelLaunch( int numBlocks, int numThreads, hipStream_t& stream, unsigned* deviceBuffer, unsigned* deviceMap, unsigned batchNum, unsigned queryLen, int* devResult, int4* globalArray, size_t globalPitch ) { hipLaunchKernelGGL(( Compute), dim3(numBlocks), dim3(numThreads), 0, stream, deviceBuffer, deviceMap, batchNum, queryLen, devResult, globalArray, globalPitch ); } void bindQueryPrf(hipArray_t cu_array){ queryProfileTexture.addressMode[0] = hipAddressModeClamp; queryProfileTexture.addressMode[1] = hipAddressModeClamp; queryProfileTexture.filterMode = hipFilterModePoint; queryProfileTexture.normalized = false; hipBindTextureToArray(queryProfileTexture, cu_array, queryProfileTexture.channelDesc); }
a9c9a1e11424042ac253ce627e3f2e20ab5b6584.cu
#include "CUDAKernel.h" texture<int4, cudaTextureType2D, cudaReadModeElementType> queryProfileTexture; #define CUDA_CHECK_RETURN(value) { \ cudaError_t _m_cudaStat = value; \ if (_m_cudaStat != cudaSuccess) { \ fprintf(stderr, "Error %s at line %d in file %s\n", \ cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \ exit(1); \ } } __device__ int globalCounter = -1; __device__ inline void ONE_CELL_COMP_SCALAR(int& f,int& e,int& h,int& n, const int sub, const unsigned gapoe, const unsigned gape,int& S) { asm("add.sat.s32 %0, %1, %2;" : "=r"(h): "r"(h), "r"(sub)); //h = h + sub; asm("max.s32 %0, %1, %2;" : "=r"(h): "r"(h), "r"(f)); //h = max(h, f); asm("max.s32 %0, %1, %2;" : "=r"(h): "r"(h), "r"(e)); //h = max(h, e); asm("max.s32 %0, %1, %2;" : "=r"(S): "r"(h), "r"(S)); //S = max(h, S); asm("mov.s32 %0, %1;" : "=r"(n): "r"(h)); //n = h; asm("sub.sat.s32 %0, %1, %2;" : "=r"(f): "r"(f), "r"(gape)); //f = f - gape; asm("sub.sat.s32 %0, %1, %2;" : "=r"(e): "r"(e), "r"(gape)); //e = e - gape; asm("sub.sat.s32 %0, %1, %2;" : "=r"(h): "r"(h), "r"(gapoe)); //h = h - gapoe; asm("max.s32 %0, %1, %2;" : "=r"(f): "r"(h), "r"(f)); //f = max(h, f); asm("max.s32 %0, %1, %2;" : "=r"(e): "r"(h), "r"(e)); //e = max(h, e); } __device__ inline void BLOCK_COMP_QP(int4* f, int4* h, int4* n, int& e, int& maxHH, int& bh, const int& sa, const int& sb, const int gapoe, const int gape){ int4 sub; sub = tex2D(queryProfileTexture, sb, sa); ONE_CELL_COMP_SCALAR(f[0].x, e, bh, n[0].x, sub.x, gapoe, gape, maxHH); ONE_CELL_COMP_SCALAR(f[0].y, e, h[0].x, n[0].y, sub.y, gapoe, gape, maxHH); ONE_CELL_COMP_SCALAR(f[0].w, e, h[0].y, n[0].w, sub.w, gapoe, gape, maxHH); ONE_CELL_COMP_SCALAR(f[0].z, e, h[0].w, n[0].z, sub.z, gapoe, gape, maxHH); sub = tex2D(queryProfileTexture, sb + 1, sa); ONE_CELL_COMP_SCALAR(f[1].x, e, h[0].z, n[1].x, sub.x, gapoe, gape, maxHH); ONE_CELL_COMP_SCALAR(f[1].y, e, h[1].x, n[1].y, sub.y, gapoe, gape, maxHH); ONE_CELL_COMP_SCALAR(f[1].w, e, h[1].y, n[1].w, sub.w, gapoe, gape, maxHH); ONE_CELL_COMP_SCALAR(f[1].z, e, h[1].w, n[1].z, sub.z, gapoe, gape, maxHH); sub = tex2D(queryProfileTexture, sb + 2, sa); ONE_CELL_COMP_SCALAR(f[2].x, e, h[1].z, n[2].x, sub.x, gapoe, gape, maxHH); ONE_CELL_COMP_SCALAR(f[2].y, e, h[2].x, n[2].y, sub.y, gapoe, gape, maxHH); ONE_CELL_COMP_SCALAR(f[2].w, e, h[2].y, n[2].w, sub.w, gapoe, gape, maxHH); ONE_CELL_COMP_SCALAR(f[2].z, e, h[2].w, n[2].z, sub.z, gapoe, gape, maxHH); sub = tex2D(queryProfileTexture, sb + 3, sa); ONE_CELL_COMP_SCALAR(f[3].x, e, h[2].z, n[3].x, sub.x, gapoe, gape, maxHH); ONE_CELL_COMP_SCALAR(f[3].y, e, h[3].x, n[3].y, sub.y, gapoe, gape, maxHH); ONE_CELL_COMP_SCALAR(f[3].w, e, h[3].y, n[3].w, sub.w, gapoe, gape, maxHH); ONE_CELL_COMP_SCALAR(f[3].z, e, h[3].w, n[3].z, sub.z, gapoe, gape, maxHH); bh = h[3].z; } #define cudaGapOE 12 #define cudaGapExtend 2 #define QUERY_LENGTH_ALIGNED 16 #define DEPTH 4 __device__ int computeScalarQP(const int qlen, const unsigned* dbseq, const unsigned dblen, short4 *global, size_t pitch ) { //if(dblen >= 3072) // return 0x0; int lane = threadIdx.x % 32; //thread index inside a warp int4 sa; int sb; /* int4 f[3], h[3], n[3], bh, be; */ int4 f[4], h[4], n[4], bh; int4 init4= {0x80000000, 0x80000000, 0x80000000, 0x80000000}; short4 szero4 = { 0, 0, 0, 0 }; unsigned int gapoe = cudaGapOE; //copy into register unsigned int gape = cudaGapExtend; int dblenquad = dblen >> 2; //initialize for (int i = 0; i < dblenquad * 2; i++) { global[i * pitch] = szero4; } int maxHH = 0x80000000; int4 e = init4; for (int i = 0; i < qlen; 
i += QUERY_LENGTH_ALIGNED) { h[0] = n[0] = f[0] = init4; h[1] = n[1] = f[1] = init4; h[2] = n[2] = f[2] = init4; h[3] = n[3] = f[3] = init4; int iquad = i >> 2; int seqIdx = lane - warpSize; for (int jquad = 0; jquad != dblenquad; jquad++) { unsigned pac4 = dbseq[seqIdx += warpSize]; sa.x = pac4 & 0xFF; pac4 >>= 8; sa.y = pac4 & 0xFF; pac4 >>= 8; sa.w = pac4 & 0xFF; pac4 >>= 8; sa.z = pac4 & 0xFF; sb = iquad; int gIdx = jquad * 2; short4 loadH = global[gIdx]; short4 loadE = global[gIdx + 1]; bh = make_int4(loadH.x | 0x80000000, loadH.y | 0x80000000, loadH.w | 0x80000000, loadH.z | 0x80000000); e = make_int4(loadE.x | 0x80000000, loadE.y | 0x80000000, loadE.w | 0x80000000, loadE.z | 0x80000000); //compute 4 columns in the matrix BLOCK_COMP_QP(f, h, n, e.x, maxHH, bh.x, sa.x, sb, gapoe, gape); BLOCK_COMP_QP(f, n, h, e.y, maxHH, bh.y, sa.y, sb, gapoe, gape); BLOCK_COMP_QP(f, h, n, e.w, maxHH, bh.w, sa.w, sb, gapoe, gape); BLOCK_COMP_QP(f, n, h, e.z, maxHH, bh.z, sa.z, sb, gapoe, gape); short4 saveH, saveE; saveH = make_short4(min(bh.x, 0x8000FFFF) & 0xFFFF, min(bh.y, 0x8000FFFF) & 0xFFFF, min(bh.w, 0x8000FFFF) & 0xFFFF, min(bh.z, 0x8000FFFF) & 0xFFFF); /*max score will never overflow if H is not overflow so we don't check E*/ saveE = make_short4(e.x & 0xFFFF, e.y & 0xFFFF, e.w & 0xFFFF, e.z & 0xFFFF); global[gIdx ] = saveH; global[gIdx + 1] = saveE; } } return maxHH ^ 0x80000000; } __global__ void Compute( unsigned* databaseSequence, unsigned* deviceMap, unsigned batchNum, unsigned queryLen, int *result, int4* globalArray, size_t globalPitch ) { int lane = threadIdx.x % warpSize; int thread_id = blockIdx.x * blockDim.x + threadIdx.x; int warp_id = thread_id >> 5; int curWarpIdx = warp_id; short4 global[3072]; /*atomic add should be deprecated*/ if (lane == 0) { atomicAdd(&globalCounter, 1); } while (curWarpIdx < batchNum) { unsigned pos = deviceMap[curWarpIdx * 2]; unsigned dblen = deviceMap[curWarpIdx * 2 + 1]; unsigned *dbseq = databaseSequence + pos; int score = 0; //score = computeScalarQP(queryLen, dbseq, dblen, globalArray + thread_id, globalPitch); score = computeScalarQP(queryLen, dbseq, dblen, global, 1); result[curWarpIdx * warpSize + lane] = score; //result[curWarpIdx * warpSize + lane] = score - 0x80000000; //update shared index in a warp if (lane == 0) { curWarpIdx = atomicAdd(&globalCounter, 1); } //update local index of each thread curWarpIdx = __shfl(curWarpIdx, 0); } globalCounter = -1; //reset global variables for next call } void kernelLaunch( int numBlocks, int numThreads, cudaStream_t& stream, unsigned* deviceBuffer, unsigned* deviceMap, unsigned batchNum, unsigned queryLen, int* devResult, int4* globalArray, size_t globalPitch ) { Compute<<<numBlocks, numThreads, 0, stream>>>( deviceBuffer, deviceMap, batchNum, queryLen, devResult, globalArray, globalPitch ); } void bindQueryPrf(cudaArray_t cu_array){ queryProfileTexture.addressMode[0] = cudaAddressModeClamp; queryProfileTexture.addressMode[1] = cudaAddressModeClamp; queryProfileTexture.filterMode = cudaFilterModePoint; queryProfileTexture.normalized = false; cudaBindTextureToArray(queryProfileTexture, cu_array, queryProfileTexture.channelDesc); }
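Note on the pair above: the Compute kernel keeps every warp resident and feeds it database sequences through a global counter; lane 0 advances globalCounter with atomicAdd and the claimed index is broadcast to the other lanes with __shfl. The sketch below isolates that persistent-warp work-queue pattern using the CUDA 9+ __shfl_sync form. The names workCounter and persistent_warps are made up for illustration, and this counter starts at 0 instead of the -1 used above.

#include <cstdio>
#include <cuda_runtime.h>

__device__ int workCounter = 0;   // index of the next unclaimed work item

// Each warp repeatedly claims one item (lane 0 does the atomicAdd), broadcasts the
// index to its 32 lanes, and processes it until the queue is exhausted.
__global__ void persistent_warps(int totalItems)
{
    int lane = threadIdx.x % warpSize;
    int warp = (blockIdx.x * blockDim.x + threadIdx.x) / warpSize;

    int idx = 0;
    if (lane == 0) idx = atomicAdd(&workCounter, 1);
    idx = __shfl_sync(0xffffffff, idx, 0);

    while (idx < totalItems)
    {
        if (lane == 0) printf("item %d processed by warp %d\n", idx, warp);

        if (lane == 0) idx = atomicAdd(&workCounter, 1);
        idx = __shfl_sync(0xffffffff, idx, 0);
    }
}

int main()
{
    persistent_warps<<<2, 64>>>(10);   // 4 warps draining a 10-item queue
    cudaDeviceSynchronize();
    return 0;
}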
8d706ee0693c3f09aaabad2bf98d51df798096fb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <thrust/device_ptr.h> #include <thrust/transform.h> #include <vector> #include "common.h" #include "inplace_abn.h" // Checks #ifndef AT_CHECK #define AT_CHECK AT_ASSERT #endif #define CHECK_CUDA(x) AT_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") #define CHECK_CONTIGUOUS(x) AT_CHECK(x.is_contiguous(), #x " must be contiguous") #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) // Utilities void get_dims(at::Tensor x, int64_t& num, int64_t& chn, int64_t& sp) { num = x.size(0); chn = x.size(1); sp = 1; for (int64_t i = 2; i < x.ndimension(); ++i) sp *= x.size(i); } // Operations for reduce template<typename T> struct SumOp { __device__ SumOp(const T *t, int c, int s) : tensor(t), chn(c), sp(s) {} __device__ __forceinline__ T operator()(int batch, int plane, int n) { return tensor[(batch * chn + plane) * sp + n]; } const T *tensor; const int chn; const int sp; }; template<typename T> struct VarOp { __device__ VarOp(T m, const T *t, int c, int s) : mean(m), tensor(t), chn(c), sp(s) {} __device__ __forceinline__ T operator()(int batch, int plane, int n) { T val = tensor[(batch * chn + plane) * sp + n]; return (val - mean) * (val - mean); } const T mean; const T *tensor; const int chn; const int sp; }; template<typename T> struct GradOp { __device__ GradOp(T _weight, T _bias, const T *_z, const T *_dz, int c, int s) : weight(_weight), bias(_bias), z(_z), dz(_dz), chn(c), sp(s) {} __device__ __forceinline__ Pair<T> operator()(int batch, int plane, int n) { T _y = (z[(batch * chn + plane) * sp + n] - bias) / weight; T _dz = dz[(batch * chn + plane) * sp + n]; return Pair<T>(_dz, _y * _dz); } const T weight; const T bias; const T *z; const T *dz; const int chn; const int sp; }; /*********** * mean_var ***********/ template<typename T> __global__ void mean_var_kernel(const T *x, T *mean, T *var, int num, int chn, int sp) { int plane = blockIdx.x; T norm = T(1) / T(num * sp); T _mean = reduce<T, SumOp<T>>(SumOp<T>(x, chn, sp), plane, num, chn, sp) * norm; __syncthreads(); T _var = reduce<T, VarOp<T>>(VarOp<T>(_mean, x, chn, sp), plane, num, chn, sp) * norm; if (threadIdx.x == 0) { mean[plane] = _mean; var[plane] = _var; } } std::vector<at::Tensor> mean_var_cuda(at::Tensor x) { CHECK_INPUT(x); // Extract dimensions int64_t num, chn, sp; get_dims(x, num, chn, sp); // Prepare output tensors auto mean = at::empty({chn}, x.type()); auto var = at::empty({chn}, x.type()); // Run kernel dim3 blocks(chn); dim3 threads(getNumThreads(sp)); AT_DISPATCH_FLOATING_TYPES(x.type(), "mean_var_cuda", ([&] { hipLaunchKernelGGL(( mean_var_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, x.data<scalar_t>(), mean.data<scalar_t>(), var.data<scalar_t>(), num, chn, sp); })); return {mean, var}; } /********** * forward **********/ template<typename T> __global__ void forward_kernel(T *x, const T *mean, const T *var, const T *weight, const T *bias, bool affine, float eps, int num, int chn, int sp) { int plane = blockIdx.x; T _mean = mean[plane]; T _var = var[plane]; T _weight = affine ? abs(weight[plane]) + eps : T(1); T _bias = affine ? 
bias[plane] : T(0); T mul = rsqrt(_var + eps) * _weight; for (int batch = 0; batch < num; ++batch) { for (int n = threadIdx.x; n < sp; n += blockDim.x) { T _x = x[(batch * chn + plane) * sp + n]; T _y = (_x - _mean) * mul + _bias; x[(batch * chn + plane) * sp + n] = _y; } } } at::Tensor forward_cuda(at::Tensor x, at::Tensor mean, at::Tensor var, at::Tensor weight, at::Tensor bias, bool affine, float eps) { CHECK_INPUT(x); CHECK_INPUT(mean); CHECK_INPUT(var); CHECK_INPUT(weight); CHECK_INPUT(bias); // Extract dimensions int64_t num, chn, sp; get_dims(x, num, chn, sp); // Run kernel dim3 blocks(chn); dim3 threads(getNumThreads(sp)); AT_DISPATCH_FLOATING_TYPES(x.type(), "forward_cuda", ([&] { hipLaunchKernelGGL(( forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, x.data<scalar_t>(), mean.data<scalar_t>(), var.data<scalar_t>(), weight.data<scalar_t>(), bias.data<scalar_t>(), affine, eps, num, chn, sp); })); return x; } /*********** * edz_eydz ***********/ template<typename T> __global__ void edz_eydz_kernel(const T *z, const T *dz, const T *weight, const T *bias, T *edz, T *eydz, bool affine, float eps, int num, int chn, int sp) { int plane = blockIdx.x; T _weight = affine ? abs(weight[plane]) + eps : 1.f; T _bias = affine ? bias[plane] : 0.f; Pair<T> res = reduce<Pair<T>, GradOp<T>>(GradOp<T>(_weight, _bias, z, dz, chn, sp), plane, num, chn, sp); __syncthreads(); if (threadIdx.x == 0) { edz[plane] = res.v1; eydz[plane] = res.v2; } } std::vector<at::Tensor> edz_eydz_cuda(at::Tensor z, at::Tensor dz, at::Tensor weight, at::Tensor bias, bool affine, float eps) { CHECK_INPUT(z); CHECK_INPUT(dz); CHECK_INPUT(weight); CHECK_INPUT(bias); // Extract dimensions int64_t num, chn, sp; get_dims(z, num, chn, sp); auto edz = at::empty({chn}, z.type()); auto eydz = at::empty({chn}, z.type()); // Run kernel dim3 blocks(chn); dim3 threads(getNumThreads(sp)); AT_DISPATCH_FLOATING_TYPES(z.type(), "edz_eydz_cuda", ([&] { hipLaunchKernelGGL(( edz_eydz_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, z.data<scalar_t>(), dz.data<scalar_t>(), weight.data<scalar_t>(), bias.data<scalar_t>(), edz.data<scalar_t>(), eydz.data<scalar_t>(), affine, eps, num, chn, sp); })); return {edz, eydz}; } /*********** * backward ***********/ template<typename T> __global__ void backward_kernel(const T *z, const T *dz, const T *var, const T *weight, const T *bias, const T *edz, const T *eydz, T *dx, T *dweight, T *dbias, bool affine, float eps, int num, int chn, int sp) { int plane = blockIdx.x; T _weight = affine ? abs(weight[plane]) + eps : 1.f; T _bias = affine ? bias[plane] : 0.f; T _var = var[plane]; T _edz = edz[plane]; T _eydz = eydz[plane]; T _mul = _weight * rsqrt(_var + eps); T count = T(num * sp); for (int batch = 0; batch < num; ++batch) { for (int n = threadIdx.x; n < sp; n += blockDim.x) { T _dz = dz[(batch * chn + plane) * sp + n]; T _y = (z[(batch * chn + plane) * sp + n] - _bias) / _weight; dx[(batch * chn + plane) * sp + n] = (_dz - _edz / count - _y * _eydz / count) * _mul; } } if (threadIdx.x == 0) { if (affine) { dweight[plane] = weight[plane] > 0 ? 
_eydz : -_eydz; dbias[plane] = _edz; } } } std::vector<at::Tensor> backward_cuda(at::Tensor z, at::Tensor dz, at::Tensor var, at::Tensor weight, at::Tensor bias, at::Tensor edz, at::Tensor eydz, bool affine, float eps) { CHECK_INPUT(z); CHECK_INPUT(dz); CHECK_INPUT(var); CHECK_INPUT(weight); CHECK_INPUT(bias); CHECK_INPUT(edz); CHECK_INPUT(eydz); // Extract dimensions int64_t num, chn, sp; get_dims(z, num, chn, sp); auto dx = at::zeros_like(z); auto dweight = at::zeros_like(weight); auto dbias = at::zeros_like(bias); // Run kernel dim3 blocks(chn); dim3 threads(getNumThreads(sp)); AT_DISPATCH_FLOATING_TYPES(z.type(), "backward_cuda", ([&] { hipLaunchKernelGGL(( backward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, z.data<scalar_t>(), dz.data<scalar_t>(), var.data<scalar_t>(), weight.data<scalar_t>(), bias.data<scalar_t>(), edz.data<scalar_t>(), eydz.data<scalar_t>(), dx.data<scalar_t>(), dweight.data<scalar_t>(), dbias.data<scalar_t>(), affine, eps, num, chn, sp); })); return {dx, dweight, dbias}; } /************** * activations **************/ template<typename T> inline void leaky_relu_backward_impl(T *z, T *dz, float slope, int64_t count) { // Create thrust pointers thrust::device_ptr<T> th_z = thrust::device_pointer_cast(z); thrust::device_ptr<T> th_dz = thrust::device_pointer_cast(dz); thrust::transform_if(th_dz, th_dz + count, th_z, th_dz, [slope] __device__ (const T& dz) { return dz * slope; }, [] __device__ (const T& z) { return z < 0; }); thrust::transform_if(th_z, th_z + count, th_z, [slope] __device__ (const T& z) { return z / slope; }, [] __device__ (const T& z) { return z < 0; }); } void leaky_relu_backward_cuda(at::Tensor z, at::Tensor dz, float slope) { CHECK_INPUT(z); CHECK_INPUT(dz); int64_t count = z.numel(); AT_DISPATCH_FLOATING_TYPES(z.type(), "leaky_relu_backward_cuda", ([&] { leaky_relu_backward_impl<scalar_t>(z.data<scalar_t>(), dz.data<scalar_t>(), slope, count); })); } template<typename T> inline void elu_backward_impl(T *z, T *dz, int64_t count) { // Create thrust pointers thrust::device_ptr<T> th_z = thrust::device_pointer_cast(z); thrust::device_ptr<T> th_dz = thrust::device_pointer_cast(dz); thrust::transform_if(th_dz, th_dz + count, th_z, th_z, th_dz, [] __device__ (const T& dz, const T& z) { return dz * (z + 1.); }, [] __device__ (const T& z) { return z < 0; }); thrust::transform_if(th_z, th_z + count, th_z, [] __device__ (const T& z) { return log1p(z); }, [] __device__ (const T& z) { return z < 0; }); } void elu_backward_cuda(at::Tensor z, at::Tensor dz) { CHECK_INPUT(z); CHECK_INPUT(dz); int64_t count = z.numel(); AT_DISPATCH_FLOATING_TYPES(z.type(), "leaky_relu_backward_cuda", ([&] { elu_backward_impl<scalar_t>(z.data<scalar_t>(), dz.data<scalar_t>(), count); })); }
8d706ee0693c3f09aaabad2bf98d51df798096fb.cu
#include <ATen/ATen.h> #include <thrust/device_ptr.h> #include <thrust/transform.h> #include <vector> #include "common.h" #include "inplace_abn.h" // Checks #ifndef AT_CHECK #define AT_CHECK AT_ASSERT #endif #define CHECK_CUDA(x) AT_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") #define CHECK_CONTIGUOUS(x) AT_CHECK(x.is_contiguous(), #x " must be contiguous") #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) // Utilities void get_dims(at::Tensor x, int64_t& num, int64_t& chn, int64_t& sp) { num = x.size(0); chn = x.size(1); sp = 1; for (int64_t i = 2; i < x.ndimension(); ++i) sp *= x.size(i); } // Operations for reduce template<typename T> struct SumOp { __device__ SumOp(const T *t, int c, int s) : tensor(t), chn(c), sp(s) {} __device__ __forceinline__ T operator()(int batch, int plane, int n) { return tensor[(batch * chn + plane) * sp + n]; } const T *tensor; const int chn; const int sp; }; template<typename T> struct VarOp { __device__ VarOp(T m, const T *t, int c, int s) : mean(m), tensor(t), chn(c), sp(s) {} __device__ __forceinline__ T operator()(int batch, int plane, int n) { T val = tensor[(batch * chn + plane) * sp + n]; return (val - mean) * (val - mean); } const T mean; const T *tensor; const int chn; const int sp; }; template<typename T> struct GradOp { __device__ GradOp(T _weight, T _bias, const T *_z, const T *_dz, int c, int s) : weight(_weight), bias(_bias), z(_z), dz(_dz), chn(c), sp(s) {} __device__ __forceinline__ Pair<T> operator()(int batch, int plane, int n) { T _y = (z[(batch * chn + plane) * sp + n] - bias) / weight; T _dz = dz[(batch * chn + plane) * sp + n]; return Pair<T>(_dz, _y * _dz); } const T weight; const T bias; const T *z; const T *dz; const int chn; const int sp; }; /*********** * mean_var ***********/ template<typename T> __global__ void mean_var_kernel(const T *x, T *mean, T *var, int num, int chn, int sp) { int plane = blockIdx.x; T norm = T(1) / T(num * sp); T _mean = reduce<T, SumOp<T>>(SumOp<T>(x, chn, sp), plane, num, chn, sp) * norm; __syncthreads(); T _var = reduce<T, VarOp<T>>(VarOp<T>(_mean, x, chn, sp), plane, num, chn, sp) * norm; if (threadIdx.x == 0) { mean[plane] = _mean; var[plane] = _var; } } std::vector<at::Tensor> mean_var_cuda(at::Tensor x) { CHECK_INPUT(x); // Extract dimensions int64_t num, chn, sp; get_dims(x, num, chn, sp); // Prepare output tensors auto mean = at::empty({chn}, x.type()); auto var = at::empty({chn}, x.type()); // Run kernel dim3 blocks(chn); dim3 threads(getNumThreads(sp)); AT_DISPATCH_FLOATING_TYPES(x.type(), "mean_var_cuda", ([&] { mean_var_kernel<scalar_t><<<blocks, threads>>>( x.data<scalar_t>(), mean.data<scalar_t>(), var.data<scalar_t>(), num, chn, sp); })); return {mean, var}; } /********** * forward **********/ template<typename T> __global__ void forward_kernel(T *x, const T *mean, const T *var, const T *weight, const T *bias, bool affine, float eps, int num, int chn, int sp) { int plane = blockIdx.x; T _mean = mean[plane]; T _var = var[plane]; T _weight = affine ? abs(weight[plane]) + eps : T(1); T _bias = affine ? 
bias[plane] : T(0); T mul = rsqrt(_var + eps) * _weight; for (int batch = 0; batch < num; ++batch) { for (int n = threadIdx.x; n < sp; n += blockDim.x) { T _x = x[(batch * chn + plane) * sp + n]; T _y = (_x - _mean) * mul + _bias; x[(batch * chn + plane) * sp + n] = _y; } } } at::Tensor forward_cuda(at::Tensor x, at::Tensor mean, at::Tensor var, at::Tensor weight, at::Tensor bias, bool affine, float eps) { CHECK_INPUT(x); CHECK_INPUT(mean); CHECK_INPUT(var); CHECK_INPUT(weight); CHECK_INPUT(bias); // Extract dimensions int64_t num, chn, sp; get_dims(x, num, chn, sp); // Run kernel dim3 blocks(chn); dim3 threads(getNumThreads(sp)); AT_DISPATCH_FLOATING_TYPES(x.type(), "forward_cuda", ([&] { forward_kernel<scalar_t><<<blocks, threads>>>( x.data<scalar_t>(), mean.data<scalar_t>(), var.data<scalar_t>(), weight.data<scalar_t>(), bias.data<scalar_t>(), affine, eps, num, chn, sp); })); return x; } /*********** * edz_eydz ***********/ template<typename T> __global__ void edz_eydz_kernel(const T *z, const T *dz, const T *weight, const T *bias, T *edz, T *eydz, bool affine, float eps, int num, int chn, int sp) { int plane = blockIdx.x; T _weight = affine ? abs(weight[plane]) + eps : 1.f; T _bias = affine ? bias[plane] : 0.f; Pair<T> res = reduce<Pair<T>, GradOp<T>>(GradOp<T>(_weight, _bias, z, dz, chn, sp), plane, num, chn, sp); __syncthreads(); if (threadIdx.x == 0) { edz[plane] = res.v1; eydz[plane] = res.v2; } } std::vector<at::Tensor> edz_eydz_cuda(at::Tensor z, at::Tensor dz, at::Tensor weight, at::Tensor bias, bool affine, float eps) { CHECK_INPUT(z); CHECK_INPUT(dz); CHECK_INPUT(weight); CHECK_INPUT(bias); // Extract dimensions int64_t num, chn, sp; get_dims(z, num, chn, sp); auto edz = at::empty({chn}, z.type()); auto eydz = at::empty({chn}, z.type()); // Run kernel dim3 blocks(chn); dim3 threads(getNumThreads(sp)); AT_DISPATCH_FLOATING_TYPES(z.type(), "edz_eydz_cuda", ([&] { edz_eydz_kernel<scalar_t><<<blocks, threads>>>( z.data<scalar_t>(), dz.data<scalar_t>(), weight.data<scalar_t>(), bias.data<scalar_t>(), edz.data<scalar_t>(), eydz.data<scalar_t>(), affine, eps, num, chn, sp); })); return {edz, eydz}; } /*********** * backward ***********/ template<typename T> __global__ void backward_kernel(const T *z, const T *dz, const T *var, const T *weight, const T *bias, const T *edz, const T *eydz, T *dx, T *dweight, T *dbias, bool affine, float eps, int num, int chn, int sp) { int plane = blockIdx.x; T _weight = affine ? abs(weight[plane]) + eps : 1.f; T _bias = affine ? bias[plane] : 0.f; T _var = var[plane]; T _edz = edz[plane]; T _eydz = eydz[plane]; T _mul = _weight * rsqrt(_var + eps); T count = T(num * sp); for (int batch = 0; batch < num; ++batch) { for (int n = threadIdx.x; n < sp; n += blockDim.x) { T _dz = dz[(batch * chn + plane) * sp + n]; T _y = (z[(batch * chn + plane) * sp + n] - _bias) / _weight; dx[(batch * chn + plane) * sp + n] = (_dz - _edz / count - _y * _eydz / count) * _mul; } } if (threadIdx.x == 0) { if (affine) { dweight[plane] = weight[plane] > 0 ? 
_eydz : -_eydz; dbias[plane] = _edz; } } } std::vector<at::Tensor> backward_cuda(at::Tensor z, at::Tensor dz, at::Tensor var, at::Tensor weight, at::Tensor bias, at::Tensor edz, at::Tensor eydz, bool affine, float eps) { CHECK_INPUT(z); CHECK_INPUT(dz); CHECK_INPUT(var); CHECK_INPUT(weight); CHECK_INPUT(bias); CHECK_INPUT(edz); CHECK_INPUT(eydz); // Extract dimensions int64_t num, chn, sp; get_dims(z, num, chn, sp); auto dx = at::zeros_like(z); auto dweight = at::zeros_like(weight); auto dbias = at::zeros_like(bias); // Run kernel dim3 blocks(chn); dim3 threads(getNumThreads(sp)); AT_DISPATCH_FLOATING_TYPES(z.type(), "backward_cuda", ([&] { backward_kernel<scalar_t><<<blocks, threads>>>( z.data<scalar_t>(), dz.data<scalar_t>(), var.data<scalar_t>(), weight.data<scalar_t>(), bias.data<scalar_t>(), edz.data<scalar_t>(), eydz.data<scalar_t>(), dx.data<scalar_t>(), dweight.data<scalar_t>(), dbias.data<scalar_t>(), affine, eps, num, chn, sp); })); return {dx, dweight, dbias}; } /************** * activations **************/ template<typename T> inline void leaky_relu_backward_impl(T *z, T *dz, float slope, int64_t count) { // Create thrust pointers thrust::device_ptr<T> th_z = thrust::device_pointer_cast(z); thrust::device_ptr<T> th_dz = thrust::device_pointer_cast(dz); thrust::transform_if(th_dz, th_dz + count, th_z, th_dz, [slope] __device__ (const T& dz) { return dz * slope; }, [] __device__ (const T& z) { return z < 0; }); thrust::transform_if(th_z, th_z + count, th_z, [slope] __device__ (const T& z) { return z / slope; }, [] __device__ (const T& z) { return z < 0; }); } void leaky_relu_backward_cuda(at::Tensor z, at::Tensor dz, float slope) { CHECK_INPUT(z); CHECK_INPUT(dz); int64_t count = z.numel(); AT_DISPATCH_FLOATING_TYPES(z.type(), "leaky_relu_backward_cuda", ([&] { leaky_relu_backward_impl<scalar_t>(z.data<scalar_t>(), dz.data<scalar_t>(), slope, count); })); } template<typename T> inline void elu_backward_impl(T *z, T *dz, int64_t count) { // Create thrust pointers thrust::device_ptr<T> th_z = thrust::device_pointer_cast(z); thrust::device_ptr<T> th_dz = thrust::device_pointer_cast(dz); thrust::transform_if(th_dz, th_dz + count, th_z, th_z, th_dz, [] __device__ (const T& dz, const T& z) { return dz * (z + 1.); }, [] __device__ (const T& z) { return z < 0; }); thrust::transform_if(th_z, th_z + count, th_z, [] __device__ (const T& z) { return log1p(z); }, [] __device__ (const T& z) { return z < 0; }); } void elu_backward_cuda(at::Tensor z, at::Tensor dz) { CHECK_INPUT(z); CHECK_INPUT(dz); int64_t count = z.numel(); AT_DISPATCH_FLOATING_TYPES(z.type(), "leaky_relu_backward_cuda", ([&] { elu_backward_impl<scalar_t>(z.data<scalar_t>(), dz.data<scalar_t>(), count); })); }
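Note on the pair above: leaky_relu_backward_impl runs two thrust::transform_if passes so the activation can stay in place; wherever the stored output z is negative it scales the incoming gradient by slope and recovers the pre-activation as z / slope. The same element-wise step written as a single plain kernel is sketched below; leaky_relu_backward_inplace is an illustrative name, not part of the extension's API.

#include <cstdio>
#include <cuda_runtime.h>

// Where the stored LeakyReLU output z is negative, the forward pass computed
// z = slope * x, so the gradient picks up a factor of slope and the pre-activation
// is recovered in place as x = z / slope; non-negative positions are untouched.
__global__ void leaky_relu_backward_inplace(float *z, float *dz, float slope, int count)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < count && z[i] < 0.0f)
    {
        dz[i] *= slope;
        z[i]  /= slope;
    }
}

int main()
{
    const int count = 4;
    const float slope = 0.01f;
    float h_z[count]  = { -0.002f, 0.5f, -0.01f, 2.0f };  // stored activations
    float h_dz[count] = {  1.0f,   1.0f,  1.0f,  1.0f };  // incoming gradients

    float *d_z, *d_dz;
    cudaMalloc(&d_z,  count * sizeof(float));
    cudaMalloc(&d_dz, count * sizeof(float));
    cudaMemcpy(d_z,  h_z,  count * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_dz, h_dz, count * sizeof(float), cudaMemcpyHostToDevice);

    leaky_relu_backward_inplace<<<1, 32>>>(d_z, d_dz, slope, count);

    cudaMemcpy(h_z,  d_z,  count * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(h_dz, d_dz, count * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < count; i++)
        printf("x = % .3f  dx = % .4f\n", h_z[i], h_dz[i]);

    cudaFree(d_z);
    cudaFree(d_dz);
    return 0;
}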
bcfa8d028993bb83a6bd671902ce538efacd6a6f.hip
// !!! This is a file automatically generated by hipify!!! #include "../common/common.h" #include <stdio.h> #include <hip/hip_runtime.h> /* * An example of using CUDA callbacks to trigger work on the host after the * completion of asynchronous work on the device. In this example, n_streams * CUDA streams are created and 4 kernels are launched asynchronously in each. * Then, a callback is added at the completion of those asynchronous kernels * that prints diagnostic information. */ #define N 100000 #define NSTREAM 4 void /*CUDART_CB*/ my_callback(hipStream_t stream, hipError_t status, void *data) { printf("callback from stream %d\n", *((int *)data)); } __global__ void kernel_1() { double sum = 0.0; for(int i = 0; i < N; i++) { sum = sum + tan(0.1) * tan(0.1); } } __global__ void kernel_2() { double sum = 0.0; for(int i = 0; i < N; i++) { sum = sum + tan(0.1) * tan(0.1); } } __global__ void kernel_3() { double sum = 0.0; for(int i = 0; i < N; i++) { sum = sum + tan(0.1) * tan(0.1); } } __global__ void kernel_4() { double sum = 0.0; for(int i = 0; i < N; i++) { sum = sum + tan(0.1) * tan(0.1); } } //CUDA // int main(int argc, char **argv) { // int n_streams = NSTREAM; if (argc > 1) n_streams = atoi(argv[1]); //0 int dev = 0; hipDeviceProp_t deviceProp; CHECK(hipGetDeviceProperties(&deviceProp, dev)); printf("> %s Starting...\n", argv[0]); printf("> Using Device %d: %s\n", dev, deviceProp.name); CHECK(hipSetDevice(dev)); // check if device support hyper-q if (deviceProp.major < 3 || (deviceProp.major == 3 && deviceProp.minor < 5)) { if (deviceProp.concurrentKernels == 0) { printf("> GPU does not support concurrent kernel execution (SM 3.5 " "or higher required)\n"); printf("> CUDA kernel runs will be serialized\n"); } else { printf("> GPU does not support HyperQ\n"); printf("> CUDA kernel runs will have limited concurrency\n"); } } printf("> Compute Capability %d.%d hardware with %d multi-processors\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount); // // set up max connectioin char * iname = "CUDA_DEVICE_MAX_CONNECTIONS"; setenv (iname, "8", 1); char *ivalue = getenv (iname); printf ("> %s = %s\n", iname, ivalue); printf ("> with streams = %d\n", n_streams); // Allocate and initialize an array of stream handles hipStream_t *streams = (hipStream_t *) malloc(n_streams * sizeof( hipStream_t)); for (int i = 0 ; i < n_streams ; i++) { CHECK(hipStreamCreate(&(streams[i]))); } dim3 block (1); dim3 grid (1); hipEvent_t start_event, stop_event; CHECK(hipEventCreate(&start_event)); CHECK(hipEventCreate(&stop_event)); int stream_ids[n_streams]; CHECK(hipEventRecord(start_event, 0)); for (int i = 0; i < n_streams; i++) { stream_ids[i] = i; hipLaunchKernelGGL(( kernel_1), dim3(grid), dim3(block), 0, streams[i], ); hipLaunchKernelGGL(( kernel_2), dim3(grid), dim3(block), 0, streams[i], ); hipLaunchKernelGGL(( kernel_3), dim3(grid), dim3(block), 0, streams[i], ); hipLaunchKernelGGL(( kernel_4), dim3(grid), dim3(block), 0, streams[i], ); // CHECK(hipStreamAddCallback(streams[i], my_callback, (void *)(stream_ids + i), 0)); } CHECK(hipEventRecord(stop_event, 0)); CHECK(hipEventSynchronize(stop_event)); float elapsed_time; CHECK(hipEventElapsedTime(&elapsed_time, start_event, stop_event)); printf("Measured time for parallel execution = %.3fs\n", elapsed_time / 1000.0f); // release all stream for (int i = 0 ; i < n_streams ; i++) { CHECK(hipStreamDestroy(streams[i])); } free(streams); /* * hipDeviceReset must be called before exiting in order for profiling and * tracing tools such as Nsight 
and Visual Profiler to show complete traces. */ CHECK(hipDeviceReset()); return 0; }
bcfa8d028993bb83a6bd671902ce538efacd6a6f.cu
#include "../common/common.h" #include <stdio.h> #include <cuda_runtime.h> /* * An example of using CUDA callbacks to trigger work on the host after the * completion of asynchronous work on the device. In this example, n_streams * CUDA streams are created and 4 kernels are launched asynchronously in each. * Then, a callback is added at the completion of those asynchronous kernels * that prints diagnostic information. */ #define N 100000 #define NSTREAM 4 void /*CUDART_CB*/ my_callback(cudaStream_t stream, cudaError_t status, void *data) { printf("callback from stream %d\n", *((int *)data)); } __global__ void kernel_1() { double sum = 0.0; for(int i = 0; i < N; i++) { sum = sum + tan(0.1) * tan(0.1); } } __global__ void kernel_2() { double sum = 0.0; for(int i = 0; i < N; i++) { sum = sum + tan(0.1) * tan(0.1); } } __global__ void kernel_3() { double sum = 0.0; for(int i = 0; i < N; i++) { sum = sum + tan(0.1) * tan(0.1); } } __global__ void kernel_4() { double sum = 0.0; for(int i = 0; i < N; i++) { sum = sum + tan(0.1) * tan(0.1); } } //感觉今天学到的CUDA知识差不多了,把这个例子看完。 //主要我还要结合实践来学,光看书就容易烦。 int main(int argc, char **argv) { //定义一下流的数量 int n_streams = NSTREAM; if (argc > 1) n_streams = atoi(argv[1]); //获得0设备的信息 int dev = 0; cudaDeviceProp deviceProp; CHECK(cudaGetDeviceProperties(&deviceProp, dev)); printf("> %s Starting...\n", argv[0]); printf("> Using Device %d: %s\n", dev, deviceProp.name); CHECK(cudaSetDevice(dev)); // check if device support hyper-q if (deviceProp.major < 3 || (deviceProp.major == 3 && deviceProp.minor < 5)) { if (deviceProp.concurrentKernels == 0) { printf("> GPU does not support concurrent kernel execution (SM 3.5 " "or higher required)\n"); printf("> CUDA kernel runs will be serialized\n"); } else { printf("> GPU does not support HyperQ\n"); printf("> CUDA kernel runs will have limited concurrency\n"); } } printf("> Compute Capability %d.%d hardware with %d multi-processors\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount); //配置一下硬件队列的数量 // set up max connectioin char * iname = "CUDA_DEVICE_MAX_CONNECTIONS"; setenv (iname, "8", 1); char *ivalue = getenv (iname); printf ("> %s = %s\n", iname, ivalue); printf ("> with streams = %d\n", n_streams); // Allocate and initialize an array of stream handles cudaStream_t *streams = (cudaStream_t *) malloc(n_streams * sizeof( cudaStream_t)); for (int i = 0 ; i < n_streams ; i++) { CHECK(cudaStreamCreate(&(streams[i]))); } dim3 block (1); dim3 grid (1); cudaEvent_t start_event, stop_event; CHECK(cudaEventCreate(&start_event)); CHECK(cudaEventCreate(&stop_event)); int stream_ids[n_streams]; CHECK(cudaEventRecord(start_event, 0)); for (int i = 0; i < n_streams; i++) { stream_ids[i] = i; kernel_1<<<grid, block, 0, streams[i]>>>(); kernel_2<<<grid, block, 0, streams[i]>>>(); kernel_3<<<grid, block, 0, streams[i]>>>(); kernel_4<<<grid, block, 0, streams[i]>>>(); //这个有点神奇哦,这个回调函数的执行时机是什么时候? CHECK(cudaStreamAddCallback(streams[i], my_callback, (void *)(stream_ids + i), 0)); } CHECK(cudaEventRecord(stop_event, 0)); CHECK(cudaEventSynchronize(stop_event)); float elapsed_time; CHECK(cudaEventElapsedTime(&elapsed_time, start_event, stop_event)); printf("Measured time for parallel execution = %.3fs\n", elapsed_time / 1000.0f); // release all stream for (int i = 0 ; i < n_streams ; i++) { CHECK(cudaStreamDestroy(streams[i])); } free(streams); /* * cudaDeviceReset must be called before exiting in order for profiling and * tracing tools such as Nsight and Visual Profiler to show complete traces. 
*/ CHECK(cudaDeviceReset()); return 0; }
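Note on the pair above: cudaStreamAddCallback (hipStreamAddCallback in the HIP port) is the legacy way to run host code once all previously enqueued work in a stream has completed. On CUDA 10 and newer the same effect is usually obtained with cudaLaunchHostFunc, whose host function takes only a user-data pointer. The sketch below is a generic illustration of that pattern under those assumptions, not a modification of the example above.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void busy_kernel()
{
    double sum = 0.0;
    for (int i = 0; i < 100000; i++) sum += tan(0.1) * tan(0.1);
}

// Runs on a runtime-managed host thread after all prior work in the stream completes.
void CUDART_CB after_stream_work(void *data)
{
    printf("host function fired for stream %d\n", *(int *)data);
}

int main()
{
    const int n_streams = 4;
    cudaStream_t streams[n_streams];
    int ids[n_streams];

    for (int i = 0; i < n_streams; i++) cudaStreamCreate(&streams[i]);

    for (int i = 0; i < n_streams; i++)
    {
        ids[i] = i;
        busy_kernel<<<1, 1, 0, streams[i]>>>();
        cudaLaunchHostFunc(streams[i], after_stream_work, &ids[i]);  // CUDA 10+
    }

    cudaDeviceSynchronize();
    for (int i = 0; i < n_streams; i++) cudaStreamDestroy(streams[i]);
    return 0;
}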
acf09a12d3f95d088021520fa32b2493b4e9e45b.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <time.h> #include <string> #include <vector> #include <sstream> #include <hip/hip_runtime.h> #include <math.h> #include <fstream> // Libreria para leer archivos #include <typeinfo> // for 'typeid' to work #include <tuple> using namespace std; // tuple (elem , posElem) vector<tuple<int , int>> getRemainingMultiples(int* Hit_State,int N){ int i,j; int elem; int posElem; vector<tuple<int, int>> M; tuple<int, int> tup; /* 1 -> not multiple 2 -> multiple per row 3 -> multiple per column 4 -> multiple per row and column 5 -> not paintable 6 -> paintable // Eliminado */ for(i = 0; i < N; i++ ){ for(j = 0; j < N; j++){ posElem = i + j*N; elem = Hit_State[posElem]; tup = make_tuple(elem,posElem); switch(elem) { case 2: M.push_back(tup); break; case 3: M.push_back(tup); break; case 4: M.push_back(tup); break; default: break; } } } return M; } int main( ){ tuple<int, int> tup1, tup2; tup1 = make_tuple(1,3); tup2 = make_tuple(1,5); vector<tuple<int, int>> M; M.push_back(tup1); M.push_back(tup2); /* for( int i = 0; i < M.size() ; i++){ cout << "tuple["<< i <<"] = (" << get<0>(M[i]) <<" ," << get<1>(M[i]) << ") " << endl; } */ int N = 10; string* Hitori = new string[N*N]; Hitori[0] = "dsadadasdas"; cout << "EL VALOR DE GIROTIRO : "<< Hitori[0] << endl; return 0; }
acf09a12d3f95d088021520fa32b2493b4e9e45b.cu
#include <iostream> #include <time.h> #include <string> #include <vector> #include <sstream> #include <cuda_runtime.h> #include <math.h> #include <fstream> // Libreria para leer archivos #include <typeinfo> // for 'typeid' to work #include <tuple> using namespace std; // tuple (elem , posElem) vector<tuple<int , int>> getRemainingMultiples(int* Hit_State,int N){ int i,j; int elem; int posElem; vector<tuple<int, int>> M; tuple<int, int> tup; /* 1 -> not multiple 2 -> multiple per row 3 -> multiple per column 4 -> multiple per row and column 5 -> not paintable 6 -> paintable // Eliminado */ for(i = 0; i < N; i++ ){ for(j = 0; j < N; j++){ posElem = i + j*N; elem = Hit_State[posElem]; tup = make_tuple(elem,posElem); switch(elem) { case 2: M.push_back(tup); break; case 3: M.push_back(tup); break; case 4: M.push_back(tup); break; default: break; } } } return M; } int main( ){ tuple<int, int> tup1, tup2; tup1 = make_tuple(1,3); tup2 = make_tuple(1,5); vector<tuple<int, int>> M; M.push_back(tup1); M.push_back(tup2); /* for( int i = 0; i < M.size() ; i++){ cout << "tuple["<< i <<"] = (" << get<0>(M[i]) <<" ," << get<1>(M[i]) << ") " << endl; } */ int N = 10; string* Hitori = new string[N*N]; Hitori[0] = "dsadadasdas"; cout << "EL VALOR DE GIROTIRO : "<< Hitori[0] << endl; return 0; }
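Note on the pair above: getRemainingMultiples scans a column-major N x N Hitori state grid (posElem = i + j*N) and returns every cell still marked 2, 3 or 4, i.e. a duplicate in its row, its column, or both, as an (elem, posElem) tuple. The host-only sketch below shows the same walk on a made-up 3x3 state array; the function and variable names here are illustrative only.

#include <cstdio>
#include <tuple>
#include <vector>
using namespace std;

// Same walk as getRemainingMultiples: keep cells whose state is 2, 3 or 4.
vector<tuple<int, int>> remaining_multiples(const int* state, int N) {
    vector<tuple<int, int>> M;
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++) {
            int pos = i + j * N;                  // column-major position
            int elem = state[pos];
            if (elem >= 2 && elem <= 4) M.push_back(make_tuple(elem, pos));
        }
    return M;
}

int main() {
    // 3x3 toy state: 1 = not multiple, 2/3/4 = multiple, 5/6 = paint decision made.
    int state[9] = { 1, 2, 5,
                     3, 1, 6,
                     4, 1, 1 };
    for (const auto& t : remaining_multiples(state, 3))
        printf("state %d at position %d\n", get<0>(t), get<1>(t));
    return 0;
}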
fbccfe2d5f12c194e6005874cbff59c8181717ef.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>

int global_i = 2;

extern "C" int foo(int);
extern "C" int get_global();
extern "C" int set_global(int);
extern "C" int array_trans_i(int* array, int n);
extern "C" int array_trans_l(long* array, int n);
extern "C" int array_trans_f(float* array, int n);

int get_global(){
    return global_i;
}

// Print each element and double it in place; same pattern for int, long and float.
int array_trans_i(int* array, int n){
    for(int i = 0; i < n; i++){ printf("%d\n", array[i]); array[i] = array[i] * 2; }
    return 0;
}

int array_trans_l(long* array, int n){
    for(int i = 0; i < n; i++){ printf("%ld\n", array[i]); array[i] = array[i] * 2; }
    return 0;
}

int array_trans_f(float* array, int n){
    for(int i = 0; i < n; i++){ printf("%.1f\n", array[i]); array[i] = array[i] * 2; }
    return 0;
}

int set_global(int i){
    global_i = i;
    return (int) 11;
}

// Square each element of A into B on the device.
__global__ void gpu(float *A, float *B, int N){
    int ib = blockDim.x * blockIdx.x + threadIdx.x;
    if (ib < N){
        B[ib] = A[ib] * A[ib];
    }
}

int foo(int a){
    int N = 10;
    float A[N];
    float B[N];
    for(int i = 0; i < N; i++){
        A[i] = a + i;
    }

    int threadsPerBlock = 20;
    int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;

    float *GA, *GB;
    hipMalloc((void**)&GA, N * sizeof(float));
    hipMalloc((void**)&GB, N * sizeof(float));
    hipMemcpy(GA, A, N * sizeof(float), hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( gpu), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, GA, GB, N);
    hipMemcpy(B, GB, N * sizeof(float), hipMemcpyDeviceToHost);

    float sum = 0;
    for(int i = 0; i < N; i++){
        sum += B[i];
    }

    // Release the device buffers; A and B are host stack arrays and are not hipFree'd.
    hipFree(GA);
    hipFree(GB);

    printf("from cuda");
    return (int) sum;
}
fbccfe2d5f12c194e6005874cbff59c8181717ef.cu
#include <cuda_runtime.h>
#include <stdlib.h>
#include <stdio.h>

int global_i = 2;

extern "C" int foo(int);
extern "C" int get_global();
extern "C" int set_global(int);
extern "C" int array_trans_i(int* array, int n);
extern "C" int array_trans_l(long* array, int n);
extern "C" int array_trans_f(float* array, int n);

int get_global(){
    return global_i;
}

// Print each element and double it in place; same pattern for int, long and float.
int array_trans_i(int* array, int n){
    for(int i = 0; i < n; i++){ printf("%d\n", array[i]); array[i] = array[i] * 2; }
    return 0;
}

int array_trans_l(long* array, int n){
    for(int i = 0; i < n; i++){ printf("%ld\n", array[i]); array[i] = array[i] * 2; }
    return 0;
}

int array_trans_f(float* array, int n){
    for(int i = 0; i < n; i++){ printf("%.1f\n", array[i]); array[i] = array[i] * 2; }
    return 0;
}

int set_global(int i){
    global_i = i;
    return (int) 11;
}

// Square each element of A into B on the device.
__global__ void gpu(float *A, float *B, int N){
    int ib = blockDim.x * blockIdx.x + threadIdx.x;
    if (ib < N){
        B[ib] = A[ib] * A[ib];
    }
}

int foo(int a){
    int N = 10;
    float A[N];
    float B[N];
    for(int i = 0; i < N; i++){
        A[i] = a + i;
    }

    int threadsPerBlock = 20;
    int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;

    float *GA, *GB;
    cudaMalloc((void**)&GA, N * sizeof(float));
    cudaMalloc((void**)&GB, N * sizeof(float));
    cudaMemcpy(GA, A, N * sizeof(float), cudaMemcpyHostToDevice);
    gpu<<<blocksPerGrid, threadsPerBlock>>>(GA, GB, N);
    cudaMemcpy(B, GB, N * sizeof(float), cudaMemcpyDeviceToHost);

    float sum = 0;
    for(int i = 0; i < N; i++){
        sum += B[i];
    }

    // Release the device buffers; A and B are host stack arrays and are not cudaFree'd.
    cudaFree(GA);
    cudaFree(GB);

    printf("from cuda");
    return (int) sum;
}
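Note on the pair above: foo checks none of the CUDA runtime return codes and never inspects the kernel launch status. A common pattern, sketched below as a standalone test file (the CUDA_CHECK macro and the square kernel are illustrative, not part of the library above), is to wrap every runtime call and query cudaGetLastError right after the launch.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Minimal error-checking helper, similar in spirit to the CHECK macro used by the
// stream-callback example earlier in this collection.
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                       \
                    cudaGetErrorString(err_), __FILE__, __LINE__);            \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)

__global__ void square(const float* in, float* out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = in[i] * in[i];
}

int main() {
    const int n = 10;
    float h_in[n], h_out[n];
    for (int i = 0; i < n; i++) h_in[i] = float(i + 1);

    float *d_in = nullptr, *d_out = nullptr;
    CUDA_CHECK(cudaMalloc(&d_in, n * sizeof(float)));
    CUDA_CHECK(cudaMalloc(&d_out, n * sizeof(float)));
    CUDA_CHECK(cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice));

    square<<<1, 32>>>(d_in, d_out, n);
    CUDA_CHECK(cudaGetLastError());        // catches bad launch configurations
    CUDA_CHECK(cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost));

    float sum = 0.0f;
    for (int i = 0; i < n; i++) sum += h_out[i];
    printf("sum of squares 1..%d = %.0f\n", n, sum);   // expected: 385

    CUDA_CHECK(cudaFree(d_in));
    CUDA_CHECK(cudaFree(d_out));
    return 0;
}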
d845a565874c21cc1ad9eb4e648ee89caab2c585.hip
// !!! This is a file automatically generated by hipify!!! // from // https://github.com/rosinality/stylegan2-pytorch/blob/master/op/upfirdn2d_kernel.cu // Copyright (c) 2019, NVIDIA Corporation. All rights reserved. // // This work is made available under the Nvidia Source Code License-NC. // To view a copy of this license, visit // https://nvlabs.github.io/stylegan2/license.html #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/hip/HIPContext.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <torch/types.h> #include <ATen/hip/HIPApplyUtils.cuh> static __host__ __device__ __forceinline__ int floor_div(int a, int b) { int c = a / b; if (c * b > a) { c--; } return c; } struct UpFirDn2DKernelParams { int up_x; int up_y; int down_x; int down_y; int pad_x0; int pad_x1; int pad_y0; int pad_y1; int major_dim; int in_h; int in_w; int minor_dim; int kernel_h; int kernel_w; int out_h; int out_w; int loop_major; int loop_x; }; template <typename scalar_t> __global__ void upfirdn2d_kernel_large(scalar_t *out, const scalar_t *input, const scalar_t *kernel, const UpFirDn2DKernelParams p) { int minor_idx = blockIdx.x * blockDim.x + threadIdx.x; int out_y = minor_idx / p.minor_dim; minor_idx -= out_y * p.minor_dim; int out_x_base = blockIdx.y * p.loop_x * blockDim.y + threadIdx.y; int major_idx_base = blockIdx.z * p.loop_major; if (out_x_base >= p.out_w || out_y >= p.out_h || major_idx_base >= p.major_dim) { return; } int mid_y = out_y * p.down_y + p.up_y - 1 - p.pad_y0; int in_y = min(max(floor_div(mid_y, p.up_y), 0), p.in_h); int h = min(max(floor_div(mid_y + p.kernel_h, p.up_y), 0), p.in_h) - in_y; int kernel_y = mid_y + p.kernel_h - (in_y + 1) * p.up_y; for (int loop_major = 0, major_idx = major_idx_base; loop_major < p.loop_major && major_idx < p.major_dim; loop_major++, major_idx++) { for (int loop_x = 0, out_x = out_x_base; loop_x < p.loop_x && out_x < p.out_w; loop_x++, out_x += blockDim.y) { int mid_x = out_x * p.down_x + p.up_x - 1 - p.pad_x0; int in_x = min(max(floor_div(mid_x, p.up_x), 0), p.in_w); int w = min(max(floor_div(mid_x + p.kernel_w, p.up_x), 0), p.in_w) - in_x; int kernel_x = mid_x + p.kernel_w - (in_x + 1) * p.up_x; const scalar_t *x_p = &input[((major_idx * p.in_h + in_y) * p.in_w + in_x) * p.minor_dim + minor_idx]; const scalar_t *k_p = &kernel[kernel_y * p.kernel_w + kernel_x]; int x_px = p.minor_dim; int k_px = -p.up_x; int x_py = p.in_w * p.minor_dim; int k_py = -p.up_y * p.kernel_w; scalar_t v = 0.0f; for (int y = 0; y < h; y++) { for (int x = 0; x < w; x++) { v += static_cast<scalar_t>(*x_p) * static_cast<scalar_t>(*k_p); x_p += x_px; k_p += k_px; } x_p += x_py - w * x_px; k_p += k_py - w * k_px; } out[((major_idx * p.out_h + out_y) * p.out_w + out_x) * p.minor_dim + minor_idx] = v; } } } template <typename scalar_t, int up_x, int up_y, int down_x, int down_y, int kernel_h, int kernel_w, int tile_out_h, int tile_out_w> __global__ void upfirdn2d_kernel(scalar_t *out, const scalar_t *input, const scalar_t *kernel, const UpFirDn2DKernelParams p) { const int tile_in_h = ((tile_out_h - 1) * down_y + kernel_h - 1) / up_y + 1; const int tile_in_w = ((tile_out_w - 1) * down_x + kernel_w - 1) / up_x + 1; __shared__ volatile float sk[kernel_h][kernel_w]; __shared__ volatile float sx[tile_in_h][tile_in_w]; int minor_idx = blockIdx.x; int tile_out_y = minor_idx / p.minor_dim; minor_idx -= tile_out_y * p.minor_dim; tile_out_y *= tile_out_h; int tile_out_x_base = blockIdx.y * p.loop_x * tile_out_w; int major_idx_base = blockIdx.z * p.loop_major; 
if (tile_out_x_base >= p.out_w | tile_out_y >= p.out_h | major_idx_base >= p.major_dim) { return; } for (int tap_idx = threadIdx.x; tap_idx < kernel_h * kernel_w; tap_idx += blockDim.x) { int ky = tap_idx / kernel_w; int kx = tap_idx - ky * kernel_w; scalar_t v = 0.0; if (kx < p.kernel_w & ky < p.kernel_h) { v = kernel[(p.kernel_h - 1 - ky) * p.kernel_w + (p.kernel_w - 1 - kx)]; } sk[ky][kx] = v; } for (int loop_major = 0, major_idx = major_idx_base; loop_major < p.loop_major & major_idx < p.major_dim; loop_major++, major_idx++) { for (int loop_x = 0, tile_out_x = tile_out_x_base; loop_x < p.loop_x & tile_out_x < p.out_w; loop_x++, tile_out_x += tile_out_w) { int tile_mid_x = tile_out_x * down_x + up_x - 1 - p.pad_x0; int tile_mid_y = tile_out_y * down_y + up_y - 1 - p.pad_y0; int tile_in_x = floor_div(tile_mid_x, up_x); int tile_in_y = floor_div(tile_mid_y, up_y); __syncthreads(); for (int in_idx = threadIdx.x; in_idx < tile_in_h * tile_in_w; in_idx += blockDim.x) { int rel_in_y = in_idx / tile_in_w; int rel_in_x = in_idx - rel_in_y * tile_in_w; int in_x = rel_in_x + tile_in_x; int in_y = rel_in_y + tile_in_y; scalar_t v = 0.0; if (in_x >= 0 & in_y >= 0 & in_x < p.in_w & in_y < p.in_h) { v = input[((major_idx * p.in_h + in_y) * p.in_w + in_x) * p.minor_dim + minor_idx]; } sx[rel_in_y][rel_in_x] = v; } __syncthreads(); for (int out_idx = threadIdx.x; out_idx < tile_out_h * tile_out_w; out_idx += blockDim.x) { int rel_out_y = out_idx / tile_out_w; int rel_out_x = out_idx - rel_out_y * tile_out_w; int out_x = rel_out_x + tile_out_x; int out_y = rel_out_y + tile_out_y; int mid_x = tile_mid_x + rel_out_x * down_x; int mid_y = tile_mid_y + rel_out_y * down_y; int in_x = floor_div(mid_x, up_x); int in_y = floor_div(mid_y, up_y); int rel_in_x = in_x - tile_in_x; int rel_in_y = in_y - tile_in_y; int kernel_x = (in_x + 1) * up_x - mid_x - 1; int kernel_y = (in_y + 1) * up_y - mid_y - 1; scalar_t v = 0.0; #pragma unroll for (int y = 0; y < kernel_h / up_y; y++) #pragma unroll for (int x = 0; x < kernel_w / up_x; x++) v += sx[rel_in_y + y][rel_in_x + x] * sk[kernel_y + y * up_y][kernel_x + x * up_x]; if (out_x < p.out_w & out_y < p.out_h) { out[((major_idx * p.out_h + out_y) * p.out_w + out_x) * p.minor_dim + minor_idx] = v; } } } } } torch::Tensor upfirdn2d_op(const torch::Tensor &input, const torch::Tensor &kernel, int up_x, int up_y, int down_x, int down_y, int pad_x0, int pad_x1, int pad_y0, int pad_y1) { int curDevice = -1; hipGetDevice(&curDevice); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice); UpFirDn2DKernelParams p; auto x = input.contiguous(); auto k = kernel.contiguous(); p.major_dim = x.size(0); p.in_h = x.size(1); p.in_w = x.size(2); p.minor_dim = x.size(3); p.kernel_h = k.size(0); p.kernel_w = k.size(1); p.up_x = up_x; p.up_y = up_y; p.down_x = down_x; p.down_y = down_y; p.pad_x0 = pad_x0; p.pad_x1 = pad_x1; p.pad_y0 = pad_y0; p.pad_y1 = pad_y1; p.out_h = (p.in_h * p.up_y + p.pad_y0 + p.pad_y1 - p.kernel_h + p.down_y) / p.down_y; p.out_w = (p.in_w * p.up_x + p.pad_x0 + p.pad_x1 - p.kernel_w + p.down_x) / p.down_x; auto out = at::empty({p.major_dim, p.out_h, p.out_w, p.minor_dim}, x.options()); int mode = -1; int tile_out_h = -1; int tile_out_w = -1; if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 4 && p.kernel_w <= 4) { mode = 1; tile_out_h = 16; tile_out_w = 64; } if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 3 && p.kernel_w <= 3) { mode = 2; tile_out_h = 16; tile_out_w = 64; } if 
(p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 4 && p.kernel_w <= 4) { mode = 3; tile_out_h = 16; tile_out_w = 64; } if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 2 && p.kernel_w <= 2) { mode = 4; tile_out_h = 16; tile_out_w = 64; } if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 && p.kernel_h <= 4 && p.kernel_w <= 4) { mode = 5; tile_out_h = 8; tile_out_w = 32; } if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 && p.kernel_h <= 2 && p.kernel_w <= 2) { mode = 6; tile_out_h = 8; tile_out_w = 32; } dim3 block_size; dim3 grid_size; if (tile_out_h > 0 && tile_out_w > 0) { p.loop_major = (p.major_dim - 1) / 16384 + 1; p.loop_x = 1; block_size = dim3(32 * 8, 1, 1); grid_size = dim3(((p.out_h - 1) / tile_out_h + 1) * p.minor_dim, (p.out_w - 1) / (p.loop_x * tile_out_w) + 1, (p.major_dim - 1) / p.loop_major + 1); } else { p.loop_major = (p.major_dim - 1) / 16384 + 1; p.loop_x = 4; block_size = dim3(4, 32, 1); grid_size = dim3((p.out_h * p.minor_dim - 1) / block_size.x + 1, (p.out_w - 1) / (p.loop_x * block_size.y) + 1, (p.major_dim - 1) / p.loop_major + 1); } AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&] { switch (mode) { case 1: hipLaunchKernelGGL(( upfirdn2d_kernel<scalar_t, 1, 1, 1, 1, 4, 4, 16, 64>) , dim3(grid_size), dim3(block_size), 0, stream, out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p); break; case 2: hipLaunchKernelGGL(( upfirdn2d_kernel<scalar_t, 1, 1, 1, 1, 3, 3, 16, 64>) , dim3(grid_size), dim3(block_size), 0, stream, out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p); break; case 3: hipLaunchKernelGGL(( upfirdn2d_kernel<scalar_t, 2, 2, 1, 1, 4, 4, 16, 64>) , dim3(grid_size), dim3(block_size), 0, stream, out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p); break; case 4: hipLaunchKernelGGL(( upfirdn2d_kernel<scalar_t, 2, 2, 1, 1, 2, 2, 16, 64>) , dim3(grid_size), dim3(block_size), 0, stream, out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p); break; case 5: hipLaunchKernelGGL(( upfirdn2d_kernel<scalar_t, 1, 1, 2, 2, 4, 4, 8, 32>) , dim3(grid_size), dim3(block_size), 0, stream, out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p); break; case 6: hipLaunchKernelGGL(( upfirdn2d_kernel<scalar_t, 1, 1, 2, 2, 4, 4, 8, 32>) , dim3(grid_size), dim3(block_size), 0, stream, out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p); break; default: hipLaunchKernelGGL(( upfirdn2d_kernel_large<scalar_t>), dim3(grid_size), dim3(block_size), 0, stream, out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p); } }); return out; }
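A minimal host-side check of the output-size arithmetic used in upfirdn2d_op above; the helper name upfirdn2d_out_size and the sample sizes are illustrative only, not part of the original source.

#include <cstdio>

// Mirrors p.out_h / p.out_w above: upsample by `up`, pad by pad0/pad1,
// convolve with a `kernel`-tap FIR filter, then downsample by `down`.
static int upfirdn2d_out_size(int in, int up, int down, int pad0, int pad1, int kernel) {
  return (in * up + pad0 + pad1 - kernel + down) / down;
}

int main() {
  printf("%d\n", upfirdn2d_out_size(64, 2, 1, 1, 1, 4)); // 2x upsample, 4 taps, pad 1: 64 -> 127
  printf("%d\n", upfirdn2d_out_size(64, 1, 2, 1, 1, 4)); // 2x downsample, 4 taps, pad 1: 64 -> 32
  return 0;
}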
d845a565874c21cc1ad9eb4e648ee89caab2c585.cu
// from // https://github.com/rosinality/stylegan2-pytorch/blob/master/op/upfirdn2d_kernel.cu // Copyright (c) 2019, NVIDIA Corporation. All rights reserved. // // This work is made available under the Nvidia Source Code License-NC. // To view a copy of this license, visit // https://nvlabs.github.io/stylegan2/license.html #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/cuda/CUDAContext.h> #include <cuda.h> #include <cuda_runtime.h> #include <torch/types.h> #include <ATen/cuda/CUDAApplyUtils.cuh> static __host__ __device__ __forceinline__ int floor_div(int a, int b) { int c = a / b; if (c * b > a) { c--; } return c; } struct UpFirDn2DKernelParams { int up_x; int up_y; int down_x; int down_y; int pad_x0; int pad_x1; int pad_y0; int pad_y1; int major_dim; int in_h; int in_w; int minor_dim; int kernel_h; int kernel_w; int out_h; int out_w; int loop_major; int loop_x; }; template <typename scalar_t> __global__ void upfirdn2d_kernel_large(scalar_t *out, const scalar_t *input, const scalar_t *kernel, const UpFirDn2DKernelParams p) { int minor_idx = blockIdx.x * blockDim.x + threadIdx.x; int out_y = minor_idx / p.minor_dim; minor_idx -= out_y * p.minor_dim; int out_x_base = blockIdx.y * p.loop_x * blockDim.y + threadIdx.y; int major_idx_base = blockIdx.z * p.loop_major; if (out_x_base >= p.out_w || out_y >= p.out_h || major_idx_base >= p.major_dim) { return; } int mid_y = out_y * p.down_y + p.up_y - 1 - p.pad_y0; int in_y = min(max(floor_div(mid_y, p.up_y), 0), p.in_h); int h = min(max(floor_div(mid_y + p.kernel_h, p.up_y), 0), p.in_h) - in_y; int kernel_y = mid_y + p.kernel_h - (in_y + 1) * p.up_y; for (int loop_major = 0, major_idx = major_idx_base; loop_major < p.loop_major && major_idx < p.major_dim; loop_major++, major_idx++) { for (int loop_x = 0, out_x = out_x_base; loop_x < p.loop_x && out_x < p.out_w; loop_x++, out_x += blockDim.y) { int mid_x = out_x * p.down_x + p.up_x - 1 - p.pad_x0; int in_x = min(max(floor_div(mid_x, p.up_x), 0), p.in_w); int w = min(max(floor_div(mid_x + p.kernel_w, p.up_x), 0), p.in_w) - in_x; int kernel_x = mid_x + p.kernel_w - (in_x + 1) * p.up_x; const scalar_t *x_p = &input[((major_idx * p.in_h + in_y) * p.in_w + in_x) * p.minor_dim + minor_idx]; const scalar_t *k_p = &kernel[kernel_y * p.kernel_w + kernel_x]; int x_px = p.minor_dim; int k_px = -p.up_x; int x_py = p.in_w * p.minor_dim; int k_py = -p.up_y * p.kernel_w; scalar_t v = 0.0f; for (int y = 0; y < h; y++) { for (int x = 0; x < w; x++) { v += static_cast<scalar_t>(*x_p) * static_cast<scalar_t>(*k_p); x_p += x_px; k_p += k_px; } x_p += x_py - w * x_px; k_p += k_py - w * k_px; } out[((major_idx * p.out_h + out_y) * p.out_w + out_x) * p.minor_dim + minor_idx] = v; } } } template <typename scalar_t, int up_x, int up_y, int down_x, int down_y, int kernel_h, int kernel_w, int tile_out_h, int tile_out_w> __global__ void upfirdn2d_kernel(scalar_t *out, const scalar_t *input, const scalar_t *kernel, const UpFirDn2DKernelParams p) { const int tile_in_h = ((tile_out_h - 1) * down_y + kernel_h - 1) / up_y + 1; const int tile_in_w = ((tile_out_w - 1) * down_x + kernel_w - 1) / up_x + 1; __shared__ volatile float sk[kernel_h][kernel_w]; __shared__ volatile float sx[tile_in_h][tile_in_w]; int minor_idx = blockIdx.x; int tile_out_y = minor_idx / p.minor_dim; minor_idx -= tile_out_y * p.minor_dim; tile_out_y *= tile_out_h; int tile_out_x_base = blockIdx.y * p.loop_x * tile_out_w; int major_idx_base = blockIdx.z * p.loop_major; if (tile_out_x_base >= p.out_w | tile_out_y >= p.out_h | 
major_idx_base >= p.major_dim) { return; } for (int tap_idx = threadIdx.x; tap_idx < kernel_h * kernel_w; tap_idx += blockDim.x) { int ky = tap_idx / kernel_w; int kx = tap_idx - ky * kernel_w; scalar_t v = 0.0; if (kx < p.kernel_w & ky < p.kernel_h) { v = kernel[(p.kernel_h - 1 - ky) * p.kernel_w + (p.kernel_w - 1 - kx)]; } sk[ky][kx] = v; } for (int loop_major = 0, major_idx = major_idx_base; loop_major < p.loop_major & major_idx < p.major_dim; loop_major++, major_idx++) { for (int loop_x = 0, tile_out_x = tile_out_x_base; loop_x < p.loop_x & tile_out_x < p.out_w; loop_x++, tile_out_x += tile_out_w) { int tile_mid_x = tile_out_x * down_x + up_x - 1 - p.pad_x0; int tile_mid_y = tile_out_y * down_y + up_y - 1 - p.pad_y0; int tile_in_x = floor_div(tile_mid_x, up_x); int tile_in_y = floor_div(tile_mid_y, up_y); __syncthreads(); for (int in_idx = threadIdx.x; in_idx < tile_in_h * tile_in_w; in_idx += blockDim.x) { int rel_in_y = in_idx / tile_in_w; int rel_in_x = in_idx - rel_in_y * tile_in_w; int in_x = rel_in_x + tile_in_x; int in_y = rel_in_y + tile_in_y; scalar_t v = 0.0; if (in_x >= 0 & in_y >= 0 & in_x < p.in_w & in_y < p.in_h) { v = input[((major_idx * p.in_h + in_y) * p.in_w + in_x) * p.minor_dim + minor_idx]; } sx[rel_in_y][rel_in_x] = v; } __syncthreads(); for (int out_idx = threadIdx.x; out_idx < tile_out_h * tile_out_w; out_idx += blockDim.x) { int rel_out_y = out_idx / tile_out_w; int rel_out_x = out_idx - rel_out_y * tile_out_w; int out_x = rel_out_x + tile_out_x; int out_y = rel_out_y + tile_out_y; int mid_x = tile_mid_x + rel_out_x * down_x; int mid_y = tile_mid_y + rel_out_y * down_y; int in_x = floor_div(mid_x, up_x); int in_y = floor_div(mid_y, up_y); int rel_in_x = in_x - tile_in_x; int rel_in_y = in_y - tile_in_y; int kernel_x = (in_x + 1) * up_x - mid_x - 1; int kernel_y = (in_y + 1) * up_y - mid_y - 1; scalar_t v = 0.0; #pragma unroll for (int y = 0; y < kernel_h / up_y; y++) #pragma unroll for (int x = 0; x < kernel_w / up_x; x++) v += sx[rel_in_y + y][rel_in_x + x] * sk[kernel_y + y * up_y][kernel_x + x * up_x]; if (out_x < p.out_w & out_y < p.out_h) { out[((major_idx * p.out_h + out_y) * p.out_w + out_x) * p.minor_dim + minor_idx] = v; } } } } } torch::Tensor upfirdn2d_op(const torch::Tensor &input, const torch::Tensor &kernel, int up_x, int up_y, int down_x, int down_y, int pad_x0, int pad_x1, int pad_y0, int pad_y1) { int curDevice = -1; cudaGetDevice(&curDevice); cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice); UpFirDn2DKernelParams p; auto x = input.contiguous(); auto k = kernel.contiguous(); p.major_dim = x.size(0); p.in_h = x.size(1); p.in_w = x.size(2); p.minor_dim = x.size(3); p.kernel_h = k.size(0); p.kernel_w = k.size(1); p.up_x = up_x; p.up_y = up_y; p.down_x = down_x; p.down_y = down_y; p.pad_x0 = pad_x0; p.pad_x1 = pad_x1; p.pad_y0 = pad_y0; p.pad_y1 = pad_y1; p.out_h = (p.in_h * p.up_y + p.pad_y0 + p.pad_y1 - p.kernel_h + p.down_y) / p.down_y; p.out_w = (p.in_w * p.up_x + p.pad_x0 + p.pad_x1 - p.kernel_w + p.down_x) / p.down_x; auto out = at::empty({p.major_dim, p.out_h, p.out_w, p.minor_dim}, x.options()); int mode = -1; int tile_out_h = -1; int tile_out_w = -1; if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 4 && p.kernel_w <= 4) { mode = 1; tile_out_h = 16; tile_out_w = 64; } if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 3 && p.kernel_w <= 3) { mode = 2; tile_out_h = 16; tile_out_w = 64; } if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 && 
p.kernel_h <= 4 && p.kernel_w <= 4) { mode = 3; tile_out_h = 16; tile_out_w = 64; } if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 2 && p.kernel_w <= 2) { mode = 4; tile_out_h = 16; tile_out_w = 64; } if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 && p.kernel_h <= 4 && p.kernel_w <= 4) { mode = 5; tile_out_h = 8; tile_out_w = 32; } if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 && p.kernel_h <= 2 && p.kernel_w <= 2) { mode = 6; tile_out_h = 8; tile_out_w = 32; } dim3 block_size; dim3 grid_size; if (tile_out_h > 0 && tile_out_w > 0) { p.loop_major = (p.major_dim - 1) / 16384 + 1; p.loop_x = 1; block_size = dim3(32 * 8, 1, 1); grid_size = dim3(((p.out_h - 1) / tile_out_h + 1) * p.minor_dim, (p.out_w - 1) / (p.loop_x * tile_out_w) + 1, (p.major_dim - 1) / p.loop_major + 1); } else { p.loop_major = (p.major_dim - 1) / 16384 + 1; p.loop_x = 4; block_size = dim3(4, 32, 1); grid_size = dim3((p.out_h * p.minor_dim - 1) / block_size.x + 1, (p.out_w - 1) / (p.loop_x * block_size.y) + 1, (p.major_dim - 1) / p.loop_major + 1); } AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&] { switch (mode) { case 1: upfirdn2d_kernel<scalar_t, 1, 1, 1, 1, 4, 4, 16, 64> <<<grid_size, block_size, 0, stream>>>(out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p); break; case 2: upfirdn2d_kernel<scalar_t, 1, 1, 1, 1, 3, 3, 16, 64> <<<grid_size, block_size, 0, stream>>>(out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p); break; case 3: upfirdn2d_kernel<scalar_t, 2, 2, 1, 1, 4, 4, 16, 64> <<<grid_size, block_size, 0, stream>>>(out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p); break; case 4: upfirdn2d_kernel<scalar_t, 2, 2, 1, 1, 2, 2, 16, 64> <<<grid_size, block_size, 0, stream>>>(out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p); break; case 5: upfirdn2d_kernel<scalar_t, 1, 1, 2, 2, 4, 4, 8, 32> <<<grid_size, block_size, 0, stream>>>(out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p); break; case 6: upfirdn2d_kernel<scalar_t, 1, 1, 2, 2, 4, 4, 8, 32> <<<grid_size, block_size, 0, stream>>>(out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p); break; default: upfirdn2d_kernel_large<scalar_t><<<grid_size, block_size, 0, stream>>>( out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p); } }); return out; }
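floor_div above rounds toward negative infinity, which differs from plain C++ integer division (truncation toward zero) whenever the operands have opposite signs; a small standalone check, purely illustrative:

#include <cassert>

// Same definition as in the kernels above: integer division rounded toward -infinity.
static int floor_div(int a, int b) {
  int c = a / b;
  if (c * b > a) {
    c--;
  }
  return c;
}

int main() {
  assert(floor_div(7, 2) == 3);   // matches truncating division for positive operands
  assert(floor_div(-7, 2) == -4); // truncating division would give -3
  assert(-7 / 2 == -3);           // built-in division rounds toward zero
  return 0;
}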
d7e6e980857e6fef41cd25cdc697d77b3612477b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Test.h" void Manage_Memory(int phase, int tid, float **h_u, float **h_ul, float **d_u, float **d_un){ hipError_t Error; size_t global= (int)NX*sizeof(float); size_t local = (int)SNX*sizeof(float); if (phase==0) { // Allocate domain variable on host (master thread) *h_u = (float*)malloc(global); } if (phase==1) { // first allocate the local domains! *h_ul = (float*)malloc(local); // Allocate subdomain variables on host (All Threads) Error = hipSetDevice(tid); if (DEBUG) printf("CUDA error (hipSetDevice) in thread %d = %s\n",tid,hipGetErrorString(Error)); Error = hipMalloc((void**)d_u,local); if (DEBUG) printf("CUDA error (hipMalloc d_u) in thread %d = %s\n",tid,hipGetErrorString(Error)); Error = hipMalloc((void**)d_un,local); if (DEBUG) printf("CUDA error (hipMalloc d_un) in thread %d = %s\n",tid,hipGetErrorString(Error)); Error = hipDeviceSynchronize(); if (DEBUG) printf("CUDA error (Mem. Management Synchronize) in thread %d = %s\n", tid, hipGetErrorString(Error)); } if (phase==2) { // Free the local domain in host threads free(*h_ul); // Free the local domain in devices Error = hipFree(*d_u); if (DEBUG) printf("CUDA error (hipFree d_u) in thread %d = %s\n",tid,hipGetErrorString(Error)); Error = hipFree(*d_un); if (DEBUG) printf("CUDA error (hipFree d_un) in thread %d = %s\n",tid,hipGetErrorString(Error)); } if (phase==3) { // Free the whole domain variables (master thread) free(*h_u); } } void Manage_Comms(int phase, int tid, float **h_ul, float **d_u){ hipError_t Error; size_t local = (int)SNX*sizeof(float); if (phase==1) { // Communicate data from thread to host local domain if (DEBUG) printf("::: Perform GPU-CPU comms (phase %d, thread %d) :::\n",phase,tid); Error = hipMemcpy(*h_ul,*d_u,local,hipMemcpyDeviceToHost); if (DEBUG) printf("CUDA error (Memcpy d -> h) = %s \n",hipGetErrorString(Error)); } if (phase==2) { // Communicate data from host local domain to thread if (DEBUG) printf("::: Perform GPU-CPU comms (phase %d, thread %d) :::\n",phase,tid); Error = hipMemcpy(*d_u,*h_ul,local,hipMemcpyHostToDevice); if (DEBUG) printf("CUDA error (Memcpy h -> d) = %s \n",hipGetErrorString(Error)); } } __global__ void Set_GPU_IC(int tid, float *u0){ // Set domain initial condition in local threads int i = threadIdx.x + blockDim.x * blockIdx.x; if (i < SNX) { if (tid == 0) { u0[i] = 0.15; if (i<2) printf("IC data %d, %g\n",i,u0[i]); } else if (tid == 1) { u0[i] = 0.30; if (i<2) printf("IC data %d, %g\n",i,u0[i]); } else if (tid == 2) { u0[i] = 0.45; if (i<2) printf("IC data %d, %g\n",i,u0[i]); } else if (tid == 3) { u0[i] = 0.60; if (i<2) printf("IC data %d, %g\n",i,u0[i]); } else if (tid == 4) { u0[i] = 0.75; if (i<2) printf("IC data %d, %g\n",i,u0[i]); } else if (tid == 5) { u0[i] = 0.90; if (i<2) printf("IC data %d, %g\n",i,u0[i]); } } } void Call_GPU_Init(int tid, float **u0){ // Load the initial condition int threads = 128; int blocks = (SNX + threads - 1)/threads; hipLaunchKernelGGL(( Set_GPU_IC), dim3(blocks),dim3(threads), 0, 0, tid,*u0); if (DEBUG) printf("CUDA error (Set_GPU_IC) in thread %d = %s\n",tid,hipGetErrorString(hipPeekAtLastError())); } void Update_Domain(int tid, float *u, float *ul){ // Explicitly copy data arrays for (int i = 0; i < SNX; i++) { if (i+tid*SNX < NX) { u[i+tid*SNX] = ul[i]; } else { u[i+tid*SNX] = 1.0; } } } void Call_Update(int tid, float **u, float **ul){ // produce explicitly: u=un; Update_Domain(tid,*u,*ul); } void Save_Results(float *u){ // print result to 
txt file FILE *pFile = fopen("result.txt", "w"); if (pFile != NULL) { for (int i = 0; i < NX; i++) { fprintf(pFile, "%d\t %g\n",i,u[i]); } fclose(pFile); } else { printf("Unable to save to file\n"); } }
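The file above only defines the per-phase helpers; a hypothetical driver along these lines could tie them together, assuming Test.h declares the functions and defines NX, SNX and DEBUG, and that one OpenMP thread manages one GPU (the thread count of 2 is arbitrary). This is a sketch of one possible call order, not the original test harness.

#include <omp.h>
#include "Test.h"

int main() {
  float *h_u = NULL;
  Manage_Memory(0, 0, &h_u, NULL, NULL, NULL);        // phase 0: allocate the global host array
  #pragma omp parallel num_threads(2)
  {
    int tid = omp_get_thread_num();
    float *h_ul = NULL, *d_u = NULL, *d_un = NULL;
    Manage_Memory(1, tid, &h_u, &h_ul, &d_u, &d_un);  // phase 1: per-GPU buffers on device tid
    Call_GPU_Init(tid, &d_u);                         // set the subdomain initial condition
    Manage_Comms(1, tid, &h_ul, &d_u);                // copy device subdomain back to the host
    Call_Update(tid, &h_u, &h_ul);                    // stitch the subdomain into h_u
    Manage_Memory(2, tid, &h_u, &h_ul, &d_u, &d_un);  // phase 2: free per-GPU buffers
  }
  Save_Results(h_u);                                  // write result.txt
  Manage_Memory(3, 0, &h_u, NULL, NULL, NULL);        // phase 3: free the global host array
  return 0;
}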
d7e6e980857e6fef41cd25cdc697d77b3612477b.cu
#include "Test.h" void Manage_Memory(int phase, int tid, float **h_u, float **h_ul, float **d_u, float **d_un){ cudaError_t Error; size_t global= (int)NX*sizeof(float); size_t local = (int)SNX*sizeof(float); if (phase==0) { // Allocate domain variable on host (master thread) *h_u = (float*)malloc(global); } if (phase==1) { // first allocate the local domains! *h_ul = (float*)malloc(local); // Allocate subdomain variables on host (All Threads) Error = cudaSetDevice(tid); if (DEBUG) printf("CUDA error (cudaSetDevice) in thread %d = %s\n",tid,cudaGetErrorString(Error)); Error = cudaMalloc((void**)d_u,local); if (DEBUG) printf("CUDA error (cudaMalloc d_u) in thread %d = %s\n",tid,cudaGetErrorString(Error)); Error = cudaMalloc((void**)d_un,local); if (DEBUG) printf("CUDA error (cudaMalloc d_un) in thread %d = %s\n",tid,cudaGetErrorString(Error)); Error = cudaDeviceSynchronize(); if (DEBUG) printf("CUDA error (Mem. Management Synchronize) in thread %d = %s\n", tid, cudaGetErrorString(Error)); } if (phase==2) { // Free the local domain in host threads free(*h_ul); // Free the local domain in devices Error = cudaFree(*d_u); if (DEBUG) printf("CUDA error (cudaFree d_u) in thread %d = %s\n",tid,cudaGetErrorString(Error)); Error = cudaFree(*d_un); if (DEBUG) printf("CUDA error (cudaFree d_un) in thread %d = %s\n",tid,cudaGetErrorString(Error)); } if (phase==3) { // Free the whole domain variables (master thread) free(*h_u); } } void Manage_Comms(int phase, int tid, float **h_ul, float **d_u){ cudaError_t Error; size_t local = (int)SNX*sizeof(float); if (phase==1) { // Communicate data from thread to host local domain if (DEBUG) printf("::: Perform GPU-CPU comms (phase %d, thread %d) :::\n",phase,tid); Error = cudaMemcpy(*h_ul,*d_u,local,cudaMemcpyDeviceToHost); if (DEBUG) printf("CUDA error (Memcpy d -> h) = %s \n",cudaGetErrorString(Error)); } if (phase==2) { // Communicate data from host local domain to thread if (DEBUG) printf("::: Perform GPU-CPU comms (phase %d, thread %d) :::\n",phase,tid); Error = cudaMemcpy(*d_u,*h_ul,local,cudaMemcpyHostToDevice); if (DEBUG) printf("CUDA error (Memcpy h -> d) = %s \n",cudaGetErrorString(Error)); } } __global__ void Set_GPU_IC(int tid, float *u0){ // Set domain initial condition in local threads int i = threadIdx.x + blockDim.x * blockIdx.x; if (i < SNX) { if (tid == 0) { u0[i] = 0.15; if (i<2) printf("IC data %d, %g\n",i,u0[i]); } else if (tid == 1) { u0[i] = 0.30; if (i<2) printf("IC data %d, %g\n",i,u0[i]); } else if (tid == 2) { u0[i] = 0.45; if (i<2) printf("IC data %d, %g\n",i,u0[i]); } else if (tid == 3) { u0[i] = 0.60; if (i<2) printf("IC data %d, %g\n",i,u0[i]); } else if (tid == 4) { u0[i] = 0.75; if (i<2) printf("IC data %d, %g\n",i,u0[i]); } else if (tid == 5) { u0[i] = 0.90; if (i<2) printf("IC data %d, %g\n",i,u0[i]); } } } void Call_GPU_Init(int tid, float **u0){ // Load the initial condition int threads = 128; int blocks = (SNX + threads - 1)/threads; Set_GPU_IC<<<blocks,threads>>>(tid,*u0); if (DEBUG) printf("CUDA error (Set_GPU_IC) in thread %d = %s\n",tid,cudaGetErrorString(cudaPeekAtLastError())); } void Update_Domain(int tid, float *u, float *ul){ // Explicitly copy data arrays for (int i = 0; i < SNX; i++) { if (i+tid*SNX < NX) { u[i+tid*SNX] = ul[i]; } else { u[i+tid*SNX] = 1.0; } } } void Call_Update(int tid, float **u, float **ul){ // produce explicitly: u=un; Update_Domain(tid,*u,*ul); } void Save_Results(float *u){ // print result to txt file FILE *pFile = fopen("result.txt", "w"); if (pFile != NULL) { for (int i = 0; i < NX; i++) { 
fprintf(pFile, "%d\t %g\n",i,u[i]); } fclose(pFile); } else { printf("Unable to save to file\n"); } }
b7101a38b698c623a9f2465c77e0949868534107.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2014 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include <math.h> #ifdef DEBUG #define CUDA_CALL(F) if( (F) != hipSuccess ) \ {printf("Error %s at %s:%d\n", hipGetErrorString(hipGetLastError()), \ __FILE__,__LINE__); exit(-1);} #define CUDA_CHECK() if( (hipPeekAtLastError()) != hipSuccess ) \ {printf("Error %s at %s:%d\n", hipGetErrorString(hipGetLastError()), \ __FILE__,__LINE__-1); exit(-1);} #else #define CUDA_CALL(F) (F) #define CUDA_CHECK() #endif /* definitions of threadblock size in X and Y directions */ #define THREADS_PER_BLOCK_X 32 #define THREADS_PER_BLOCK_Y 4 #define ELEMENTS_PER_THREAD 8 /* definition of matrix linear dimension */ #define SIZE 4096 /* macro to index a 1D memory array with 2D indices in column-major order */ #define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) ) /* CUDA kernel for shared memory matrix transpose */ __global__ void smem_cuda_transpose_opt( const int m, const double *a, double *c ) { /* declare a shared memory array */ __shared__ double smemArray[THREADS_PER_BLOCK_X][THREADS_PER_BLOCK_Y*ELEMENTS_PER_THREAD+1]; { int aoff = INDX( blockDim.x * blockIdx.x + threadIdx.x, blockDim.x * blockIdx.y + threadIdx.y, m ); for( int i = 0; i < ELEMENTS_PER_THREAD; i++ ) smemArray[threadIdx.x][threadIdx.y + i*blockDim.y] = a[ aoff + i*blockDim.y*m ]; } /* synchronize */ __syncthreads(); /* write the result */ { int coff = INDX( blockDim.x * blockIdx.y + threadIdx.x, blockDim.x * blockIdx.x + threadIdx.y, m ); #pragma unroll for( int i = 0; i < ELEMENTS_PER_THREAD; i++ ) c[coff + i*blockDim.y*m] = smemArray[threadIdx.y + i*blockDim.y][threadIdx.x]; } return; } /* end naive_cuda_transpose */ void host_transpose( const int m, double const * const a, double *c ) { /* * naive matrix transpose goes here. 
*/ for( int j = 0; j < m; j++ ) { for( int i = 0; i < m; i++ ) { c[INDX(i,j,m)] = a[INDX(j,i,m)]; } /* end for i */ } /* end for j */ } /* end host_dgemm */ int main( int argc, char *argv[] ) { int size = SIZE; fprintf(stdout, "Matrix size is %d\n",size); /* declaring pointers for array */ double *h_a, *h_c; double *d_a, *d_c; size_t numbytes = (size_t) size * (size_t) size * sizeof( double ); /* allocating host memory */ h_a = (double *) malloc( numbytes ); if( h_a == NULL ) { fprintf(stderr,"Error in host malloc h_a\n"); return 911; } h_c = (double *) malloc( numbytes ); if( h_c == NULL ) { fprintf(stderr,"Error in host malloc h_c\n"); return 911; } /* allocating device memory */ CUDA_CALL( hipMalloc( (void**) &d_a, numbytes ) ); CUDA_CALL( hipMalloc( (void**) &d_c, numbytes ) ); /* set result matrices to zero */ memset( h_c, 0, numbytes ); CUDA_CALL( hipMemset( d_c, 0, numbytes ) ); fprintf( stdout, "Total memory required per matrix is %lf MB\n", (double) numbytes / 1000000.0 ); /* initialize input matrix with random value */ for( int i = 0; i < size * size; i++ ) { h_a[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 ); // h_a[i] = double( i ); } /* end for */ /* copy input matrix from host to device */ CUDA_CALL( hipMemcpy( d_a, h_a, numbytes, hipMemcpyHostToDevice ) ); /* create and start timer */ hipEvent_t start, stop; CUDA_CALL( hipEventCreate( &start ) ); CUDA_CALL( hipEventCreate( &stop ) ); CUDA_CALL( hipEventRecord( start, 0 ) ); /* call naive cpu transpose function */ host_transpose( size, h_a, h_c ); /* stop CPU timer */ CUDA_CALL( hipEventRecord( stop, 0 ) ); CUDA_CALL( hipEventSynchronize( stop ) ); float elapsedTime; CUDA_CALL( hipEventElapsedTime( &elapsedTime, start, stop ) ); /* print CPU timing information */ fprintf(stdout, "Total time CPU is %f sec\n", elapsedTime / 1000.0f ); fprintf(stdout, "Performance is %f GB/s\n", 8.0 * 2.0 * (double) size * (double) size / ( (double) elapsedTime / 1000.0 ) * 1.e-9 ); /* setup threadblock size and grid sizes */ dim3 threads( THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y, 1 ); dim3 blocks( ( size / THREADS_PER_BLOCK_X ) , ( size / ( THREADS_PER_BLOCK_Y * ELEMENTS_PER_THREAD ) ) , 1 ); CUDA_CALL( hipDeviceSetSharedMemConfig( hipSharedMemBankSizeEightByte ) ); /* start timers */ CUDA_CALL( hipEventRecord( start, 0 ) ); /* call naive GPU transpose kernel */ hipLaunchKernelGGL(( smem_cuda_transpose_opt), dim3(blocks), dim3(threads) , 0, 0, size, d_a, d_c ); CUDA_CHECK(); CUDA_CALL( hipDeviceSynchronize() ); /* stop the timers */ CUDA_CALL( hipEventRecord( stop, 0 ) ); CUDA_CALL( hipEventSynchronize( stop ) ); CUDA_CALL( hipEventElapsedTime( &elapsedTime, start, stop ) ); /* print GPU timing information */ fprintf(stdout, "Total time GPU is %f sec\n", elapsedTime / 1000.0f ); fprintf(stdout, "Performance is %f GB/s\n", 8.0 * 2.0 * (double) size * (double) size / ( (double) elapsedTime / 1000.0 ) * 1.e-9 ); /* copy data from device to host */ CUDA_CALL( hipMemset( d_a, 0, numbytes ) ); CUDA_CALL( hipMemcpy( h_a, d_c, numbytes, hipMemcpyDeviceToHost ) ); /* compare GPU to CPU for correctness */ for( int j = 0; j < size; j++ ) { for( int i = 0; i < size; i++ ) { if( h_c[INDX(i,j,size)] != h_a[INDX(i,j,size)] ) { printf("Error in element %d,%d\n", i,j ); printf("Host %f, device %f\n",h_c[INDX(i,j,size)], h_a[INDX(i,j,size)]); } } /* end for i */ } /* end for j */ /* free the memory */ free( h_a ); free( h_c ); CUDA_CALL( hipFree( d_a ) ); CUDA_CALL( hipFree( d_c ) ); CUDA_CALL( hipDeviceReset() ); return 0; }
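The GB/s figures printed above count one 8-byte read and one 8-byte write per matrix element; the same arithmetic as a standalone helper, with an illustrative name:

#include <cstdio>

// Effective bandwidth in GB/s for transposing a size x size matrix of doubles:
// every element is read once and written once, i.e. 2 * 8 bytes moved per element,
// matching the "8.0 * 2.0 * size * size / time" expression in the file above.
static double transpose_bandwidth_gbs(int size, double elapsed_ms) {
  double bytes_moved = 2.0 * 8.0 * (double)size * (double)size;
  return bytes_moved / (elapsed_ms / 1000.0) * 1.e-9;
}

int main() {
  printf("%f GB/s\n", transpose_bandwidth_gbs(4096, 2.0)); // 4096^2 doubles in 2 ms: ~134 GB/s
  return 0;
}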
b7101a38b698c623a9f2465c77e0949868534107.cu
/* * Copyright 2014 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include <math.h> #ifdef DEBUG #define CUDA_CALL(F) if( (F) != cudaSuccess ) \ {printf("Error %s at %s:%d\n", cudaGetErrorString(cudaGetLastError()), \ __FILE__,__LINE__); exit(-1);} #define CUDA_CHECK() if( (cudaPeekAtLastError()) != cudaSuccess ) \ {printf("Error %s at %s:%d\n", cudaGetErrorString(cudaGetLastError()), \ __FILE__,__LINE__-1); exit(-1);} #else #define CUDA_CALL(F) (F) #define CUDA_CHECK() #endif /* definitions of threadblock size in X and Y directions */ #define THREADS_PER_BLOCK_X 32 #define THREADS_PER_BLOCK_Y 4 #define ELEMENTS_PER_THREAD 8 /* definition of matrix linear dimension */ #define SIZE 4096 /* macro to index a 1D memory array with 2D indices in column-major order */ #define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) ) /* CUDA kernel for shared memory matrix transpose */ __global__ void smem_cuda_transpose_opt( const int m, const double *a, double *c ) { /* declare a shared memory array */ __shared__ double smemArray[THREADS_PER_BLOCK_X][THREADS_PER_BLOCK_Y*ELEMENTS_PER_THREAD+1]; { int aoff = INDX( blockDim.x * blockIdx.x + threadIdx.x, blockDim.x * blockIdx.y + threadIdx.y, m ); for( int i = 0; i < ELEMENTS_PER_THREAD; i++ ) smemArray[threadIdx.x][threadIdx.y + i*blockDim.y] = a[ aoff + i*blockDim.y*m ]; } /* synchronize */ __syncthreads(); /* write the result */ { int coff = INDX( blockDim.x * blockIdx.y + threadIdx.x, blockDim.x * blockIdx.x + threadIdx.y, m ); #pragma unroll for( int i = 0; i < ELEMENTS_PER_THREAD; i++ ) c[coff + i*blockDim.y*m] = smemArray[threadIdx.y + i*blockDim.y][threadIdx.x]; } return; } /* end naive_cuda_transpose */ void host_transpose( const int m, double const * const a, double *c ) { /* * naive matrix transpose goes here. 
*/ for( int j = 0; j < m; j++ ) { for( int i = 0; i < m; i++ ) { c[INDX(i,j,m)] = a[INDX(j,i,m)]; } /* end for i */ } /* end for j */ } /* end host_dgemm */ int main( int argc, char *argv[] ) { int size = SIZE; fprintf(stdout, "Matrix size is %d\n",size); /* declaring pointers for array */ double *h_a, *h_c; double *d_a, *d_c; size_t numbytes = (size_t) size * (size_t) size * sizeof( double ); /* allocating host memory */ h_a = (double *) malloc( numbytes ); if( h_a == NULL ) { fprintf(stderr,"Error in host malloc h_a\n"); return 911; } h_c = (double *) malloc( numbytes ); if( h_c == NULL ) { fprintf(stderr,"Error in host malloc h_c\n"); return 911; } /* allocating device memory */ CUDA_CALL( cudaMalloc( (void**) &d_a, numbytes ) ); CUDA_CALL( cudaMalloc( (void**) &d_c, numbytes ) ); /* set result matrices to zero */ memset( h_c, 0, numbytes ); CUDA_CALL( cudaMemset( d_c, 0, numbytes ) ); fprintf( stdout, "Total memory required per matrix is %lf MB\n", (double) numbytes / 1000000.0 ); /* initialize input matrix with random value */ for( int i = 0; i < size * size; i++ ) { h_a[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 ); // h_a[i] = double( i ); } /* end for */ /* copy input matrix from host to device */ CUDA_CALL( cudaMemcpy( d_a, h_a, numbytes, cudaMemcpyHostToDevice ) ); /* create and start timer */ cudaEvent_t start, stop; CUDA_CALL( cudaEventCreate( &start ) ); CUDA_CALL( cudaEventCreate( &stop ) ); CUDA_CALL( cudaEventRecord( start, 0 ) ); /* call naive cpu transpose function */ host_transpose( size, h_a, h_c ); /* stop CPU timer */ CUDA_CALL( cudaEventRecord( stop, 0 ) ); CUDA_CALL( cudaEventSynchronize( stop ) ); float elapsedTime; CUDA_CALL( cudaEventElapsedTime( &elapsedTime, start, stop ) ); /* print CPU timing information */ fprintf(stdout, "Total time CPU is %f sec\n", elapsedTime / 1000.0f ); fprintf(stdout, "Performance is %f GB/s\n", 8.0 * 2.0 * (double) size * (double) size / ( (double) elapsedTime / 1000.0 ) * 1.e-9 ); /* setup threadblock size and grid sizes */ dim3 threads( THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y, 1 ); dim3 blocks( ( size / THREADS_PER_BLOCK_X ) , ( size / ( THREADS_PER_BLOCK_Y * ELEMENTS_PER_THREAD ) ) , 1 ); CUDA_CALL( cudaDeviceSetSharedMemConfig( cudaSharedMemBankSizeEightByte ) ); /* start timers */ CUDA_CALL( cudaEventRecord( start, 0 ) ); /* call naive GPU transpose kernel */ smem_cuda_transpose_opt<<< blocks, threads >>>( size, d_a, d_c ); CUDA_CHECK(); CUDA_CALL( cudaDeviceSynchronize() ); /* stop the timers */ CUDA_CALL( cudaEventRecord( stop, 0 ) ); CUDA_CALL( cudaEventSynchronize( stop ) ); CUDA_CALL( cudaEventElapsedTime( &elapsedTime, start, stop ) ); /* print GPU timing information */ fprintf(stdout, "Total time GPU is %f sec\n", elapsedTime / 1000.0f ); fprintf(stdout, "Performance is %f GB/s\n", 8.0 * 2.0 * (double) size * (double) size / ( (double) elapsedTime / 1000.0 ) * 1.e-9 ); /* copy data from device to host */ CUDA_CALL( cudaMemset( d_a, 0, numbytes ) ); CUDA_CALL( cudaMemcpy( h_a, d_c, numbytes, cudaMemcpyDeviceToHost ) ); /* compare GPU to CPU for correctness */ for( int j = 0; j < size; j++ ) { for( int i = 0; i < size; i++ ) { if( h_c[INDX(i,j,size)] != h_a[INDX(i,j,size)] ) { printf("Error in element %d,%d\n", i,j ); printf("Host %f, device %f\n",h_c[INDX(i,j,size)], h_a[INDX(i,j,size)]); } } /* end for i */ } /* end for j */ /* free the memory */ free( h_a ); free( h_c ); CUDA_CALL( cudaFree( d_a ) ); CUDA_CALL( cudaFree( d_c ) ); CUDA_CALL( cudaDeviceReset() ); return 0; }
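The +1 column of padding on smemArray is the usual guard against shared-memory bank conflicts on column-wise accesses; a simplified host-side sketch of the bank arithmetic, assuming 32 banks of 8-byte words (to match the cudaSharedMemBankSizeEightByte setting above) and a plain row-major tile, which is an approximation of the actual 2D array layout:

#include <cstdio>

// Bank of element [row][col] in a row-major tile with `width` 8-byte words per row,
// assuming 32 banks. With width = 32 an entire column maps to a single bank;
// with width = 33 (the +1 padding) successive rows rotate through the banks.
static int bank_of(int row, int col, int width) {
  return (row * width + col) % 32;
}

int main() {
  for (int row = 0; row < 4; row++) {
    printf("col 0, row %d: unpadded bank %d, padded bank %d\n",
           row, bank_of(row, 0, 32), bank_of(row, 0, 33));
  }
  return 0;
}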
8fef5c9fa864de48ee34b33637c57b232f3e02d9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * @brief * array_ops * * @copyright * Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu) * * @copyright * See LICENSE for clarification regarding multiple authors */ #include <algorithm> #include <vector> #include "k2/csrc/array.h" #include "k2/csrc/array_ops.h" #include "k2/csrc/macros.h" namespace k2 { // See documentation in header of what this is supposed to do. // This is similar to the template Append() defined in ops_inl.h, // but with changes largely about adding `data_offsets`, and // subtracting one from the dims of all but the last array. Array1<int32_t> SpliceRowSplits(int32_t num_arrays, const Array1<int32_t> **src) { NVTX_RANGE(K2_FUNC); K2_CHECK_GT(num_arrays, 0); ContextPtr &c = src[0]->Context(); // row_splits_vec is the exclusive-sum of the modified dimensions of // the arrays in `src`. `Modified` means: is subtracted from the dims // of all but the last array. std::vector<int32_t> row_splits_vec(num_arrays + 1); int32_t sum = 0, max_dim = 0; row_splits_vec[0] = sum; // `last_elem_ptrs_vec` contains, for each of the arrays in `num_array`, a // pointer to the last element in that array. std::vector<const int32_t *> last_elem_ptrs_vec(num_arrays); for (int32_t i = 0; i < num_arrays; i++) { K2_CHECK_GE(src[i]->Dim(), 1); int32_t dim = src[i]->Dim() - (i + 1 < num_arrays ? 1 : 0); if (dim > max_dim) max_dim = dim; sum += dim; row_splits_vec[i + 1] = sum; last_elem_ptrs_vec[i] = src[i]->Data() + src[i]->Dim() - 1; } int32_t ans_size = sum; Array1<int32_t> ans(c, ans_size); int32_t *ans_data = ans.Data(); Array1<const int32_t *> last_elems_ptrs(c, last_elem_ptrs_vec); Array1<int32_t> data_offsets(c, num_arrays); // note as data_offsets.Dim() == last_elem_ptrs.Dim(), so the last element of // last_elem_ptrs.Dim() will not be summed to data_offsets, it's OK as we // don't need that value since we would not drop the last element of the last // array. ExclusiveSumDeref(last_elems_ptrs, &data_offsets); int32_t *data_offsets_data = data_offsets.Data(); if (c->GetDeviceType() == kCpu) { // a simple loop is faster, although the other branches should still work on // CPU. for (int32_t i = 0; i < num_arrays; i++) { int32_t this_dim = src[i]->Dim(); const int32_t *this_src_data = src[i]->Data(); int32_t data_offset = data_offsets_data[i]; for (int32_t j = 0; j < this_dim; j++) { ans_data[j] = this_src_data[j] + data_offset; } // notice `this_dim - 1` here, it means we will overwrite the copy of last // element of src[i] when copying elements in src[i+1] in the next // for-loop, it generates the same result with dropping the last element // of src[i] as last-elment-of-src[i] == src[i+1]->Data()[0] (equals 0) + // data_offsets_data[i+1]. ans_data += this_dim - 1; } } else { K2_CHECK_EQ(c->GetDeviceType(), kCuda); Array1<int32_t> row_splits(c, row_splits_vec); const int32_t *row_splits_data = row_splits.Data(); std::vector<const int32_t *> src_ptrs_vec(num_arrays); for (int32_t i = 0; i < num_arrays; i++) src_ptrs_vec[i] = src[i]->Data(); Array1<const int32_t *> src_ptrs(c, src_ptrs_vec); const int32_t **src_ptrs_data = src_ptrs.Data(); int32_t avg_input_size = ans_size / num_arrays; if (max_dim < 2 * avg_input_size + 512) { // here, 2 is a heuristic factor. We're saying, "if the max length of any // of the source arrays is not too much larger than the average length of // the source arrays." 
The `+ 512` is an additional heuristic factor, as // we care less about launching too many GPU threads if the number of // elements being processed is small. What we're saying is that the // arrays' sizes are fairly balanced, so we launch with a simple // rectangular kernel. K2_EVAL2( c, num_arrays, max_dim, lambda_set_data, (int32_t i, int32_t j)->void { int32_t row_start = row_splits_data[i], row_end = row_splits_data[i + 1]; const int32_t *src_ptr = src_ptrs_data[i]; // not we have dropped the last element of src[i] in // row_splits_data, so here it will not be copied. if (j < row_end - row_start) { ans_data[row_start + j] = src_ptr[j] + data_offsets_data[i]; } }); } else { int32_t block_dim = 256; while (block_dim * 4 < avg_input_size && block_dim < 8192) block_dim *= 2; // `index_map` will map from 'new index' to 'old index', with 0 <= // old_index < num_arrays... we handle each source array with multiple // blocks. // The elements of `index_map` will be of the form: // old_index + (block_of_this_array << 32). // where `old_index` is an index into `src` and `block_of_this_array` // tells us which block it is, as in 0, 1, 2, 3... // there won't be very many blocks, so it's not a problem to enumerate // them on CPU. std::vector<uint64_t> index_map; index_map.reserve((2 * ans_size) / block_dim); for (int32_t i = 0; i < num_arrays; i++) { int32_t this_array_size = src[i]->Dim(); int32_t this_num_blocks = NumBlocks(this_array_size, block_dim); for (int32_t j = 0; j < this_num_blocks; j++) { index_map.push_back((static_cast<uint64_t>(j) << 32) + static_cast<uint64_t>(i)); } } Array1<uint64_t> index_map_gpu(c, index_map); const uint64_t *index_map_data = index_map_gpu.Data(); K2_EVAL2( c, index_map_gpu.Dim(), block_dim, lambda_set_data_blocks, (int32_t i, int32_t j) { uint64_t index = index_map_data[i]; uint32_t orig_i = static_cast<uint32_t>(index), block_index = static_cast<uint32_t>(index >> 32); int32_t row_start = row_splits_data[orig_i], row_end = row_splits_data[orig_i + 1], orig_j = (block_index * block_dim) + j; const int32_t *src_ptr = src_ptrs_data[orig_i]; if (orig_j < row_end - row_start) { ans_data[row_start + orig_j] = src_ptr[orig_j] + data_offsets_data[orig_i]; } }); } } return ans; } bool ValidateRowIds(const Array1<int32_t> &row_ids, Array1<int32_t> *temp /*=nullptr*/) { NVTX_RANGE(K2_FUNC); ContextPtr &ctx = row_ids.Context(); const int32_t *data = row_ids.Data(); int32_t dim = row_ids.Dim(); if (dim == 0) return true; // will treat this as valid // note `row_ids[0]` may copy memory from device to host if (row_ids[0] < 0) return false; Array1<int32_t> temp_array; if (temp == nullptr || temp->Dim() == 0) { temp_array = Array1<int32_t>(ctx, 1); } else { K2_CHECK(IsCompatible(row_ids, *temp)); temp_array = temp->Range(0, 1); } temp = &temp_array; *temp = 0; int32_t *temp_data = temp->Data(); // Note: we know that dim >= 1 as we would have returned above if dim == 0. // This will do nothing if (dim-1) == 0 as we have checked the first element. K2_EVAL( ctx, dim - 1, lambda_check_row_ids, (int32_t i)->void { if (data[i] > data[i + 1]) *temp_data = 1; // means it's bad. 
}); return (*temp)[0] == 0; } bool ValidateRowSplits(const Array1<int32_t> &row_splits, Array1<int32_t> *temp /*=nullptr*/) { NVTX_RANGE(K2_FUNC); ContextPtr &ctx = row_splits.Context(); const int32_t *data = row_splits.Data(); int32_t dim = row_splits.Dim(); // must have at least one element and row_splits[0] == 0 if (dim == 0 || row_splits[0] != 0) return false; Array1<int32_t> temp_array; if (temp == nullptr || temp->Dim() == 0) { temp_array = Array1<int32_t>(ctx, 1); } else { K2_CHECK(IsCompatible(row_splits, *temp)); temp_array = temp->Range(0, 1); } temp = &temp_array; *temp = 0; int32_t *temp_data = temp->Data(); // Note: we know that dim >= 1 as we would have returned above if dim == 0. // This will do nothing if (dim-1) == 0 as we have checked the first element. K2_EVAL( ctx, dim - 1, lambda_check_row_splits, (int32_t i)->void { if (data[i] > data[i + 1]) *temp_data = 1; // means it's bad. }); return (*temp)[0] == 0; } bool ValidateRowSplitsAndIds(const Array1<int32_t> &row_splits, const Array1<int32_t> &row_ids, Array1<int32_t> *temp /*=nullptr*/) { NVTX_RANGE(K2_FUNC); // Check if their context are compatible or not while getting ContextPtr ctx = GetContext(row_splits, row_ids); int32_t num_rows = row_splits.Dim() - 1, num_elems = row_ids.Dim(); if (num_rows < 0 || (num_rows == 0 && num_elems > 0)) return false; if (row_splits[0] != 0 || (num_elems > 0 && row_ids[0] < 0)) return false; if (num_elems != row_splits[num_rows]) return false; const int32_t *row_ids_data = row_ids.Data(), *row_splits_data = row_splits.Data(); Array1<int32_t> temp_array; if (temp == nullptr || temp->Dim() == 0) { temp_array = Array1<int32_t>(ctx, 1); } else { K2_CHECK(ctx->IsCompatible(*temp->Context())); temp_array = temp->Range(0, 1); } temp = &temp_array; *temp = 0; int32_t *temp_data = temp_array.Data(); K2_EVAL( ctx, ::max(num_elems, num_rows), lambda_check_row_ids, (int32_t i)->void { // check row_splits bool invalid_splits = (i < num_rows && row_splits_data[i] > row_splits_data[i + 1]); // check row_ids bool invalid_ids = (i < (num_elems - 1) && row_ids_data[i] > row_ids_data[i + 1]); if (invalid_splits || invalid_ids) *temp_data = 1; // check if row_splits and row_ids agree with each other if (i < num_elems) { int32_t this_row = row_ids_data[i]; if (this_row < 0 || this_row >= num_rows || i < row_splits_data[this_row] || i >= row_splits_data[this_row + 1]) *temp_data = 1; } }); return (*temp)[0] == 0; } void RowSplitsToRowIds(const Array1<int32_t> &row_splits, Array1<int32_t> *row_ids) { NVTX_RANGE(K2_FUNC); ContextPtr c = GetContext(row_splits, *row_ids); int32_t num_elems = row_ids->Dim(), num_rows = row_splits.Dim() - 1; K2_CHECK_GE(num_rows, 0); // if there are more than zero elems, there must be at least one row. K2_CHECK(num_elems == 0 || num_rows > 0); K2_CHECK_EQ(num_elems, row_splits[num_rows]); RowSplitsToRowIds(c, num_rows, row_splits.Data(), num_elems, row_ids->Data()); } void RowIdsToRowSplits(const Array1<int32_t> &row_ids, Array1<int32_t> *row_splits) { NVTX_RANGE(K2_FUNC); ContextPtr c = GetContext(*row_splits, row_ids); int32_t num_elems = row_ids.Dim(), num_rows = row_splits->Dim() - 1; K2_CHECK_GE(num_rows, 0); // if there are more than zero elems, there must be at least one row. 
K2_CHECK(num_elems == 0 || num_rows > 0); if (num_elems > 0) K2_CHECK_GT(num_rows, row_ids[num_elems - 1]); RowIdsToRowSplits(c, num_elems, row_ids.Data(), false, num_rows, row_splits->Data()); } Array1<int32_t> GetCounts(ContextPtr c, const int32_t *src_data, int32_t src_dim, int32_t n) { NVTX_RANGE(K2_FUNC); K2_CHECK_GE(n, 0); Array1<int32_t> ans(c, n, 0); // init with 0 int32_t *ans_data = ans.Data(); if (n == 0) { K2_CHECK_EQ(src_dim, 0); return ans; } DeviceType d = c->GetDeviceType(); if (d == kCpu) { for (int32_t i = 0; i < src_dim; ++i) { ++ans_data[src_data[i]]; } } else { K2_CHECK_EQ(d, kCuda); std::size_t temp_storage_bytes = 0; K2_CHECK_CUDA_ERROR(cub::DeviceHistogram::HistogramEven( nullptr, temp_storage_bytes, src_data, ans_data, n + 1, 0, n, src_dim, c->GetCudaStream())); // The first time is to determine temporary // device storage requirements. Array1<int8_t> d_temp_storage(c, temp_storage_bytes); K2_CHECK_CUDA_ERROR(cub::DeviceHistogram::HistogramEven( d_temp_storage.Data(), temp_storage_bytes, src_data, ans_data, n + 1, 0, n, src_dim, c->GetCudaStream())); } return ans; } Array1<int32_t> GetCounts(const Array1<int32_t> &src, int32_t n) { NVTX_RANGE(K2_FUNC); return GetCounts(src.Context(), src.Data(), src.Dim(), n); } Array1<int32_t> InvertMonotonicDecreasing(const Array1<int32_t> &src) { NVTX_RANGE(K2_FUNC); ContextPtr &c = src.Context(); int32_t src_dim = src.Dim(); const int32_t *src_data = src.Data(); if (src_dim == 0) { return Array1<int32_t>(c, 0); } K2_DCHECK_GT(src.Back(), 0); // just call Back when debugging // note `src[0]` may do a DeviceToHost memory copy int32_t ans_dim = src[0]; Array1<int32_t> ans(c, ans_dim, 0); // init with 0 int32_t *ans_data = ans.Data(); K2_EVAL( c, src_dim, lambda_set_values, (int32_t i)->void { K2_DCHECK((i + 1 == src_dim || src_data[i + 1] <= src_data[i])); if (i + 1 == src_dim || src_data[i + 1] < src_data[i]) ans_data[src_data[i] - 1] = i + 1; }); MonotonicDecreasingUpperBound(ans, &ans); return ans; } Array1<int32_t> InvertPermutation(const Array1<int32_t> &src) { ContextPtr &c = src.Context(); int32_t dim = src.Dim(); Array1<int32_t> ans(c, dim); const int32_t *src_data = src.Data(); int32_t *ans_data = ans.Data(); K2_EVAL( c, dim, lambda_set_ans, (int32_t i)->void { ans_data[src_data[i]] = i; }); return ans; } Array1<int32_t> RowSplitsToSizes(const Array1<int32_t> &row_splits) { K2_CHECK_GT(row_splits.Dim(), 0); ContextPtr &c = row_splits.Context(); int32_t num_rows = row_splits.Dim() - 1; Array1<int32_t> sizes(c, num_rows); const int32_t *row_splits_data = row_splits.Data(); int32_t *sizes_data = sizes.Data(); K2_EVAL( c, num_rows, lambda_set_sizes, (int32_t i)->void { sizes_data[i] = row_splits_data[i + 1] - row_splits_data[i]; }); return sizes; } // This is modified from RowSplitsToRowIdsKernel. // When we invoke this we make a big enough grid that there doesn't have to // be a loop over rows, i.e. 
(gridDim.x * blockDim.x) / threads_per_row >= // num_rows __global__ void SizesToMergeMapKernel(int32_t num_rows, int32_t threads_per_row, const int32_t *row_splits, int32_t num_elems, uint32_t *merge_map) { int32_t thread = blockIdx.x * blockDim.x + threadIdx.x, num_threads = gridDim.x * blockDim.x, row = thread / threads_per_row, thread_this_row = thread % threads_per_row; if (row >= num_rows) return; K2_CHECK_GE(num_threads / threads_per_row, num_rows); int32_t this_row_split = row_splits[row], next_row_split = row_splits[row + 1], row_length = next_row_split - this_row_split; #pragma unroll(4) for (; thread_this_row < row_length; thread_this_row += threads_per_row) merge_map[this_row_split + thread_this_row] = uint32_t(row) + uint32_t(num_rows) * uint32_t(thread_this_row); } Array1<uint32_t> SizesToMergeMap(ContextPtr c, const std::vector<int32_t> &sizes) { int32_t num_srcs = sizes.size(); ContextPtr cpu_context = GetCpuContext(); Array1<int32_t> row_splits_cpu(cpu_context, num_srcs + 1); int32_t *row_splits_cpu_data = row_splits_cpu.Data(); int32_t tot_size = 0; row_splits_cpu_data[0] = 0; for (int32_t i = 0; i < num_srcs; i++) { tot_size += sizes[i]; row_splits_cpu_data[i + 1] = tot_size; } Array1<uint32_t> ans(c, tot_size); if (c->GetDeviceType() == kCpu) { uint32_t *ans_data = ans.Data(); int32_t cur = 0; for (int32_t src = 0; src < num_srcs; src++) { int32_t begin = cur, // i.e. the previous end. end = row_splits_cpu_data[src + 1]; for (; cur != end; ++cur) { // the 'src' says which source this item came from, and (cur - begin) // is the position within that source. ans_data[cur] = uint32_t(src) + uint32_t(cur - begin) * uint32_t(num_srcs); } } return ans; } K2_CHECK_EQ(c->GetDeviceType(), kCuda); Array1<int32_t> row_splits = row_splits_cpu.To(c); int32_t *row_splits_data = row_splits.Data(); uint32_t *merge_map_data = ans.Data(); int32_t avg_elems_per_row = (tot_size + num_srcs - 1) / num_srcs, threads_per_row = RoundUpToNearestPowerOfTwo(avg_elems_per_row), tot_threads = num_srcs * threads_per_row; int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); K2_CUDA_SAFE_CALL( hipLaunchKernelGGL(( SizesToMergeMapKernel), dim3(grid_size), dim3(block_size), 0, c->GetCudaStream(), num_srcs, threads_per_row, row_splits_data, tot_size, merge_map_data)); return ans; } } // namespace k2
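RowSplitsToRowIds, RowIdsToRowSplits and the validators above all operate on the standard ragged-array encoding; a CPU-only sketch of that encoding (not part of k2) for a ragged array with row sizes 2, 0 and 3:

#include <cstdio>
#include <vector>

int main() {
  // row_splits for rows of sizes 2, 0 and 3: exclusive prefix sum, ending at the element count.
  std::vector<int> row_splits = {0, 2, 2, 5};
  // row_ids maps every element to the row it belongs to.
  std::vector<int> row_ids;
  for (int row = 0; row + 1 < (int)row_splits.size(); row++) {
    for (int e = row_splits[row]; e < row_splits[row + 1]; e++) {
      row_ids.push_back(row);
    }
  }
  for (int id : row_ids) {
    printf("%d ", id); // prints: 0 0 2 2 2
  }
  printf("\n");
  return 0;
}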
8fef5c9fa864de48ee34b33637c57b232f3e02d9.cu
/** * @brief * array_ops * * @copyright * Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu) * * @copyright * See LICENSE for clarification regarding multiple authors */ #include <algorithm> #include <vector> #include "k2/csrc/array.h" #include "k2/csrc/array_ops.h" #include "k2/csrc/macros.h" namespace k2 { // See documentation in header of what this is supposed to do. // This is similar to the template Append() defined in ops_inl.h, // but with changes largely about adding `data_offsets`, and // subtracting one from the dims of all but the last array. Array1<int32_t> SpliceRowSplits(int32_t num_arrays, const Array1<int32_t> **src) { NVTX_RANGE(K2_FUNC); K2_CHECK_GT(num_arrays, 0); ContextPtr &c = src[0]->Context(); // row_splits_vec is the exclusive-sum of the modified dimensions of // the arrays in `src`. `Modified` means: is subtracted from the dims // of all but the last array. std::vector<int32_t> row_splits_vec(num_arrays + 1); int32_t sum = 0, max_dim = 0; row_splits_vec[0] = sum; // `last_elem_ptrs_vec` contains, for each of the arrays in `num_array`, a // pointer to the last element in that array. std::vector<const int32_t *> last_elem_ptrs_vec(num_arrays); for (int32_t i = 0; i < num_arrays; i++) { K2_CHECK_GE(src[i]->Dim(), 1); int32_t dim = src[i]->Dim() - (i + 1 < num_arrays ? 1 : 0); if (dim > max_dim) max_dim = dim; sum += dim; row_splits_vec[i + 1] = sum; last_elem_ptrs_vec[i] = src[i]->Data() + src[i]->Dim() - 1; } int32_t ans_size = sum; Array1<int32_t> ans(c, ans_size); int32_t *ans_data = ans.Data(); Array1<const int32_t *> last_elems_ptrs(c, last_elem_ptrs_vec); Array1<int32_t> data_offsets(c, num_arrays); // note as data_offsets.Dim() == last_elem_ptrs.Dim(), so the last element of // last_elem_ptrs.Dim() will not be summed to data_offsets, it's OK as we // don't need that value since we would not drop the last element of the last // array. ExclusiveSumDeref(last_elems_ptrs, &data_offsets); int32_t *data_offsets_data = data_offsets.Data(); if (c->GetDeviceType() == kCpu) { // a simple loop is faster, although the other branches should still work on // CPU. for (int32_t i = 0; i < num_arrays; i++) { int32_t this_dim = src[i]->Dim(); const int32_t *this_src_data = src[i]->Data(); int32_t data_offset = data_offsets_data[i]; for (int32_t j = 0; j < this_dim; j++) { ans_data[j] = this_src_data[j] + data_offset; } // notice `this_dim - 1` here, it means we will overwrite the copy of last // element of src[i] when copying elements in src[i+1] in the next // for-loop, it generates the same result with dropping the last element // of src[i] as last-elment-of-src[i] == src[i+1]->Data()[0] (equals 0) + // data_offsets_data[i+1]. ans_data += this_dim - 1; } } else { K2_CHECK_EQ(c->GetDeviceType(), kCuda); Array1<int32_t> row_splits(c, row_splits_vec); const int32_t *row_splits_data = row_splits.Data(); std::vector<const int32_t *> src_ptrs_vec(num_arrays); for (int32_t i = 0; i < num_arrays; i++) src_ptrs_vec[i] = src[i]->Data(); Array1<const int32_t *> src_ptrs(c, src_ptrs_vec); const int32_t **src_ptrs_data = src_ptrs.Data(); int32_t avg_input_size = ans_size / num_arrays; if (max_dim < 2 * avg_input_size + 512) { // here, 2 is a heuristic factor. We're saying, "if the max length of any // of the source arrays is not too much larger than the average length of // the source arrays." The `+ 512` is an additional heuristic factor, as // we care less about launching too many GPU threads if the number of // elements being processed is small. 
What we're saying is that the // arrays' sizes are fairly balanced, so we launch with a simple // rectangular kernel. K2_EVAL2( c, num_arrays, max_dim, lambda_set_data, (int32_t i, int32_t j)->void { int32_t row_start = row_splits_data[i], row_end = row_splits_data[i + 1]; const int32_t *src_ptr = src_ptrs_data[i]; // not we have dropped the last element of src[i] in // row_splits_data, so here it will not be copied. if (j < row_end - row_start) { ans_data[row_start + j] = src_ptr[j] + data_offsets_data[i]; } }); } else { int32_t block_dim = 256; while (block_dim * 4 < avg_input_size && block_dim < 8192) block_dim *= 2; // `index_map` will map from 'new index' to 'old index', with 0 <= // old_index < num_arrays... we handle each source array with multiple // blocks. // The elements of `index_map` will be of the form: // old_index + (block_of_this_array << 32). // where `old_index` is an index into `src` and `block_of_this_array` // tells us which block it is, as in 0, 1, 2, 3... // there won't be very many blocks, so it's not a problem to enumerate // them on CPU. std::vector<uint64_t> index_map; index_map.reserve((2 * ans_size) / block_dim); for (int32_t i = 0; i < num_arrays; i++) { int32_t this_array_size = src[i]->Dim(); int32_t this_num_blocks = NumBlocks(this_array_size, block_dim); for (int32_t j = 0; j < this_num_blocks; j++) { index_map.push_back((static_cast<uint64_t>(j) << 32) + static_cast<uint64_t>(i)); } } Array1<uint64_t> index_map_gpu(c, index_map); const uint64_t *index_map_data = index_map_gpu.Data(); K2_EVAL2( c, index_map_gpu.Dim(), block_dim, lambda_set_data_blocks, (int32_t i, int32_t j) { uint64_t index = index_map_data[i]; uint32_t orig_i = static_cast<uint32_t>(index), block_index = static_cast<uint32_t>(index >> 32); int32_t row_start = row_splits_data[orig_i], row_end = row_splits_data[orig_i + 1], orig_j = (block_index * block_dim) + j; const int32_t *src_ptr = src_ptrs_data[orig_i]; if (orig_j < row_end - row_start) { ans_data[row_start + orig_j] = src_ptr[orig_j] + data_offsets_data[orig_i]; } }); } } return ans; } bool ValidateRowIds(const Array1<int32_t> &row_ids, Array1<int32_t> *temp /*=nullptr*/) { NVTX_RANGE(K2_FUNC); ContextPtr &ctx = row_ids.Context(); const int32_t *data = row_ids.Data(); int32_t dim = row_ids.Dim(); if (dim == 0) return true; // will treat this as valid // note `row_ids[0]` may copy memory from device to host if (row_ids[0] < 0) return false; Array1<int32_t> temp_array; if (temp == nullptr || temp->Dim() == 0) { temp_array = Array1<int32_t>(ctx, 1); } else { K2_CHECK(IsCompatible(row_ids, *temp)); temp_array = temp->Range(0, 1); } temp = &temp_array; *temp = 0; int32_t *temp_data = temp->Data(); // Note: we know that dim >= 1 as we would have returned above if dim == 0. // This will do nothing if (dim-1) == 0 as we have checked the first element. K2_EVAL( ctx, dim - 1, lambda_check_row_ids, (int32_t i)->void { if (data[i] > data[i + 1]) *temp_data = 1; // means it's bad. 
}); return (*temp)[0] == 0; } bool ValidateRowSplits(const Array1<int32_t> &row_splits, Array1<int32_t> *temp /*=nullptr*/) { NVTX_RANGE(K2_FUNC); ContextPtr &ctx = row_splits.Context(); const int32_t *data = row_splits.Data(); int32_t dim = row_splits.Dim(); // must have at least one element and row_splits[0] == 0 if (dim == 0 || row_splits[0] != 0) return false; Array1<int32_t> temp_array; if (temp == nullptr || temp->Dim() == 0) { temp_array = Array1<int32_t>(ctx, 1); } else { K2_CHECK(IsCompatible(row_splits, *temp)); temp_array = temp->Range(0, 1); } temp = &temp_array; *temp = 0; int32_t *temp_data = temp->Data(); // Note: we know that dim >= 1 as we would have returned above if dim == 0. // This will do nothing if (dim-1) == 0 as we have checked the first element. K2_EVAL( ctx, dim - 1, lambda_check_row_splits, (int32_t i)->void { if (data[i] > data[i + 1]) *temp_data = 1; // means it's bad. }); return (*temp)[0] == 0; } bool ValidateRowSplitsAndIds(const Array1<int32_t> &row_splits, const Array1<int32_t> &row_ids, Array1<int32_t> *temp /*=nullptr*/) { NVTX_RANGE(K2_FUNC); // Check if their context are compatible or not while getting ContextPtr ctx = GetContext(row_splits, row_ids); int32_t num_rows = row_splits.Dim() - 1, num_elems = row_ids.Dim(); if (num_rows < 0 || (num_rows == 0 && num_elems > 0)) return false; if (row_splits[0] != 0 || (num_elems > 0 && row_ids[0] < 0)) return false; if (num_elems != row_splits[num_rows]) return false; const int32_t *row_ids_data = row_ids.Data(), *row_splits_data = row_splits.Data(); Array1<int32_t> temp_array; if (temp == nullptr || temp->Dim() == 0) { temp_array = Array1<int32_t>(ctx, 1); } else { K2_CHECK(ctx->IsCompatible(*temp->Context())); temp_array = temp->Range(0, 1); } temp = &temp_array; *temp = 0; int32_t *temp_data = temp_array.Data(); K2_EVAL( ctx, std::max(num_elems, num_rows), lambda_check_row_ids, (int32_t i)->void { // check row_splits bool invalid_splits = (i < num_rows && row_splits_data[i] > row_splits_data[i + 1]); // check row_ids bool invalid_ids = (i < (num_elems - 1) && row_ids_data[i] > row_ids_data[i + 1]); if (invalid_splits || invalid_ids) *temp_data = 1; // check if row_splits and row_ids agree with each other if (i < num_elems) { int32_t this_row = row_ids_data[i]; if (this_row < 0 || this_row >= num_rows || i < row_splits_data[this_row] || i >= row_splits_data[this_row + 1]) *temp_data = 1; } }); return (*temp)[0] == 0; } void RowSplitsToRowIds(const Array1<int32_t> &row_splits, Array1<int32_t> *row_ids) { NVTX_RANGE(K2_FUNC); ContextPtr c = GetContext(row_splits, *row_ids); int32_t num_elems = row_ids->Dim(), num_rows = row_splits.Dim() - 1; K2_CHECK_GE(num_rows, 0); // if there are more than zero elems, there must be at least one row. K2_CHECK(num_elems == 0 || num_rows > 0); K2_CHECK_EQ(num_elems, row_splits[num_rows]); RowSplitsToRowIds(c, num_rows, row_splits.Data(), num_elems, row_ids->Data()); } void RowIdsToRowSplits(const Array1<int32_t> &row_ids, Array1<int32_t> *row_splits) { NVTX_RANGE(K2_FUNC); ContextPtr c = GetContext(*row_splits, row_ids); int32_t num_elems = row_ids.Dim(), num_rows = row_splits->Dim() - 1; K2_CHECK_GE(num_rows, 0); // if there are more than zero elems, there must be at least one row. 
K2_CHECK(num_elems == 0 || num_rows > 0); if (num_elems > 0) K2_CHECK_GT(num_rows, row_ids[num_elems - 1]); RowIdsToRowSplits(c, num_elems, row_ids.Data(), false, num_rows, row_splits->Data()); } Array1<int32_t> GetCounts(ContextPtr c, const int32_t *src_data, int32_t src_dim, int32_t n) { NVTX_RANGE(K2_FUNC); K2_CHECK_GE(n, 0); Array1<int32_t> ans(c, n, 0); // init with 0 int32_t *ans_data = ans.Data(); if (n == 0) { K2_CHECK_EQ(src_dim, 0); return ans; } DeviceType d = c->GetDeviceType(); if (d == kCpu) { for (int32_t i = 0; i < src_dim; ++i) { ++ans_data[src_data[i]]; } } else { K2_CHECK_EQ(d, kCuda); std::size_t temp_storage_bytes = 0; K2_CHECK_CUDA_ERROR(cub::DeviceHistogram::HistogramEven( nullptr, temp_storage_bytes, src_data, ans_data, n + 1, 0, n, src_dim, c->GetCudaStream())); // The first time is to determine temporary // device storage requirements. Array1<int8_t> d_temp_storage(c, temp_storage_bytes); K2_CHECK_CUDA_ERROR(cub::DeviceHistogram::HistogramEven( d_temp_storage.Data(), temp_storage_bytes, src_data, ans_data, n + 1, 0, n, src_dim, c->GetCudaStream())); } return ans; } Array1<int32_t> GetCounts(const Array1<int32_t> &src, int32_t n) { NVTX_RANGE(K2_FUNC); return GetCounts(src.Context(), src.Data(), src.Dim(), n); } Array1<int32_t> InvertMonotonicDecreasing(const Array1<int32_t> &src) { NVTX_RANGE(K2_FUNC); ContextPtr &c = src.Context(); int32_t src_dim = src.Dim(); const int32_t *src_data = src.Data(); if (src_dim == 0) { return Array1<int32_t>(c, 0); } K2_DCHECK_GT(src.Back(), 0); // just call Back when debugging // note `src[0]` may do a DeviceToHost memory copy int32_t ans_dim = src[0]; Array1<int32_t> ans(c, ans_dim, 0); // init with 0 int32_t *ans_data = ans.Data(); K2_EVAL( c, src_dim, lambda_set_values, (int32_t i)->void { K2_DCHECK((i + 1 == src_dim || src_data[i + 1] <= src_data[i])); if (i + 1 == src_dim || src_data[i + 1] < src_data[i]) ans_data[src_data[i] - 1] = i + 1; }); MonotonicDecreasingUpperBound(ans, &ans); return ans; } Array1<int32_t> InvertPermutation(const Array1<int32_t> &src) { ContextPtr &c = src.Context(); int32_t dim = src.Dim(); Array1<int32_t> ans(c, dim); const int32_t *src_data = src.Data(); int32_t *ans_data = ans.Data(); K2_EVAL( c, dim, lambda_set_ans, (int32_t i)->void { ans_data[src_data[i]] = i; }); return ans; } Array1<int32_t> RowSplitsToSizes(const Array1<int32_t> &row_splits) { K2_CHECK_GT(row_splits.Dim(), 0); ContextPtr &c = row_splits.Context(); int32_t num_rows = row_splits.Dim() - 1; Array1<int32_t> sizes(c, num_rows); const int32_t *row_splits_data = row_splits.Data(); int32_t *sizes_data = sizes.Data(); K2_EVAL( c, num_rows, lambda_set_sizes, (int32_t i)->void { sizes_data[i] = row_splits_data[i + 1] - row_splits_data[i]; }); return sizes; } // This is modified from RowSplitsToRowIdsKernel. // When we invoke this we make a big enough grid that there doesn't have to // be a loop over rows, i.e. 
(gridDim.x * blockDim.x) / threads_per_row >= // num_rows __global__ void SizesToMergeMapKernel(int32_t num_rows, int32_t threads_per_row, const int32_t *row_splits, int32_t num_elems, uint32_t *merge_map) { int32_t thread = blockIdx.x * blockDim.x + threadIdx.x, num_threads = gridDim.x * blockDim.x, row = thread / threads_per_row, thread_this_row = thread % threads_per_row; if (row >= num_rows) return; K2_CHECK_GE(num_threads / threads_per_row, num_rows); int32_t this_row_split = row_splits[row], next_row_split = row_splits[row + 1], row_length = next_row_split - this_row_split; #pragma unroll(4) for (; thread_this_row < row_length; thread_this_row += threads_per_row) merge_map[this_row_split + thread_this_row] = uint32_t(row) + uint32_t(num_rows) * uint32_t(thread_this_row); } Array1<uint32_t> SizesToMergeMap(ContextPtr c, const std::vector<int32_t> &sizes) { int32_t num_srcs = sizes.size(); ContextPtr cpu_context = GetCpuContext(); Array1<int32_t> row_splits_cpu(cpu_context, num_srcs + 1); int32_t *row_splits_cpu_data = row_splits_cpu.Data(); int32_t tot_size = 0; row_splits_cpu_data[0] = 0; for (int32_t i = 0; i < num_srcs; i++) { tot_size += sizes[i]; row_splits_cpu_data[i + 1] = tot_size; } Array1<uint32_t> ans(c, tot_size); if (c->GetDeviceType() == kCpu) { uint32_t *ans_data = ans.Data(); int32_t cur = 0; for (int32_t src = 0; src < num_srcs; src++) { int32_t begin = cur, // i.e. the previous end. end = row_splits_cpu_data[src + 1]; for (; cur != end; ++cur) { // the 'src' says which source this item came from, and (cur - begin) // is the position within that source. ans_data[cur] = uint32_t(src) + uint32_t(cur - begin) * uint32_t(num_srcs); } } return ans; } K2_CHECK_EQ(c->GetDeviceType(), kCuda); Array1<int32_t> row_splits = row_splits_cpu.To(c); int32_t *row_splits_data = row_splits.Data(); uint32_t *merge_map_data = ans.Data(); int32_t avg_elems_per_row = (tot_size + num_srcs - 1) / num_srcs, threads_per_row = RoundUpToNearestPowerOfTwo(avg_elems_per_row), tot_threads = num_srcs * threads_per_row; int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); K2_CUDA_SAFE_CALL( SizesToMergeMapKernel<<<grid_size, block_size, 0, c->GetCudaStream()>>>( num_srcs, threads_per_row, row_splits_data, tot_size, merge_map_data)); return ans; } } // namespace k2
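The comment above SizesToMergeMapKernel explains the encoding used for the merge map: each output element stores the source (row) index plus num_srcs times the position within that source, exactly as in the CPU branch of SizesToMergeMap. Below is a minimal host-only sketch of that encoding and how to invert it; the sizes chosen here are illustrative, not taken from the library.

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  // Two sources with 2 and 3 elements respectively (arbitrary example sizes).
  std::vector<int32_t> sizes = {2, 3};
  int32_t num_srcs = (int32_t)sizes.size();
  std::vector<uint32_t> merge_map;
  for (int32_t src = 0; src < num_srcs; ++src)
    for (int32_t pos = 0; pos < sizes[src]; ++pos)
      merge_map.push_back((uint32_t)src + (uint32_t)num_srcs * (uint32_t)pos);
  // merge_map is now {0, 2, 1, 3, 5}: m % num_srcs recovers the source index
  // and m / num_srcs recovers the position within that source.
  for (uint32_t m : merge_map)
    std::printf("element %u -> src %u, pos %u\n", m, m % num_srcs, m / num_srcs);
  return 0;
}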
fe2502eec21e730c84e02695eb671c7679a7591d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "scalar.h"

// Returns 1.0 when d1 < d2, otherwise 0.0.
__device__ double op(double d1, double d2, double *params) {
    if (d1 < d2) return 1;
    return 0;
}

extern "C" __global__ void min_scalar_double(int n, int idx, double dx, double *dy,
                                             int incy, double *params, double *result) {
    transform(n, idx, dx, dy, incy, params, result);
}
fe2502eec21e730c84e02695eb671c7679a7591d.cu
#include "scalar.h" __device__ double op(double d1,double d2,double *params) { if(d1 < d2) return 1; return 0; } extern "C" __global__ void min_scalar_double(int n, int idx,double dx,double *dy,int incy,double *params,double *result) { transform(n,idx,dx,dy,incy,params,result); }
37551908d0929e224deba797372fb81c57f6a861.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// XOR each 32-bit word of the input with the matching word of the random (key)
// stream; pSize / sizeof(int) gives the number of words to process, so the
// index must stay strictly below that count.
__global__ void generate_decrypted(int *pDataPointer, int *pRandomData,
                                   int *pEncryptedData, long long int pSize) {
    long long int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < (long long int)(pSize / sizeof(int))) {
        pEncryptedData[index] = pDataPointer[index] ^ pRandomData[index];
    }
}
37551908d0929e224deba797372fb81c57f6a861.cu
#include "includes.h" __global__ void generate_decrypted(int *pDataPointer , int *pRandomData , int *pEncryptedData , long long int pSize) { long long int index = blockIdx.x * blockDim.x + threadIdx.x; if( index <=(pSize /sizeof(int) )) { (*(pEncryptedData+index)) = (*(pDataPointer+ index))^(*(pRandomData+index)); } else return; }
3af5ac076a877b346a256e77673a82ef8d20c17d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel.h" #define TX 32 #define TY 32 #define DIM 2100 struct hipComplex { float r; float i; __device__ hipComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ hipComplex operator*(const hipComplex& a) { return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ hipComplex operator-(const hipComplex& a) { return hipComplex(r-a.r, i-a.i); } __device__ hipComplex operator+(const hipComplex& a) { return hipComplex(r+a.r, i+a.i); } __device__ hipComplex operator/(const hipComplex& a) { return hipComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i)); } }; __device__ hipComplex conj(hipComplex m) { hipComplex out(m.r,-m.i); return out; } __device__ hipComplex nor(hipComplex m) { hipComplex out(m.r*m.r+m.i*m.i,0.0); return out; } __device__ float norg(hipComplex m) { return sqrtf(m.r*m.r+m.i*m.i); } __device__ hipComplex qpoch(hipComplex a, hipComplex q) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<80;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex qp(hipComplex a, hipComplex q, int n) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<n;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex ramphi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,mq)/qpoch(q,mq); } __device__ hipComplex rampsi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q)*qpoch(q*q,q*q); } __device__ hipComplex ramchi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q*q); } __device__ hipComplex ramf(hipComplex a, hipComplex b) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex ma = mone*a; hipComplex mb = mone*b; return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b); } // complex exponential __device__ hipComplex expc(hipComplex m) { hipComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i)); return out; } __device__ hipComplex powc(hipComplex ag, hipComplex bg) { hipComplex out(0.0,0.0); hipComplex mesp(0.0,0.0); hipComplex frim(0.0,0.0); double radiu, thet; /* get the proper polar form of the complex number */ radiu = sqrtf(ag.r*ag.r + ag.i*ag.i); thet = atan2f(ag.i,ag.r); /* mesp gives R^(c+di) */ mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu)); mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu)); /* frim gives e^(i theta (c+di)) */ /* now since we already have the machinery for performing complex exponentiation (just exp), we can just call that here */ frim.r = -1.0 * bg.i * thet; frim.i = bg.r * thet; frim = expc(frim); out = mesp*frim; return out; } // cosine (nothing algorithmically clean) __device__ hipComplex cosc(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.5,0.0); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) + expc(mone*m*ai)); return out; } __device__ hipComplex sins(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.0,0.5); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) - expc(mone*m*ai)); return out; 
} __device__ hipComplex tans(hipComplex m) { return sins(m)/cosc(m); } __device__ hipComplex moeb(hipComplex t, hipComplex a, hipComplex z) { hipComplex out(0.0,0.0); hipComplex ai(0.0,1.0); hipComplex unity(1.0,0.0); out = expc(ai*t) * (z-a)/(unity-conj(a)*z); return out; } __device__ hipComplex bnewt(hipComplex z) { hipComplex three(3.0,0.0); hipComplex unity(1.0,0.0); hipComplex out(0.0,0.0); hipComplex Z =z; hipComplex L(0.0,0.0); hipComplex R(0.62348980185873359,0.7818314824680298); hipComplex v(0.62348980185873359,0.7818314824680298); int i; for(i=0;i<100;i++) { L = sins(expc(Z)-cosc(Z))-Z; out = out + v*L; v = R * v; Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity); } return out; } __device__ hipComplex they3(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + powc(q,enn*enn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex wahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne); ne = ne + un; } out = out + un; return out; } __device__ hipComplex dwahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne-un); ne = ne + un; } return out; } __device__ hipComplex they3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex h3ey3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex aut(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); hipComplex vel(0.0,0.0); hipComplex rav(0.0,0.0); for(u=-40;u<40;u++) { vel = expc(dui*enn*z); rav = powc(q,enn*enn); aut = aut + (enn*enn)*rav/q*vel; out = out + rav*vel; enn = enn + onn; } return out/aut; } __device__ hipComplex thess(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex the1(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*sins(z); } __device__ hipComplex the2(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*cosc(z); } __device__ hipComplex the3(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex the4(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { 
qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } /* routine to generate q-integers */ __device__ hipComplex qin(hipComplex a, hipComplex q) { hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); out = (unity - powc(q, a))/(unity-q); return out; } /* generating function for n^2 */ __device__ hipComplex geffa(hipComplex z, hipComplex q) { hipComplex out(0.0,0.0); hipComplex unity(1.0,0.0); hipComplex wu(0.0,0.0); hipComplex Z=unity; int v; for(v=0;v<20;v++) { out = out + qin(wu*wu,q)* Z; wu = wu + unity; Z = z * Z; } return out; } __device__ hipComplex thratd(hipComplex z, hipComplex q) { int n; hipComplex fau(4.0,0.0); hipComplex too(2.0,0.0); hipComplex unity(1.0,0.0); hipComplex ennn(1.0,0.0); hipComplex ni(-1.0,0.0); hipComplex noo(-1.0,0.0); hipComplex out(0.0,0.0); hipComplex loo = q; hipComplex qoo =q*q; for(n=0;n<80;n++) { out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z); qoo = qoo * q*q; loo = loo * q; ennn = ennn +unity; noo = ni * noo; } return out*fau; } __device__ hipComplex thess4(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex thesk(hipComplex z, hipComplex q, hipComplex r) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex roo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; roo = roo * r * r ; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r)); } return out; } __device__ hipComplex thass(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex rogers( hipComplex q) { hipComplex onf(0.2,0.0); hipComplex Q5 = q*q*q*q*q; hipComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5)); return out; } __device__ hipComplex flat(hipComplex m) { float ua = sqrtf(m.r*m.r + m.i*m.i); hipComplex out(m.r/ua,m.i/ua); return out; } __device__ hipComplex eff(hipComplex z, hipComplex lambda) { return z*z*z*z+ lambda/(z*z*z*z); } __device__ hipComplex thete(float R, hipComplex tau, hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); hipComplex ann(1.0,0.0); hipComplex bnn(1.0,0.0); hipComplex scrunn(1.0,0.0); float ca, cb,cc; int a, b; for(a=-10;a<10;a++) { ann.r = a; for(b=-10;b<10;b++) { bnn.r = b; if(((a+b)%2)==0) { scrunn.r = a*a + b*b; A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn)); } else { ca = 5.0 + a*a + b*b; cb = 2*(a * cos(R)- b * sin(R)); cc = 4*(b * 
cos(R)+a*sin(R)); scrunn.r = ca + cb + cc; A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn)); } } } return A; } __device__ hipComplex thetta(hipComplex tau, hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); int n; for(n=-10;n<10;n++) { enn.r = n; plenn = enn * enn; /* this get the hipComplex out of the event loop */ A = A + expc(frann* plenn) * expc(shenn* enn); } return A; } __device__ hipComplex mitlef(hipComplex z,hipComplex c) { hipComplex out(0.0,0.0); hipComplex Z(1.0,0.0); hipComplex frove(0.0,0.0); int v; for(v=0;v<20;v++) { frove.r = tgammaf(c.r*v+c.i); out = out + Z/frove; Z = Z * z; } return out; } __device__ hipComplex helva(hipComplex z) { hipComplex out(j0f(z.r),j1f(z.i)); return out; } /* derivative of helva, from Mathematica */ __device__ hipComplex helvp(hipComplex z) { hipComplex out(jnf(2,z.r),jnf(1,z.i)); return out; } __device__ hipComplex lanna(hipComplex z) { hipComplex out(j1f(z.r/j0f(z.i)),j1f(z.i/j1f(z.r))); return out; } __device__ hipComplex harva(hipComplex z) { hipComplex out(jnf(floor(z.i),z.r),jnf(ceil(z.r),z.i)); return out; } __device__ hipComplex herve(hipComplex z) { hipComplex out(jnf(floor(z.r-z.i),z.i),jnf(ceil(z.r+z.i),z.r)); return out; } __device__ hipComplex alver(hipComplex z) { hipComplex out(1.0/j0f(z.r),1.0/j1f(z.i)); return out; } __device__ hipComplex alvir(hipComplex z) { hipComplex out(j0f(z.r),1.0/j1f(z.i)); return out; } __device__ hipComplex hexva(int m, hipComplex z) { hipComplex out(jnf(m,z.r),jnf(m,z.i)); return out; } __device__ hipComplex hilva(hipComplex z) { hipComplex out(j1f(z.r),j0f(z.i)); return out; } __device__ hipComplex ahilv(hipComplex z) { hipComplex out(1.0/j1f(z.r),1.0/j0f(z.i)); return out; } __device__ hipComplex halva(hipComplex z) { hipComplex out(j0f(z.r),j0f(z.i)); return out; } __device__ hipComplex aciwa(hipComplex z) { hipComplex out(j0f(j1f(z.r)),j1f(j0f(z.i))); return out; } __device__ hipComplex hinva(hipComplex z) { hipComplex out(j1f(z.r),j1f(z.i)); return out; } __device__ hipComplex henga(hipComplex z) { hipComplex out(acoshf(z.r),asinhf(z.i)); return out; } __device__ hipComplex holva(hipComplex z) { hipComplex out(y0f(z.r),y1f(z.i)); return out; } __device__ hipComplex aliva(hipComplex z) { hipComplex out(j1f(z.r),cyl_bessel_i1f(z.i)); return out; } __device__ hipComplex ariva(hipComplex z) { hipComplex out(sinf(z.i),cbrtf(z.r)); return out; } __device__ hipComplex arago(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * harva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex irigo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * 
(unity - qoo) * (unity + tw * qoo/q * helva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex thy(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * conj(qoo/q * tw*hinva(z)) +hilva( qoo*qoo/(q*q))); } return out; } __device__ hipComplex urigo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex origo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(alvir(q*z),alvir(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r= blockIdx.y*blockDim.y + threadIdx.y; const int i = c + r*w; // 1D indexing float pi = 3.1415926535898; hipComplex ip(pi,0.0); const float scale =10; float fx = -scale * (float)(DIM/2 - c)/(DIM/2); float fy = scale * (float)(DIM/2 - r)/(DIM/2); hipComplex effx(fx,0.0); hipComplex effy(fy,0.0); float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2); float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2); hipComplex mouse(LA,LB); hipComplex moux(LA,0.0); hipComplex mouy(0.0,LB); hipComplex q(fx,fy); /* hipComplex tik(sin(ticks/40.0f),0.0);*/ /* hipComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0)); hipComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024)); hipComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/ hipComplex fixon(.029348,.828934); hipComplex faxon(.029348,-.828934); hipComplex unity(1.0,0.0); hipComplex ai(0.0,1.0); hipComplex aon = expc(ai*moux); hipComplex uon= expc(mouy); hipComplex flurn(0.0,0.0); hipComplex accume(1.0,0.0); hipComplex eccume(0.0,0.0); hipComplex rhun(1.02871376821872462237195122725097462534904479,0.0); hipComplex cue = q; hipComplex lam(0.73736887807831963, -0.67549029426152396); hipComplex due(3.0,0.0); hipComplex tir(2.0,0.0); hipComplex selga(3.5,0.0); hipComplex vro(-1.0,0.0); hipComplex tle(1.0,0.0); hipComplex sle(4.0,0.0); hipComplex cherra(0.62348980185873359, 0.7818314824680298); hipComplex lerra = cherra*cherra; hipComplex ferra = lerra * cherra; hipComplex terra = ferra * cherra; hipComplex zerra = terra * cherra; hipComplex nerra = zerra * cherra; hipComplex vlarv(1/3.0,0.0); hipComplex sugna(0.70710678118654757, 0.70710678118654746); hipComplex regna(0.99966573338968745, 0.025853848581176047); hipComplex spa(sqrtf(2.0),0.0); hipComplex spb(sqrtf(3.0),0.0); hipComplex spc(sqrtf(4.0),0.0); hipComplex spd(sqrtf(5.0),0.0); hipComplex mrun(1/2.0,0.0); hipComplex gloon (4.0,0.0); hipComplex plenod(-.01,0.0); hipComplex nue = cue; hipComplex vue = cue; hipComplex rhuva(3.0,0.0); hipComplex rarva(3.0,0.0); hipComplex bor(-10.0,0.0); hipComplex nat(0.0,-10.0); hipComplex rhus(1.0,0.0); hipComplex D(0.739085133215160641655312087674,0.0); /* if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D 
indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist);*/ // theta function varying on constant // cue =thess(cue,fixon*mouse); int v=1; int axa=-10; /*while((v<100)&&norg(cue)<2.0) { cue = cue*(cue-mouy)*(cue-moux) -cue * q; v++; }*/ // almost Klein's j-invariant //cue = (powc(powc(arago(flurn,q*aon),rarva)+ powc(the2(flurn,q),rarva) + powc(the4(flurn,q),rarva),rhuva))/powc(the4(flurn,q)*the3(flurn,q)*the2(flurn,q),rarva); for(v=0;v<20;v++) { cue =(cue - uon/(cue-conj(aon/conj(cue - uon/conj(cue)))))/(cue + aon/(cue+conj(uon/conj(cue + aon/conj(cue))))); cue =(cue - ai*uon/(cue-ai*conj(aon/conj(cue - uon/conj(cue)))))/(cue + ai* aon/(cue+ai*conj(uon/conj(cue + aon/conj(cue))))); accume = accume * (unity-aon*helva(cue)); } cue = accume; double tha; tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi)); d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2)); d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2)); d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2)); d_out[i].w = 255; } void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos); } /*for(v=1;v<5;v++) { cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); } cue = accume;*/ /*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q)); rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q)); cue = rhus+cue; cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/ /*for(v=0;v<60;v++){ cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon)); accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue)); } cue = accume;*/ /* One for (x+d)/cos(d) -cos(x)/d Tungilipa D = cos(D) cos(sqrt(x*D))/D -1 = 0.0 The other for cos(x)-x Eripgrunna */
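The comment block inside powc() above spells out the polar-form construction ag^bg = r^c * e^(-d*theta) * e^(i*(d*ln r + c*theta)) for ag = r*e^(i*theta) and bg = c + d*i. Below is a small host-only C++ check (illustrative, not part of kernel.h; the sample values of ag and bg are arbitrary) that rebuilds the same two factors with std::complex and compares them against std::pow.

#include <complex>
#include <cstdio>

int main() {
  // Arbitrary sample operands; any ag with |ag| > 0 works.
  std::complex<float> ag(1.0f, 1.0f), bg(0.5f, 0.25f);
  float r = std::abs(ag), theta = std::arg(ag);
  float c = bg.real(), d = bg.imag();
  // mesp = r^c * e^{i * d * ln r}   (same role as mesp in powc above)
  std::complex<float> mesp = std::polar(std::pow(r, c), d * std::log(r));
  // frim = e^{-d*theta} * e^{i * c * theta}   (same role as frim in powc above)
  std::complex<float> frim = std::exp(std::complex<float>(-d * theta, c * theta));
  std::complex<float> manual = mesp * frim;
  std::complex<float> reference = std::pow(ag, bg);
  std::printf("manual    = %f + %fi\n", manual.real(), manual.imag());
  std::printf("std::pow  = %f + %fi\n", reference.real(), reference.imag());
  return 0;
}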
3af5ac076a877b346a256e77673a82ef8d20c17d.cu
#include "kernel.h" #define TX 32 #define TY 32 #define DIM 2100 struct cuComplex { float r; float i; __device__ cuComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ cuComplex operator*(const cuComplex& a) { return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ cuComplex operator-(const cuComplex& a) { return cuComplex(r-a.r, i-a.i); } __device__ cuComplex operator+(const cuComplex& a) { return cuComplex(r+a.r, i+a.i); } __device__ cuComplex operator/(const cuComplex& a) { return cuComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i)); } }; __device__ cuComplex conj(cuComplex m) { cuComplex out(m.r,-m.i); return out; } __device__ cuComplex nor(cuComplex m) { cuComplex out(m.r*m.r+m.i*m.i,0.0); return out; } __device__ float norg(cuComplex m) { return sqrtf(m.r*m.r+m.i*m.i); } __device__ cuComplex qpoch(cuComplex a, cuComplex q) { cuComplex out(1.0,0.0); cuComplex unity(1.0,0.0); int i = 0; cuComplex Q = q; if(q.magnitude2()>1.0) { return cuComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<80;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ cuComplex qp(cuComplex a, cuComplex q, int n) { cuComplex out(1.0,0.0); cuComplex unity(1.0,0.0); int i = 0; cuComplex Q = q; if(q.magnitude2()>1.0) { return cuComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<n;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ cuComplex ramphi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,mq)/qpoch(q,mq); } __device__ cuComplex rampsi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,q)*qpoch(q*q,q*q); } __device__ cuComplex ramchi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,q*q); } __device__ cuComplex ramf(cuComplex a, cuComplex b) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex ma = mone*a; cuComplex mb = mone*b; return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b); } // complex exponential __device__ cuComplex expc(cuComplex m) { cuComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i)); return out; } __device__ cuComplex powc(cuComplex ag, cuComplex bg) { cuComplex out(0.0,0.0); cuComplex mesp(0.0,0.0); cuComplex frim(0.0,0.0); double radiu, thet; /* get the proper polar form of the complex number */ radiu = sqrtf(ag.r*ag.r + ag.i*ag.i); thet = atan2f(ag.i,ag.r); /* mesp gives R^(c+di) */ mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu)); mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu)); /* frim gives e^(i theta (c+di)) */ /* now since we already have the machinery for performing complex exponentiation (just exp), we can just call that here */ frim.r = -1.0 * bg.i * thet; frim.i = bg.r * thet; frim = expc(frim); out = mesp*frim; return out; } // cosine (nothing algorithmically clean) __device__ cuComplex cosc(cuComplex m) { cuComplex ai(0.0,1.0); cuComplex ot(0.5,0.0); cuComplex mone(-1.0,0.0); cuComplex out = ot*(expc(m*ai) + expc(mone*m*ai)); return out; } __device__ cuComplex sins(cuComplex m) { cuComplex ai(0.0,1.0); cuComplex ot(0.0,0.5); cuComplex mone(-1.0,0.0); cuComplex out = ot*(expc(m*ai) - expc(mone*m*ai)); return out; } __device__ cuComplex tans(cuComplex m) { return sins(m)/cosc(m); } __device__ cuComplex moeb(cuComplex t, cuComplex a, cuComplex z) { cuComplex out(0.0,0.0); 
cuComplex ai(0.0,1.0); cuComplex unity(1.0,0.0); out = expc(ai*t) * (z-a)/(unity-conj(a)*z); return out; } __device__ cuComplex bnewt(cuComplex z) { cuComplex three(3.0,0.0); cuComplex unity(1.0,0.0); cuComplex out(0.0,0.0); cuComplex Z =z; cuComplex L(0.0,0.0); cuComplex R(0.62348980185873359,0.7818314824680298); cuComplex v(0.62348980185873359,0.7818314824680298); int i; for(i=0;i<100;i++) { L = sins(expc(Z)-cosc(Z))-Z; out = out + v*L; v = R * v; Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity); } return out; } __device__ cuComplex they3(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + powc(q,enn*enn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ cuComplex wahi(cuComplex z) { int u; cuComplex un(1.0,0.0); cuComplex ne(1.0,0.0); cuComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne); ne = ne + un; } out = out + un; return out; } __device__ cuComplex dwahi(cuComplex z) { int u; cuComplex un(1.0,0.0); cuComplex ne(1.0,0.0); cuComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne-un); ne = ne + un; } return out; } __device__ cuComplex they3p(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ cuComplex h3ey3p(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex aut(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); cuComplex vel(0.0,0.0); cuComplex rav(0.0,0.0); for(u=-40;u<40;u++) { vel = expc(dui*enn*z); rav = powc(q,enn*enn); aut = aut + (enn*enn)*rav/q*vel; out = out + rav*vel; enn = enn + onn; } return out/aut; } __device__ cuComplex thess(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex the1(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*sins(z); } __device__ cuComplex the2(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*cosc(z); } __device__ cuComplex the3(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex the4(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } /* routine to generate q-integers */ __device__ cuComplex qin(cuComplex a, cuComplex q) { cuComplex unity(1.0,0.0); cuComplex 
out(1.0,0.0); out = (unity - powc(q, a))/(unity-q); return out; } /* generating function for n^2 */ __device__ cuComplex geffa(cuComplex z, cuComplex q) { cuComplex out(0.0,0.0); cuComplex unity(1.0,0.0); cuComplex wu(0.0,0.0); cuComplex Z=unity; int v; for(v=0;v<20;v++) { out = out + qin(wu*wu,q)* Z; wu = wu + unity; Z = z * Z; } return out; } __device__ cuComplex thratd(cuComplex z, cuComplex q) { int n; cuComplex fau(4.0,0.0); cuComplex too(2.0,0.0); cuComplex unity(1.0,0.0); cuComplex ennn(1.0,0.0); cuComplex ni(-1.0,0.0); cuComplex noo(-1.0,0.0); cuComplex out(0.0,0.0); cuComplex loo = q; cuComplex qoo =q*q; for(n=0;n<80;n++) { out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z); qoo = qoo * q*q; loo = loo * q; ennn = ennn +unity; noo = ni * noo; } return out*fau; } __device__ cuComplex thess4(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex thesk(cuComplex z, cuComplex q, cuComplex r) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex roo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; roo = roo * r * r ; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r)); } return out; } __device__ cuComplex thass(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex rogers( cuComplex q) { cuComplex onf(0.2,0.0); cuComplex Q5 = q*q*q*q*q; cuComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5)); return out; } __device__ cuComplex flat(cuComplex m) { float ua = sqrtf(m.r*m.r + m.i*m.i); cuComplex out(m.r/ua,m.i/ua); return out; } __device__ cuComplex eff(cuComplex z, cuComplex lambda) { return z*z*z*z+ lambda/(z*z*z*z); } __device__ cuComplex thete(float R, cuComplex tau, cuComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ cuComplex A(0.0,0.0); /* miscellaneous setup */ cuComplex pai(3.14159265353898,0.0); cuComplex ai(0.0,1.0); cuComplex oo(1.0,0.0); cuComplex oot(2.0,0.0); cuComplex nini(9.0,0.0); cuComplex eigh(-18.0,0.0); /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ cuComplex frann(1.0,0.0); frann = pai * ai * tau ; cuComplex shenn(1.0,0.0); shenn = oot * ai * z; cuComplex plenn(1.0,0.0); cuComplex enn(1.0,0.0); cuComplex ann(1.0,0.0); cuComplex bnn(1.0,0.0); cuComplex scrunn(1.0,0.0); float ca, cb,cc; int a, b; for(a=-10;a<10;a++) { ann.r = a; for(b=-10;b<10;b++) { bnn.r = b; if(((a+b)%2)==0) { scrunn.r = a*a + b*b; A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn)); } else { ca = 5.0 + a*a + b*b; cb = 2*(a * cos(R)- b * sin(R)); cc = 4*(b * cos(R)+a*sin(R)); scrunn.r = ca + cb + cc; A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn)); } } } return A; } __device__ cuComplex thetta(cuComplex tau, cuComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about 
whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ cuComplex A(0.0,0.0); /* miscellaneous setup */ cuComplex pai(3.14159265353898,0.0); cuComplex ai(0.0,1.0); cuComplex oo(1.0,0.0); cuComplex oot(2.0,0.0); cuComplex nini(9.0,0.0); cuComplex eigh(-18.0,0.0); /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ cuComplex frann(1.0,0.0); frann = pai * ai * tau ; cuComplex shenn(1.0,0.0); shenn = oot * ai * z; cuComplex plenn(1.0,0.0); cuComplex enn(1.0,0.0); int n; for(n=-10;n<10;n++) { enn.r = n; plenn = enn * enn; /* this get the cuComplex out of the event loop */ A = A + expc(frann* plenn) * expc(shenn* enn); } return A; } __device__ cuComplex mitlef(cuComplex z,cuComplex c) { cuComplex out(0.0,0.0); cuComplex Z(1.0,0.0); cuComplex frove(0.0,0.0); int v; for(v=0;v<20;v++) { frove.r = tgammaf(c.r*v+c.i); out = out + Z/frove; Z = Z * z; } return out; } __device__ cuComplex helva(cuComplex z) { cuComplex out(j0f(z.r),j1f(z.i)); return out; } /* derivative of helva, from Mathematica */ __device__ cuComplex helvp(cuComplex z) { cuComplex out(jnf(2,z.r),jnf(1,z.i)); return out; } __device__ cuComplex lanna(cuComplex z) { cuComplex out(j1f(z.r/j0f(z.i)),j1f(z.i/j1f(z.r))); return out; } __device__ cuComplex harva(cuComplex z) { cuComplex out(jnf(floor(z.i),z.r),jnf(ceil(z.r),z.i)); return out; } __device__ cuComplex herve(cuComplex z) { cuComplex out(jnf(floor(z.r-z.i),z.i),jnf(ceil(z.r+z.i),z.r)); return out; } __device__ cuComplex alver(cuComplex z) { cuComplex out(1.0/j0f(z.r),1.0/j1f(z.i)); return out; } __device__ cuComplex alvir(cuComplex z) { cuComplex out(j0f(z.r),1.0/j1f(z.i)); return out; } __device__ cuComplex hexva(int m, cuComplex z) { cuComplex out(jnf(m,z.r),jnf(m,z.i)); return out; } __device__ cuComplex hilva(cuComplex z) { cuComplex out(j1f(z.r),j0f(z.i)); return out; } __device__ cuComplex ahilv(cuComplex z) { cuComplex out(1.0/j1f(z.r),1.0/j0f(z.i)); return out; } __device__ cuComplex halva(cuComplex z) { cuComplex out(j0f(z.r),j0f(z.i)); return out; } __device__ cuComplex aciwa(cuComplex z) { cuComplex out(j0f(j1f(z.r)),j1f(j0f(z.i))); return out; } __device__ cuComplex hinva(cuComplex z) { cuComplex out(j1f(z.r),j1f(z.i)); return out; } __device__ cuComplex henga(cuComplex z) { cuComplex out(acoshf(z.r),asinhf(z.i)); return out; } __device__ cuComplex holva(cuComplex z) { cuComplex out(y0f(z.r),y1f(z.i)); return out; } __device__ cuComplex aliva(cuComplex z) { cuComplex out(j1f(z.r),cyl_bessel_i1f(z.i)); return out; } __device__ cuComplex ariva(cuComplex z) { cuComplex out(sinf(z.i),cbrtf(z.r)); return out; } __device__ cuComplex arago(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * harva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex irigo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * helva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex thy(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * conj(qoo/q * tw*hinva(z)) +hilva( qoo*qoo/(q*q))); } return out; } __device__ cuComplex 
urigo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex origo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(alvir(q*z),alvir(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r= blockIdx.y*blockDim.y + threadIdx.y; const int i = c + r*w; // 1D indexing float pi = 3.1415926535898; cuComplex ip(pi,0.0); const float scale =10; float fx = -scale * (float)(DIM/2 - c)/(DIM/2); float fy = scale * (float)(DIM/2 - r)/(DIM/2); cuComplex effx(fx,0.0); cuComplex effy(fy,0.0); float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2); float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2); cuComplex mouse(LA,LB); cuComplex moux(LA,0.0); cuComplex mouy(0.0,LB); cuComplex q(fx,fy); /* cuComplex tik(sin(ticks/40.0f),0.0);*/ /* cuComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0)); cuComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024)); cuComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/ cuComplex fixon(.029348,.828934); cuComplex faxon(.029348,-.828934); cuComplex unity(1.0,0.0); cuComplex ai(0.0,1.0); cuComplex aon = expc(ai*moux); cuComplex uon= expc(mouy); cuComplex flurn(0.0,0.0); cuComplex accume(1.0,0.0); cuComplex eccume(0.0,0.0); cuComplex rhun(1.02871376821872462237195122725097462534904479,0.0); cuComplex cue = q; cuComplex lam(0.73736887807831963, -0.67549029426152396); cuComplex due(3.0,0.0); cuComplex tir(2.0,0.0); cuComplex selga(3.5,0.0); cuComplex vro(-1.0,0.0); cuComplex tle(1.0,0.0); cuComplex sle(4.0,0.0); cuComplex cherra(0.62348980185873359, 0.7818314824680298); cuComplex lerra = cherra*cherra; cuComplex ferra = lerra * cherra; cuComplex terra = ferra * cherra; cuComplex zerra = terra * cherra; cuComplex nerra = zerra * cherra; cuComplex vlarv(1/3.0,0.0); cuComplex sugna(0.70710678118654757, 0.70710678118654746); cuComplex regna(0.99966573338968745, 0.025853848581176047); cuComplex spa(sqrtf(2.0),0.0); cuComplex spb(sqrtf(3.0),0.0); cuComplex spc(sqrtf(4.0),0.0); cuComplex spd(sqrtf(5.0),0.0); cuComplex mrun(1/2.0,0.0); cuComplex gloon (4.0,0.0); cuComplex plenod(-.01,0.0); cuComplex nue = cue; cuComplex vue = cue; cuComplex rhuva(3.0,0.0); cuComplex rarva(3.0,0.0); cuComplex bor(-10.0,0.0); cuComplex nat(0.0,-10.0); cuComplex rhus(1.0,0.0); cuComplex D(0.739085133215160641655312087674,0.0); /* if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist);*/ // theta function varying on constant // cue =thess(cue,fixon*mouse); int v=1; int axa=-10; /*while((v<100)&&norg(cue)<2.0) { cue = cue*(cue-mouy)*(cue-moux) -cue * q; v++; }*/ // almost Klein's j-invariant //cue = (powc(powc(arago(flurn,q*aon),rarva)+ powc(the2(flurn,q),rarva) + 
powc(the4(flurn,q),rarva),rhuva))/powc(the4(flurn,q)*the3(flurn,q)*the2(flurn,q),rarva); for(v=0;v<20;v++) { cue =(cue - uon/(cue-conj(aon/conj(cue - uon/conj(cue)))))/(cue + aon/(cue+conj(uon/conj(cue + aon/conj(cue))))); cue =(cue - ai*uon/(cue-ai*conj(aon/conj(cue - uon/conj(cue)))))/(cue + ai* aon/(cue+ai*conj(uon/conj(cue + aon/conj(cue))))); accume = accume * (unity-aon*helva(cue)); } cue = accume; double tha; tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi)); d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2)); d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2)); d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2)); d_out[i].w = 255; } void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); distanceKernel<<<gridSize, blockSize>>>(d_out, w, h, pos); } /*for(v=1;v<5;v++) { cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); } cue = accume;*/ /*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q)); rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q)); cue = rhus+cue; cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/ /*for(v=0;v<60;v++){ cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon)); accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue)); } cue = accume;*/ /* One for (x+d)/cos(d) -cos(x)/d Tungilipa D = cos(D) cos(sqrt(x*D))/D -1 = 0.0 The other for cos(x)-x Eripgrunna */
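One detail worth noting about kernelLauncher above: the grid is rounded up with (w + TX - 1)/TX, so whenever w or h is not a multiple of 32 the launch creates threads whose (c, r) lies outside the image, and the only bounds check on d_out[i] in distanceKernel sits inside a commented-out block. A small host-side arithmetic sketch of the excess for the DIM = 2100 case (illustrative only):

#include <cstdio>

int main() {
  const int TX = 32, TY = 32, DIM = 2100;   // constants from the file above
  const int gridX = (DIM + TX - 1) / TX;    // 66 blocks in x
  const int threadsX = gridX * TX;          // 2112 threads per row of blocks
  std::printf("grid   : %d x %d blocks\n", gridX, (DIM + TY - 1) / TY);
  std::printf("excess : %d threads per dimension beyond column/row %d\n",
              threadsX - DIM, DIM - 1);
  return 0;
}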
63c5dbe2c9ed3c27c42bd4b106f3481ba14587b7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Hologram generating algorithms for CUDA Devices Copyright 2009, 2010, 2011, 2012 Martin Persson [email protected] Small edits by Lloyd Russell 2016 This file is part of GenerateHologramCUDA. GenerateHologramCUDA is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. GenerateHologramCUDA is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with GenerateHologramCUDA. If not, see <http://www.gnu.org/licenses/>. The function "GenerateHologram" contains two different algorithms for hologram generation. The last parameter in the function call selects which one to use: 0: Complex addition of "Lenses and Prisms", no optimization (3D) 1: Weighted Gerchberg-Saxton algorithm using Fresnel propagation (3D) 2: Weighted Gerchberg-Saxton algorithm using Fast Fourier Transforms (2D) (0) produces optimal holograms for 1 or 2 traps and is significantly faster. (0) is automatically selected if the number of spots is < 3. Fresnel propagation based algorithm (1) described in: Roberto Di Leonardo, Francesca Ianni, and Giancarlo Ruocco "Computer generation of optimal holograms for optical trap arrays" Opt. Express 15, 1913-1922 (2007) The original algorithm has been modified to allow variable spot amplitudes Naming convention for variables: The prefix indicates where data is located In host functions: h = host memory d = device memory c = constant memory In global functions: g = global memory s = shared memory c = constant memory no prefix = registers The suffix indicates the data type, no suffix usually indicates an integer Possible improvements: * Improve convergence of the GS algorithms for 2 spots. *done * Compensate spot intensities for distance from center of field. *done * Put all arguments for device functions and trap positions in constant memory. *done (Requires all functions to be moved into the same file or the use of some workaround found on nVidia forum) * Put pSLMstart and aLaser in texture memory (may not improve performance on Fermi devices) * Use "zero-copy" to transfer pSLM to host. 
* Rename functions and variables for consistency and readability * Allow variable spot phases for Lenses and Prisms */ //#define M_CUDA_DEBUG //activates a number of custom debug macros// float dt_milliseconds; hipEvent_t start, stop; //Includes #include <stdlib.h> #include <stdio.h> #include "stdint.h" #include <string.h> #include <math.h> #include <hipfft.h> #ifndef M_PI #define M_PI 3.14159265358979323846f #endif #define MAX_SPOTS 1000 //decrease this if your GPU keeps running out of memory, was 1024 #define BLOCK_SIZE 512 //should be a power of 2, was 512 #define SLM_SIZE 2048 #if ((SLM_SIZE==16)||(SLM_SIZE==32)||(SLM_SIZE==64)||(SLM_SIZE==128)||(SLM_SIZE==256)||(SLM_SIZE==512)||(SLM_SIZE==1024)||(SLM_SIZE==2048)) #define SLMPOW2 //Uses bitwize modulu operations if teh SLM size is a power of 2 #endif // forward declarations __global__ void ApplyCorrections(uint16_t *g_pSLM_uc, uint16_t *g_LUT, float *d_AberrationCorr_f, float *d_LUTPolCoeff_f); __global__ void LensesAndPrisms(uint16_t *g_SLMuc, uint16_t *g_LUT, float *d_AberrationCorr_f, float *d_LUTPolCoeff_f); __global__ void calculateIobtained(uint16_t *g_pSLM_uc, float *g_Iobtained); __global__ void PropagateToSLM_Fresnel(float *g_spotRe_f, float *g_spotIm_f, float *g_pSLM2pi, float *g_weights, int iteration, float *g_pSLMstart, float *g_amps, bool getpSLM65535, uint16_t *g_pSLM65535_uc, uint16_t *g_LUT, float *g_AberrationCorr_f, float *g_LUTPolCoeff_f); __global__ void PropagateToSLMDC_Fresnel(float *g_pSpot, float *g_wSpot, hipfftComplex *g_cSLM_cc, float *g_pSLM_f, int iteration, float *g_pSLMstart, bool getpSLM65535, uint16_t *g_pSLM65535_uc); __global__ void setActiveRegionToZero(hipfftComplex *g_Farfield); __global__ void PropagateToSpotPositions_Fresnel(float *g_pSLM2pi, float *g_spotRe_f, float *g_spotIm_f); __global__ void PropagateToSpotPositionsDC_Fresnel(float *g_pSLM_f, float *g_obtainedPhase, float *g_weights, float *g_Iobtained, int iteration); __global__ void ReplaceAmpsSLM_FFT(float *g_aLaser, hipfftComplex *g_cAmp, float *g_pSLMstart, bool getpSLM65535, uint16_t *g_pSLM65535_uc, uint16_t *g_LUT, float *g_AberrationCorr_f, float *g_LUTPolCoeff_f); __global__ void ReplaceAmpsSpots_FFT(hipfftComplex *g_cSpotAmp_cc, hipfftComplex *g_cSpotAmpNew_cc, int iteration, float *g_Iobtained, float *g_weight, bool last_iteration); __global__ void ReplaceAmpsSpotsDC_FFT(hipfftComplex *g_cSpotAmp_cc, hipfftComplex *g_cSpotAmpNew_cc, int iteration, float *g_Iobtained, float *g_weight, bool last_iteration); __global__ void XYtoIndex(); __global__ void f2uc(uint16_t *uc, float *f, int N_pixels, uint16_t *g_LUT, int use_linLUT, int data_w); __global__ void uc2f(float *f, uint16_t *uc, int N); __global__ void p2c(hipfftComplex *g_c, float *g_p, int M); inline int computeAndCopySpotData(float *h_I, float *x, float *y, float *z, int N_spots, int method); // Custom debug macros #define M_CHECK_ERROR() mCheckError(__LINE__, __FILE__) #define M_SAFE_CALL(errcode) mSafeCall(errcode, __LINE__, __FILE__) #define M_CUFFT_SAFE_CALL(cuffterror) mCufftSafeCall(cuffterror, __LINE__, __FILE__) #define M_DISPLAY_DATA_F(data, length) mDisplayDataF(data, length, __LINE__) #define M_DISPLAY_DATA_UC(data, length) mDisplayDataUC(data, length, __LINE__) #define M_DISPLAY_DATA_CC(data, length) mDisplayDataCC(data, length, __LINE__) #define M_DISPLAY_DATA_I(data, length) mDisplayDataI(data, length, __LINE__) inline void mSafeCall(hipError_t status, int line, const char *file); inline void mCufftSafeCall(hipfftResult_t status, int line, const char *file); 
inline void mCheckError(int line, const char *file); inline void mDisplayDataF(float *d_data, int length, int line); inline void mDisplayDataCC(hipfftComplex *d_data, int length, int line); inline void mDisplayDataUC(uint16_t *d_data, int length, int line); inline void mDisplayDataI(int *d_data, int length, int line); //Global declaration float *d_x, *d_y, *d_z, *d_I; //trap coordinates and intensity in GPU memory float *d_pSLM_f; //the optimized pSpot pattern, float [-pi, pi] float *d_weights, *d_Iobtained, *d_desiredAmp; //used h_weights and calculated amplitudes for each spot and each iteration float *d_pSLMstart_f; //Initial pSpot pattern [-pi, pi] float *d_spotRe_f, *d_spotIm_f; float *d_AberrationCorr_f = NULL; float *d_LUTPolCoeff_f = NULL; float SLMsizef = (float)SLM_SIZE; int N_PolLUTCoeff = 0; int n_blocks_Phi, memsize_SLM_f, memsize_SLMuc, memsize_spotsf, data_w, N_pixels, N_iterations_last; float h_desiredAmp[MAX_SPOTS]; int h_spotIndex[MAX_SPOTS]; uint16_t *d_pSLM_uc; //The optimized pSpot pattern, uint16_t, the one sent to the SLM [0, 65535] uint16_t *h_LUT_uc; uint16_t *d_LUT_uc = NULL; int maxThreads_device; bool ApplyLUT_b = false, EnableSLM_b = false, UseAberrationCorr_b = false, UsePolLUT_b = false, saveI_b = false, useRPC_b = false, useDC_b = false; float alphaRPC_f = 10; char CUDAmessage[100]; hipError_t status; float *d_aLaserFFT, *d_LUT_coeff; hipfftHandle plan; hipfftComplex *d_FFTo_cc, *d_FFTd_cc, *d_SLM_cc; int *d_spot_index, memsize_SLMcc; int borderWidthDC_i; float *d_obtainedPhase; //Constant memory declarations __device__ __constant__ int c_data_w[1]; __device__ __constant__ float c_data_w_f[1]; __device__ __constant__ int c_half_w[1]; __device__ __constant__ float c_half_w_f[1]; __device__ __constant__ int c_N_pixels[1]; __device__ __constant__ float c_N_pixels_f[1]; __device__ __constant__ float c_SLMpitch_f[1]; __device__ __constant__ bool c_useDC_b[1]; __device__ __constant__ int c_DCborderWidth[1]; __device__ __constant__ bool c_useRPC_b[1]; __device__ __constant__ float c_alphaRPC_f[1]; __device__ __constant__ bool c_saveI_b[1]; __device__ __constant__ int c_log2data_w[1]; __device__ __constant__ float c_x[MAX_SPOTS]; __device__ __constant__ float c_y[MAX_SPOTS]; __device__ __constant__ float c_z[MAX_SPOTS]; __device__ __constant__ float c_desiredAmp[MAX_SPOTS]; __device__ __constant__ int c_spotIndex[MAX_SPOTS]; __device__ __constant__ int c_N_spots[1]; //Public dll functions //Generate a hologram extern "C" __declspec(dllexport) int GenerateHologram(float *h_checkData, uint16_t *h_pSLM_uc, float *x_spots, float *y_spots, float *z_spots, float *I_spots, int N_spots, int N_iterations, float *h_Iobtained, int method)//, float* gpuTime) { //*gpuTime = 0; //float deltaTime = 0; if (N_spots > MAX_SPOTS) N_spots = MAX_SPOTS; else if (N_spots < 1) method = 100; else if (N_spots < 3) method = 0; memsize_spotsf = N_spots*sizeof(float); method = computeAndCopySpotData(I_spots, x_spots, y_spots, z_spots, N_spots, method); //sets method to -1 if N_spots == 0. 
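// Method codes handled by the switch below, as documented in the header
// comment of this file:
//   0 - direct "Lenses and Prisms" superposition, no optimization (3D);
//       selected automatically above when N_spots < 3.
//   1 - weighted Gerchberg-Saxton using Fresnel propagation (3D).
//   2 - weighted Gerchberg-Saxton using fast Fourier transforms (2D).
// computeAndCopySpotData() has already set method to -1 if N_spots == 0.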
switch (method) { case 0: ////// //Generate the hologram using "Lenses and Prisms" ////// hipLaunchKernelGGL(( LensesAndPrisms), dim3(n_blocks_Phi), dim3(BLOCK_SIZE) , 0, 0, d_pSLM_uc, d_LUT_uc, d_AberrationCorr_f, d_LUTPolCoeff_f); M_CHECK_ERROR(); hipDeviceSynchronize(); M_CHECK_ERROR(); if (saveI_b) { hipLaunchKernelGGL(( calculateIobtained), dim3(N_spots), dim3(SLM_SIZE), 0, 0, d_pSLM_uc, d_Iobtained); M_CHECK_ERROR(); hipDeviceSynchronize(); M_SAFE_CALL(hipMemcpy(h_Iobtained, d_Iobtained, N_spots*sizeof(float), hipMemcpyDeviceToHost)); } M_SAFE_CALL(hipMemcpy(h_pSLM_uc, d_pSLM_uc, memsize_SLMuc, hipMemcpyDeviceToHost)); break; case 1: //Generate holgram using fresnel propagation //Uncomment this to start with pre-calculated hologram: //hipMemcpy(d_pSLM_uc, h_pSLM_uc, memsize_SLMuc, hipMemcpyHostToDevice); //hipDeviceSynchronize(); //uc2f<<< n_blocks_Phi, BLOCK_SIZE >>>(d_pSLM_f, d_pSLM_uc, N_pixels); /*hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipEventSynchronize(start);*/ for (int l=0; l<N_iterations; l++) { //Propagate to the spot positions if (useDC_b) { M_CUFFT_SAFE_CALL(hipfftExecC2C(plan, d_SLM_cc, d_FFTo_cc, HIPFFT_FORWARD)); M_CHECK_ERROR(); hipLaunchKernelGGL(( PropagateToSpotPositionsDC_Fresnel), dim3(N_spots), dim3(SLM_SIZE), 0, 0, d_pSLM_f, d_obtainedPhase, d_weights, d_Iobtained, l); //this function is very slow M_CHECK_ERROR(); hipLaunchKernelGGL(( setActiveRegionToZero), dim3(SLM_SIZE), dim3(SLM_SIZE) , 0, 0, d_FFTo_cc); } else hipLaunchKernelGGL(( PropagateToSpotPositions_Fresnel), dim3(N_spots), dim3(SLM_SIZE), 0, 0, d_pSLM_f, d_spotRe_f, d_spotIm_f); M_CHECK_ERROR(); hipDeviceSynchronize(); //Propagate to the SLM plane if (useDC_b) { M_CUFFT_SAFE_CALL(hipfftExecC2C(plan, d_FFTo_cc, d_SLM_cc, HIPFFT_BACKWARD)); hipDeviceSynchronize(); hipLaunchKernelGGL(( PropagateToSLMDC_Fresnel), dim3(n_blocks_Phi), dim3(BLOCK_SIZE) , 0, 0, d_obtainedPhase, d_weights, d_SLM_cc, d_pSLM_f, l, d_pSLMstart_f, (l==(N_iterations-1)), d_pSLM_uc); } else { hipLaunchKernelGGL(( PropagateToSLM_Fresnel), dim3(n_blocks_Phi), dim3(BLOCK_SIZE) , 0, 0, d_spotRe_f, d_spotIm_f, d_pSLM_f, d_weights, l, d_pSLMstart_f, d_Iobtained, (l==(N_iterations-1)), d_pSLM_uc, d_LUT_uc, d_AberrationCorr_f, d_LUTPolCoeff_f); } M_CHECK_ERROR(); hipDeviceSynchronize(); } /*hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&deltaTime, start, stop); *gpuTime = deltaTime; */ if (saveI_b) M_SAFE_CALL(hipMemcpy(h_Iobtained, d_Iobtained, N_spots*(N_iterations)*sizeof(float), hipMemcpyDeviceToHost)); else M_SAFE_CALL(hipMemcpy(h_Iobtained, d_weights, N_spots*(N_iterations)*sizeof(float), hipMemcpyDeviceToHost)); M_SAFE_CALL(hipMemcpy(h_pSLM_uc, d_pSLM_uc, memsize_SLMuc, hipMemcpyDeviceToHost)); break; case 2: //generate hologram using fast fourier transforms //Uncomment this to start with pre-calculated hologram: //hipMemcpy(d_pSLM_uc, h_pSLM_uc, memsize_SLMuc, hipMemcpyHostToDevice); //hipDeviceSynchronize(); //p_uc2c_cc_shift<<< n_blocks_Phi, BLOCK_SIZE >>>(d_SLM_cc, d_pSLM_uc, N_pixels, data_w); //M_DISPLAY_DATA_CC(d_SLM_cc, 100); M_SAFE_CALL(hipMemcpy(d_desiredAmp, h_desiredAmp, memsize_spotsf, hipMemcpyHostToDevice)); M_SAFE_CALL(hipMemset(d_FFTd_cc, 0, memsize_SLMcc)); M_CHECK_ERROR(); hipDeviceSynchronize(); for (int l=0; l<N_iterations; l++) { // Transform to trapping plane M_CUFFT_SAFE_CALL(hipfftExecC2C(plan, d_SLM_cc, d_FFTo_cc, HIPFFT_FORWARD)); hipDeviceSynchronize(); // Copy phases for spot indices in d_FFTo_cc to d_FFTd_cc if (useDC_b) hipLaunchKernelGGL(( 
ReplaceAmpsSpotsDC_FFT) , dim3(n_blocks_Phi), dim3(BLOCK_SIZE) , 0, 0, d_FFTo_cc, d_FFTd_cc, l, d_Iobtained, d_weights, (l==(N_iterations-1))); else hipLaunchKernelGGL(( ReplaceAmpsSpots_FFT) , dim3(1), dim3(N_spots) , 0, 0, d_FFTo_cc, d_FFTd_cc, l, d_Iobtained, d_weights, (l==(N_iterations-1))); M_CHECK_ERROR(); hipDeviceSynchronize(); //Transform back to SLM plane M_CUFFT_SAFE_CALL(hipfftExecC2C(plan, d_FFTd_cc, d_SLM_cc, HIPFFT_BACKWARD)); hipDeviceSynchronize(); //M_DISPLAY_DATA_CC(d_SLM_cc, 100); // Set amplitudes in d_SLM to the laser amplitude profile hipLaunchKernelGGL(( ReplaceAmpsSLM_FFT) , dim3(n_blocks_Phi), dim3(BLOCK_SIZE) , 0, 0, d_aLaserFFT, d_SLM_cc, d_pSLMstart_f, (l==(N_iterations-1)), d_pSLM_uc, d_LUT_uc, d_AberrationCorr_f, d_LUTPolCoeff_f); M_CHECK_ERROR(); //M_DISPLAY_DATA_CC(d_SLM_cc, 100); hipDeviceSynchronize(); } if (saveI_b) M_SAFE_CALL(hipMemcpy(h_Iobtained, d_Iobtained, N_spots*(N_iterations)*sizeof(float), hipMemcpyDeviceToHost)); else M_SAFE_CALL(hipMemcpy(h_Iobtained, d_weights, N_spots*(N_iterations)*sizeof(float), hipMemcpyDeviceToHost)); M_SAFE_CALL(hipMemcpy(h_pSLM_uc, d_pSLM_uc, memsize_SLMuc, hipMemcpyDeviceToHost)); break; default: break; } //Handle CUDA errors status = hipGetLastError(); return status; } //Allocate GPU memory extern "C" __declspec(dllexport) int startCUDA(float *h_pSLMstart, int deviceId) { //Make sure GPU with desired deviceId exists, set deviceId to 0 if not int deviceCount=0; if (hipGetDeviceCount(&deviceCount)!=0) if (deviceId>=deviceCount) { deviceId=0; } M_SAFE_CALL(hipSetDevice(deviceId)); hipDeviceProp_t deviceProp; M_SAFE_CALL(hipGetDeviceProperties(&deviceProp, deviceId)); maxThreads_device = deviceProp.maxThreadsPerBlock; borderWidthDC_i = 0; int MaxIterations = 1000; data_w = SLM_SIZE; hipMemcpyToSymbol(c_data_w, &data_w, sizeof(int), 0, hipMemcpyHostToDevice); float data_w_f = (float)data_w; hipMemcpyToSymbol(c_data_w_f, &data_w_f, sizeof(float), 0, hipMemcpyHostToDevice); int half_w = (int)(data_w/2); hipMemcpyToSymbol(c_half_w, &half_w, sizeof(int), 0, hipMemcpyHostToDevice); float half_w_f = (float)data_w/2.0f; hipMemcpyToSymbol(c_half_w_f, &half_w_f, sizeof(float), 0, hipMemcpyHostToDevice); N_pixels = data_w * data_w; hipMemcpyToSymbol(c_N_pixels, &N_pixels, sizeof(int), 0, hipMemcpyHostToDevice); float N_pixels_f = (float)N_pixels; hipMemcpyToSymbol(c_N_pixels_f, &N_pixels_f, sizeof(float), 0, hipMemcpyHostToDevice); int logN = (int)(log2(data_w_f)); hipMemcpyToSymbol(c_log2data_w, &logN, sizeof(int), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(c_useRPC_b, &useRPC_b, sizeof(bool), 0, hipMemcpyHostToDevice); float SLMpitch_f = 1.0f/data_w_f; hipMemcpyToSymbol(c_SLMpitch_f, &SLMpitch_f, sizeof(float), 0, hipMemcpyHostToDevice); N_iterations_last = 10; memsize_spotsf = MAX_SPOTS * sizeof(float); memsize_SLM_f = N_pixels * sizeof(float); memsize_SLMuc = N_pixels * sizeof(uint16_t); memsize_SLMcc = N_pixels * sizeof(hipfftComplex); n_blocks_Phi = (N_pixels/BLOCK_SIZE + (N_pixels%BLOCK_SIZE == 0 ? 
0:1)); //memory allocations for all methods M_SAFE_CALL(hipMalloc((void**)&d_x, memsize_spotsf )); M_SAFE_CALL(hipMalloc((void**)&d_y, memsize_spotsf )); M_SAFE_CALL(hipMalloc((void**)&d_z, memsize_spotsf )); M_SAFE_CALL(hipMalloc((void**)&d_I, memsize_spotsf )); M_SAFE_CALL(hipMalloc((void**)&d_desiredAmp, memsize_spotsf )); M_SAFE_CALL(hipMalloc((void**)&d_weights, MAX_SPOTS*(MaxIterations+1)*sizeof(float))); M_SAFE_CALL(hipMalloc((void**)&d_Iobtained, MAX_SPOTS*MaxIterations*sizeof(float))); M_SAFE_CALL(hipMalloc((void**)&d_obtainedPhase, memsize_spotsf )); M_SAFE_CALL(hipMalloc((void**)&d_spotRe_f, memsize_spotsf )); M_SAFE_CALL(hipMalloc((void**)&d_spotIm_f, memsize_spotsf )); int data_w_pow2 = pow(2, ceil(log((float)data_w)/log(2.0f))); M_SAFE_CALL(hipMalloc((void**)&d_pSLM_f, data_w_pow2*data_w_pow2*sizeof(float)));//the size of d_pSLM_f must be a power of 2 for the summation algorithm to work M_SAFE_CALL(hipMemset(d_pSLM_f, 0, data_w_pow2*data_w_pow2*sizeof(float))); M_SAFE_CALL(hipMalloc((void**)&d_pSLMstart_f, memsize_SLM_f)); M_SAFE_CALL(hipMalloc((void**)&d_pSLM_uc, memsize_SLMuc)); M_SAFE_CALL(hipMemset(d_pSLMstart_f, 0, N_pixels*sizeof(float))); M_SAFE_CALL(hipMemcpy(d_pSLM_f, h_pSLMstart, N_pixels*sizeof(float), hipMemcpyHostToDevice)); //memory allocations etc. for all FFT based Gerchberg-Saxton M_SAFE_CALL(hipMalloc((void**)&d_spot_index, MAX_SPOTS * sizeof(int))); M_SAFE_CALL(hipMalloc((void**)&d_FFTd_cc, memsize_SLMcc)); M_SAFE_CALL(hipMalloc((void**)&d_FFTo_cc, memsize_SLMcc)); M_SAFE_CALL(hipMalloc((void**)&d_SLM_cc, memsize_SLMcc)); M_SAFE_CALL(hipDeviceSynchronize()); hipLaunchKernelGGL(( p2c) , dim3(n_blocks_Phi), dim3(BLOCK_SIZE) , 0, 0, d_SLM_cc, d_pSLM_f, N_pixels); M_CHECK_ERROR(); hipDeviceSynchronize(); M_CUFFT_SAFE_CALL(hipfftPlan2d(&plan, data_w, data_w, HIPFFT_C2C)); float *h_aLaserFFT = (float *)malloc(memsize_SLM_f); status = hipGetLastError(); return status; } //Free GPU memory and shut down SLM extern "C" __declspec(dllexport) int stopCUDA() { M_SAFE_CALL(hipFree(d_x)); M_SAFE_CALL(hipFree(d_y)); M_SAFE_CALL(hipFree(d_z)); M_SAFE_CALL(hipFree(d_I)); M_SAFE_CALL(hipFree(d_weights)); M_SAFE_CALL(hipFree(d_Iobtained)); M_SAFE_CALL(hipFree(d_pSLM_f)); M_SAFE_CALL(hipFree(d_pSLMstart_f)); M_SAFE_CALL(hipFree(d_pSLM_uc)); M_SAFE_CALL(hipFree(d_FFTd_cc)); M_SAFE_CALL(hipFree(d_FFTo_cc)); M_SAFE_CALL(hipFree(d_SLM_cc)); M_CUFFT_SAFE_CALL(hipfftDestroy(plan)); hipDeviceReset(); status = hipGetLastError(); return status; } //Device functions __device__ float uc2phase(float uc) { return (float)uc*2.0f*M_PI/65536.0f - M_PI; } __device__ uint16_t phase2uc(float phase2pi) { return (uint16_t)floor((phase2pi + M_PI)*65536.0f / (2.0f * M_PI)); } __device__ int phase2int32(float phase2pi) { return (int)floor((phase2pi + M_PI)*65536.0f / (2.0f * M_PI)); } __device__ float ApplyAberrationCorrection(float pSpot, float correction) { pSpot = pSpot - correction; //apply correction return (pSpot - (2.0f*M_PI) * floor((pSpot+M_PI) / (2.0f*M_PI))); //apply mod([-pi, pi], pSpot) } __device__ int getXint(int index) { #ifdef SLMPOW2 int X_int = index&(c_data_w[0]-1); #else float X_int= index%c_data_w[0]; #endif return X_int; } __device__ int getYint(int index, int X_int) { #ifdef SLMPOW2 int Y_int = (index-X_int)>>c_log2data_w[0]; #else int Y_int = (float)(floor((float)index/c_data_w_f[0])); #endif return Y_int; } __device__ int fftshift(int idx, int X, int Y) { if (X < c_half_w[0]) { if (Y < c_half_w[0]) { return idx + (c_data_w[0] * c_half_w[0]) + c_half_w[0]; } else { return idx 
- (c_data_w[0] * c_half_w[0]) + c_half_w[0]; } } else { if (Y < c_half_w[0]) { return idx + (c_data_w[0] * c_half_w[0]) - c_half_w[0]; } else { return idx - (c_data_w[0] * c_half_w[0]) - c_half_w[0]; } } } __device__ void warpReduceC(volatile float *s_Vre, volatile float *s_Vim, int tid) { s_Vre[tid] += s_Vre[tid + 32]; s_Vim[tid] += s_Vim[tid + 32]; s_Vre[tid] += s_Vre[tid + 16]; s_Vim[tid] += s_Vim[tid + 16]; s_Vre[tid] += s_Vre[tid + 8]; s_Vim[tid] += s_Vim[tid + 8]; s_Vre[tid] += s_Vre[tid + 4]; s_Vim[tid] += s_Vim[tid + 4]; s_Vre[tid] += s_Vre[tid + 2]; s_Vim[tid] += s_Vim[tid + 2]; s_Vre[tid] += s_Vre[tid + 1]; s_Vim[tid] += s_Vim[tid + 1]; } inline int computeAndCopySpotData(float *h_I, float *x, float *y, float *z, int N_spots, int method) { //float Isum = 0.0f; //for (int i = 0; i<N_spots; i++) // Isum += h_I[i]; for (int j = 0; j<N_spots; j++) { float sincx_rec = (x[j]==0)? 1.0f:((M_PI*x[j]/SLMsizef)/sinf(M_PI*x[j]/SLMsizef)); float sincy_rec = (y[j]==0)? 1.0f:((M_PI*y[j]/SLMsizef)/sinf(M_PI*y[j]/SLMsizef)); h_desiredAmp[j] = (h_I[j] <= 0.0f) ? 1.0f:(sincx_rec * sincy_rec * sqrtf(h_I[j]/100)*SLMsizef*SLMsizef); if (method == 2) h_spotIndex[j] = ((int)(x[j])&(data_w-1)) + ((int)(y[j])&(data_w-1))* data_w; } hipMemcpyToSymbol(c_x, x, N_spots*sizeof(float), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(c_y, y, N_spots*sizeof(float), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(c_z, z, N_spots*sizeof(float), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(c_desiredAmp, h_desiredAmp, N_spots*sizeof(float), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(c_N_spots, &N_spots, sizeof(int), 0, hipMemcpyHostToDevice); if (method == 2) hipMemcpyToSymbol(c_spotIndex, h_spotIndex, N_spots*sizeof(int), 0, hipMemcpyHostToDevice); if (N_spots == 0) method = -1; return method; } //Apply corrections to precalculated hologram __global__ void ApplyCorrections(uint16_t *g_pSLM_uc, uint16_t *g_LUT, float *g_AberrationCorr_f, float *g_LUTPolCoeff_f) { int idx = blockIdx.x * blockDim.x + threadIdx.x; float pSLM2pi_f = uc2phase(g_pSLM_uc[idx]); g_pSLM_uc[idx] = phase2uc(pSLM2pi_f); } //Calculate hologram using "Lenses and Prisms" __global__ void LensesAndPrisms(uint16_t *g_SLMuc, uint16_t *g_LUT, float *d_AberrationCorr_f, float *d_LUTPolCoeff_f) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < c_N_pixels[0]) { //get pixel coordinates int X_int = getXint(idx); int Y_int = getYint(idx, X_int); float X = c_SLMpitch_f[0]*(X_int - c_half_w_f[0]); float Y = c_SLMpitch_f[0]*(Y_int - c_half_w_f[0]); float phase2pi; float SLMre = 0.0f; float SLMim = 0.0f; for (int ii=0; ii<c_N_spots[0]; ++ii) { //add variable phases to function call phase2pi = M_PI * c_z[ii] * (X*X + Y*Y) + 2.0f * M_PI * (X * (c_x[ii]) + Y * (c_y[ii]) ); SLMre = SLMre + c_desiredAmp[ii] * cosf(phase2pi); SLMim = SLMim + c_desiredAmp[ii] * sinf(phase2pi); } phase2pi = atan2f(SLMim, SLMre); // [-pi,pi] g_SLMuc[idx] = phase2uc(phase2pi); } } __global__ void calculateIobtained(uint16_t *g_pSLM_uc, float *g_Iobtained) { int blockSize = c_data_w[0]; int spot_number = blockIdx.x; int tid = threadIdx.x; int i = tid; __shared__ float s_Vre[SLM_SIZE]; __shared__ float s_Vim[SLM_SIZE]; s_Vre[tid] = 0.0f; s_Vim[tid] = 0.0f; float X = c_SLMpitch_f[0] * ((float)tid - c_half_w_f[0]); float Y = - c_SLMpitch_f[0] * c_half_w_f[0]; float pSLM_1; float p; while (i < c_N_pixels[0]) { pSLM_1 = 2.0f*M_PI*(float)g_pSLM_uc[i]/65535.0f - M_PI; p = pSLM_1 - M_PI * (c_z[spot_number] * (X*X + Y*Y) + 2 * (X * c_x[spot_number] + Y * c_y[spot_number])); s_Vre[tid] += 
cosf(p); s_Vim[tid] += sinf(p); i += blockSize; Y += c_SLMpitch_f[0]; } /*__syncthreads(); if (tid < 512) { s_Vre[tid] += s_Vre[tid + 512]; s_Vim[tid] += s_Vim[tid + 512]; } */ __syncthreads(); if ((tid < 256)&&(SLM_SIZE>256)) { s_Vre[tid] += s_Vre[tid + 256]; s_Vim[tid] += s_Vim[tid + 256]; } __syncthreads(); if (tid < 128) { s_Vre[tid] += s_Vre[tid + 128]; s_Vim[tid] += s_Vim[tid + 128]; } __syncthreads(); if (tid < 64) { s_Vre[tid] += s_Vre[tid + 64]; s_Vim[tid] += s_Vim[tid + 64]; } __syncthreads(); if (tid < 32) warpReduceC(s_Vre, s_Vim, tid); if (tid == 0) { float spotRe_f = s_Vre[0] / c_N_pixels_f[0]; //512! float spotIm_f = s_Vim[0] / c_N_pixels_f[0]; float amp = hypotf(spotRe_f, spotIm_f); g_Iobtained[spot_number] = amp*amp; } } __global__ void calculateIandPhase(uint16_t *g_pSLM_uc, float *g_Iobtained, float *g_Pobtained) { int blockSize = c_data_w[0]; int spot_number = blockIdx.x; int tid = threadIdx.x; int i = tid; __shared__ float s_Vre[SLM_SIZE]; __shared__ float s_Vim[SLM_SIZE]; s_Vre[tid] = 0.0f; s_Vim[tid] = 0.0f; float X = c_SLMpitch_f[0] * ((float)tid - c_half_w_f[0]); float Y = - c_SLMpitch_f[0] * c_half_w_f[0]; float pSLM_1; float p; while (i < c_N_pixels[0]) { pSLM_1 = 2.0f*M_PI*(float)g_pSLM_uc[i]/65535.0f - M_PI; p = pSLM_1 - M_PI * (c_z[spot_number] * (X*X + Y*Y) + 2 * (X * c_x[spot_number] + Y * c_y[spot_number])); s_Vre[tid] += cosf(p+2*M_PI*c_z[spot_number]); s_Vim[tid] += sinf(p+2*M_PI*c_z[spot_number]); i += blockSize; Y += c_SLMpitch_f[0]; } /*__syncthreads(); if (tid < 512) { s_Vre[tid] += s_Vre[tid + 512]; s_Vim[tid] += s_Vim[tid + 512]; } */ __syncthreads(); if ((tid < 256)&&(SLM_SIZE>256)) { s_Vre[tid] += s_Vre[tid + 256]; s_Vim[tid] += s_Vim[tid + 256]; } __syncthreads(); if (tid < 128) { s_Vre[tid] += s_Vre[tid + 128]; s_Vim[tid] += s_Vim[tid + 128]; } __syncthreads(); if (tid < 64) { s_Vre[tid] += s_Vre[tid + 64]; s_Vim[tid] += s_Vim[tid + 64]; } __syncthreads(); if (tid < 32) warpReduceC(s_Vre, s_Vim, tid); if (tid == 0) { float spotRe_f = s_Vre[0] / c_N_pixels_f[0]; //512! 
float spotIm_f = s_Vim[0] / c_N_pixels_f[0]; float amp = hypotf(spotRe_f, spotIm_f); g_Pobtained[spot_number] = atan2f(spotIm_f , spotRe_f); g_Iobtained[spot_number] = amp*amp; } } //Functions for GS with Fresnel propagation //Propagate from the SLM to the spot positions using Fresnel summation //works only for blocksize = SLMsize __global__ void PropagateToSpotPositions_Fresnel(float *g_pSLM2pi, float *g_spotRe_f, float *g_spotIm_f) { int spot_number = blockIdx.x; int tid = threadIdx.x; int i = tid; __shared__ float s_Vre[SLM_SIZE]; __shared__ float s_Vim[SLM_SIZE]; s_Vre[tid] = 0.0f; s_Vim[tid] = 0.0f; int blockSize = blockDim.x; float X = c_SLMpitch_f[0] * ((float)tid - c_half_w_f[0]); float Y = - c_SLMpitch_f[0] * c_half_w_f[0]; float p; while (i < c_N_pixels[0]) { p = g_pSLM2pi[i] - M_PI * (c_z[spot_number] * (X*X + Y*Y) + 2.0f * (X * c_x[spot_number] + Y * c_y[spot_number])); s_Vre[tid] += cosf(p); s_Vim[tid] += sinf(p); i += blockSize; Y += c_SLMpitch_f[0]; } /*__syncthreads(); if (tid < 512) { s_Vre[tid] += s_Vre[tid + 512]; s_Vim[tid] += s_Vim[tid + 512]; } */ __syncthreads(); if ((tid < 256)&&(SLM_SIZE>256)) { s_Vre[tid] += s_Vre[tid + 256]; s_Vim[tid] += s_Vim[tid + 256]; } __syncthreads(); if (tid < 128) { s_Vre[tid] += s_Vre[tid + 128]; s_Vim[tid] += s_Vim[tid + 128]; } __syncthreads(); if (tid < 64) { s_Vre[tid] += s_Vre[tid + 64]; s_Vim[tid] += s_Vim[tid + 64]; } __syncthreads(); if (tid < 32) warpReduceC(s_Vre, s_Vim, tid); if (tid == 0) { g_spotRe_f[spot_number] = s_Vre[0];// / c_N_pixels_f[0]; g_spotIm_f[spot_number] = s_Vim[0];// / c_N_pixels_f[0]; } } //Propagate from the SLM to the spot positions using Fresnel summation //works only for blocksize = SLMsize __global__ void PropagateToSpotPositionsDC_Fresnel(float *g_pSLM_f, float *g_obtainedPhase, float *g_weights, float *obtainedI, int iteration) { int spot_number = blockIdx.x; int tid = threadIdx.x; int i = tid; __shared__ float s_Vre[SLM_SIZE]; __shared__ float s_Vim[SLM_SIZE]; float X, Y; float p; s_Vre[tid] = 0.0f; s_Vim[tid] = 0.0f; int X_int = getXint(i); X = c_SLMpitch_f[0]*(X_int - c_half_w_f[0]); Y = -0.5f; while (i < c_N_pixels[0]) { p = g_pSLM_f[i] - M_PI * (c_z[spot_number] * (X*X + Y*Y) + 2.0f * (X * c_x[spot_number] + Y * c_y[spot_number])); s_Vre[tid] += cosf(p); s_Vim[tid] += sinf(p); Y += c_SLMpitch_f[0]; i += SLM_SIZE; } __syncthreads(); if ((tid < 256)&&(SLM_SIZE>256)) { s_Vre[tid] += s_Vre[tid + 256]; s_Vim[tid] += s_Vim[tid + 256]; } __syncthreads(); if (tid < 128) { s_Vre[tid] += s_Vre[tid + 128]; s_Vim[tid] += s_Vim[tid + 128]; } __syncthreads(); if (tid < 64) { s_Vre[tid] += s_Vre[tid + 64]; s_Vim[tid] += s_Vim[tid + 64]; } __syncthreads(); if (tid < 32) warpReduceC(s_Vre, s_Vim, tid); if (tid == 0) { g_obtainedPhase[spot_number] = atan2f(s_Vim[0], s_Vre[0]); float obtainedAmp = hypotf(s_Vre[0], s_Vim[0]); float desiredAmp = c_desiredAmp[spot_number]; if (iteration != 0) { g_weights[spot_number + c_N_spots[0]*iteration] = g_weights[spot_number + c_N_spots[0]*(iteration-1)] * (desiredAmp / obtainedAmp); } else { //obtainedAmp = (obtainedAmp<0.5f) ? 
0.5f : obtainedAmp; g_weights[spot_number] = desiredAmp/c_N_pixels_f[0]; } if (c_saveI_b[0]) obtainedI[spot_number + c_N_spots[0]*iteration] = obtainedAmp*obtainedAmp/(desiredAmp*desiredAmp);//(c_N_pixels_f[0]*c_N_pixels_f[0]); } } //Obtain phases in SLM plane __global__ void PropagateToSLM_Fresnel(float *g_spotRe_f, float *g_spotIm_f, float *g_pSLM2pi, float *g_weights, int iteration, float *g_pSLMstart, float *g_Iobtained, bool getpSLM65535, uint16_t *g_pSLM65535_uc, uint16_t *g_LUT, float *g_AberrationCorr_f, float *g_LUTPolCoeff_f) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int tid = threadIdx.x; __shared__ float s_aSpot[MAX_SPOTS], s_aSpotsMean, s_weight[MAX_SPOTS], s_pSpot[MAX_SPOTS]; float reSLM = 0.0f, imSLM = 0.0f, pSLM2pi_f = 0.0f; if (idx<c_N_pixels[0]) { if (tid<c_N_spots[0]) { float spotRe_f = g_spotRe_f[tid]; float spotIm_f = g_spotIm_f[tid]; s_pSpot[tid] = atan2f(spotIm_f, spotRe_f); s_aSpot[tid] = hypotf(spotRe_f, spotIm_f)/c_desiredAmp[tid]; if (iteration != 0) s_weight[tid] = g_weights[tid + iteration*c_N_spots[0]]; else { s_aSpot[tid] = (s_aSpot[tid]<0.5f) ? 0.5f : s_aSpot[tid]; s_weight[tid] = c_desiredAmp[tid]; } } __syncthreads(); //compute weights if (tid==0) { float s_aSpot_sum = 0.0f; for (int jj=0; jj<c_N_spots[0];jj++) { s_aSpot_sum += s_aSpot[jj]; } s_aSpotsMean = s_aSpot_sum / (float)c_N_spots[0]; } __syncthreads(); if (tid<c_N_spots[0]) { s_weight[tid] = s_weight[tid] * s_aSpotsMean / s_aSpot[tid]; if (!getpSLM65535) //Copy weights to use as initial value next run g_weights[tid + c_N_spots[0]*(iteration+1)] = s_weight[tid]; //else // g_weights[tid] = s_weight[tid]; //Transferring weights to next run may give diverging weights if (c_saveI_b[0]) g_Iobtained[tid + c_N_spots[0]*iteration] = s_aSpot[tid]*s_aSpot[tid]; //may be excluded, used for monitoring only } __syncthreads(); //get pixel coordinates int X_int = getXint(idx); int Y_int = getYint(idx, X_int); float X = c_SLMpitch_f[0]*(X_int - c_half_w_f[0]); float Y = c_SLMpitch_f[0]*(Y_int - c_half_w_f[0]); //compute SLM pSpot by summing contribution from all spots for (int k=0; k<c_N_spots[0]; k++) { float delta = M_PI * c_z[k] * (X*X + Y*Y) + 2.0f * M_PI * (X * c_x[k] + Y * c_y[k]); reSLM += s_weight[k] * cosf(s_pSpot[k] + delta); imSLM += s_weight[k] * sinf(s_pSpot[k] + delta); } pSLM2pi_f = atan2f(imSLM, reSLM); if (c_useRPC_b[0]) //Apply RPC (restricted Phase Change) { float pSLMstart = g_pSLMstart[idx]; if (fabs(pSLM2pi_f - pSLMstart) > c_alphaRPC_f[0]) pSLM2pi_f = pSLMstart; if (getpSLM65535) g_pSLMstart[idx] = pSLM2pi_f; } if (getpSLM65535) //Compute final SLM phases and write to global memory... 
g_pSLM65535_uc[idx] = phase2uc(pSLM2pi_f); g_pSLM2pi[idx] = pSLM2pi_f; //...or write intermediate pSpot to global memory } } //Obtain phases in SLM plane __global__ void PropagateToSLMDC_Fresnel(float *g_pSpot, float *g_wSpot, hipfftComplex *g_cSLM_cc, float *g_pSLM_f, int iteration, float *g_pSLMstart, bool getpSLM65535, uint16_t *g_pSLM65535_uc) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int tid = threadIdx.x; __shared__ float s_weight[MAX_SPOTS], s_pSpot[MAX_SPOTS]; float reSLM = 0.0f, imSLM = 0.0f, pSLM2pi_f = 0.0f; if (idx<c_N_pixels[0]) { if (tid<c_N_spots[0]) { s_pSpot[tid] = g_pSpot[tid]; s_weight[tid] = g_wSpot[tid+c_N_spots[0]*iteration]; } __syncthreads(); //get pixel coordinates int X_int = getXint(idx); int Y_int = getYint(idx, X_int); int shiftedidx = fftshift(idx, X_int, Y_int); float X = c_SLMpitch_f[0]*(X_int - c_half_w_f[0]); float Y = c_SLMpitch_f[0]*(Y_int - c_half_w_f[0]); //compute SLM pSpot by summing contribution from all spots for (int k=0; k<c_N_spots[0]; k++) { float delta = M_PI * c_z[k] * (X*X + Y*Y) + 2.0f * M_PI * (X * c_x[k] + Y * c_y[k]); reSLM += s_weight[k] * cosf(s_pSpot[k] + delta); imSLM += s_weight[k] * sinf(s_pSpot[k] + delta); } hipfftComplex cSLM_cc = g_cSLM_cc[shiftedidx]; reSLM += cSLM_cc.x/c_N_pixels_f[0]; imSLM += cSLM_cc.y/c_N_pixels_f[0]; pSLM2pi_f = atan2f(imSLM, reSLM); if (c_useRPC_b[0]) //Apply RPC (restricted Phase Change) { float pSLMstart = g_pSLMstart[shiftedidx]; if (fabs(pSLM2pi_f - pSLMstart) > c_alphaRPC_f[0]) pSLM2pi_f = pSLMstart; if (getpSLM65535) g_pSLMstart[shiftedidx] = pSLM2pi_f; } g_pSLM_f[idx] = pSLM2pi_f; g_cSLM_cc[shiftedidx].x = cosf(pSLM2pi_f); g_cSLM_cc[shiftedidx].y = sinf(pSLM2pi_f); if (getpSLM65535) //Compute final SLM phases and write to global memory... g_pSLM65535_uc[idx] = phase2uc(pSLM2pi_f); } } //Clear inside the DC frame __global__ void setActiveRegionToZero(hipfftComplex *g_Farfield_cc) //this only works if blocksize = nblocks = SLMsize = 512 { int tid = threadIdx.x; int bid = blockIdx.x; int idx = bid * blockDim.x + tid; if (((tid < (c_half_w[0] - c_DCborderWidth[0]))||(tid > ((c_half_w[0]-1) + c_DCborderWidth[0])))&&((bid < (c_half_w[0] - c_DCborderWidth[0]))||(bid > ((c_half_w[0]-1) + c_DCborderWidth[0])))) { g_Farfield_cc[idx].x = 0.0f; g_Farfield_cc[idx].y = 0.0f; } } //Functions for GS with FFT propagation //Compute the phase in SLM pixels and set amplitude to unity or Laser amp __global__ void ReplaceAmpsSLM_FFT(float *g_aLaser, hipfftComplex *g_cAmp, float *g_pSLMstart, bool getpSLM65535, uint16_t *g_pSLM65535_uc, uint16_t *g_LUT, float *g_AberrationCorr_f, float *g_LUTPolCoeff_f) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx<c_N_pixels[0]) { float aLaser = 1.0f;//g_aLaser[idx]; hipfftComplex cAmp = g_cAmp[idx]; float pSLM2pi_f = atan2f(cAmp.y, cAmp.x); if (c_useRPC_b[0]) { float pSLMstart = g_pSLMstart[idx]; if (fabs(pSLM2pi_f - pSLMstart) > c_alphaRPC_f[0]) pSLM2pi_f = pSLMstart; } if (getpSLM65535) { if (c_useRPC_b[0]) g_pSLMstart[idx] = pSLM2pi_f; //float phase65535; int X_int = getXint(idx); int Y_int = getYint(idx, X_int); int shiftedidx = fftshift(idx, X_int, Y_int); g_pSLM65535_uc[shiftedidx] = phase2uc(pSLM2pi_f); } g_cAmp[idx].x = aLaser*cosf(pSLM2pi_f); g_cAmp[idx].y = aLaser*sinf(pSLM2pi_f); } __syncthreads(); } //Adjust amplitudes in spot positions __global__ void ReplaceAmpsSpots_FFT(hipfftComplex *g_cSpotAmp_cc, hipfftComplex *g_cSpotAmpNew_cc, int iteration, float *g_Iobtained, float *g_weight, bool last_iteration) { int tid = threadIdx.x; int spotIndex; float 
pSpot;
	__shared__ float s_aSpot[MAX_SPOTS], s_ISpotsMeanSq;
	float weight;
	hipfftComplex cSpotAmp_cc;
	if (tid<c_N_spots[0])
	{
		spotIndex = c_spotIndex[tid];
		cSpotAmp_cc = g_cSpotAmp_cc[spotIndex];
		pSpot = atan2f(cSpotAmp_cc.y, cSpotAmp_cc.x);
		s_aSpot[tid] = hypotf(cSpotAmp_cc.x, cSpotAmp_cc.y)/c_desiredAmp[tid];
		if (iteration != 0)
			weight = g_weight[tid + iteration*c_N_spots[0]];
		else
		{
			s_aSpot[tid] = (s_aSpot[tid]<0.5f) ? 0.5f : s_aSpot[tid];
			weight = c_desiredAmp[tid];
		}
	}
	__syncthreads();
	//compute weights
	if (tid==0)
	{
		float ISpot_sum = 0.0f;
		for (int jj=0; jj<c_N_spots[0];jj++)
		{
			ISpot_sum += s_aSpot[jj]*s_aSpot[jj];
		}
		s_ISpotsMeanSq = sqrtf(ISpot_sum / (float)c_N_spots[0]); //integer division!!
	}
	__syncthreads();
	if (tid<c_N_spots[0])
	{
		weight = weight * s_ISpotsMeanSq / s_aSpot[tid];
		cSpotAmp_cc.x = cosf(pSpot) * weight;
		cSpotAmp_cc.y = sinf(pSpot) * weight;
		g_cSpotAmpNew_cc[spotIndex] = cSpotAmp_cc;
		if (last_iteration)
			g_weight[tid] = weight;
		else
			g_weight[c_N_spots[0] * (iteration + 1) + tid] = weight;
		if (c_saveI_b[0])
			g_Iobtained[c_N_spots[0] * (iteration) + tid] = s_aSpot[tid]*s_aSpot[tid];
	}
}
//Adjust amplitudes in spot positions
__global__ void ReplaceAmpsSpotsDC_FFT(hipfftComplex *g_cSpotAmp_cc, hipfftComplex *g_cSpotAmpNew_cc, int iteration, float *g_Iobtained, float *g_weight, bool last_iteration)
{
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	int spotIndex;
	float pSpot;
	//__shared__ float s_aSpot[MAX_SPOTS], s_ISpotsMeanSq;
	float weight;
	hipfftComplex cSpotAmp_cc;
	if (idx<c_N_spots[0])
	{
		spotIndex = c_spotIndex[idx];
		cSpotAmp_cc = g_cSpotAmp_cc[spotIndex];
		pSpot = atan2f(cSpotAmp_cc.y, cSpotAmp_cc.x);
		float aSpot = hypotf(cSpotAmp_cc.x, cSpotAmp_cc.y)/c_desiredAmp[idx];
		if (iteration != 0)
			weight = g_weight[idx + iteration*c_N_spots[0]];
		else
		{
			aSpot = (aSpot<0.5f) ? 0.5f : aSpot; //should it be done this way for the DC case?
weight = c_desiredAmp[idx]/(c_N_pixels_f[0]); } weight = weight / aSpot; cSpotAmp_cc.x = cosf(pSpot) * weight; cSpotAmp_cc.y = sinf(pSpot) * weight; g_cSpotAmpNew_cc[spotIndex] = cSpotAmp_cc; if (last_iteration) g_weight[idx] = weight; else g_weight[c_N_spots[0] * (iteration + 1) + idx] = weight; if (c_saveI_b[0]) g_Iobtained[c_N_spots[0] * (iteration) + idx] = aSpot*aSpot; } int X_int = getXint(idx); int Y_int = getYint(idx, X_int); if (((X_int > (c_half_w[0] - c_DCborderWidth[0]))&&(X_int < ((c_half_w[0]-1) + c_DCborderWidth[0])))||((Y_int > (c_half_w[0] - c_DCborderWidth[0]))&&(Y_int < ((c_half_w[0]-1) + c_DCborderWidth[0])))) { g_cSpotAmpNew_cc[idx].x = g_cSpotAmp_cc[idx].x/(c_N_pixels_f[0]); g_cSpotAmpNew_cc[idx].y = g_cSpotAmp_cc[idx].y/(c_N_pixels_f[0]); } } //Misc help functions __global__ void testfunc(float *testdata) { int idx = blockIdx.x * blockDim.x + threadIdx.x; testdata[idx] = idx; } //Convert from uint16_t [0, 65535] to float [-pi, pi] __global__ void uc2f(float *f, uint16_t *uc, int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx<N) { f[idx] = uc[idx]*2.0f*M_PI/65536.0f - M_PI; } } //Calculate complex from phases __global__ void p2c(hipfftComplex *g_c, float *g_p, int M) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx<M) { float pSpot = g_p[idx]; g_c[idx].x = cosf(pSpot); g_c[idx].y = sinf(pSpot); } __syncthreads(); } //Calculate amplitudes from complex __global__ void c_cc2a_f(float *g_a, hipfftComplex *g_c, int M) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx<M) { g_a[idx] = hypotf(g_c[idx].x, g_c[idx].y); } __syncthreads(); } //Calculate phases from complex __global__ void c_cc2p_cc(hipfftComplex *g_p, hipfftComplex *g_c, int M) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx<M) { g_p[idx].x = atan2f(g_c[idx].y, g_c[idx].x); g_p[idx].y = 0.0f; } __syncthreads(); } //Calculate phases from complex __global__ void c_cc2p_f(float *g_p, hipfftComplex *g_c, int M) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx<M) { g_p[idx] = atan2f(g_c[idx].y, g_c[idx].x); } __syncthreads(); } //Copy real part from complex __global__ void c_cc2re_f(float *g_p, hipfftComplex *g_c, int M) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx<M) { g_p[idx] = g_c[idx].x; } __syncthreads(); } //Copy imaginary part from complex __global__ void c_cc2im_f(float *g_p, hipfftComplex *g_c, int M) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx<M) { g_p[idx] = g_c[idx].y; } __syncthreads(); } //Custom debug functions inline void mSafeCall(hipError_t status, int line, const char *file) { #ifdef M_CUDA_DEBUG do { if(status != hipSuccess) { char CUDAmessage[200] = "CUDA says: "; strcat(CUDAmessage, hipGetErrorString(status)); sprintf(CUDAmessage, "%s\non line: %d\n", CUDAmessage, line); //AfxMessageBox(CUDAmessage); if (status != HIPFFT_SUCCESS) exit(-1); } hipDeviceSynchronize(); status = hipGetLastError(); if(status!=hipSuccess) { char CUDAmessage[200] = "CUDA failed after sychronization:\n"; strcat(CUDAmessage, hipGetErrorString(status)); sprintf(CUDAmessage, "%s\non line: %d\n", CUDAmessage, line); //AfxMessageBox(CUDAmessage); exit(-1); } }while(0); #endif return; } inline void mCufftSafeCall(hipfftResult_t status, int line, const char *file) { #ifdef M_CUDA_DEBUG if(status != HIPFFT_SUCCESS) { char CUDAmessage[200] = "CUFFT error, CUDA says:\n "; switch (status) { case HIPFFT_INVALID_PLAN: strcat(CUDAmessage,"HIPFFT_INVALID_PLAN\n");break; case HIPFFT_ALLOC_FAILED: strcat(CUDAmessage,"HIPFFT_ALLOC_FAILED\n");break; case 
HIPFFT_INVALID_TYPE: strcat(CUDAmessage,"HIPFFT_INVALID_TYPE\n");break; case HIPFFT_INVALID_VALUE: strcat(CUDAmessage,"HIPFFT_INVALID_VALUE\n");break; case HIPFFT_INTERNAL_ERROR: strcat(CUDAmessage,"HIPFFT_INTERNAL_ERROR\n");break; case HIPFFT_EXEC_FAILED: strcat(CUDAmessage,"HIPFFT_EXEC_FAILED\n");break; case HIPFFT_SETUP_FAILED: strcat(CUDAmessage,"HIPFFT_SETUP_FAILED\n");break; case HIPFFT_INVALID_SIZE: strcat(CUDAmessage,"HIPFFT_INVALID_SIZE\n");break; case HIPFFT_UNALIGNED_DATA: strcat(CUDAmessage,"HIPFFT_UNALIGNED_DATA\n");break; default: strcat(CUDAmessage,"CUFFT Unknown error code\n"); } sprintf(CUDAmessage, "%son line: %d\nin file: %s", CUDAmessage, line, file); //AfxMessageBox(CUDAmessage); exit(-1); } hipDeviceSynchronize(); hipError_t status2 = hipGetLastError(); if(status2!=hipSuccess) { char CUDAmessage[200] = "CUDA failed after sychronization:\n"; strcat(CUDAmessage, hipGetErrorString(status2)); sprintf(CUDAmessage, "%s\non line: %d\n", CUDAmessage, line); //AfxMessageBox(CUDAmessage); exit(-1); } #endif return; } inline void mCheckError(int line, const char *file) { #ifdef M_CUDA_DEBUG do { hipError_t status = hipGetLastError(); if(status!=hipSuccess) { char CUDAmessage[200] = "CUDA says: "; strcat(CUDAmessage, hipGetErrorString(status)); sprintf(CUDAmessage, "%s\non line: %d\n", CUDAmessage, line); //AfxMessageBox(CUDAmessage); exit(-1); } hipDeviceSynchronize(); status = hipGetLastError(); if(status!=hipSuccess) { char CUDAmessage[200] = "CUDA failed after sychronization:\n"; strcat(CUDAmessage, hipGetErrorString(status)); sprintf(CUDAmessage, "%s\non line: %d\n", CUDAmessage, line); //AfxMessageBox(CUDAmessage); exit(-1); } }while(0); #endif return; } inline void mDisplayDataF(float *d_data, int length, int line) { #ifdef M_CUDA_DEBUG do { int maxlength = 50; float *h_data; length = (length<=maxlength) ? length : maxlength; char MessageString[1000]; h_data = (float*)malloc(length * sizeof (float)); M_SAFE_CALL(hipMemcpy(h_data, d_data, length*sizeof(float), hipMemcpyDeviceToHost)); sprintf(MessageString, "Line: %d\nData: ", line); for (int ii = 0;ii<length;++ii) { sprintf(MessageString, "%s %f", MessageString, h_data[ii]); } //AfxMessageBox(MessageString, MB_ICONINFORMATION); free(h_data); }while(0); #endif return; } inline void mDisplayDataCC(hipfftComplex *d_data, int length, int line) { #ifdef M_CUDA_DEBUG do { int maxlength = 25; hipfftComplex *h_data; length = (length<=maxlength) ? length : maxlength; char MessageString[1000]; h_data = (hipfftComplex*)malloc(length * sizeof (hipfftComplex)); M_SAFE_CALL(hipMemcpy(h_data, d_data, length*sizeof(hipfftComplex), hipMemcpyDeviceToHost)); sprintf(MessageString, "Line: %d\nData: ", line); for (int ii = 0;ii<length;++ii) { sprintf(MessageString, "%s re: %f im: %f", MessageString, h_data[ii].x, h_data[ii].y); } //AfxMessageBox(MessageString, MB_ICONINFORMATION); free(h_data); }while(0); #endif return; } inline void mDisplayDataUC(uint16_t *d_data, int length, int line) { #ifdef M_CUDA_DEBUG do { int maxlength = 50; uint16_t *h_data; length = (length<=maxlength) ? 
length : maxlength;
		char MessageString[1000];
		h_data = (uint16_t*)malloc(length * sizeof (uint16_t));
		M_SAFE_CALL(hipMemcpy(h_data, d_data, length*sizeof(uint16_t), hipMemcpyDeviceToHost));
		sprintf(MessageString, "Line: %d\nData: ", line);
		for (int ii = 0;ii<length;++ii)
		{
			sprintf(MessageString, "%s %hu", MessageString, h_data[ii]);
		}
		//AfxMessageBox(MessageString, MB_ICONINFORMATION);
		free(h_data);
	}while(0);
#endif
	return;
}
inline void mDisplayDataI(int *d_data, int length, int line)
{
#ifdef M_CUDA_DEBUG
	do
	{
		int maxlength = 50;
		int *h_data;
		length = (length<=maxlength) ? length : maxlength;
		char MessageString[1000];
		h_data = (int*)malloc(length * sizeof (int));
		M_SAFE_CALL(hipMemcpy(h_data, d_data, length*sizeof(int), hipMemcpyDeviceToHost));
		sprintf(MessageString, "Line: %d\nData: ", line);
		for (int ii = 0;ii<length;++ii)
		{
			sprintf(MessageString, "%s %d", MessageString, h_data[ii]);
		}
		//AfxMessageBox(MessageString, MB_ICONINFORMATION);
		free(h_data);
	}while(0);
#endif
	return;
}
//Calculate amplitudes in positions given by x, y, and z from a given hologram
extern "C" __declspec(dllexport) int GetIandPhase(float *x_spots, float *y_spots, float *z_spots, float *h_pSLM_uc, int N_spots_all, int data_w, float *h_I_obt, float *h_Phase_obt)
{
	float *d_Iobtained_all;
	float *d_Pobtained_all;
	hipMalloc((void**)&d_Iobtained_all, N_spots_all*sizeof(float) );
	hipMalloc((void**)&d_Pobtained_all, N_spots_all*sizeof(float) );
	hipMemcpy(d_pSLM_uc, h_pSLM_uc, memsize_SLMuc, hipMemcpyHostToDevice);
	int offset = 0;
	int N_spots_rem = N_spots_all;
	int N_spots_this;
	while (N_spots_rem > 0)
	{
		N_spots_this = (N_spots_rem > MAX_SPOTS) ? MAX_SPOTS : N_spots_rem;
		hipMemcpyToSymbol(c_x, x_spots+offset, N_spots_this*sizeof(float), 0, hipMemcpyHostToDevice);
		hipMemcpyToSymbol(c_y, y_spots+offset, N_spots_this*sizeof(float), 0, hipMemcpyHostToDevice);
		hipMemcpyToSymbol(c_z, z_spots+offset, N_spots_this*sizeof(float), 0, hipMemcpyHostToDevice);
		hipLaunchKernelGGL(( calculateIandPhase), dim3(N_spots_this), dim3(512), 0, 0, d_pSLM_uc, d_Iobtained_all+offset, d_Pobtained_all+offset);
		//calculateIobtained(uint16_t *g_pSLM_uc, float *g_Iobtained)
		hipDeviceSynchronize();
		N_spots_rem -= MAX_SPOTS;
		offset += MAX_SPOTS;
	}
	hipMemcpy(h_I_obt, d_Iobtained_all, N_spots_all*sizeof(float), hipMemcpyDeviceToHost);
	hipMemcpy(h_Phase_obt, d_Pobtained_all, N_spots_all*sizeof(float), hipMemcpyDeviceToHost);
	hipFree(d_Iobtained_all);
	hipFree(d_Pobtained_all);
	status = hipGetLastError();
	return status;
}
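/*
   Minimal host-side usage sketch (an illustrative addition, not part of the original
   source). It only uses the exported entry points defined above — startCUDA,
   GenerateHologram and stopCUDA — together with SLM_SIZE and the standard headers this
   file already includes. The spot coordinates, intensities, device id and the function
   name exampleUsage are made-up example values; adjust them to your setup.
*/
int exampleUsage(void)
{
	const int N_pixels = SLM_SIZE * SLM_SIZE;
	const int N_spots = 2;            // fewer than 3 spots, so method 0 ("Lenses and Prisms") is selected anyway
	const int N_iterations = 10;      // only used to size h_Iobt here; method 0 ignores it
	float x[2] = { 50.0f, -50.0f };   // hypothetical spot positions (same units the library expects)
	float y[2] = {  0.0f,  30.0f };
	float z[2] = {  0.0f,   0.0f };   // 0 = focal plane
	float I[2] = { 100.0f, 100.0f };  // relative spot intensities

	float    *h_pSLMstart = (float *)calloc(N_pixels, sizeof(float));        // flat initial phase
	uint16_t *h_pSLM      = (uint16_t *)malloc(N_pixels * sizeof(uint16_t)); // hologram returned to the host
	float    *h_Iobt      = (float *)malloc(N_spots * N_iterations * sizeof(float));

	startCUDA(h_pSLMstart, 0);        // allocate GPU buffers on device 0
	GenerateHologram(NULL, h_pSLM, x, y, z, I, N_spots, N_iterations, h_Iobt, 0); // first argument (h_checkData) is not referenced in this file
	stopCUDA();                       // free GPU buffers

	free(h_pSLMstart);
	free(h_pSLM);
	free(h_Iobt);
	return 0;
}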
63c5dbe2c9ed3c27c42bd4b106f3481ba14587b7.cu
/* Hologram generating algorithms for CUDA Devices

   Copyright 2009, 2010, 2011, 2012 Martin Persson
   [email protected]

   Small edits by Lloyd Russell 2016

   This file is part of GenerateHologramCUDA.

   GenerateHologramCUDA is free software: you can redistribute it and/or modify
   it under the terms of the GNU Lesser General Public License as published by
   the Free Software Foundation, either version 3 of the License, or (at your
   option) any later version.

   GenerateHologramCUDA is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
   License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with GenerateHologramCUDA. If not, see <http://www.gnu.org/licenses/>.

   The function "GenerateHologram" contains three different algorithms for
   hologram generation. The last parameter in the function call selects which
   one to use:

   0: Complex addition of "Lenses and Prisms", no optimization (3D)
   1: Weighted Gerchberg-Saxton algorithm using Fresnel propagation (3D)
   2: Weighted Gerchberg-Saxton algorithm using Fast Fourier Transforms (2D)

   (0) produces optimal holograms for 1 or 2 traps and is significantly faster.
   (0) is automatically selected if the number of spots is < 3.

   Fresnel propagation based algorithm (1) described in:
   Roberto Di Leonardo, Francesca Ianni, and Giancarlo Ruocco
   "Computer generation of optimal holograms for optical trap arrays"
   Opt. Express 15, 1913-1922 (2007)

   The original algorithm has been modified to allow variable spot amplitudes.

   Naming convention for variables:
   The prefix indicates where data is located
   In host functions:   h = host memory
                        d = device memory
                        c = constant memory
   In global functions: g = global memory
                        s = shared memory
                        c = constant memory
                        no prefix = registers
   The suffix indicates the data type, no suffix usually indicates an integer

   Possible improvements:
   * Improve convergence of the GS algorithms for 2 spots. *done
   * Compensate spot intensities for distance from center of field. *done
   * Put all arguments for device functions and trap positions in constant memory. *done
     (Requires all functions to be moved into the same file or the use of some
     workaround found on nVidia forum)
   * Put pSLMstart and aLaser in texture memory (may not improve performance on Fermi devices)
   * Use "zero-copy" to transfer pSLM to host.
* Rename functions and variables for consistency and readability * Allow variable spot phases for Lenses and Prisms */ //#define M_CUDA_DEBUG //activates a number of custom debug macros// float dt_milliseconds; cudaEvent_t start, stop; //Includes #include <stdlib.h> #include <stdio.h> #include "stdint.h" #include <string.h> #include <math.h> #include <cufft.h> #ifndef M_PI #define M_PI 3.14159265358979323846f #endif #define MAX_SPOTS 1000 //decrease this if your GPU keeps running out of memory, was 1024 #define BLOCK_SIZE 512 //should be a power of 2, was 512 #define SLM_SIZE 2048 #if ((SLM_SIZE==16)||(SLM_SIZE==32)||(SLM_SIZE==64)||(SLM_SIZE==128)||(SLM_SIZE==256)||(SLM_SIZE==512)||(SLM_SIZE==1024)||(SLM_SIZE==2048)) #define SLMPOW2 //Uses bitwize modulu operations if teh SLM size is a power of 2 #endif // forward declarations __global__ void ApplyCorrections(uint16_t *g_pSLM_uc, uint16_t *g_LUT, float *d_AberrationCorr_f, float *d_LUTPolCoeff_f); __global__ void LensesAndPrisms(uint16_t *g_SLMuc, uint16_t *g_LUT, float *d_AberrationCorr_f, float *d_LUTPolCoeff_f); __global__ void calculateIobtained(uint16_t *g_pSLM_uc, float *g_Iobtained); __global__ void PropagateToSLM_Fresnel(float *g_spotRe_f, float *g_spotIm_f, float *g_pSLM2pi, float *g_weights, int iteration, float *g_pSLMstart, float *g_amps, bool getpSLM65535, uint16_t *g_pSLM65535_uc, uint16_t *g_LUT, float *g_AberrationCorr_f, float *g_LUTPolCoeff_f); __global__ void PropagateToSLMDC_Fresnel(float *g_pSpot, float *g_wSpot, cufftComplex *g_cSLM_cc, float *g_pSLM_f, int iteration, float *g_pSLMstart, bool getpSLM65535, uint16_t *g_pSLM65535_uc); __global__ void setActiveRegionToZero(cufftComplex *g_Farfield); __global__ void PropagateToSpotPositions_Fresnel(float *g_pSLM2pi, float *g_spotRe_f, float *g_spotIm_f); __global__ void PropagateToSpotPositionsDC_Fresnel(float *g_pSLM_f, float *g_obtainedPhase, float *g_weights, float *g_Iobtained, int iteration); __global__ void ReplaceAmpsSLM_FFT(float *g_aLaser, cufftComplex *g_cAmp, float *g_pSLMstart, bool getpSLM65535, uint16_t *g_pSLM65535_uc, uint16_t *g_LUT, float *g_AberrationCorr_f, float *g_LUTPolCoeff_f); __global__ void ReplaceAmpsSpots_FFT(cufftComplex *g_cSpotAmp_cc, cufftComplex *g_cSpotAmpNew_cc, int iteration, float *g_Iobtained, float *g_weight, bool last_iteration); __global__ void ReplaceAmpsSpotsDC_FFT(cufftComplex *g_cSpotAmp_cc, cufftComplex *g_cSpotAmpNew_cc, int iteration, float *g_Iobtained, float *g_weight, bool last_iteration); __global__ void XYtoIndex(); __global__ void f2uc(uint16_t *uc, float *f, int N_pixels, uint16_t *g_LUT, int use_linLUT, int data_w); __global__ void uc2f(float *f, uint16_t *uc, int N); __global__ void p2c(cufftComplex *g_c, float *g_p, int M); inline int computeAndCopySpotData(float *h_I, float *x, float *y, float *z, int N_spots, int method); // Custom debug macros #define M_CHECK_ERROR() mCheckError(__LINE__, __FILE__) #define M_SAFE_CALL(errcode) mSafeCall(errcode, __LINE__, __FILE__) #define M_CUFFT_SAFE_CALL(cuffterror) mCufftSafeCall(cuffterror, __LINE__, __FILE__) #define M_DISPLAY_DATA_F(data, length) mDisplayDataF(data, length, __LINE__) #define M_DISPLAY_DATA_UC(data, length) mDisplayDataUC(data, length, __LINE__) #define M_DISPLAY_DATA_CC(data, length) mDisplayDataCC(data, length, __LINE__) #define M_DISPLAY_DATA_I(data, length) mDisplayDataI(data, length, __LINE__) inline void mSafeCall(cudaError_t status, int line, const char *file); inline void mCufftSafeCall(cufftResult_t status, int line, const char *file); inline 
void mCheckError(int line, const char *file); inline void mDisplayDataF(float *d_data, int length, int line); inline void mDisplayDataCC(cufftComplex *d_data, int length, int line); inline void mDisplayDataUC(uint16_t *d_data, int length, int line); inline void mDisplayDataI(int *d_data, int length, int line); //Global declaration float *d_x, *d_y, *d_z, *d_I; //trap coordinates and intensity in GPU memory float *d_pSLM_f; //the optimized pSpot pattern, float [-pi, pi] float *d_weights, *d_Iobtained, *d_desiredAmp; //used h_weights and calculated amplitudes for each spot and each iteration float *d_pSLMstart_f; //Initial pSpot pattern [-pi, pi] float *d_spotRe_f, *d_spotIm_f; float *d_AberrationCorr_f = NULL; float *d_LUTPolCoeff_f = NULL; float SLMsizef = (float)SLM_SIZE; int N_PolLUTCoeff = 0; int n_blocks_Phi, memsize_SLM_f, memsize_SLMuc, memsize_spotsf, data_w, N_pixels, N_iterations_last; float h_desiredAmp[MAX_SPOTS]; int h_spotIndex[MAX_SPOTS]; uint16_t *d_pSLM_uc; //The optimized pSpot pattern, uint16_t, the one sent to the SLM [0, 65535] uint16_t *h_LUT_uc; uint16_t *d_LUT_uc = NULL; int maxThreads_device; bool ApplyLUT_b = false, EnableSLM_b = false, UseAberrationCorr_b = false, UsePolLUT_b = false, saveI_b = false, useRPC_b = false, useDC_b = false; float alphaRPC_f = 10; char CUDAmessage[100]; cudaError_t status; float *d_aLaserFFT, *d_LUT_coeff; cufftHandle plan; cufftComplex *d_FFTo_cc, *d_FFTd_cc, *d_SLM_cc; int *d_spot_index, memsize_SLMcc; int borderWidthDC_i; float *d_obtainedPhase; //Constant memory declarations __device__ __constant__ int c_data_w[1]; __device__ __constant__ float c_data_w_f[1]; __device__ __constant__ int c_half_w[1]; __device__ __constant__ float c_half_w_f[1]; __device__ __constant__ int c_N_pixels[1]; __device__ __constant__ float c_N_pixels_f[1]; __device__ __constant__ float c_SLMpitch_f[1]; __device__ __constant__ bool c_useDC_b[1]; __device__ __constant__ int c_DCborderWidth[1]; __device__ __constant__ bool c_useRPC_b[1]; __device__ __constant__ float c_alphaRPC_f[1]; __device__ __constant__ bool c_saveI_b[1]; __device__ __constant__ int c_log2data_w[1]; __device__ __constant__ float c_x[MAX_SPOTS]; __device__ __constant__ float c_y[MAX_SPOTS]; __device__ __constant__ float c_z[MAX_SPOTS]; __device__ __constant__ float c_desiredAmp[MAX_SPOTS]; __device__ __constant__ int c_spotIndex[MAX_SPOTS]; __device__ __constant__ int c_N_spots[1]; //Public dll functions //Generate a hologram extern "C" __declspec(dllexport) int GenerateHologram(float *h_checkData, uint16_t *h_pSLM_uc, float *x_spots, float *y_spots, float *z_spots, float *I_spots, int N_spots, int N_iterations, float *h_Iobtained, int method)//, float* gpuTime) { //*gpuTime = 0; //float deltaTime = 0; if (N_spots > MAX_SPOTS) N_spots = MAX_SPOTS; else if (N_spots < 1) method = 100; else if (N_spots < 3) method = 0; memsize_spotsf = N_spots*sizeof(float); method = computeAndCopySpotData(I_spots, x_spots, y_spots, z_spots, N_spots, method); //sets method to -1 if N_spots == 0. 
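	/*
	   The switch below dispatches on "method" (see the header comment at the top of this
	   file): 0 = direct superposition of lens and prism phase terms, 1 = weighted
	   Gerchberg-Saxton with Fresnel propagation, 2 = weighted Gerchberg-Saxton with FFT
	   propagation. In methods 1 and 2 each spot's weight stored in d_weights is rescaled
	   once per iteration as w_j <- w_j * A / a_j, where a_j is the obtained amplitude of
	   spot j normalized by its desired amplitude and A is an average of all a_j (their
	   mean in PropagateToSLM_Fresnel, their RMS in ReplaceAmpsSpots_FFT; the DC variants
	   use the desired/obtained amplitude ratio directly), so spots that came out too weak
	   are boosted on the next pass.
	*/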
switch (method) { case 0: ////// //Generate the hologram using "Lenses and Prisms" ////// LensesAndPrisms<<< n_blocks_Phi, BLOCK_SIZE >>>(d_pSLM_uc, d_LUT_uc, d_AberrationCorr_f, d_LUTPolCoeff_f); M_CHECK_ERROR(); cudaDeviceSynchronize(); M_CHECK_ERROR(); if (saveI_b) { calculateIobtained<<< N_spots, SLM_SIZE>>>(d_pSLM_uc, d_Iobtained); M_CHECK_ERROR(); cudaDeviceSynchronize(); M_SAFE_CALL(cudaMemcpy(h_Iobtained, d_Iobtained, N_spots*sizeof(float), cudaMemcpyDeviceToHost)); } M_SAFE_CALL(cudaMemcpy(h_pSLM_uc, d_pSLM_uc, memsize_SLMuc, cudaMemcpyDeviceToHost)); break; case 1: //Generate holgram using fresnel propagation //Uncomment this to start with pre-calculated hologram: //cudaMemcpy(d_pSLM_uc, h_pSLM_uc, memsize_SLMuc, cudaMemcpyHostToDevice); //cudaDeviceSynchronize(); //uc2f<<< n_blocks_Phi, BLOCK_SIZE >>>(d_pSLM_f, d_pSLM_uc, N_pixels); /*cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); cudaEventSynchronize(start);*/ for (int l=0; l<N_iterations; l++) { //Propagate to the spot positions if (useDC_b) { M_CUFFT_SAFE_CALL(cufftExecC2C(plan, d_SLM_cc, d_FFTo_cc, CUFFT_FORWARD)); M_CHECK_ERROR(); PropagateToSpotPositionsDC_Fresnel<<< N_spots, SLM_SIZE>>>(d_pSLM_f, d_obtainedPhase, d_weights, d_Iobtained, l); //this function is very slow M_CHECK_ERROR(); setActiveRegionToZero<<< SLM_SIZE, SLM_SIZE >>>(d_FFTo_cc); } else PropagateToSpotPositions_Fresnel<<< N_spots, SLM_SIZE>>>(d_pSLM_f, d_spotRe_f, d_spotIm_f); M_CHECK_ERROR(); cudaDeviceSynchronize(); //Propagate to the SLM plane if (useDC_b) { M_CUFFT_SAFE_CALL(cufftExecC2C(plan, d_FFTo_cc, d_SLM_cc, CUFFT_INVERSE)); cudaDeviceSynchronize(); PropagateToSLMDC_Fresnel<<< n_blocks_Phi, BLOCK_SIZE >>>(d_obtainedPhase, d_weights, d_SLM_cc, d_pSLM_f, l, d_pSLMstart_f, (l==(N_iterations-1)), d_pSLM_uc); } else { PropagateToSLM_Fresnel<<< n_blocks_Phi, BLOCK_SIZE >>>(d_spotRe_f, d_spotIm_f, d_pSLM_f, d_weights, l, d_pSLMstart_f, d_Iobtained, (l==(N_iterations-1)), d_pSLM_uc, d_LUT_uc, d_AberrationCorr_f, d_LUTPolCoeff_f); } M_CHECK_ERROR(); cudaDeviceSynchronize(); } /*cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&deltaTime, start, stop); *gpuTime = deltaTime; */ if (saveI_b) M_SAFE_CALL(cudaMemcpy(h_Iobtained, d_Iobtained, N_spots*(N_iterations)*sizeof(float), cudaMemcpyDeviceToHost)); else M_SAFE_CALL(cudaMemcpy(h_Iobtained, d_weights, N_spots*(N_iterations)*sizeof(float), cudaMemcpyDeviceToHost)); M_SAFE_CALL(cudaMemcpy(h_pSLM_uc, d_pSLM_uc, memsize_SLMuc, cudaMemcpyDeviceToHost)); break; case 2: //generate hologram using fast fourier transforms //Uncomment this to start with pre-calculated hologram: //cudaMemcpy(d_pSLM_uc, h_pSLM_uc, memsize_SLMuc, cudaMemcpyHostToDevice); //cudaDeviceSynchronize(); //p_uc2c_cc_shift<<< n_blocks_Phi, BLOCK_SIZE >>>(d_SLM_cc, d_pSLM_uc, N_pixels, data_w); //M_DISPLAY_DATA_CC(d_SLM_cc, 100); M_SAFE_CALL(cudaMemcpy(d_desiredAmp, h_desiredAmp, memsize_spotsf, cudaMemcpyHostToDevice)); M_SAFE_CALL(cudaMemset(d_FFTd_cc, 0, memsize_SLMcc)); M_CHECK_ERROR(); cudaDeviceSynchronize(); for (int l=0; l<N_iterations; l++) { // Transform to trapping plane M_CUFFT_SAFE_CALL(cufftExecC2C(plan, d_SLM_cc, d_FFTo_cc, CUFFT_FORWARD)); cudaDeviceSynchronize(); // Copy phases for spot indices in d_FFTo_cc to d_FFTd_cc if (useDC_b) ReplaceAmpsSpotsDC_FFT <<< n_blocks_Phi, BLOCK_SIZE >>> (d_FFTo_cc, d_FFTd_cc, l, d_Iobtained, d_weights, (l==(N_iterations-1))); else ReplaceAmpsSpots_FFT <<< 1, N_spots >>> (d_FFTo_cc, d_FFTd_cc, l, d_Iobtained, d_weights, 
(l==(N_iterations-1))); M_CHECK_ERROR(); cudaDeviceSynchronize(); //Transform back to SLM plane M_CUFFT_SAFE_CALL(cufftExecC2C(plan, d_FFTd_cc, d_SLM_cc, CUFFT_INVERSE)); cudaDeviceSynchronize(); //M_DISPLAY_DATA_CC(d_SLM_cc, 100); // Set amplitudes in d_SLM to the laser amplitude profile ReplaceAmpsSLM_FFT <<< n_blocks_Phi, BLOCK_SIZE >>> (d_aLaserFFT, d_SLM_cc, d_pSLMstart_f, (l==(N_iterations-1)), d_pSLM_uc, d_LUT_uc, d_AberrationCorr_f, d_LUTPolCoeff_f); M_CHECK_ERROR(); //M_DISPLAY_DATA_CC(d_SLM_cc, 100); cudaDeviceSynchronize(); } if (saveI_b) M_SAFE_CALL(cudaMemcpy(h_Iobtained, d_Iobtained, N_spots*(N_iterations)*sizeof(float), cudaMemcpyDeviceToHost)); else M_SAFE_CALL(cudaMemcpy(h_Iobtained, d_weights, N_spots*(N_iterations)*sizeof(float), cudaMemcpyDeviceToHost)); M_SAFE_CALL(cudaMemcpy(h_pSLM_uc, d_pSLM_uc, memsize_SLMuc, cudaMemcpyDeviceToHost)); break; default: break; } //Handle CUDA errors status = cudaGetLastError(); return status; } //Allocate GPU memory extern "C" __declspec(dllexport) int startCUDA(float *h_pSLMstart, int deviceId) { //Make sure GPU with desired deviceId exists, set deviceId to 0 if not int deviceCount=0; if (cudaGetDeviceCount(&deviceCount)!=0) if (deviceId>=deviceCount) { deviceId=0; } M_SAFE_CALL(cudaSetDevice(deviceId)); cudaDeviceProp deviceProp; M_SAFE_CALL(cudaGetDeviceProperties(&deviceProp, deviceId)); maxThreads_device = deviceProp.maxThreadsPerBlock; borderWidthDC_i = 0; int MaxIterations = 1000; data_w = SLM_SIZE; cudaMemcpyToSymbol(c_data_w, &data_w, sizeof(int), 0, cudaMemcpyHostToDevice); float data_w_f = (float)data_w; cudaMemcpyToSymbol(c_data_w_f, &data_w_f, sizeof(float), 0, cudaMemcpyHostToDevice); int half_w = (int)(data_w/2); cudaMemcpyToSymbol(c_half_w, &half_w, sizeof(int), 0, cudaMemcpyHostToDevice); float half_w_f = (float)data_w/2.0f; cudaMemcpyToSymbol(c_half_w_f, &half_w_f, sizeof(float), 0, cudaMemcpyHostToDevice); N_pixels = data_w * data_w; cudaMemcpyToSymbol(c_N_pixels, &N_pixels, sizeof(int), 0, cudaMemcpyHostToDevice); float N_pixels_f = (float)N_pixels; cudaMemcpyToSymbol(c_N_pixels_f, &N_pixels_f, sizeof(float), 0, cudaMemcpyHostToDevice); int logN = (int)(log2(data_w_f)); cudaMemcpyToSymbol(c_log2data_w, &logN, sizeof(int), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(c_useRPC_b, &useRPC_b, sizeof(bool), 0, cudaMemcpyHostToDevice); float SLMpitch_f = 1.0f/data_w_f; cudaMemcpyToSymbol(c_SLMpitch_f, &SLMpitch_f, sizeof(float), 0, cudaMemcpyHostToDevice); N_iterations_last = 10; memsize_spotsf = MAX_SPOTS * sizeof(float); memsize_SLM_f = N_pixels * sizeof(float); memsize_SLMuc = N_pixels * sizeof(uint16_t); memsize_SLMcc = N_pixels * sizeof(cufftComplex); n_blocks_Phi = (N_pixels/BLOCK_SIZE + (N_pixels%BLOCK_SIZE == 0 ? 
0:1)); //memory allocations for all methods M_SAFE_CALL(cudaMalloc((void**)&d_x, memsize_spotsf )); M_SAFE_CALL(cudaMalloc((void**)&d_y, memsize_spotsf )); M_SAFE_CALL(cudaMalloc((void**)&d_z, memsize_spotsf )); M_SAFE_CALL(cudaMalloc((void**)&d_I, memsize_spotsf )); M_SAFE_CALL(cudaMalloc((void**)&d_desiredAmp, memsize_spotsf )); M_SAFE_CALL(cudaMalloc((void**)&d_weights, MAX_SPOTS*(MaxIterations+1)*sizeof(float))); M_SAFE_CALL(cudaMalloc((void**)&d_Iobtained, MAX_SPOTS*MaxIterations*sizeof(float))); M_SAFE_CALL(cudaMalloc((void**)&d_obtainedPhase, memsize_spotsf )); M_SAFE_CALL(cudaMalloc((void**)&d_spotRe_f, memsize_spotsf )); M_SAFE_CALL(cudaMalloc((void**)&d_spotIm_f, memsize_spotsf )); int data_w_pow2 = pow(2, ceil(log((float)data_w)/log(2.0f))); M_SAFE_CALL(cudaMalloc((void**)&d_pSLM_f, data_w_pow2*data_w_pow2*sizeof(float)));//the size of d_pSLM_f must be a power of 2 for the summation algorithm to work M_SAFE_CALL(cudaMemset(d_pSLM_f, 0, data_w_pow2*data_w_pow2*sizeof(float))); M_SAFE_CALL(cudaMalloc((void**)&d_pSLMstart_f, memsize_SLM_f)); M_SAFE_CALL(cudaMalloc((void**)&d_pSLM_uc, memsize_SLMuc)); M_SAFE_CALL(cudaMemset(d_pSLMstart_f, 0, N_pixels*sizeof(float))); M_SAFE_CALL(cudaMemcpy(d_pSLM_f, h_pSLMstart, N_pixels*sizeof(float), cudaMemcpyHostToDevice)); //memory allocations etc. for all FFT based Gerchberg-Saxton M_SAFE_CALL(cudaMalloc((void**)&d_spot_index, MAX_SPOTS * sizeof(int))); M_SAFE_CALL(cudaMalloc((void**)&d_FFTd_cc, memsize_SLMcc)); M_SAFE_CALL(cudaMalloc((void**)&d_FFTo_cc, memsize_SLMcc)); M_SAFE_CALL(cudaMalloc((void**)&d_SLM_cc, memsize_SLMcc)); M_SAFE_CALL(cudaDeviceSynchronize()); p2c <<< n_blocks_Phi, BLOCK_SIZE >>>(d_SLM_cc, d_pSLM_f, N_pixels); M_CHECK_ERROR(); cudaDeviceSynchronize(); M_CUFFT_SAFE_CALL(cufftPlan2d(&plan, data_w, data_w, CUFFT_C2C)); float *h_aLaserFFT = (float *)malloc(memsize_SLM_f); status = cudaGetLastError(); return status; } //Free GPU memory and shut down SLM extern "C" __declspec(dllexport) int stopCUDA() { M_SAFE_CALL(cudaFree(d_x)); M_SAFE_CALL(cudaFree(d_y)); M_SAFE_CALL(cudaFree(d_z)); M_SAFE_CALL(cudaFree(d_I)); M_SAFE_CALL(cudaFree(d_weights)); M_SAFE_CALL(cudaFree(d_Iobtained)); M_SAFE_CALL(cudaFree(d_pSLM_f)); M_SAFE_CALL(cudaFree(d_pSLMstart_f)); M_SAFE_CALL(cudaFree(d_pSLM_uc)); M_SAFE_CALL(cudaFree(d_FFTd_cc)); M_SAFE_CALL(cudaFree(d_FFTo_cc)); M_SAFE_CALL(cudaFree(d_SLM_cc)); M_CUFFT_SAFE_CALL(cufftDestroy(plan)); cudaDeviceReset(); status = cudaGetLastError(); return status; } //Device functions __device__ float uc2phase(float uc) { return (float)uc*2.0f*M_PI/65536.0f - M_PI; } __device__ uint16_t phase2uc(float phase2pi) { return (uint16_t)floor((phase2pi + M_PI)*65536.0f / (2.0f * M_PI)); } __device__ int phase2int32(float phase2pi) { return (int)floor((phase2pi + M_PI)*65536.0f / (2.0f * M_PI)); } __device__ float ApplyAberrationCorrection(float pSpot, float correction) { pSpot = pSpot - correction; //apply correction return (pSpot - (2.0f*M_PI) * floor((pSpot+M_PI) / (2.0f*M_PI))); //apply mod([-pi, pi], pSpot) } __device__ int getXint(int index) { #ifdef SLMPOW2 int X_int = index&(c_data_w[0]-1); #else float X_int= index%c_data_w[0]; #endif return X_int; } __device__ int getYint(int index, int X_int) { #ifdef SLMPOW2 int Y_int = (index-X_int)>>c_log2data_w[0]; #else int Y_int = (float)(floor((float)index/c_data_w_f[0])); #endif return Y_int; } __device__ int fftshift(int idx, int X, int Y) { if (X < c_half_w[0]) { if (Y < c_half_w[0]) { return idx + (c_data_w[0] * c_half_w[0]) + c_half_w[0]; } else { return idx - 
(c_data_w[0] * c_half_w[0]) + c_half_w[0]; } } else { if (Y < c_half_w[0]) { return idx + (c_data_w[0] * c_half_w[0]) - c_half_w[0]; } else { return idx - (c_data_w[0] * c_half_w[0]) - c_half_w[0]; } } } __device__ void warpReduceC(volatile float *s_Vre, volatile float *s_Vim, int tid) { s_Vre[tid] += s_Vre[tid + 32]; s_Vim[tid] += s_Vim[tid + 32]; s_Vre[tid] += s_Vre[tid + 16]; s_Vim[tid] += s_Vim[tid + 16]; s_Vre[tid] += s_Vre[tid + 8]; s_Vim[tid] += s_Vim[tid + 8]; s_Vre[tid] += s_Vre[tid + 4]; s_Vim[tid] += s_Vim[tid + 4]; s_Vre[tid] += s_Vre[tid + 2]; s_Vim[tid] += s_Vim[tid + 2]; s_Vre[tid] += s_Vre[tid + 1]; s_Vim[tid] += s_Vim[tid + 1]; } inline int computeAndCopySpotData(float *h_I, float *x, float *y, float *z, int N_spots, int method) { //float Isum = 0.0f; //for (int i = 0; i<N_spots; i++) // Isum += h_I[i]; for (int j = 0; j<N_spots; j++) { float sincx_rec = (x[j]==0)? 1.0f:((M_PI*x[j]/SLMsizef)/sinf(M_PI*x[j]/SLMsizef)); float sincy_rec = (y[j]==0)? 1.0f:((M_PI*y[j]/SLMsizef)/sinf(M_PI*y[j]/SLMsizef)); h_desiredAmp[j] = (h_I[j] <= 0.0f) ? 1.0f:(sincx_rec * sincy_rec * sqrtf(h_I[j]/100)*SLMsizef*SLMsizef); if (method == 2) h_spotIndex[j] = ((int)(x[j])&(data_w-1)) + ((int)(y[j])&(data_w-1))* data_w; } cudaMemcpyToSymbol(c_x, x, N_spots*sizeof(float), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(c_y, y, N_spots*sizeof(float), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(c_z, z, N_spots*sizeof(float), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(c_desiredAmp, h_desiredAmp, N_spots*sizeof(float), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(c_N_spots, &N_spots, sizeof(int), 0, cudaMemcpyHostToDevice); if (method == 2) cudaMemcpyToSymbol(c_spotIndex, h_spotIndex, N_spots*sizeof(int), 0, cudaMemcpyHostToDevice); if (N_spots == 0) method = -1; return method; } //Apply corrections to precalculated hologram __global__ void ApplyCorrections(uint16_t *g_pSLM_uc, uint16_t *g_LUT, float *g_AberrationCorr_f, float *g_LUTPolCoeff_f) { int idx = blockIdx.x * blockDim.x + threadIdx.x; float pSLM2pi_f = uc2phase(g_pSLM_uc[idx]); g_pSLM_uc[idx] = phase2uc(pSLM2pi_f); } //Calculate hologram using "Lenses and Prisms" __global__ void LensesAndPrisms(uint16_t *g_SLMuc, uint16_t *g_LUT, float *d_AberrationCorr_f, float *d_LUTPolCoeff_f) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < c_N_pixels[0]) { //get pixel coordinates int X_int = getXint(idx); int Y_int = getYint(idx, X_int); float X = c_SLMpitch_f[0]*(X_int - c_half_w_f[0]); float Y = c_SLMpitch_f[0]*(Y_int - c_half_w_f[0]); float phase2pi; float SLMre = 0.0f; float SLMim = 0.0f; for (int ii=0; ii<c_N_spots[0]; ++ii) { //add variable phases to function call phase2pi = M_PI * c_z[ii] * (X*X + Y*Y) + 2.0f * M_PI * (X * (c_x[ii]) + Y * (c_y[ii]) ); SLMre = SLMre + c_desiredAmp[ii] * cosf(phase2pi); SLMim = SLMim + c_desiredAmp[ii] * sinf(phase2pi); } phase2pi = atan2f(SLMim, SLMre); // [-pi,pi] g_SLMuc[idx] = phase2uc(phase2pi); } } __global__ void calculateIobtained(uint16_t *g_pSLM_uc, float *g_Iobtained) { int blockSize = c_data_w[0]; int spot_number = blockIdx.x; int tid = threadIdx.x; int i = tid; __shared__ float s_Vre[SLM_SIZE]; __shared__ float s_Vim[SLM_SIZE]; s_Vre[tid] = 0.0f; s_Vim[tid] = 0.0f; float X = c_SLMpitch_f[0] * ((float)tid - c_half_w_f[0]); float Y = - c_SLMpitch_f[0] * c_half_w_f[0]; float pSLM_1; float p; while (i < c_N_pixels[0]) { pSLM_1 = 2.0f*M_PI*(float)g_pSLM_uc[i]/65535.0f - M_PI; p = pSLM_1 - M_PI * (c_z[spot_number] * (X*X + Y*Y) + 2 * (X * c_x[spot_number] + Y * c_y[spot_number])); 
s_Vre[tid] += cosf(p); s_Vim[tid] += sinf(p); i += blockSize; Y += c_SLMpitch_f[0]; } /*__syncthreads(); if (tid < 512) { s_Vre[tid] += s_Vre[tid + 512]; s_Vim[tid] += s_Vim[tid + 512]; } */ __syncthreads(); if ((tid < 256)&&(SLM_SIZE>256)) { s_Vre[tid] += s_Vre[tid + 256]; s_Vim[tid] += s_Vim[tid + 256]; } __syncthreads(); if (tid < 128) { s_Vre[tid] += s_Vre[tid + 128]; s_Vim[tid] += s_Vim[tid + 128]; } __syncthreads(); if (tid < 64) { s_Vre[tid] += s_Vre[tid + 64]; s_Vim[tid] += s_Vim[tid + 64]; } __syncthreads(); if (tid < 32) warpReduceC(s_Vre, s_Vim, tid); if (tid == 0) { float spotRe_f = s_Vre[0] / c_N_pixels_f[0]; //512! float spotIm_f = s_Vim[0] / c_N_pixels_f[0]; float amp = hypotf(spotRe_f, spotIm_f); g_Iobtained[spot_number] = amp*amp; } } __global__ void calculateIandPhase(uint16_t *g_pSLM_uc, float *g_Iobtained, float *g_Pobtained) { int blockSize = c_data_w[0]; int spot_number = blockIdx.x; int tid = threadIdx.x; int i = tid; __shared__ float s_Vre[SLM_SIZE]; __shared__ float s_Vim[SLM_SIZE]; s_Vre[tid] = 0.0f; s_Vim[tid] = 0.0f; float X = c_SLMpitch_f[0] * ((float)tid - c_half_w_f[0]); float Y = - c_SLMpitch_f[0] * c_half_w_f[0]; float pSLM_1; float p; while (i < c_N_pixels[0]) { pSLM_1 = 2.0f*M_PI*(float)g_pSLM_uc[i]/65535.0f - M_PI; p = pSLM_1 - M_PI * (c_z[spot_number] * (X*X + Y*Y) + 2 * (X * c_x[spot_number] + Y * c_y[spot_number])); s_Vre[tid] += cosf(p+2*M_PI*c_z[spot_number]); s_Vim[tid] += sinf(p+2*M_PI*c_z[spot_number]); i += blockSize; Y += c_SLMpitch_f[0]; } /*__syncthreads(); if (tid < 512) { s_Vre[tid] += s_Vre[tid + 512]; s_Vim[tid] += s_Vim[tid + 512]; } */ __syncthreads(); if ((tid < 256)&&(SLM_SIZE>256)) { s_Vre[tid] += s_Vre[tid + 256]; s_Vim[tid] += s_Vim[tid + 256]; } __syncthreads(); if (tid < 128) { s_Vre[tid] += s_Vre[tid + 128]; s_Vim[tid] += s_Vim[tid + 128]; } __syncthreads(); if (tid < 64) { s_Vre[tid] += s_Vre[tid + 64]; s_Vim[tid] += s_Vim[tid + 64]; } __syncthreads(); if (tid < 32) warpReduceC(s_Vre, s_Vim, tid); if (tid == 0) { float spotRe_f = s_Vre[0] / c_N_pixels_f[0]; //512! 
float spotIm_f = s_Vim[0] / c_N_pixels_f[0]; float amp = hypotf(spotRe_f, spotIm_f); g_Pobtained[spot_number] = atan2f(spotIm_f , spotRe_f); g_Iobtained[spot_number] = amp*amp; } } //Functions for GS with Fresnel propagation //Propagate from the SLM to the spot positions using Fresnel summation //works only for blocksize = SLMsize __global__ void PropagateToSpotPositions_Fresnel(float *g_pSLM2pi, float *g_spotRe_f, float *g_spotIm_f) { int spot_number = blockIdx.x; int tid = threadIdx.x; int i = tid; __shared__ float s_Vre[SLM_SIZE]; __shared__ float s_Vim[SLM_SIZE]; s_Vre[tid] = 0.0f; s_Vim[tid] = 0.0f; int blockSize = blockDim.x; float X = c_SLMpitch_f[0] * ((float)tid - c_half_w_f[0]); float Y = - c_SLMpitch_f[0] * c_half_w_f[0]; float p; while (i < c_N_pixels[0]) { p = g_pSLM2pi[i] - M_PI * (c_z[spot_number] * (X*X + Y*Y) + 2.0f * (X * c_x[spot_number] + Y * c_y[spot_number])); s_Vre[tid] += cosf(p); s_Vim[tid] += sinf(p); i += blockSize; Y += c_SLMpitch_f[0]; } /*__syncthreads(); if (tid < 512) { s_Vre[tid] += s_Vre[tid + 512]; s_Vim[tid] += s_Vim[tid + 512]; } */ __syncthreads(); if ((tid < 256)&&(SLM_SIZE>256)) { s_Vre[tid] += s_Vre[tid + 256]; s_Vim[tid] += s_Vim[tid + 256]; } __syncthreads(); if (tid < 128) { s_Vre[tid] += s_Vre[tid + 128]; s_Vim[tid] += s_Vim[tid + 128]; } __syncthreads(); if (tid < 64) { s_Vre[tid] += s_Vre[tid + 64]; s_Vim[tid] += s_Vim[tid + 64]; } __syncthreads(); if (tid < 32) warpReduceC(s_Vre, s_Vim, tid); if (tid == 0) { g_spotRe_f[spot_number] = s_Vre[0];// / c_N_pixels_f[0]; g_spotIm_f[spot_number] = s_Vim[0];// / c_N_pixels_f[0]; } } //Propagate from the SLM to the spot positions using Fresnel summation //works only for blocksize = SLMsize __global__ void PropagateToSpotPositionsDC_Fresnel(float *g_pSLM_f, float *g_obtainedPhase, float *g_weights, float *obtainedI, int iteration) { int spot_number = blockIdx.x; int tid = threadIdx.x; int i = tid; __shared__ float s_Vre[SLM_SIZE]; __shared__ float s_Vim[SLM_SIZE]; float X, Y; float p; s_Vre[tid] = 0.0f; s_Vim[tid] = 0.0f; int X_int = getXint(i); X = c_SLMpitch_f[0]*(X_int - c_half_w_f[0]); Y = -0.5f; while (i < c_N_pixels[0]) { p = g_pSLM_f[i] - M_PI * (c_z[spot_number] * (X*X + Y*Y) + 2.0f * (X * c_x[spot_number] + Y * c_y[spot_number])); s_Vre[tid] += cosf(p); s_Vim[tid] += sinf(p); Y += c_SLMpitch_f[0]; i += SLM_SIZE; } __syncthreads(); if ((tid < 256)&&(SLM_SIZE>256)) { s_Vre[tid] += s_Vre[tid + 256]; s_Vim[tid] += s_Vim[tid + 256]; } __syncthreads(); if (tid < 128) { s_Vre[tid] += s_Vre[tid + 128]; s_Vim[tid] += s_Vim[tid + 128]; } __syncthreads(); if (tid < 64) { s_Vre[tid] += s_Vre[tid + 64]; s_Vim[tid] += s_Vim[tid + 64]; } __syncthreads(); if (tid < 32) warpReduceC(s_Vre, s_Vim, tid); if (tid == 0) { g_obtainedPhase[spot_number] = atan2f(s_Vim[0], s_Vre[0]); float obtainedAmp = hypotf(s_Vre[0], s_Vim[0]); float desiredAmp = c_desiredAmp[spot_number]; if (iteration != 0) { g_weights[spot_number + c_N_spots[0]*iteration] = g_weights[spot_number + c_N_spots[0]*(iteration-1)] * (desiredAmp / obtainedAmp); } else { //obtainedAmp = (obtainedAmp<0.5f) ? 
0.5f : obtainedAmp; g_weights[spot_number] = desiredAmp/c_N_pixels_f[0]; } if (c_saveI_b[0]) obtainedI[spot_number + c_N_spots[0]*iteration] = obtainedAmp*obtainedAmp/(desiredAmp*desiredAmp);//(c_N_pixels_f[0]*c_N_pixels_f[0]); } } //Obtain phases in SLM plane __global__ void PropagateToSLM_Fresnel(float *g_spotRe_f, float *g_spotIm_f, float *g_pSLM2pi, float *g_weights, int iteration, float *g_pSLMstart, float *g_Iobtained, bool getpSLM65535, uint16_t *g_pSLM65535_uc, uint16_t *g_LUT, float *g_AberrationCorr_f, float *g_LUTPolCoeff_f) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int tid = threadIdx.x; __shared__ float s_aSpot[MAX_SPOTS], s_aSpotsMean, s_weight[MAX_SPOTS], s_pSpot[MAX_SPOTS]; float reSLM = 0.0f, imSLM = 0.0f, pSLM2pi_f = 0.0f; if (idx<c_N_pixels[0]) { if (tid<c_N_spots[0]) { float spotRe_f = g_spotRe_f[tid]; float spotIm_f = g_spotIm_f[tid]; s_pSpot[tid] = atan2f(spotIm_f, spotRe_f); s_aSpot[tid] = hypotf(spotRe_f, spotIm_f)/c_desiredAmp[tid]; if (iteration != 0) s_weight[tid] = g_weights[tid + iteration*c_N_spots[0]]; else { s_aSpot[tid] = (s_aSpot[tid]<0.5f) ? 0.5f : s_aSpot[tid]; s_weight[tid] = c_desiredAmp[tid]; } } __syncthreads(); //compute weights if (tid==0) { float s_aSpot_sum = 0.0f; for (int jj=0; jj<c_N_spots[0];jj++) { s_aSpot_sum += s_aSpot[jj]; } s_aSpotsMean = s_aSpot_sum / (float)c_N_spots[0]; } __syncthreads(); if (tid<c_N_spots[0]) { s_weight[tid] = s_weight[tid] * s_aSpotsMean / s_aSpot[tid]; if (!getpSLM65535) //Copy weights to use as initial value next run g_weights[tid + c_N_spots[0]*(iteration+1)] = s_weight[tid]; //else // g_weights[tid] = s_weight[tid]; //Transferring weights to next run may give diverging weights if (c_saveI_b[0]) g_Iobtained[tid + c_N_spots[0]*iteration] = s_aSpot[tid]*s_aSpot[tid]; //may be excluded, used for monitoring only } __syncthreads(); //get pixel coordinates int X_int = getXint(idx); int Y_int = getYint(idx, X_int); float X = c_SLMpitch_f[0]*(X_int - c_half_w_f[0]); float Y = c_SLMpitch_f[0]*(Y_int - c_half_w_f[0]); //compute SLM pSpot by summing contribution from all spots for (int k=0; k<c_N_spots[0]; k++) { float delta = M_PI * c_z[k] * (X*X + Y*Y) + 2.0f * M_PI * (X * c_x[k] + Y * c_y[k]); reSLM += s_weight[k] * cosf(s_pSpot[k] + delta); imSLM += s_weight[k] * sinf(s_pSpot[k] + delta); } pSLM2pi_f = atan2f(imSLM, reSLM); if (c_useRPC_b[0]) //Apply RPC (restricted Phase Change) { float pSLMstart = g_pSLMstart[idx]; if (fabs(pSLM2pi_f - pSLMstart) > c_alphaRPC_f[0]) pSLM2pi_f = pSLMstart; if (getpSLM65535) g_pSLMstart[idx] = pSLM2pi_f; } if (getpSLM65535) //Compute final SLM phases and write to global memory... 
g_pSLM65535_uc[idx] = phase2uc(pSLM2pi_f); g_pSLM2pi[idx] = pSLM2pi_f; //...or write intermediate pSpot to global memory } } //Obtain phases in SLM plane __global__ void PropagateToSLMDC_Fresnel(float *g_pSpot, float *g_wSpot, cufftComplex *g_cSLM_cc, float *g_pSLM_f, int iteration, float *g_pSLMstart, bool getpSLM65535, uint16_t *g_pSLM65535_uc) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int tid = threadIdx.x; __shared__ float s_weight[MAX_SPOTS], s_pSpot[MAX_SPOTS]; float reSLM = 0.0f, imSLM = 0.0f, pSLM2pi_f = 0.0f; if (idx<c_N_pixels[0]) { if (tid<c_N_spots[0]) { s_pSpot[tid] = g_pSpot[tid]; s_weight[tid] = g_wSpot[tid+c_N_spots[0]*iteration]; } __syncthreads(); //get pixel coordinates int X_int = getXint(idx); int Y_int = getYint(idx, X_int); int shiftedidx = fftshift(idx, X_int, Y_int); float X = c_SLMpitch_f[0]*(X_int - c_half_w_f[0]); float Y = c_SLMpitch_f[0]*(Y_int - c_half_w_f[0]); //compute SLM pSpot by summing contribution from all spots for (int k=0; k<c_N_spots[0]; k++) { float delta = M_PI * c_z[k] * (X*X + Y*Y) + 2.0f * M_PI * (X * c_x[k] + Y * c_y[k]); reSLM += s_weight[k] * cosf(s_pSpot[k] + delta); imSLM += s_weight[k] * sinf(s_pSpot[k] + delta); } cufftComplex cSLM_cc = g_cSLM_cc[shiftedidx]; reSLM += cSLM_cc.x/c_N_pixels_f[0]; imSLM += cSLM_cc.y/c_N_pixels_f[0]; pSLM2pi_f = atan2f(imSLM, reSLM); if (c_useRPC_b[0]) //Apply RPC (restricted Phase Change) { float pSLMstart = g_pSLMstart[shiftedidx]; if (fabs(pSLM2pi_f - pSLMstart) > c_alphaRPC_f[0]) pSLM2pi_f = pSLMstart; if (getpSLM65535) g_pSLMstart[shiftedidx] = pSLM2pi_f; } g_pSLM_f[idx] = pSLM2pi_f; g_cSLM_cc[shiftedidx].x = cosf(pSLM2pi_f); g_cSLM_cc[shiftedidx].y = sinf(pSLM2pi_f); if (getpSLM65535) //Compute final SLM phases and write to global memory... g_pSLM65535_uc[idx] = phase2uc(pSLM2pi_f); } } //Clear inside the DC frame __global__ void setActiveRegionToZero(cufftComplex *g_Farfield_cc) //this only works if blocksize = nblocks = SLMsize = 512 { int tid = threadIdx.x; int bid = blockIdx.x; int idx = bid * blockDim.x + tid; if (((tid < (c_half_w[0] - c_DCborderWidth[0]))||(tid > ((c_half_w[0]-1) + c_DCborderWidth[0])))&&((bid < (c_half_w[0] - c_DCborderWidth[0]))||(bid > ((c_half_w[0]-1) + c_DCborderWidth[0])))) { g_Farfield_cc[idx].x = 0.0f; g_Farfield_cc[idx].y = 0.0f; } } //Functions for GS with FFT propagation //Compute the phase in SLM pixels and set amplitude to unity or Laser amp __global__ void ReplaceAmpsSLM_FFT(float *g_aLaser, cufftComplex *g_cAmp, float *g_pSLMstart, bool getpSLM65535, uint16_t *g_pSLM65535_uc, uint16_t *g_LUT, float *g_AberrationCorr_f, float *g_LUTPolCoeff_f) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx<c_N_pixels[0]) { float aLaser = 1.0f;//g_aLaser[idx]; cufftComplex cAmp = g_cAmp[idx]; float pSLM2pi_f = atan2f(cAmp.y, cAmp.x); if (c_useRPC_b[0]) { float pSLMstart = g_pSLMstart[idx]; if (fabs(pSLM2pi_f - pSLMstart) > c_alphaRPC_f[0]) pSLM2pi_f = pSLMstart; } if (getpSLM65535) { if (c_useRPC_b[0]) g_pSLMstart[idx] = pSLM2pi_f; //float phase65535; int X_int = getXint(idx); int Y_int = getYint(idx, X_int); int shiftedidx = fftshift(idx, X_int, Y_int); g_pSLM65535_uc[shiftedidx] = phase2uc(pSLM2pi_f); } g_cAmp[idx].x = aLaser*cosf(pSLM2pi_f); g_cAmp[idx].y = aLaser*sinf(pSLM2pi_f); } __syncthreads(); } //Adjust amplitudes in spot positions __global__ void ReplaceAmpsSpots_FFT(cufftComplex *g_cSpotAmp_cc, cufftComplex *g_cSpotAmpNew_cc, int iteration, float *g_Iobtained, float *g_weight, bool last_iteration) { int tid = threadIdx.x; int spotIndex; float pSpot; 
__shared__ float s_aSpot[MAX_SPOTS], s_ISpotsMeanSq; float weight; cufftComplex cSpotAmp_cc; if (tid<c_N_spots[0]) { spotIndex = c_spotIndex[tid]; cSpotAmp_cc = g_cSpotAmp_cc[spotIndex]; pSpot = atan2f(cSpotAmp_cc.y, cSpotAmp_cc.x); s_aSpot[tid] = hypotf(cSpotAmp_cc.x, cSpotAmp_cc.y)/c_desiredAmp[tid]; if (iteration != 0) weight = g_weight[tid + iteration*c_N_spots[0]]; else { s_aSpot[tid] = (s_aSpot[tid]<0.5f) ? 0.5f : s_aSpot[tid]; weight = c_desiredAmp[tid]; } } __syncthreads(); //compute weights if (tid==0) { float ISpot_sum = 0.0f; for (int jj=0; jj<c_N_spots[0];jj++) { ISpot_sum += s_aSpot[jj]*s_aSpot[jj]; } s_ISpotsMeanSq = sqrtf(ISpot_sum / (float)c_N_spots[0]); //integer division!! } __syncthreads(); if (tid<c_N_spots[0]) { weight = weight * s_ISpotsMeanSq / s_aSpot[tid]; cSpotAmp_cc.x = cosf(pSpot) * weight; cSpotAmp_cc.y = sinf(pSpot) * weight; g_cSpotAmpNew_cc[spotIndex] = cSpotAmp_cc; if (last_iteration) g_weight[tid] = weight; else g_weight[c_N_spots[0] * (iteration + 1) + tid] = weight; if (c_saveI_b[0]) g_Iobtained[c_N_spots[0] * (iteration) + tid] = s_aSpot[tid]*s_aSpot[tid]; } } //Adjust amplitudes in spot positions __global__ void ReplaceAmpsSpotsDC_FFT(cufftComplex *g_cSpotAmp_cc, cufftComplex *g_cSpotAmpNew_cc, int iteration, float *g_Iobtained, float *g_weight, bool last_iteration) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int spotIndex; float pSpot; //__shared__ float s_aSpot[MAX_SPOTS], s_ISpotsMeanSq; float weight; cufftComplex cSpotAmp_cc; if (idx<c_N_spots[0]) { spotIndex = c_spotIndex[idx]; cSpotAmp_cc = g_cSpotAmp_cc[spotIndex]; pSpot = atan2f(cSpotAmp_cc.y, cSpotAmp_cc.x); float aSpot = hypotf(cSpotAmp_cc.x, cSpotAmp_cc.y)/c_desiredAmp[idx]; if (iteration != 0) weight = g_weight[idx + iteration*c_N_spots[0]]; else { aSpot = (aSpot<0.5f) ? 0.5f : aSpot; //should it be done like this with DC?
weight = c_desiredAmp[idx]/(c_N_pixels_f[0]); } weight = weight / aSpot; cSpotAmp_cc.x = cosf(pSpot) * weight; cSpotAmp_cc.y = sinf(pSpot) * weight; g_cSpotAmpNew_cc[spotIndex] = cSpotAmp_cc; if (last_iteration) g_weight[idx] = weight; else g_weight[c_N_spots[0] * (iteration + 1) + idx] = weight; if (c_saveI_b[0]) g_Iobtained[c_N_spots[0] * (iteration) + idx] = aSpot*aSpot; } int X_int = getXint(idx); int Y_int = getYint(idx, X_int); if (((X_int > (c_half_w[0] - c_DCborderWidth[0]))&&(X_int < ((c_half_w[0]-1) + c_DCborderWidth[0])))||((Y_int > (c_half_w[0] - c_DCborderWidth[0]))&&(Y_int < ((c_half_w[0]-1) + c_DCborderWidth[0])))) { g_cSpotAmpNew_cc[idx].x = g_cSpotAmp_cc[idx].x/(c_N_pixels_f[0]); g_cSpotAmpNew_cc[idx].y = g_cSpotAmp_cc[idx].y/(c_N_pixels_f[0]); } } //Misc help functions __global__ void testfunc(float *testdata) { int idx = blockIdx.x * blockDim.x + threadIdx.x; testdata[idx] = idx; } //Convert from uint16_t [0, 65535] to float [-pi, pi] __global__ void uc2f(float *f, uint16_t *uc, int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx<N) { f[idx] = uc[idx]*2.0f*M_PI/65536.0f - M_PI; } } //Calculate complex from phases __global__ void p2c(cufftComplex *g_c, float *g_p, int M) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx<M) { float pSpot = g_p[idx]; g_c[idx].x = cosf(pSpot); g_c[idx].y = sinf(pSpot); } __syncthreads(); } //Calculate amplitudes from complex __global__ void c_cc2a_f(float *g_a, cufftComplex *g_c, int M) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx<M) { g_a[idx] = hypotf(g_c[idx].x, g_c[idx].y); } __syncthreads(); } //Calculate phases from complex __global__ void c_cc2p_cc(cufftComplex *g_p, cufftComplex *g_c, int M) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx<M) { g_p[idx].x = atan2f(g_c[idx].y, g_c[idx].x); g_p[idx].y = 0.0f; } __syncthreads(); } //Calculate phases from complex __global__ void c_cc2p_f(float *g_p, cufftComplex *g_c, int M) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx<M) { g_p[idx] = atan2f(g_c[idx].y, g_c[idx].x); } __syncthreads(); } //Copy real part from complex __global__ void c_cc2re_f(float *g_p, cufftComplex *g_c, int M) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx<M) { g_p[idx] = g_c[idx].x; } __syncthreads(); } //Copy imaginary part from complex __global__ void c_cc2im_f(float *g_p, cufftComplex *g_c, int M) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx<M) { g_p[idx] = g_c[idx].y; } __syncthreads(); } //Custom debug functions inline void mSafeCall(cudaError_t status, int line, const char *file) { #ifdef M_CUDA_DEBUG do { if(status != cudaSuccess) { char CUDAmessage[200] = "CUDA says: "; strcat(CUDAmessage, cudaGetErrorString(status)); sprintf(CUDAmessage, "%s\non line: %d\n", CUDAmessage, line); //AfxMessageBox(CUDAmessage); if (status != CUFFT_SUCCESS) exit(-1); } cudaDeviceSynchronize(); status = cudaGetLastError(); if(status!=cudaSuccess) { char CUDAmessage[200] = "CUDA failed after sychronization:\n"; strcat(CUDAmessage, cudaGetErrorString(status)); sprintf(CUDAmessage, "%s\non line: %d\n", CUDAmessage, line); //AfxMessageBox(CUDAmessage); exit(-1); } }while(0); #endif return; } inline void mCufftSafeCall(cufftResult_t status, int line, const char *file) { #ifdef M_CUDA_DEBUG if(status != CUFFT_SUCCESS) { char CUDAmessage[200] = "CUFFT error, CUDA says:\n "; switch (status) { case CUFFT_INVALID_PLAN: strcat(CUDAmessage,"CUFFT_INVALID_PLAN\n");break; case CUFFT_ALLOC_FAILED: strcat(CUDAmessage,"CUFFT_ALLOC_FAILED\n");break; case 
CUFFT_INVALID_TYPE: strcat(CUDAmessage,"CUFFT_INVALID_TYPE\n");break; case CUFFT_INVALID_VALUE: strcat(CUDAmessage,"CUFFT_INVALID_VALUE\n");break; case CUFFT_INTERNAL_ERROR: strcat(CUDAmessage,"CUFFT_INTERNAL_ERROR\n");break; case CUFFT_EXEC_FAILED: strcat(CUDAmessage,"CUFFT_EXEC_FAILED\n");break; case CUFFT_SETUP_FAILED: strcat(CUDAmessage,"CUFFT_SETUP_FAILED\n");break; case CUFFT_INVALID_SIZE: strcat(CUDAmessage,"CUFFT_INVALID_SIZE\n");break; case CUFFT_UNALIGNED_DATA: strcat(CUDAmessage,"CUFFT_UNALIGNED_DATA\n");break; default: strcat(CUDAmessage,"CUFFT Unknown error code\n"); } sprintf(CUDAmessage, "%son line: %d\nin file: %s", CUDAmessage, line, file); //AfxMessageBox(CUDAmessage); exit(-1); } cudaDeviceSynchronize(); cudaError_t status2 = cudaGetLastError(); if(status2!=cudaSuccess) { char CUDAmessage[200] = "CUDA failed after sychronization:\n"; strcat(CUDAmessage, cudaGetErrorString(status2)); sprintf(CUDAmessage, "%s\non line: %d\n", CUDAmessage, line); //AfxMessageBox(CUDAmessage); exit(-1); } #endif return; } inline void mCheckError(int line, const char *file) { #ifdef M_CUDA_DEBUG do { cudaError_t status = cudaGetLastError(); if(status!=cudaSuccess) { char CUDAmessage[200] = "CUDA says: "; strcat(CUDAmessage, cudaGetErrorString(status)); sprintf(CUDAmessage, "%s\non line: %d\n", CUDAmessage, line); //AfxMessageBox(CUDAmessage); exit(-1); } cudaDeviceSynchronize(); status = cudaGetLastError(); if(status!=cudaSuccess) { char CUDAmessage[200] = "CUDA failed after sychronization:\n"; strcat(CUDAmessage, cudaGetErrorString(status)); sprintf(CUDAmessage, "%s\non line: %d\n", CUDAmessage, line); //AfxMessageBox(CUDAmessage); exit(-1); } }while(0); #endif return; } inline void mDisplayDataF(float *d_data, int length, int line) { #ifdef M_CUDA_DEBUG do { int maxlength = 50; float *h_data; length = (length<=maxlength) ? length : maxlength; char MessageString[1000]; h_data = (float*)malloc(length * sizeof (float)); M_SAFE_CALL(cudaMemcpy(h_data, d_data, length*sizeof(float), cudaMemcpyDeviceToHost)); sprintf(MessageString, "Line: %d\nData: ", line); for (int ii = 0;ii<length;++ii) { sprintf(MessageString, "%s %f", MessageString, h_data[ii]); } //AfxMessageBox(MessageString, MB_ICONINFORMATION); free(h_data); }while(0); #endif return; } inline void mDisplayDataCC(cufftComplex *d_data, int length, int line) { #ifdef M_CUDA_DEBUG do { int maxlength = 25; cufftComplex *h_data; length = (length<=maxlength) ? length : maxlength; char MessageString[1000]; h_data = (cufftComplex*)malloc(length * sizeof (cufftComplex)); M_SAFE_CALL(cudaMemcpy(h_data, d_data, length*sizeof(cufftComplex), cudaMemcpyDeviceToHost)); sprintf(MessageString, "Line: %d\nData: ", line); for (int ii = 0;ii<length;++ii) { sprintf(MessageString, "%s re: %f im: %f", MessageString, h_data[ii].x, h_data[ii].y); } //AfxMessageBox(MessageString, MB_ICONINFORMATION); free(h_data); }while(0); #endif return; } inline void mDisplayDataUC(uint16_t *d_data, int length, int line) { #ifdef M_CUDA_DEBUG do { int maxlength = 50; uint16_t *h_data; length = (length<=maxlength) ? 
length : maxlength; char MessageString[1000]; h_data = (uint16_t*)malloc(length * sizeof (uint16_t)); M_SAFE_CALL(cudaMemcpy(h_data, d_data, length*sizeof(uint16_t), cudaMemcpyDeviceToHost)); sprintf(MessageString, "Line: %d\nData: ", line); for (int ii = 0;ii<length;++ii) { sprintf(MessageString, "%s %hhu", MessageString, h_data[ii]); } //AfxMessageBox(MessageString, MB_ICONINFORMATION); free(h_data); }while(0); #endif return; } inline void mDisplayDataI(int *d_data, int length, int line) { #ifdef M_CUDA_DEBUG do { int maxlength = 50; int *h_data; length = (length<=maxlength) ? length : maxlength; char MessageString[1000]; h_data = (int*)malloc(length * sizeof (int)); M_SAFE_CALL(cudaMemcpy(h_data, d_data, length*sizeof(int), cudaMemcpyDeviceToHost)); sprintf(MessageString, "Line: %d\nData: ", line); for (int ii = 0;ii<length;++ii) { sprintf(MessageString, "%s %d", MessageString, h_data[ii]); } //AfxMessageBox(MessageString, MB_ICONINFORMATION); free(h_data); }while(0); #endif return; } //Calculate amplitudes in positions given by x, y, and z from a given hologram extern "C" __declspec(dllexport) int GetIandPhase(float *x_spots, float *y_spots, float *z_spots, float *h_pSLM_uc, int N_spots_all, int data_w, float *h_I_obt, float *h_Phase_obt) { float *d_Iobtained_all; float *d_Pobtained_all; cudaMalloc((void**)&d_Iobtained_all, N_spots_all*sizeof(float) ); cudaMalloc((void**)&d_Pobtained_all, N_spots_all*sizeof(float) ); cudaMemcpy(d_pSLM_uc, h_pSLM_uc, memsize_SLMuc, cudaMemcpyHostToDevice); int offset = 0; int N_spots_rem = N_spots_all; int N_spots_this; while (N_spots_rem > 0) { N_spots_this = (N_spots_rem > MAX_SPOTS) ? MAX_SPOTS : N_spots_rem; cudaMemcpyToSymbol(c_x, x_spots+offset, N_spots_this*sizeof(float), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(c_y, y_spots+offset, N_spots_this*sizeof(float), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(c_z, z_spots+offset, N_spots_this*sizeof(float), 0, cudaMemcpyHostToDevice); calculateIandPhase<<< N_spots_this, 512>>>(d_pSLM_uc, d_Iobtained_all+offset, d_Pobtained_all+offset); //calculateIobtained(uint16_t *g_pSLM_uc, float *g_Iobtained) cudaDeviceSynchronize(); N_spots_rem -= MAX_SPOTS; offset += MAX_SPOTS; } cudaMemcpy(h_I_obt, d_Iobtained_all, N_spots_all*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(h_Phase_obt, d_Pobtained_all, N_spots_all*sizeof(float), cudaMemcpyDeviceToHost); cudaFree(d_Iobtained_all); cudaFree(d_Pobtained_all); status = cudaGetLastError(); return status; }
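// --- Illustrative sketch (not part of the file above) ---------------------
// calculateIandPhase and the PropagateToSpotPositions* kernels above all use
// the same reduction pattern: each thread accumulates a partial complex field
// (cos, sin of the phase) over a strided subset of SLM pixels, the block then
// folds the shared arrays in halves, and warpReduceC (called above, its
// definition is not in this excerpt) finishes the last 32 entries. The minimal
// CUDA sketch below isolates that pattern only; BLOCK, SumFieldDemo and the
// __shfl_down_sync-based warp finish are stand-ins of mine, not code from the
// file.
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <cuda_runtime.h>

#define BLOCK 256   // stand-in for SLM_SIZE; one block reduces one spot

__global__ void SumFieldDemo(const float *g_phase, int n, float *g_I)
{
    __shared__ float s_re[BLOCK], s_im[BLOCK];
    int tid = threadIdx.x;

    // Strided accumulation: thread tid handles pixels tid, tid+BLOCK, ...
    float re = 0.0f, im = 0.0f;
    for (int i = tid; i < n; i += BLOCK) {
        re += cosf(g_phase[i]);
        im += sinf(g_phase[i]);
    }
    s_re[tid] = re;
    s_im[tid] = im;
    __syncthreads();

    // Tree reduction in shared memory down to 64 partial sums
    for (int s = BLOCK / 2; s > 32; s >>= 1) {
        if (tid < s) {
            s_re[tid] += s_re[tid + s];
            s_im[tid] += s_im[tid + s];
        }
        __syncthreads();
    }

    // Final 64 -> 1 inside the first warp (in place of warpReduceC)
    if (tid < 32) {
        float r = s_re[tid] + s_re[tid + 32];
        float m = s_im[tid] + s_im[tid + 32];
        for (int off = 16; off > 0; off >>= 1) {
            r += __shfl_down_sync(0xffffffff, r, off);
            m += __shfl_down_sync(0xffffffff, m, off);
        }
        if (tid == 0) {
            float amp = hypotf(r / n, m / n);  // normalised field amplitude
            *g_I = amp * amp;                  // intensity, as in the kernels above
        }
    }
}

int main()
{
    const int n = 4096;
    float *h_phase = (float *)malloc(n * sizeof(float));
    for (int i = 0; i < n; ++i) h_phase[i] = 0.001f * i;  // arbitrary test phases

    float *d_phase, *d_I;
    cudaMalloc(&d_phase, n * sizeof(float));
    cudaMalloc(&d_I, sizeof(float));
    cudaMemcpy(d_phase, h_phase, n * sizeof(float), cudaMemcpyHostToDevice);

    SumFieldDemo<<<1, BLOCK>>>(d_phase, n, d_I);

    float I;
    cudaMemcpy(&I, d_I, sizeof(float), cudaMemcpyDeviceToHost);
    printf("normalised intensity: %f\n", I);

    cudaFree(d_phase); cudaFree(d_I); free(h_phase);
    return 0;
}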
1b9ce78b581f4d0ba5e29a3d4f0cbab55f1c16e7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 2020-2023, XGBoost Contributors */ #include <algorithm> // std::max #include <vector> #include <limits> #include "../../collective/communicator-inl.cuh" #include "../../common/categorical.h" #include "../../data/ellpack_page.cuh" #include "evaluate_splits.cuh" #include "expand_entry.cuh" namespace xgboost::tree { // With constraints XGBOOST_DEVICE float LossChangeMissing(const GradientPairInt64 &scan, const GradientPairInt64 &missing, const GradientPairInt64 &parent_sum, const GPUTrainingParam &param, bst_node_t nidx, bst_feature_t fidx, TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator, bool &missing_left_out, const GradientQuantiser& quantiser) { // NOLINT const auto left_sum = scan + missing; float missing_left_gain = evaluator.CalcSplitGain( param, nidx, fidx, quantiser.ToFloatingPoint(left_sum), quantiser.ToFloatingPoint(parent_sum - left_sum)); float missing_right_gain = evaluator.CalcSplitGain( param, nidx, fidx, quantiser.ToFloatingPoint(scan), quantiser.ToFloatingPoint(parent_sum - scan)); missing_left_out = missing_left_gain > missing_right_gain; return missing_left_out?missing_left_gain:missing_right_gain; } // This kernel uses block_size == warp_size. This is an unusually small block size for a cuda kernel // - normally a larger block size is preferred to increase the number of resident warps on each SM // (occupancy). In the below case each thread has a very large amount of work per thread relative to // typical cuda kernels. Thus the SM can be highly utilised by a small number of threads. It was // discovered by experiments that a small block size here is significantly faster. Furthermore, // using only a single warp, synchronisation barriers are eliminated and broadcasts can be performed // using warp intrinsics instead of slower shared memory. 
template <int kBlockSize> class EvaluateSplitAgent { public: using ArgMaxT = hipcub::KeyValuePair<int, float>; using BlockScanT = hipcub::BlockScan<GradientPairInt64, kBlockSize>; using MaxReduceT = hipcub::WarpReduce<ArgMaxT>; using SumReduceT = hipcub::WarpReduce<GradientPairInt64>; struct TempStorage { typename BlockScanT::TempStorage scan; typename MaxReduceT::TempStorage max_reduce; typename SumReduceT::TempStorage sum_reduce; }; const int fidx; const int nidx; const float min_fvalue; const uint32_t gidx_begin; // beginning bin const uint32_t gidx_end; // end bin for i^th feature const dh::LDGIterator<float> feature_values; const GradientPairInt64 *node_histogram; const GradientQuantiser &rounding; const GradientPairInt64 parent_sum; const GradientPairInt64 missing; const GPUTrainingParam &param; const TreeEvaluator::SplitEvaluator<GPUTrainingParam> &evaluator; TempStorage *temp_storage; SumCallbackOp<GradientPairInt64> prefix_op; static float constexpr kNullGain = -std::numeric_limits<bst_float>::infinity(); __device__ EvaluateSplitAgent( TempStorage *temp_storage, int fidx, const EvaluateSplitInputs &inputs, const EvaluateSplitSharedInputs &shared_inputs, const TreeEvaluator::SplitEvaluator<GPUTrainingParam> &evaluator) : temp_storage(temp_storage), nidx(inputs.nidx), fidx(fidx), min_fvalue(__ldg(shared_inputs.min_fvalue.data() + fidx)), gidx_begin(__ldg(shared_inputs.feature_segments.data() + fidx)), gidx_end(__ldg(shared_inputs.feature_segments.data() + fidx + 1)), feature_values(shared_inputs.feature_values.data()), node_histogram(inputs.gradient_histogram.data()), rounding(shared_inputs.rounding), parent_sum(dh::LDGIterator<GradientPairInt64>(&inputs.parent_sum)[0]), param(shared_inputs.param), evaluator(evaluator), missing(parent_sum - ReduceFeature()) { static_assert( kBlockSize == 32, "This kernel relies on the assumption block_size == warp_size"); // There should be no missing value gradients for a dense matrix KERNEL_CHECK(!shared_inputs.is_dense || missing.GetQuantisedHess() == 0); } __device__ GradientPairInt64 ReduceFeature() { GradientPairInt64 local_sum; for (int idx = gidx_begin + threadIdx.x; idx < gidx_end; idx += kBlockSize) { local_sum += LoadGpair(node_histogram + idx); } local_sum = SumReduceT(temp_storage->sum_reduce).Sum(local_sum); // NOLINT // Broadcast result from thread 0 return {__shfl_sync(0xffffffff, local_sum.GetQuantisedGrad(), 0), __shfl_sync(0xffffffff, local_sum.GetQuantisedHess(), 0)}; } // Load using efficient 128 vector load instruction __device__ __forceinline__ GradientPairInt64 LoadGpair(const GradientPairInt64 *ptr) { float4 tmp = *reinterpret_cast<const float4 *>(ptr); auto gpair = *reinterpret_cast<const GradientPairInt64 *>(&tmp); static_assert(sizeof(decltype(gpair)) == sizeof(float4), "Vector type size does not match gradient pair size."); return gpair; } __device__ __forceinline__ void Numerical(DeviceSplitCandidate *__restrict__ best_split) { for (int scan_begin = gidx_begin; scan_begin < gidx_end; scan_begin += kBlockSize) { bool thread_active = (scan_begin + threadIdx.x) < gidx_end; GradientPairInt64 bin = thread_active ? LoadGpair(node_histogram + scan_begin + threadIdx.x) : GradientPairInt64(); BlockScanT(temp_storage->scan).ExclusiveScan(bin, bin, hipcub::Sum(), prefix_op); // Whether the gradient of missing values is put to the left side. bool missing_left = true; float gain = thread_active ? 
LossChangeMissing(bin, missing, parent_sum, param, nidx, fidx, evaluator, missing_left, rounding) : kNullGain; // Find thread with best gain auto best = MaxReduceT(temp_storage->max_reduce).Reduce({threadIdx.x, gain}, hipcub::ArgMax()); // This reduce result is only valid in thread 0 // broadcast to the rest of the warp auto best_thread = __shfl_sync(0xffffffff, best.key, 0); // Best thread updates the split if (threadIdx.x == best_thread) { // Use pointer from cut to indicate begin and end of bins for each feature. int split_gidx = (scan_begin + threadIdx.x) - 1; float fvalue = split_gidx < static_cast<int>(gidx_begin) ? min_fvalue : feature_values[split_gidx]; GradientPairInt64 left = missing_left ? bin + missing : bin; GradientPairInt64 right = parent_sum - left; best_split->Update(gain, missing_left ? kLeftDir : kRightDir, fvalue, fidx, left, right, false, param, rounding); } } } __device__ __forceinline__ void OneHot(DeviceSplitCandidate *__restrict__ best_split) { for (int scan_begin = gidx_begin; scan_begin < gidx_end; scan_begin += kBlockSize) { bool thread_active = (scan_begin + threadIdx.x) < gidx_end; auto rest = thread_active ? LoadGpair(node_histogram + scan_begin + threadIdx.x) : GradientPairInt64(); GradientPairInt64 bin = parent_sum - rest - missing; // Whether the gradient of missing values is put to the left side. bool missing_left = true; float gain = thread_active ? LossChangeMissing(bin, missing, parent_sum, param, nidx, fidx, evaluator, missing_left, rounding) : kNullGain; // Find thread with best gain auto best = MaxReduceT(temp_storage->max_reduce).Reduce({threadIdx.x, gain}, hipcub::ArgMax()); // This reduce result is only valid in thread 0 // broadcast to the rest of the warp auto best_thread = __shfl_sync(0xffffffff, best.key, 0); // Best thread updates the split if (threadIdx.x == best_thread) { int32_t split_gidx = (scan_begin + threadIdx.x); float fvalue = feature_values[split_gidx]; GradientPairInt64 left = missing_left ? bin + missing : bin; GradientPairInt64 right = parent_sum - left; best_split->UpdateCat(gain, missing_left ? kLeftDir : kRightDir, static_cast<bst_cat_t>(fvalue), fidx, left, right, param, rounding); } } } /** * \brief Gather and update the best split. */ __device__ __forceinline__ void PartitionUpdate(bst_bin_t scan_begin, bool thread_active, bool missing_left, bst_bin_t it, GradientPairInt64 const &left_sum, GradientPairInt64 const &right_sum, DeviceSplitCandidate *__restrict__ best_split) { auto gain = thread_active ? evaluator.CalcSplitGain(param, nidx, fidx, rounding.ToFloatingPoint(left_sum), rounding.ToFloatingPoint(right_sum)) : kNullGain; // Find thread with best gain auto best = MaxReduceT(temp_storage->max_reduce).Reduce({threadIdx.x, gain}, hipcub::ArgMax()); // This reduce result is only valid in thread 0 // broadcast to the rest of the warp auto best_thread = __shfl_sync(0xffffffff, best.key, 0); // Best thread updates the split if (threadIdx.x == best_thread) { assert(thread_active); // index of best threshold inside a feature. auto best_thresh = it - gidx_begin; best_split->UpdateCat(gain, missing_left ? kLeftDir : kRightDir, best_thresh, fidx, left_sum, right_sum, param, rounding); } } /** * \brief Partition-based split for categorical feature. 
*/ __device__ __forceinline__ void Partition(DeviceSplitCandidate *__restrict__ best_split, common::Span<bst_feature_t> sorted_idx, std::size_t node_offset, GPUTrainingParam const &param) { bst_bin_t n_bins_feature = gidx_end - gidx_begin; auto n_bins = ::min(param.max_cat_threshold, n_bins_feature); bst_bin_t it_begin = gidx_begin; bst_bin_t it_end = it_begin + n_bins - 1; // forward for (bst_bin_t scan_begin = it_begin; scan_begin < it_end; scan_begin += kBlockSize) { auto it = scan_begin + static_cast<bst_bin_t>(threadIdx.x); bool thread_active = it < it_end; auto right_sum = thread_active ? LoadGpair(node_histogram + sorted_idx[it] - node_offset) : GradientPairInt64(); // No min value for cat feature, use inclusive scan. BlockScanT(temp_storage->scan).InclusiveSum(right_sum, right_sum, prefix_op); GradientPairInt64 left_sum = parent_sum - right_sum; PartitionUpdate(scan_begin, thread_active, true, it, left_sum, right_sum, best_split); } // backward it_begin = gidx_end - 1; it_end = it_begin - n_bins + 1; prefix_op = SumCallbackOp<GradientPairInt64>{}; // reset for (bst_bin_t scan_begin = it_begin; scan_begin > it_end; scan_begin -= kBlockSize) { auto it = scan_begin - static_cast<bst_bin_t>(threadIdx.x); bool thread_active = it > it_end; auto left_sum = thread_active ? LoadGpair(node_histogram + sorted_idx[it] - node_offset) : GradientPairInt64(); // No min value for cat feature, use inclusive scan. BlockScanT(temp_storage->scan).InclusiveSum(left_sum, left_sum, prefix_op); GradientPairInt64 right_sum = parent_sum - left_sum; PartitionUpdate(scan_begin, thread_active, false, it, left_sum, right_sum, best_split); } } }; template <int kBlockSize> __global__ __launch_bounds__(kBlockSize) void EvaluateSplitsKernel( bst_feature_t max_active_features, common::Span<const EvaluateSplitInputs> d_inputs, const EvaluateSplitSharedInputs shared_inputs, common::Span<bst_feature_t> sorted_idx, const TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator, common::Span<DeviceSplitCandidate> out_candidates) { // Aligned && shared storage for best_split __shared__ cub::Uninitialized<DeviceSplitCandidate> uninitialized_split; DeviceSplitCandidate &best_split = uninitialized_split.Alias(); if (threadIdx.x == 0) { best_split = DeviceSplitCandidate(); } __syncthreads(); // Allocate blocks to one feature of one node const auto input_idx = blockIdx.x / max_active_features; const EvaluateSplitInputs &inputs = d_inputs[input_idx]; // One block for each feature. 
Features are sampled, so fidx != blockIdx.x // Some blocks may not have any feature to work on, simply return int feature_offset = blockIdx.x % max_active_features; if (feature_offset >= inputs.feature_set.size()) { return; } int fidx = inputs.feature_set[feature_offset]; using AgentT = EvaluateSplitAgent<kBlockSize>; __shared__ typename AgentT::TempStorage temp_storage; AgentT agent(&temp_storage, fidx, inputs, shared_inputs, evaluator); if (common::IsCat(shared_inputs.feature_types, fidx)) { auto n_bins_in_feat = shared_inputs.feature_segments[fidx + 1] - shared_inputs.feature_segments[fidx]; if (common::UseOneHot(n_bins_in_feat, shared_inputs.param.max_cat_to_onehot)) { agent.OneHot(&best_split); } else { auto total_bins = shared_inputs.feature_values.size(); size_t offset = total_bins * input_idx; auto node_sorted_idx = sorted_idx.subspan(offset, total_bins); agent.Partition(&best_split, node_sorted_idx, offset, shared_inputs.param); } } else { agent.Numerical(&best_split); } cub::CTA_SYNC(); if (threadIdx.x == 0) { // Record best loss for each feature out_candidates[blockIdx.x] = best_split; } } __device__ DeviceSplitCandidate operator+(const DeviceSplitCandidate &a, const DeviceSplitCandidate &b) { return b.loss_chg > a.loss_chg ? b : a; } /** * \brief Set the bits for categorical splits based on the split threshold. */ __device__ void SetCategoricalSplit(const EvaluateSplitSharedInputs &shared_inputs, common::Span<bst_feature_t const> d_sorted_idx, bst_feature_t fidx, std::size_t input_idx, common::Span<common::CatBitField::value_type> out, DeviceSplitCandidate *p_out_split) { auto &out_split = *p_out_split; auto out_cats = common::CatBitField{out}; // Simple case for one hot split if (common::UseOneHot(shared_inputs.FeatureBins(fidx), shared_inputs.param.max_cat_to_onehot)) { out_cats.Set(common::AsCat(out_split.thresh)); return; } // partition-based split auto node_sorted_idx = d_sorted_idx.subspan(shared_inputs.feature_values.size() * input_idx, shared_inputs.feature_values.size()); size_t node_offset = input_idx * shared_inputs.feature_values.size(); auto const best_thresh = out_split.thresh; if (best_thresh == -1) { return; } auto f_sorted_idx = node_sorted_idx.subspan(shared_inputs.feature_segments[fidx], shared_inputs.FeatureBins(fidx)); bool forward = out_split.dir == kLeftDir; bst_bin_t partition = forward ? 
best_thresh + 1 : best_thresh; auto beg = dh::tcbegin(f_sorted_idx); assert(partition > 0 && "Invalid partition."); thrust::for_each(thrust::seq, beg, beg + partition, [&](size_t c) { auto cat = shared_inputs.feature_values[c - node_offset]; out_cats.Set(common::AsCat(cat)); }); } void GPUHistEvaluator::LaunchEvaluateSplits( bst_feature_t max_active_features, common::Span<const EvaluateSplitInputs> d_inputs, EvaluateSplitSharedInputs shared_inputs, TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator, common::Span<DeviceSplitCandidate> out_splits) { if (need_sort_histogram_) { this->SortHistogram(d_inputs, shared_inputs, evaluator); } size_t combined_num_features = max_active_features * d_inputs.size(); dh::TemporaryArray<DeviceSplitCandidate> feature_best_splits( combined_num_features, DeviceSplitCandidate()); // One block for each feature uint32_t constexpr kBlockThreads = 32; dh::LaunchKernel {static_cast<uint32_t>(combined_num_features), kBlockThreads, 0}( EvaluateSplitsKernel<kBlockThreads>, max_active_features, d_inputs, shared_inputs, this->SortedIdx(d_inputs.size(), shared_inputs.feature_values.size()), evaluator, dh::ToSpan(feature_best_splits)); // Reduce to get best candidate for left and right child over all features auto reduce_offset = dh::MakeTransformIterator<size_t>(thrust::make_counting_iterator(0llu), [=] __device__(size_t idx) -> size_t { return idx * max_active_features; }); size_t temp_storage_bytes = 0; auto num_segments = out_splits.size(); hipcub::DeviceSegmentedReduce::Sum(nullptr, temp_storage_bytes, feature_best_splits.data(), out_splits.data(), num_segments, reduce_offset, reduce_offset + 1); dh::TemporaryArray<int8_t> temp(temp_storage_bytes); hipcub::DeviceSegmentedReduce::Sum(temp.data().get(), temp_storage_bytes, feature_best_splits.data(), out_splits.data(), num_segments, reduce_offset, reduce_offset + 1); } void GPUHistEvaluator::CopyToHost(const std::vector<bst_node_t> &nidx) { if (!has_categoricals_) return; auto d_cats = this->DeviceCatStorage(nidx); auto h_cats = this->HostCatStorage(nidx); dh::CUDAEvent event; event.Record(dh::DefaultStream()); for (auto idx : nidx) { copy_stream_.View().Wait(event); dh::safe_cuda(hipMemcpyAsync( h_cats.GetNodeCatStorage(idx).data(), d_cats.GetNodeCatStorage(idx).data(), d_cats.GetNodeCatStorage(idx).size_bytes(), hipMemcpyDeviceToHost, copy_stream_.View())); } } void GPUHistEvaluator::EvaluateSplits( const std::vector<bst_node_t> &nidx, bst_feature_t max_active_features, common::Span<const EvaluateSplitInputs> d_inputs, EvaluateSplitSharedInputs shared_inputs, common::Span<GPUExpandEntry> out_entries) { auto evaluator = this->tree_evaluator_.template GetEvaluator<GPUTrainingParam>(); dh::TemporaryArray<DeviceSplitCandidate> splits_out_storage(d_inputs.size()); auto out_splits = dh::ToSpan(splits_out_storage); this->LaunchEvaluateSplits(max_active_features, d_inputs, shared_inputs, evaluator, out_splits); if (is_column_split_) { // With column-wise data split, we gather the split candidates from all the workers and find the // global best candidates. auto const world_size = collective::GetWorldSize(); dh::TemporaryArray<DeviceSplitCandidate> all_candidate_storage(out_splits.size() * world_size); auto all_candidates = dh::ToSpan(all_candidate_storage); collective::AllGather(device_, out_splits.data(), all_candidates.data(), out_splits.size() * sizeof(DeviceSplitCandidate)); // Reduce to get the best candidate from all workers. 
dh::LaunchN(out_splits.size(), [world_size, all_candidates, out_splits] __device__(size_t i) { for (auto rank = 0; rank < world_size; rank++) { out_splits[i] = out_splits[i] + all_candidates[rank * out_splits.size() + i]; } }); } auto d_sorted_idx = this->SortedIdx(d_inputs.size(), shared_inputs.feature_values.size()); auto d_entries = out_entries; auto device_cats_accessor = this->DeviceCatStorage(nidx); // turn candidate into entry, along with handling sort based split. dh::LaunchN(d_inputs.size(), [=] __device__(size_t i) mutable { auto const input = d_inputs[i]; auto &split = out_splits[i]; // Subtract parent gain here // As it is constant, this is more efficient than doing it during every // split evaluation float parent_gain = CalcGain(shared_inputs.param, shared_inputs.rounding.ToFloatingPoint(input.parent_sum)); split.loss_chg -= parent_gain; auto fidx = out_splits[i].findex; if (split.is_cat) { SetCategoricalSplit(shared_inputs, d_sorted_idx, fidx, i, device_cats_accessor.GetNodeCatStorage(input.nidx), &out_splits[i]); } float base_weight = evaluator.CalcWeight(input.nidx, shared_inputs.param, shared_inputs.rounding.ToFloatingPoint( split.left_sum + split.right_sum)); float left_weight = evaluator.CalcWeight( input.nidx, shared_inputs.param, shared_inputs.rounding.ToFloatingPoint(split.left_sum)); float right_weight = evaluator.CalcWeight( input.nidx, shared_inputs.param, shared_inputs.rounding.ToFloatingPoint(split.right_sum)); d_entries[i] = GPUExpandEntry{input.nidx, input.depth, out_splits[i], base_weight, left_weight, right_weight}; }); this->CopyToHost(nidx); } GPUExpandEntry GPUHistEvaluator::EvaluateSingleSplit( EvaluateSplitInputs input, EvaluateSplitSharedInputs shared_inputs) { dh::device_vector<EvaluateSplitInputs> inputs = std::vector<EvaluateSplitInputs>{input}; dh::TemporaryArray<GPUExpandEntry> out_entries(1); this->EvaluateSplits({input.nidx}, input.feature_set.size(), dh::ToSpan(inputs), shared_inputs, dh::ToSpan(out_entries)); GPUExpandEntry root_entry; dh::safe_cuda(hipMemcpyAsync(&root_entry, out_entries.data().get(), sizeof(GPUExpandEntry), hipMemcpyDeviceToHost)); return root_entry; } } // namespace xgboost::tree
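// --- Illustrative sketch (not part of the file above) ---------------------
// The comment at the top of this file explains the single-warp design: with
// kBlockSize == 32 there is exactly one warp per block, so the thread holding
// the best gain can be found with a WarpReduce ArgMax and its lane id
// broadcast with __shfl_sync, avoiding shared-memory staging and extra
// __syncthreads(). The sketch below isolates that idiom in plain CUDA/CUB
// (the HIP build above spells it hipcub::); WarpArgMaxDemo, FakeGain and the
// launch harness are illustrative stand-ins, not XGBoost code.
#include <cub/cub.cuh>
#include <cstdio>

constexpr int kWarpSize = 32;

__device__ float FakeGain(int lane)        // stand-in for LossChangeMissing()
{
    return (float)((lane * 37) % 29);      // arbitrary deterministic values
}

__global__ void WarpArgMaxDemo(int *out_lane, float *out_gain)
{
    using MaxReduceT = cub::WarpReduce<cub::KeyValuePair<int, float>>;
    __shared__ typename MaxReduceT::TempStorage temp;

    int lane = threadIdx.x;
    cub::KeyValuePair<int, float> me(lane, FakeGain(lane));

    // The reduce result is only valid in lane 0, exactly as in
    // EvaluateSplitAgent, so broadcast the winning lane id to the whole warp.
    auto best = MaxReduceT(temp).Reduce(me, cub::ArgMax());
    int best_lane = __shfl_sync(0xffffffff, best.key, 0);

    if (lane == best_lane) {               // only the winning lane writes
        *out_lane = lane;
        *out_gain = me.value;
    }
}

int main()
{
    int *d_lane; float *d_gain;
    cudaMalloc(&d_lane, sizeof(int));
    cudaMalloc(&d_gain, sizeof(float));

    WarpArgMaxDemo<<<1, kWarpSize>>>(d_lane, d_gain);

    int h_lane; float h_gain;
    cudaMemcpy(&h_lane, d_lane, sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(&h_gain, d_gain, sizeof(float), cudaMemcpyDeviceToHost);
    printf("best lane %d, gain %f\n", h_lane, h_gain);

    cudaFree(d_lane); cudaFree(d_gain);
    return 0;
}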
1b9ce78b581f4d0ba5e29a3d4f0cbab55f1c16e7.cu
/** * Copyright 2020-2023, XGBoost Contributors */ #include <algorithm> // std::max #include <vector> #include <limits> #include "../../collective/communicator-inl.cuh" #include "../../common/categorical.h" #include "../../data/ellpack_page.cuh" #include "evaluate_splits.cuh" #include "expand_entry.cuh" namespace xgboost::tree { // With constraints XGBOOST_DEVICE float LossChangeMissing(const GradientPairInt64 &scan, const GradientPairInt64 &missing, const GradientPairInt64 &parent_sum, const GPUTrainingParam &param, bst_node_t nidx, bst_feature_t fidx, TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator, bool &missing_left_out, const GradientQuantiser& quantiser) { // NOLINT const auto left_sum = scan + missing; float missing_left_gain = evaluator.CalcSplitGain( param, nidx, fidx, quantiser.ToFloatingPoint(left_sum), quantiser.ToFloatingPoint(parent_sum - left_sum)); float missing_right_gain = evaluator.CalcSplitGain( param, nidx, fidx, quantiser.ToFloatingPoint(scan), quantiser.ToFloatingPoint(parent_sum - scan)); missing_left_out = missing_left_gain > missing_right_gain; return missing_left_out?missing_left_gain:missing_right_gain; } // This kernel uses block_size == warp_size. This is an unusually small block size for a cuda kernel // - normally a larger block size is preferred to increase the number of resident warps on each SM // (occupancy). In the below case each thread has a very large amount of work per thread relative to // typical cuda kernels. Thus the SM can be highly utilised by a small number of threads. It was // discovered by experiments that a small block size here is significantly faster. Furthermore, // using only a single warp, synchronisation barriers are eliminated and broadcasts can be performed // using warp intrinsics instead of slower shared memory. 
template <int kBlockSize> class EvaluateSplitAgent { public: using ArgMaxT = cub::KeyValuePair<int, float>; using BlockScanT = cub::BlockScan<GradientPairInt64, kBlockSize>; using MaxReduceT = cub::WarpReduce<ArgMaxT>; using SumReduceT = cub::WarpReduce<GradientPairInt64>; struct TempStorage { typename BlockScanT::TempStorage scan; typename MaxReduceT::TempStorage max_reduce; typename SumReduceT::TempStorage sum_reduce; }; const int fidx; const int nidx; const float min_fvalue; const uint32_t gidx_begin; // beginning bin const uint32_t gidx_end; // end bin for i^th feature const dh::LDGIterator<float> feature_values; const GradientPairInt64 *node_histogram; const GradientQuantiser &rounding; const GradientPairInt64 parent_sum; const GradientPairInt64 missing; const GPUTrainingParam &param; const TreeEvaluator::SplitEvaluator<GPUTrainingParam> &evaluator; TempStorage *temp_storage; SumCallbackOp<GradientPairInt64> prefix_op; static float constexpr kNullGain = -std::numeric_limits<bst_float>::infinity(); __device__ EvaluateSplitAgent( TempStorage *temp_storage, int fidx, const EvaluateSplitInputs &inputs, const EvaluateSplitSharedInputs &shared_inputs, const TreeEvaluator::SplitEvaluator<GPUTrainingParam> &evaluator) : temp_storage(temp_storage), nidx(inputs.nidx), fidx(fidx), min_fvalue(__ldg(shared_inputs.min_fvalue.data() + fidx)), gidx_begin(__ldg(shared_inputs.feature_segments.data() + fidx)), gidx_end(__ldg(shared_inputs.feature_segments.data() + fidx + 1)), feature_values(shared_inputs.feature_values.data()), node_histogram(inputs.gradient_histogram.data()), rounding(shared_inputs.rounding), parent_sum(dh::LDGIterator<GradientPairInt64>(&inputs.parent_sum)[0]), param(shared_inputs.param), evaluator(evaluator), missing(parent_sum - ReduceFeature()) { static_assert( kBlockSize == 32, "This kernel relies on the assumption block_size == warp_size"); // There should be no missing value gradients for a dense matrix KERNEL_CHECK(!shared_inputs.is_dense || missing.GetQuantisedHess() == 0); } __device__ GradientPairInt64 ReduceFeature() { GradientPairInt64 local_sum; for (int idx = gidx_begin + threadIdx.x; idx < gidx_end; idx += kBlockSize) { local_sum += LoadGpair(node_histogram + idx); } local_sum = SumReduceT(temp_storage->sum_reduce).Sum(local_sum); // NOLINT // Broadcast result from thread 0 return {__shfl_sync(0xffffffff, local_sum.GetQuantisedGrad(), 0), __shfl_sync(0xffffffff, local_sum.GetQuantisedHess(), 0)}; } // Load using efficient 128 vector load instruction __device__ __forceinline__ GradientPairInt64 LoadGpair(const GradientPairInt64 *ptr) { float4 tmp = *reinterpret_cast<const float4 *>(ptr); auto gpair = *reinterpret_cast<const GradientPairInt64 *>(&tmp); static_assert(sizeof(decltype(gpair)) == sizeof(float4), "Vector type size does not match gradient pair size."); return gpair; } __device__ __forceinline__ void Numerical(DeviceSplitCandidate *__restrict__ best_split) { for (int scan_begin = gidx_begin; scan_begin < gidx_end; scan_begin += kBlockSize) { bool thread_active = (scan_begin + threadIdx.x) < gidx_end; GradientPairInt64 bin = thread_active ? LoadGpair(node_histogram + scan_begin + threadIdx.x) : GradientPairInt64(); BlockScanT(temp_storage->scan).ExclusiveScan(bin, bin, cub::Sum(), prefix_op); // Whether the gradient of missing values is put to the left side. bool missing_left = true; float gain = thread_active ? 
LossChangeMissing(bin, missing, parent_sum, param, nidx, fidx, evaluator, missing_left, rounding) : kNullGain; // Find thread with best gain auto best = MaxReduceT(temp_storage->max_reduce).Reduce({threadIdx.x, gain}, cub::ArgMax()); // This reduce result is only valid in thread 0 // broadcast to the rest of the warp auto best_thread = __shfl_sync(0xffffffff, best.key, 0); // Best thread updates the split if (threadIdx.x == best_thread) { // Use pointer from cut to indicate begin and end of bins for each feature. int split_gidx = (scan_begin + threadIdx.x) - 1; float fvalue = split_gidx < static_cast<int>(gidx_begin) ? min_fvalue : feature_values[split_gidx]; GradientPairInt64 left = missing_left ? bin + missing : bin; GradientPairInt64 right = parent_sum - left; best_split->Update(gain, missing_left ? kLeftDir : kRightDir, fvalue, fidx, left, right, false, param, rounding); } } } __device__ __forceinline__ void OneHot(DeviceSplitCandidate *__restrict__ best_split) { for (int scan_begin = gidx_begin; scan_begin < gidx_end; scan_begin += kBlockSize) { bool thread_active = (scan_begin + threadIdx.x) < gidx_end; auto rest = thread_active ? LoadGpair(node_histogram + scan_begin + threadIdx.x) : GradientPairInt64(); GradientPairInt64 bin = parent_sum - rest - missing; // Whether the gradient of missing values is put to the left side. bool missing_left = true; float gain = thread_active ? LossChangeMissing(bin, missing, parent_sum, param, nidx, fidx, evaluator, missing_left, rounding) : kNullGain; // Find thread with best gain auto best = MaxReduceT(temp_storage->max_reduce).Reduce({threadIdx.x, gain}, cub::ArgMax()); // This reduce result is only valid in thread 0 // broadcast to the rest of the warp auto best_thread = __shfl_sync(0xffffffff, best.key, 0); // Best thread updates the split if (threadIdx.x == best_thread) { int32_t split_gidx = (scan_begin + threadIdx.x); float fvalue = feature_values[split_gidx]; GradientPairInt64 left = missing_left ? bin + missing : bin; GradientPairInt64 right = parent_sum - left; best_split->UpdateCat(gain, missing_left ? kLeftDir : kRightDir, static_cast<bst_cat_t>(fvalue), fidx, left, right, param, rounding); } } } /** * \brief Gather and update the best split. */ __device__ __forceinline__ void PartitionUpdate(bst_bin_t scan_begin, bool thread_active, bool missing_left, bst_bin_t it, GradientPairInt64 const &left_sum, GradientPairInt64 const &right_sum, DeviceSplitCandidate *__restrict__ best_split) { auto gain = thread_active ? evaluator.CalcSplitGain(param, nidx, fidx, rounding.ToFloatingPoint(left_sum), rounding.ToFloatingPoint(right_sum)) : kNullGain; // Find thread with best gain auto best = MaxReduceT(temp_storage->max_reduce).Reduce({threadIdx.x, gain}, cub::ArgMax()); // This reduce result is only valid in thread 0 // broadcast to the rest of the warp auto best_thread = __shfl_sync(0xffffffff, best.key, 0); // Best thread updates the split if (threadIdx.x == best_thread) { assert(thread_active); // index of best threshold inside a feature. auto best_thresh = it - gidx_begin; best_split->UpdateCat(gain, missing_left ? kLeftDir : kRightDir, best_thresh, fidx, left_sum, right_sum, param, rounding); } } /** * \brief Partition-based split for categorical feature. 
*/ __device__ __forceinline__ void Partition(DeviceSplitCandidate *__restrict__ best_split, common::Span<bst_feature_t> sorted_idx, std::size_t node_offset, GPUTrainingParam const &param) { bst_bin_t n_bins_feature = gidx_end - gidx_begin; auto n_bins = std::min(param.max_cat_threshold, n_bins_feature); bst_bin_t it_begin = gidx_begin; bst_bin_t it_end = it_begin + n_bins - 1; // forward for (bst_bin_t scan_begin = it_begin; scan_begin < it_end; scan_begin += kBlockSize) { auto it = scan_begin + static_cast<bst_bin_t>(threadIdx.x); bool thread_active = it < it_end; auto right_sum = thread_active ? LoadGpair(node_histogram + sorted_idx[it] - node_offset) : GradientPairInt64(); // No min value for cat feature, use inclusive scan. BlockScanT(temp_storage->scan).InclusiveSum(right_sum, right_sum, prefix_op); GradientPairInt64 left_sum = parent_sum - right_sum; PartitionUpdate(scan_begin, thread_active, true, it, left_sum, right_sum, best_split); } // backward it_begin = gidx_end - 1; it_end = it_begin - n_bins + 1; prefix_op = SumCallbackOp<GradientPairInt64>{}; // reset for (bst_bin_t scan_begin = it_begin; scan_begin > it_end; scan_begin -= kBlockSize) { auto it = scan_begin - static_cast<bst_bin_t>(threadIdx.x); bool thread_active = it > it_end; auto left_sum = thread_active ? LoadGpair(node_histogram + sorted_idx[it] - node_offset) : GradientPairInt64(); // No min value for cat feature, use inclusive scan. BlockScanT(temp_storage->scan).InclusiveSum(left_sum, left_sum, prefix_op); GradientPairInt64 right_sum = parent_sum - left_sum; PartitionUpdate(scan_begin, thread_active, false, it, left_sum, right_sum, best_split); } } }; template <int kBlockSize> __global__ __launch_bounds__(kBlockSize) void EvaluateSplitsKernel( bst_feature_t max_active_features, common::Span<const EvaluateSplitInputs> d_inputs, const EvaluateSplitSharedInputs shared_inputs, common::Span<bst_feature_t> sorted_idx, const TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator, common::Span<DeviceSplitCandidate> out_candidates) { // Aligned && shared storage for best_split __shared__ cub::Uninitialized<DeviceSplitCandidate> uninitialized_split; DeviceSplitCandidate &best_split = uninitialized_split.Alias(); if (threadIdx.x == 0) { best_split = DeviceSplitCandidate(); } __syncthreads(); // Allocate blocks to one feature of one node const auto input_idx = blockIdx.x / max_active_features; const EvaluateSplitInputs &inputs = d_inputs[input_idx]; // One block for each feature. 
Features are sampled, so fidx != blockIdx.x // Some blocks may not have any feature to work on, simply return int feature_offset = blockIdx.x % max_active_features; if (feature_offset >= inputs.feature_set.size()) { return; } int fidx = inputs.feature_set[feature_offset]; using AgentT = EvaluateSplitAgent<kBlockSize>; __shared__ typename AgentT::TempStorage temp_storage; AgentT agent(&temp_storage, fidx, inputs, shared_inputs, evaluator); if (common::IsCat(shared_inputs.feature_types, fidx)) { auto n_bins_in_feat = shared_inputs.feature_segments[fidx + 1] - shared_inputs.feature_segments[fidx]; if (common::UseOneHot(n_bins_in_feat, shared_inputs.param.max_cat_to_onehot)) { agent.OneHot(&best_split); } else { auto total_bins = shared_inputs.feature_values.size(); size_t offset = total_bins * input_idx; auto node_sorted_idx = sorted_idx.subspan(offset, total_bins); agent.Partition(&best_split, node_sorted_idx, offset, shared_inputs.param); } } else { agent.Numerical(&best_split); } cub::CTA_SYNC(); if (threadIdx.x == 0) { // Record best loss for each feature out_candidates[blockIdx.x] = best_split; } } __device__ DeviceSplitCandidate operator+(const DeviceSplitCandidate &a, const DeviceSplitCandidate &b) { return b.loss_chg > a.loss_chg ? b : a; } /** * \brief Set the bits for categorical splits based on the split threshold. */ __device__ void SetCategoricalSplit(const EvaluateSplitSharedInputs &shared_inputs, common::Span<bst_feature_t const> d_sorted_idx, bst_feature_t fidx, std::size_t input_idx, common::Span<common::CatBitField::value_type> out, DeviceSplitCandidate *p_out_split) { auto &out_split = *p_out_split; auto out_cats = common::CatBitField{out}; // Simple case for one hot split if (common::UseOneHot(shared_inputs.FeatureBins(fidx), shared_inputs.param.max_cat_to_onehot)) { out_cats.Set(common::AsCat(out_split.thresh)); return; } // partition-based split auto node_sorted_idx = d_sorted_idx.subspan(shared_inputs.feature_values.size() * input_idx, shared_inputs.feature_values.size()); size_t node_offset = input_idx * shared_inputs.feature_values.size(); auto const best_thresh = out_split.thresh; if (best_thresh == -1) { return; } auto f_sorted_idx = node_sorted_idx.subspan(shared_inputs.feature_segments[fidx], shared_inputs.FeatureBins(fidx)); bool forward = out_split.dir == kLeftDir; bst_bin_t partition = forward ? 
best_thresh + 1 : best_thresh; auto beg = dh::tcbegin(f_sorted_idx); assert(partition > 0 && "Invalid partition."); thrust::for_each(thrust::seq, beg, beg + partition, [&](size_t c) { auto cat = shared_inputs.feature_values[c - node_offset]; out_cats.Set(common::AsCat(cat)); }); } void GPUHistEvaluator::LaunchEvaluateSplits( bst_feature_t max_active_features, common::Span<const EvaluateSplitInputs> d_inputs, EvaluateSplitSharedInputs shared_inputs, TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator, common::Span<DeviceSplitCandidate> out_splits) { if (need_sort_histogram_) { this->SortHistogram(d_inputs, shared_inputs, evaluator); } size_t combined_num_features = max_active_features * d_inputs.size(); dh::TemporaryArray<DeviceSplitCandidate> feature_best_splits( combined_num_features, DeviceSplitCandidate()); // One block for each feature uint32_t constexpr kBlockThreads = 32; dh::LaunchKernel {static_cast<uint32_t>(combined_num_features), kBlockThreads, 0}( EvaluateSplitsKernel<kBlockThreads>, max_active_features, d_inputs, shared_inputs, this->SortedIdx(d_inputs.size(), shared_inputs.feature_values.size()), evaluator, dh::ToSpan(feature_best_splits)); // Reduce to get best candidate for left and right child over all features auto reduce_offset = dh::MakeTransformIterator<size_t>(thrust::make_counting_iterator(0llu), [=] __device__(size_t idx) -> size_t { return idx * max_active_features; }); size_t temp_storage_bytes = 0; auto num_segments = out_splits.size(); cub::DeviceSegmentedReduce::Sum(nullptr, temp_storage_bytes, feature_best_splits.data(), out_splits.data(), num_segments, reduce_offset, reduce_offset + 1); dh::TemporaryArray<int8_t> temp(temp_storage_bytes); cub::DeviceSegmentedReduce::Sum(temp.data().get(), temp_storage_bytes, feature_best_splits.data(), out_splits.data(), num_segments, reduce_offset, reduce_offset + 1); } void GPUHistEvaluator::CopyToHost(const std::vector<bst_node_t> &nidx) { if (!has_categoricals_) return; auto d_cats = this->DeviceCatStorage(nidx); auto h_cats = this->HostCatStorage(nidx); dh::CUDAEvent event; event.Record(dh::DefaultStream()); for (auto idx : nidx) { copy_stream_.View().Wait(event); dh::safe_cuda(cudaMemcpyAsync( h_cats.GetNodeCatStorage(idx).data(), d_cats.GetNodeCatStorage(idx).data(), d_cats.GetNodeCatStorage(idx).size_bytes(), cudaMemcpyDeviceToHost, copy_stream_.View())); } } void GPUHistEvaluator::EvaluateSplits( const std::vector<bst_node_t> &nidx, bst_feature_t max_active_features, common::Span<const EvaluateSplitInputs> d_inputs, EvaluateSplitSharedInputs shared_inputs, common::Span<GPUExpandEntry> out_entries) { auto evaluator = this->tree_evaluator_.template GetEvaluator<GPUTrainingParam>(); dh::TemporaryArray<DeviceSplitCandidate> splits_out_storage(d_inputs.size()); auto out_splits = dh::ToSpan(splits_out_storage); this->LaunchEvaluateSplits(max_active_features, d_inputs, shared_inputs, evaluator, out_splits); if (is_column_split_) { // With column-wise data split, we gather the split candidates from all the workers and find the // global best candidates. auto const world_size = collective::GetWorldSize(); dh::TemporaryArray<DeviceSplitCandidate> all_candidate_storage(out_splits.size() * world_size); auto all_candidates = dh::ToSpan(all_candidate_storage); collective::AllGather(device_, out_splits.data(), all_candidates.data(), out_splits.size() * sizeof(DeviceSplitCandidate)); // Reduce to get the best candidate from all workers. 
dh::LaunchN(out_splits.size(), [world_size, all_candidates, out_splits] __device__(size_t i) { for (auto rank = 0; rank < world_size; rank++) { out_splits[i] = out_splits[i] + all_candidates[rank * out_splits.size() + i]; } }); } auto d_sorted_idx = this->SortedIdx(d_inputs.size(), shared_inputs.feature_values.size()); auto d_entries = out_entries; auto device_cats_accessor = this->DeviceCatStorage(nidx); // turn candidate into entry, along with handling sort based split. dh::LaunchN(d_inputs.size(), [=] __device__(size_t i) mutable { auto const input = d_inputs[i]; auto &split = out_splits[i]; // Subtract parent gain here // As it is constant, this is more efficient than doing it during every // split evaluation float parent_gain = CalcGain(shared_inputs.param, shared_inputs.rounding.ToFloatingPoint(input.parent_sum)); split.loss_chg -= parent_gain; auto fidx = out_splits[i].findex; if (split.is_cat) { SetCategoricalSplit(shared_inputs, d_sorted_idx, fidx, i, device_cats_accessor.GetNodeCatStorage(input.nidx), &out_splits[i]); } float base_weight = evaluator.CalcWeight(input.nidx, shared_inputs.param, shared_inputs.rounding.ToFloatingPoint( split.left_sum + split.right_sum)); float left_weight = evaluator.CalcWeight( input.nidx, shared_inputs.param, shared_inputs.rounding.ToFloatingPoint(split.left_sum)); float right_weight = evaluator.CalcWeight( input.nidx, shared_inputs.param, shared_inputs.rounding.ToFloatingPoint(split.right_sum)); d_entries[i] = GPUExpandEntry{input.nidx, input.depth, out_splits[i], base_weight, left_weight, right_weight}; }); this->CopyToHost(nidx); } GPUExpandEntry GPUHistEvaluator::EvaluateSingleSplit( EvaluateSplitInputs input, EvaluateSplitSharedInputs shared_inputs) { dh::device_vector<EvaluateSplitInputs> inputs = std::vector<EvaluateSplitInputs>{input}; dh::TemporaryArray<GPUExpandEntry> out_entries(1); this->EvaluateSplits({input.nidx}, input.feature_set.size(), dh::ToSpan(inputs), shared_inputs, dh::ToSpan(out_entries)); GPUExpandEntry root_entry; dh::safe_cuda(cudaMemcpyAsync(&root_entry, out_entries.data().get(), sizeof(GPUExpandEntry), cudaMemcpyDeviceToHost)); return root_entry; } } // namespace xgboost::tree
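// --- Illustrative sketch (not part of the file above) ---------------------
// LaunchEvaluateSplits reduces the per-feature candidates down to one winner
// per node with cub::DeviceSegmentedReduce::Sum, relying on the free
// operator+ defined above to mean "keep the candidate with the larger
// loss_chg". The standalone sketch below shows that trick with a stripped-down
// Cand struct and hard-coded segment offsets (stand-ins for
// DeviceSplitCandidate and the transform-iterator offsets); it assumes, as the
// file itself does, a CUB version whose Sum falls back to the element type's
// operator+ with a value-initialized identity.
#include <cub/cub.cuh>
#include <cstdio>

struct Cand {
    float loss_chg;
    int fidx;
};

// "Addition" keeps the better candidate, mirroring operator+ above.
__host__ __device__ Cand operator+(const Cand &a, const Cand &b)
{
    return b.loss_chg > a.loss_chg ? b : a;
}

int main()
{
    // 2 nodes x 3 features -> 6 per-feature candidates; the segmented reduce
    // picks the best candidate of each node.
    const int n_nodes = 2, n_feats = 3;
    Cand h_in[n_nodes * n_feats] = {
        {0.1f, 0}, {0.9f, 1}, {0.3f, 2},    // node 0
        {0.5f, 0}, {0.2f, 1}, {0.7f, 2}};   // node 1
    int h_off[n_nodes + 1] = {0, 3, 6};

    Cand *d_in, *d_out; int *d_off;
    cudaMalloc(&d_in, sizeof(h_in));
    cudaMalloc(&d_out, n_nodes * sizeof(Cand));
    cudaMalloc(&d_off, sizeof(h_off));
    cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);
    cudaMemcpy(d_off, h_off, sizeof(h_off), cudaMemcpyHostToDevice);

    // Usual CUB two-phase call: first query temp storage size, then reduce.
    void *d_temp = nullptr; size_t temp_bytes = 0;
    cub::DeviceSegmentedReduce::Sum(d_temp, temp_bytes, d_in, d_out, n_nodes,
                                    d_off, d_off + 1);
    cudaMalloc(&d_temp, temp_bytes);
    cub::DeviceSegmentedReduce::Sum(d_temp, temp_bytes, d_in, d_out, n_nodes,
                                    d_off, d_off + 1);

    Cand h_out[n_nodes];
    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n_nodes; ++i)
        printf("node %d: best fidx=%d loss_chg=%.2f\n",
               i, h_out[i].fidx, h_out[i].loss_chg);

    cudaFree(d_in); cudaFree(d_out); cudaFree(d_off); cudaFree(d_temp);
    return 0;
}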
de2ff608598073d97b726a83f0b6920e2f68cded.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void dwt_per_Y(float *d_ip, int rows, int cols, int cA_rows, int filt_len, int Halo_steps, float *d_cL, float *d_cH) { extern __shared__ float s_Data[]; //Offset to the upper halo edge const int baseX = blockIdx.x * Y_BLOCKDIM_X + threadIdx.x; const int baseY = ((blockIdx.y * 2 * Y_RESULT_STEPS) - Halo_steps) * Y_BLOCKDIM_Y + threadIdx.y; const int baseY1 = (blockIdx.y * Y_RESULT_STEPS) * Y_BLOCKDIM_Y + threadIdx.y; if (baseX < cols) { d_ip += baseY * cols + baseX; d_cL += baseY1 * cols + baseX; d_cH += baseY1 * cols + baseX; //Loading data to shared memory if (rows % 2 == 1) { //Upper halo #pragma unroll for (int i = 0; i < Halo_steps; i++) { if (baseY + i * Y_BLOCKDIM_Y == -1) s_Data[(threadIdx.x*(2 * Y_RESULT_STEPS + 2 * Halo_steps) *Y_BLOCKDIM_Y) + threadIdx.y + i * Y_BLOCKDIM_Y] = d_ip[(rows - 1) * cols]; else s_Data[(threadIdx.x*(2 * Y_RESULT_STEPS + 2 * Halo_steps) *Y_BLOCKDIM_Y) + threadIdx.y + i * Y_BLOCKDIM_Y] = (baseY >= -i * Y_BLOCKDIM_Y) ? d_ip[i * Y_BLOCKDIM_Y * cols] : d_ip[(i * Y_BLOCKDIM_Y * cols) + ((rows + 1)*cols)]; } //Lower halo + Main data #pragma unroll for (int i = Halo_steps; i < Halo_steps + 2 * Y_RESULT_STEPS + Halo_steps; i++) { if (baseY + i * Y_BLOCKDIM_Y == rows) s_Data[(threadIdx.x*(2 * Y_RESULT_STEPS + 2 * Halo_steps) *Y_BLOCKDIM_Y) + threadIdx.y + i * Y_BLOCKDIM_Y] = d_ip[(i * Y_BLOCKDIM_Y * (cols - 1))]; else s_Data[(threadIdx.x*(2 * Y_RESULT_STEPS + 2 * Halo_steps) *Y_BLOCKDIM_Y) + threadIdx.y + i * Y_BLOCKDIM_Y] = (rows - baseY > i * Y_BLOCKDIM_Y) ? d_ip[i * Y_BLOCKDIM_Y * cols] : d_ip[(i * Y_BLOCKDIM_Y * cols) - ((rows + 1)*cols)]; } __syncthreads(); } else { //Upper halo #pragma unroll for (int i = 0; i < Halo_steps; i++) { s_Data[(threadIdx.x*(2 * Y_RESULT_STEPS + 2 * Halo_steps) *Y_BLOCKDIM_Y) + threadIdx.y + i * Y_BLOCKDIM_Y] = (baseY >= -i * Y_BLOCKDIM_Y) ? d_ip[i * Y_BLOCKDIM_Y * cols] : d_ip[(i * Y_BLOCKDIM_Y * cols) + (rows*cols)]; } //Lower halo + Main data #pragma unroll for (int i = Halo_steps; i < Halo_steps + 2 * Y_RESULT_STEPS + Halo_steps; i++) { s_Data[(threadIdx.x*(2 * Y_RESULT_STEPS + 2 * Halo_steps) *Y_BLOCKDIM_Y) + threadIdx.y + i * Y_BLOCKDIM_Y] = (rows - baseY > i * Y_BLOCKDIM_Y) ? d_ip[i * Y_BLOCKDIM_Y * cols] : d_ip[(i * Y_BLOCKDIM_Y * cols) - (rows*cols)]; } __syncthreads(); } //Compute and store results #pragma unroll for (int i = 0; i < Y_RESULT_STEPS; i++) { if ((baseY1 + i * Y_BLOCKDIM_Y < cA_rows)) { int l2 = filt_len / 2; float sum_cL = 0, sum_cH = 0; for (int l = 0; l < filt_len; ++l) { sum_cL += c_lpd[l] * s_Data[(threadIdx.x*(2 * Y_RESULT_STEPS + 2 * Halo_steps) *Y_BLOCKDIM_Y) + 2 * threadIdx.y + 2 * i * Y_BLOCKDIM_Y + Halo_steps * Y_BLOCKDIM_Y + l2 - l]; sum_cH += c_hpd[l] * s_Data[(threadIdx.x*(2 * Y_RESULT_STEPS + 2 * Halo_steps) *Y_BLOCKDIM_Y) + 2 * threadIdx.y + 2 * i * Y_BLOCKDIM_Y + Halo_steps * Y_BLOCKDIM_Y + l2 - l]; } d_cL[i * Y_BLOCKDIM_Y * cols] = sum_cL; d_cH[i * Y_BLOCKDIM_Y * cols] = sum_cH; } } } }
de2ff608598073d97b726a83f0b6920e2f68cded.cu
#include "includes.h" __global__ void dwt_per_Y(float *d_ip, int rows, int cols, int cA_rows, int filt_len, int Halo_steps, float *d_cL, float *d_cH) { extern __shared__ float s_Data[]; //Offset to the upper halo edge const int baseX = blockIdx.x * Y_BLOCKDIM_X + threadIdx.x; const int baseY = ((blockIdx.y * 2 * Y_RESULT_STEPS) - Halo_steps) * Y_BLOCKDIM_Y + threadIdx.y; const int baseY1 = (blockIdx.y * Y_RESULT_STEPS) * Y_BLOCKDIM_Y + threadIdx.y; if (baseX < cols) { d_ip += baseY * cols + baseX; d_cL += baseY1 * cols + baseX; d_cH += baseY1 * cols + baseX; //Loading data to shared memory if (rows % 2 == 1) { //Upper halo #pragma unroll for (int i = 0; i < Halo_steps; i++) { if (baseY + i * Y_BLOCKDIM_Y == -1) s_Data[(threadIdx.x*(2 * Y_RESULT_STEPS + 2 * Halo_steps) *Y_BLOCKDIM_Y) + threadIdx.y + i * Y_BLOCKDIM_Y] = d_ip[(rows - 1) * cols]; else s_Data[(threadIdx.x*(2 * Y_RESULT_STEPS + 2 * Halo_steps) *Y_BLOCKDIM_Y) + threadIdx.y + i * Y_BLOCKDIM_Y] = (baseY >= -i * Y_BLOCKDIM_Y) ? d_ip[i * Y_BLOCKDIM_Y * cols] : d_ip[(i * Y_BLOCKDIM_Y * cols) + ((rows + 1)*cols)]; } //Lower halo + Main data #pragma unroll for (int i = Halo_steps; i < Halo_steps + 2 * Y_RESULT_STEPS + Halo_steps; i++) { if (baseY + i * Y_BLOCKDIM_Y == rows) s_Data[(threadIdx.x*(2 * Y_RESULT_STEPS + 2 * Halo_steps) *Y_BLOCKDIM_Y) + threadIdx.y + i * Y_BLOCKDIM_Y] = d_ip[(i * Y_BLOCKDIM_Y * (cols - 1))]; else s_Data[(threadIdx.x*(2 * Y_RESULT_STEPS + 2 * Halo_steps) *Y_BLOCKDIM_Y) + threadIdx.y + i * Y_BLOCKDIM_Y] = (rows - baseY > i * Y_BLOCKDIM_Y) ? d_ip[i * Y_BLOCKDIM_Y * cols] : d_ip[(i * Y_BLOCKDIM_Y * cols) - ((rows + 1)*cols)]; } __syncthreads(); } else { //Upper halo #pragma unroll for (int i = 0; i < Halo_steps; i++) { s_Data[(threadIdx.x*(2 * Y_RESULT_STEPS + 2 * Halo_steps) *Y_BLOCKDIM_Y) + threadIdx.y + i * Y_BLOCKDIM_Y] = (baseY >= -i * Y_BLOCKDIM_Y) ? d_ip[i * Y_BLOCKDIM_Y * cols] : d_ip[(i * Y_BLOCKDIM_Y * cols) + (rows*cols)]; } //Lower halo + Main data #pragma unroll for (int i = Halo_steps; i < Halo_steps + 2 * Y_RESULT_STEPS + Halo_steps; i++) { s_Data[(threadIdx.x*(2 * Y_RESULT_STEPS + 2 * Halo_steps) *Y_BLOCKDIM_Y) + threadIdx.y + i * Y_BLOCKDIM_Y] = (rows - baseY > i * Y_BLOCKDIM_Y) ? d_ip[i * Y_BLOCKDIM_Y * cols] : d_ip[(i * Y_BLOCKDIM_Y * cols) - (rows*cols)]; } __syncthreads(); } //Compute and store results #pragma unroll for (int i = 0; i < Y_RESULT_STEPS; i++) { if ((baseY1 + i * Y_BLOCKDIM_Y < cA_rows)) { int l2 = filt_len / 2; float sum_cL = 0, sum_cH = 0; for (int l = 0; l < filt_len; ++l) { sum_cL += c_lpd[l] * s_Data[(threadIdx.x*(2 * Y_RESULT_STEPS + 2 * Halo_steps) *Y_BLOCKDIM_Y) + 2 * threadIdx.y + 2 * i * Y_BLOCKDIM_Y + Halo_steps * Y_BLOCKDIM_Y + l2 - l]; sum_cH += c_hpd[l] * s_Data[(threadIdx.x*(2 * Y_RESULT_STEPS + 2 * Halo_steps) *Y_BLOCKDIM_Y) + 2 * threadIdx.y + 2 * i * Y_BLOCKDIM_Y + Halo_steps * Y_BLOCKDIM_Y + l2 - l]; } d_cL[i * Y_BLOCKDIM_Y * cols] = sum_cL; d_cH[i * Y_BLOCKDIM_Y * cols] = sum_cH; } } } }
b87685feccdb03e398387074fa1754273ecbc5ef.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <rocblas.h> #include <thrust/device_ptr.h> #include <thrust/reduce.h> #include <thrust/execution_policy.h> #define pow_2(x) ( ((x) * (x)) ) #if MOLECULE_SIZE > 2000 #define LARGE #define BLOCK_SIZE 512 #else #define SMALL #define BLOCK_SIZE 64 #endif struct Atom { float x, y, z; Atom() {} __device__ Atom(float x_, float y_, float z_) : x(x_), y(y_), z(z_) {} }; __global__ void atoms_difference(sMolecule A, sMolecule B, float * d_result, int n, int line_blocks) { float a_x, a_y, a_z, b_x, b_y, b_z; __shared__ int skip, quot, reminder; if (0 == threadIdx.x) { quot = blockIdx.x / line_blocks; reminder = blockIdx.x % line_blocks; if (quot > reminder) { skip = 1; } else { skip = 0; } } __syncthreads(); if (skip == 1) { return; } int block_begin = (quot) * BLOCK_SIZE; int i = block_begin + threadIdx.x; int begin = (reminder) * BLOCK_SIZE; __shared__ float A_x[BLOCK_SIZE], A_y[BLOCK_SIZE], A_z[BLOCK_SIZE]; A_x[threadIdx.x] = A.x[begin + threadIdx.x]; A_y[threadIdx.x] = A.y[begin + threadIdx.x]; A_z[threadIdx.x] = A.z[begin + threadIdx.x]; __shared__ float B_x[BLOCK_SIZE], B_y[BLOCK_SIZE], B_z[BLOCK_SIZE]; B_x[threadIdx.x] = B.x[begin + threadIdx.x]; B_y[threadIdx.x] = B.y[begin + threadIdx.x]; B_z[threadIdx.x] = B.z[begin + threadIdx.x]; if (i >= n) { return; } __shared__ int copy_from_shared; if (0 == threadIdx.x) { if (block_begin == begin) { copy_from_shared = 1; } else { copy_from_shared = 0; } } __syncthreads(); if (1 == copy_from_shared) { a_x = A_x[threadIdx.x]; a_y = A_y[threadIdx.x]; a_z = A_z[threadIdx.x]; b_x = B_x[threadIdx.x]; b_y = B_y[threadIdx.x]; b_z = B_z[threadIdx.x]; } else { a_x = A.x[i]; a_y = A.y[i]; a_z = A.z[i]; b_x = B.x[i]; b_y = B.y[i]; b_z = B.z[i]; } float sum = 0.0; #ifdef LARGE #pragma unroll 32 #endif #ifdef SMALL #pragma unroll 64 #endif for (int j = 0; j < BLOCK_SIZE; ++j) { int index = begin + j; if (index >= n) { break; } if (i < index) { // printf("processing (%d, %d)\n", i, index); float diff_x = A_x[j] - a_x; float diff_y = A_y[j] - a_y; float diff_z = A_z[j] - a_z; float da = sqrt(pow_2(diff_x) + pow_2(diff_y) + pow_2(diff_z)); diff_x = B_x[j] - b_x; diff_y = B_y[j] - b_y; diff_z = B_z[j] - b_z; float db = sqrt(pow_2(diff_x) + pow_2(diff_y) + pow_2(diff_z)); // printf("Ax diff [%f, %f, %f]\n", // pow_2(A.x[i] - A.x[j]), // pow_2(A.y[i] - A.y[j]), // pow_2(A.z[i] - A.z[j])); // printf("Da: %f db: %f\n", da, db); // printf("saving result: %f\n", pow_2(da - db)); sum += pow_2(da - db); } } atomicAdd(d_result + i, sum); } float solveGPU(sMolecule d_A, sMolecule d_B, int n) { int line_blocks = n / BLOCK_SIZE + 1; int GRID_SIZE = pow_2(line_blocks); float *d_result; int result_size = n; hipError_t err = hipMalloc(&d_result, result_size * sizeof(float)); if ( hipSuccess != err ) { fprintf( stderr, "Cuda error in file '%s' in line %i : %s.\n", __FILE__, __LINE__, hipGetErrorString(err) ); return 0.0f; } err = hipMemset(d_result, 0, result_size * sizeof(float)); if ( hipSuccess != err ) { fprintf( stderr, "Cuda error in file '%s' in line %i : %s.\n", __FILE__, __LINE__, hipGetErrorString(err) ); return 0.0f; } hipLaunchKernelGGL(( atoms_difference), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, d_A, d_B, d_result, n, line_blocks); float RMSD = 0; thrust::device_ptr<float> dptr(d_result); RMSD = thrust::reduce(thrust::device, dptr, dptr + result_size); hipFree(d_result); return sqrt(1 / ((float)n * ((float)n - 1)) * RMSD); }
b87685feccdb03e398387074fa1754273ecbc5ef.cu
#include <cublas_v2.h> #include <thrust/device_ptr.h> #include <thrust/reduce.h> #include <thrust/execution_policy.h> #define pow_2(x) ( ((x) * (x)) ) #if MOLECULE_SIZE > 2000 #define LARGE #define BLOCK_SIZE 512 #else #define SMALL #define BLOCK_SIZE 64 #endif struct Atom { float x, y, z; Atom() {} __device__ Atom(float x_, float y_, float z_) : x(x_), y(y_), z(z_) {} }; __global__ void atoms_difference(sMolecule A, sMolecule B, float * d_result, int n, int line_blocks) { float a_x, a_y, a_z, b_x, b_y, b_z; __shared__ int skip, quot, reminder; if (0 == threadIdx.x) { quot = blockIdx.x / line_blocks; reminder = blockIdx.x % line_blocks; if (quot > reminder) { skip = 1; } else { skip = 0; } } __syncthreads(); if (skip == 1) { return; } int block_begin = (quot) * BLOCK_SIZE; int i = block_begin + threadIdx.x; int begin = (reminder) * BLOCK_SIZE; __shared__ float A_x[BLOCK_SIZE], A_y[BLOCK_SIZE], A_z[BLOCK_SIZE]; A_x[threadIdx.x] = A.x[begin + threadIdx.x]; A_y[threadIdx.x] = A.y[begin + threadIdx.x]; A_z[threadIdx.x] = A.z[begin + threadIdx.x]; __shared__ float B_x[BLOCK_SIZE], B_y[BLOCK_SIZE], B_z[BLOCK_SIZE]; B_x[threadIdx.x] = B.x[begin + threadIdx.x]; B_y[threadIdx.x] = B.y[begin + threadIdx.x]; B_z[threadIdx.x] = B.z[begin + threadIdx.x]; if (i >= n) { return; } __shared__ int copy_from_shared; if (0 == threadIdx.x) { if (block_begin == begin) { copy_from_shared = 1; } else { copy_from_shared = 0; } } __syncthreads(); if (1 == copy_from_shared) { a_x = A_x[threadIdx.x]; a_y = A_y[threadIdx.x]; a_z = A_z[threadIdx.x]; b_x = B_x[threadIdx.x]; b_y = B_y[threadIdx.x]; b_z = B_z[threadIdx.x]; } else { a_x = A.x[i]; a_y = A.y[i]; a_z = A.z[i]; b_x = B.x[i]; b_y = B.y[i]; b_z = B.z[i]; } float sum = 0.0; #ifdef LARGE #pragma unroll 32 #endif #ifdef SMALL #pragma unroll 64 #endif for (int j = 0; j < BLOCK_SIZE; ++j) { int index = begin + j; if (index >= n) { break; } if (i < index) { // printf("processing (%d, %d)\n", i, index); float diff_x = A_x[j] - a_x; float diff_y = A_y[j] - a_y; float diff_z = A_z[j] - a_z; float da = sqrt(pow_2(diff_x) + pow_2(diff_y) + pow_2(diff_z)); diff_x = B_x[j] - b_x; diff_y = B_y[j] - b_y; diff_z = B_z[j] - b_z; float db = sqrt(pow_2(diff_x) + pow_2(diff_y) + pow_2(diff_z)); // printf("Ax diff [%f, %f, %f]\n", // pow_2(A.x[i] - A.x[j]), // pow_2(A.y[i] - A.y[j]), // pow_2(A.z[i] - A.z[j])); // printf("Da: %f db: %f\n", da, db); // printf("saving result: %f\n", pow_2(da - db)); sum += pow_2(da - db); } } atomicAdd(d_result + i, sum); } float solveGPU(sMolecule d_A, sMolecule d_B, int n) { int line_blocks = n / BLOCK_SIZE + 1; int GRID_SIZE = pow_2(line_blocks); float *d_result; int result_size = n; cudaError err = cudaMalloc(&d_result, result_size * sizeof(float)); if ( cudaSuccess != err ) { fprintf( stderr, "Cuda error in file '%s' in line %i : %s.\n", __FILE__, __LINE__, cudaGetErrorString(err) ); return 0.0f; } err = cudaMemset(d_result, 0, result_size * sizeof(float)); if ( cudaSuccess != err ) { fprintf( stderr, "Cuda error in file '%s' in line %i : %s.\n", __FILE__, __LINE__, cudaGetErrorString(err) ); return 0.0f; } atoms_difference<<<GRID_SIZE, BLOCK_SIZE>>> (d_A, d_B, d_result, n, line_blocks); float RMSD = 0; thrust::device_ptr<float> dptr(d_result); RMSD = thrust::reduce(thrust::device, dptr, dptr + result_size); cudaFree(d_result); return sqrt(1 / ((float)n * ((float)n - 1)) * RMSD); }
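The b87685… pair above is a compact, real-world illustration of what the hipify pass rewrites on the host side: cublas_v2.h becomes rocblas.h, cudaError / cudaMalloc / cudaMemset / cudaGetErrorString / cudaFree become their hip* counterparts, and the triple-chevron launch becomes hipLaunchKernelGGL, while the kernel body itself is untouched. The sketch below restates that mapping in a minimal, self-contained form; the scale kernel, the sizes, and the scale factor are hypothetical stand-ins, not code taken from the files above.

// Illustrative sketch only (not part of the dataset): the same launch written for HIP,
// with the CUDA form kept as a comment. Kernel name, sizes, and scale factor are hypothetical.
#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void scale(float *x, float s, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= s;
}

int main() {
    const int n = 1 << 20, block = 256, grid = (n + block - 1) / block;
    float *d_x = nullptr;

    hipError_t err = hipMalloc(&d_x, n * sizeof(float));               // CUDA: cudaMalloc
    if (err != hipSuccess) {
        fprintf(stderr, "hipMalloc failed: %s\n", hipGetErrorString(err));  // CUDA: cudaGetErrorString
        return 1;
    }

    // CUDA form:  scale<<<grid, block>>>(d_x, 2.0f, n);
    // HIP form emitted by hipify (as in the pair above):
    hipLaunchKernelGGL(scale, dim3(grid), dim3(block), 0, 0, d_x, 2.0f, n);

    hipFree(d_x);                                                      // CUDA: cudaFree
    return 0;
}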
3347a35f2741ce486f6ca3d53a73b635bb7413f5.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void kernal( void ) { }
3347a35f2741ce486f6ca3d53a73b635bb7413f5.cu
#include "includes.h" __global__ void kernal( void ) { }
75238f500dd55476a4025ceafd4ab64269e15c8c.hip
// !!! This is a file automatically generated by hipify!!!
#include <exception>
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <vector>
#include <hip/hip_runtime_api.h>
#include <thrust/device_ptr.h>
#include <thrust/transform.h>
#include "common.h"

using namespace std;

namespace {
// template<typename T>
// inline void leaky_relu_backward_impl(T *z, T *dz, float slope, int64_t count) {
//   // Create thrust pointers
//   thrust::device_ptr<T> th_z = thrust::device_pointer_cast(z);
//   thrust::device_ptr<T> th_dz = thrust::device_pointer_cast(dz);
//
//   thrust::transform_if(th_dz, th_dz + count, th_z, th_dz,
//                        [slope] __device__ (const T& dz) { return dz * slope; },
//                        [] __device__ (const T& z) { return z < 0; });
//   thrust::transform_if(th_z, th_z + count, th_z,
//                        [slope] __device__ (const T& z) { return z / slope; },
//                        [] __device__ (const T& z) { return z < 0; });
// }
}

void LeakyRelu_Forward_CUDA(at::Tensor z, float slope) {
  at::leaky_relu_(z, slope);
}

void LeakyRelu_Backward_CUDA(at::Tensor z, at::Tensor dz, float slope) {
  int64_t count = z.numel();
  /*
  AT_DISPATCH_FLOATING_TYPES(z.type(), "LeakyRelu_Backward_CUDA", ([&] {
    leaky_relu_backward_impl<scalar_t>(z.data<scalar_t>(), dz.data<scalar_t>(), slope, count);
  }));
  */
  // unstable after scaling
  at::leaky_relu_(z, 1.0 / slope);
  // This API is changed on pytorch side, feature broken
  throw "PyTorch API break, Don't use InplaceABN for now.";
  // at::leaky_relu_backward(dz, z, slope, false);
}
75238f500dd55476a4025ceafd4ab64269e15c8c.cu
#include <exception>
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <vector>
#include <cuda_runtime_api.h>
#include <thrust/device_ptr.h>
#include <thrust/transform.h>
#include "common.h"

using namespace std;

namespace {
// template<typename T>
// inline void leaky_relu_backward_impl(T *z, T *dz, float slope, int64_t count) {
//   // Create thrust pointers
//   thrust::device_ptr<T> th_z = thrust::device_pointer_cast(z);
//   thrust::device_ptr<T> th_dz = thrust::device_pointer_cast(dz);
//
//   thrust::transform_if(th_dz, th_dz + count, th_z, th_dz,
//                        [slope] __device__ (const T& dz) { return dz * slope; },
//                        [] __device__ (const T& z) { return z < 0; });
//   thrust::transform_if(th_z, th_z + count, th_z,
//                        [slope] __device__ (const T& z) { return z / slope; },
//                        [] __device__ (const T& z) { return z < 0; });
// }
}

void LeakyRelu_Forward_CUDA(at::Tensor z, float slope) {
  at::leaky_relu_(z, slope);
}

void LeakyRelu_Backward_CUDA(at::Tensor z, at::Tensor dz, float slope) {
  int64_t count = z.numel();
  /*
  AT_DISPATCH_FLOATING_TYPES(z.type(), "LeakyRelu_Backward_CUDA", ([&] {
    leaky_relu_backward_impl<scalar_t>(z.data<scalar_t>(), dz.data<scalar_t>(), slope, count);
  }));
  */
  // unstable after scaling
  at::leaky_relu_(z, 1.0 / slope);
  // This API is changed on pytorch side, feature broken
  throw "PyTorch API break, Don't use InplaceABN for now.";
  // at::leaky_relu_backward(dz, z, slope, false);
}
bdfd1e1c71445077aa67e26dca060993db93de9f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /***************************************************************************//** * \file generateE.cu * \author Anush Krishnan ([email protected]) * \brief Implementation of the kernels to generate elements of the interpolation matrix. */ #include "generateE.h" /** * \brief Discrete delta function defined by Roma et al. (1999). * * \param x x- or y- component of the vector defined between two points * \param h the grid-spacing * * \return the value of the discrete delta function */ __device__ \ real dhRomaDeviceE(real x, real h) { real r = fabs(x)/h; if(r>1.5) return 0.0; else if(r>0.5 && r<=1.5) return 1.0/(6*h)*( 5.0 - 3.0*r - sqrt(-3.0*(1-r)*(1-r) + 1.0) ); else return 1.0/(3*h)*( 1.0 + sqrt(-3.0*r*r + 1.0) ); } /** * \brief Two-dimension discrete delta function. * * \param x x-component of the vector defined between two points * \param y y-component of the vector defined between two points * \param h the grid-spacing * * \return the value of the discrete delta function in 2D */ __device__ \ real deltaDeviceE(real x, real y, real h) { return dhRomaDeviceE(x, h) * dhRomaDeviceE(y, h); } /** * \namespace kernels * \brief Contains all custom-written CUDA kernels. */ namespace kernels { /** * \brief Generates the interpolation matrix (on the host). * * \param ERows row index of elements of the interpolation matrix * \param ECols column index of elements of the interpolation matrix * \param EVals value of elements of the interpolation matrix * \param nx number of cells in the x-direction * \param ny number of cells in the y-direction * \param x x-component of grid points * \param y y-component of grid points * \param dx cell-widths in the x-direction * \param totalPoints number of body points (all bodies included) * \param xB x-coordinate of body points (all bodies included) * \param yB y-coordinate of body points (all bodies included) * \param I x-index of the cells in which body points are located * \param J y-index of the cells in which body points are located */ void generateEHost(int *ERows, int *ECols, real *EVals, int nx, int ny, real *x, real *y, real *dx, int totalPoints, real *xB, real *yB, int *I, int *J) { for(int bodyIdx=0; bodyIdx<totalPoints; bodyIdx++) { int Ib=I[bodyIdx], Jb=J[bodyIdx], EIdx = bodyIdx*12, i, j; real Dx = dx[Ib]; // uB = integral (u * delta * dxdy) // E = E_hat * R^-1 => divide E_hat by Dx // populate x-components for(j=Jb-1; j<=Jb+1; j++) { for(i=Ib-2; i<=Ib+1; i++) { ERows[EIdx] = bodyIdx; ECols[EIdx] = j*(nx-1) + i; EVals[EIdx] = Dx*delta(x[i+1]-xB[bodyIdx], 0.5*(y[j]+y[j+1])-yB[bodyIdx], Dx); EIdx++; } } // populate y-components for(j=Jb-2; j<=Jb+1; j++) { for(i=Ib-1; i<=Ib+1; i++) { ERows[EIdx+12*totalPoints-12] = bodyIdx + totalPoints; ECols[EIdx+12*totalPoints-12] = j*nx + i + (nx-1)*ny; EVals[EIdx+12*totalPoints-12] = Dx*delta(0.5*(x[i]+x[i+1])-xB[bodyIdx], y[j+1]-yB[bodyIdx], Dx); EIdx++; } } } } /** * \brief Computes elements of the interpolation matrix. 
* * \param ERows row index of elements of the interpolation matrix * \param ECols column index of elements of the interpolation matrix * \param EVals value of elements of the interpolation matrix * \param nx number of cells in the x-direction * \param ny number of cells in the y-direction * \param x x-component of grid points * \param y y-component of grid points * \param dx cell-widths in the x-direction * \param totalPoints number of body points (all bodies included) * \param xB x-coordinate of body points (all bodies included) * \param yB y-coordinate of body points (all bodies included) * \param I x-index of the cell in which the body point is located * \param J y-index of the cell in which the body point is located */ __global__ \ void generateE(int *ERows, int *ECols, real *EVals, int nx, int ny, real *x, real *y, real *dx, int totalPoints, real *xB, real *yB, int *I, int *J) { int bodyIdx = threadIdx.x + blockIdx.x*blockDim.x; if(bodyIdx < totalPoints) { int Ib=I[bodyIdx], Jb=J[bodyIdx], EIdx = bodyIdx*12, i, j; real Dx = dx[Ib]; // uB = integral u * delta * dxdy = Ehat * u // E = Ehat * R^-1 => divide by Dx // E = Dx * delta // populate x-components for(j=Jb-1; j<=Jb+1; j++) { for(i=Ib-2; i<=Ib+1; i++) { ERows[EIdx] = bodyIdx; ECols[EIdx] = j*(nx-1) + i; EVals[EIdx] = Dx*deltaDeviceE(x[i+1]-xB[bodyIdx], 0.5*(y[j]+y[j+1])-yB[bodyIdx], Dx); EIdx++; } } // populate y-components for(j=Jb-2; j<=Jb+1; j++) { for(i=Ib-1; i<=Ib+1; i++) { ERows[EIdx+12*totalPoints-12] = bodyIdx + totalPoints; ECols[EIdx+12*totalPoints-12] = j*nx + i + (nx-1)*ny; EVals[EIdx+12*totalPoints-12] = Dx*deltaDeviceE(0.5*(x[i]+x[i+1])-xB[bodyIdx], y[j+1]-yB[bodyIdx], Dx); EIdx++; } } } } } // end of namespace kernels
bdfd1e1c71445077aa67e26dca060993db93de9f.cu
/***************************************************************************//** * \file generateE.cu * \author Anush Krishnan ([email protected]) * \brief Implementation of the kernels to generate elements of the interpolation matrix. */ #include "generateE.h" /** * \brief Discrete delta function defined by Roma et al. (1999). * * \param x x- or y- component of the vector defined between two points * \param h the grid-spacing * * \return the value of the discrete delta function */ __device__ \ real dhRomaDeviceE(real x, real h) { real r = fabs(x)/h; if(r>1.5) return 0.0; else if(r>0.5 && r<=1.5) return 1.0/(6*h)*( 5.0 - 3.0*r - sqrt(-3.0*(1-r)*(1-r) + 1.0) ); else return 1.0/(3*h)*( 1.0 + sqrt(-3.0*r*r + 1.0) ); } /** * \brief Two-dimension discrete delta function. * * \param x x-component of the vector defined between two points * \param y y-component of the vector defined between two points * \param h the grid-spacing * * \return the value of the discrete delta function in 2D */ __device__ \ real deltaDeviceE(real x, real y, real h) { return dhRomaDeviceE(x, h) * dhRomaDeviceE(y, h); } /** * \namespace kernels * \brief Contains all custom-written CUDA kernels. */ namespace kernels { /** * \brief Generates the interpolation matrix (on the host). * * \param ERows row index of elements of the interpolation matrix * \param ECols column index of elements of the interpolation matrix * \param EVals value of elements of the interpolation matrix * \param nx number of cells in the x-direction * \param ny number of cells in the y-direction * \param x x-component of grid points * \param y y-component of grid points * \param dx cell-widths in the x-direction * \param totalPoints number of body points (all bodies included) * \param xB x-coordinate of body points (all bodies included) * \param yB y-coordinate of body points (all bodies included) * \param I x-index of the cells in which body points are located * \param J y-index of the cells in which body points are located */ void generateEHost(int *ERows, int *ECols, real *EVals, int nx, int ny, real *x, real *y, real *dx, int totalPoints, real *xB, real *yB, int *I, int *J) { for(int bodyIdx=0; bodyIdx<totalPoints; bodyIdx++) { int Ib=I[bodyIdx], Jb=J[bodyIdx], EIdx = bodyIdx*12, i, j; real Dx = dx[Ib]; // uB = integral (u * delta * dxdy) // E = E_hat * R^-1 => divide E_hat by Dx // populate x-components for(j=Jb-1; j<=Jb+1; j++) { for(i=Ib-2; i<=Ib+1; i++) { ERows[EIdx] = bodyIdx; ECols[EIdx] = j*(nx-1) + i; EVals[EIdx] = Dx*delta(x[i+1]-xB[bodyIdx], 0.5*(y[j]+y[j+1])-yB[bodyIdx], Dx); EIdx++; } } // populate y-components for(j=Jb-2; j<=Jb+1; j++) { for(i=Ib-1; i<=Ib+1; i++) { ERows[EIdx+12*totalPoints-12] = bodyIdx + totalPoints; ECols[EIdx+12*totalPoints-12] = j*nx + i + (nx-1)*ny; EVals[EIdx+12*totalPoints-12] = Dx*delta(0.5*(x[i]+x[i+1])-xB[bodyIdx], y[j+1]-yB[bodyIdx], Dx); EIdx++; } } } } /** * \brief Computes elements of the interpolation matrix. 
* * \param ERows row index of elements of the interpolation matrix * \param ECols column index of elements of the interpolation matrix * \param EVals value of elements of the interpolation matrix * \param nx number of cells in the x-direction * \param ny number of cells in the y-direction * \param x x-component of grid points * \param y y-component of grid points * \param dx cell-widths in the x-direction * \param totalPoints number of body points (all bodies included) * \param xB x-coordinate of body points (all bodies included) * \param yB y-coordinate of body points (all bodies included) * \param I x-index of the cell in which the body point is located * \param J y-index of the cell in which the body point is located */ __global__ \ void generateE(int *ERows, int *ECols, real *EVals, int nx, int ny, real *x, real *y, real *dx, int totalPoints, real *xB, real *yB, int *I, int *J) { int bodyIdx = threadIdx.x + blockIdx.x*blockDim.x; if(bodyIdx < totalPoints) { int Ib=I[bodyIdx], Jb=J[bodyIdx], EIdx = bodyIdx*12, i, j; real Dx = dx[Ib]; // uB = integral u * delta * dxdy = Ehat * u // E = Ehat * R^-1 => divide by Dx // E = Dx * delta // populate x-components for(j=Jb-1; j<=Jb+1; j++) { for(i=Ib-2; i<=Ib+1; i++) { ERows[EIdx] = bodyIdx; ECols[EIdx] = j*(nx-1) + i; EVals[EIdx] = Dx*deltaDeviceE(x[i+1]-xB[bodyIdx], 0.5*(y[j]+y[j+1])-yB[bodyIdx], Dx); EIdx++; } } // populate y-components for(j=Jb-2; j<=Jb+1; j++) { for(i=Ib-1; i<=Ib+1; i++) { ERows[EIdx+12*totalPoints-12] = bodyIdx + totalPoints; ECols[EIdx+12*totalPoints-12] = j*nx + i + (nx-1)*ny; EVals[EIdx+12*totalPoints-12] = Dx*deltaDeviceE(0.5*(x[i]+x[i+1])-xB[bodyIdx], y[j+1]-yB[bodyIdx], Dx); EIdx++; } } } } } // end of namespace kernels
71921a03f87158d12a6474616a3292d11f56ce9d.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <structs/utilities.hpp> #include <cudf/column/column_factories.hpp> #include <cudf/detail/copy.hpp> #include <cudf/detail/gather.cuh> #include <cudf/detail/iterator.cuh> #include <cudf/detail/null_mask.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/lists/detail/sorting.hpp> #include <cudf/lists/drop_list_duplicates.hpp> #include <cudf/structs/struct_view.hpp> #include <cudf/table/table_view.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <thrust/binary_search.h> #include <thrust/transform.h> namespace cudf { namespace lists { namespace detail { namespace { template <typename Type> struct has_negative_nans_fn { column_device_view const d_entries; bool const has_nulls; has_negative_nans_fn(column_device_view const d_entries, bool const has_nulls) : d_entries(d_entries), has_nulls(has_nulls) { } __device__ Type operator()(size_type idx) const noexcept { if (has_nulls && d_entries.is_null_nocheck(idx)) { return false; } auto const val = d_entries.element<Type>(idx); return std::isnan(val) && std::signbit(val); // std::signbit(x) == true if x is negative } }; /** * @brief A structure to be used along with type_dispatcher to check if a column has any * negative NaN value. * * This functor is used to check for replacing negative NaN if there exists one. It is neccessary * because when calling to `lists::detail::sort_lists`, the negative NaN and positive NaN values (if * both exist) are separated to the two ends of the output column. This is due to the API * `lists::detail::sort_lists` internally calls `hipcub::DeviceSegmentedRadixSort`, which performs * sorting by comparing bits of the input numbers. Since negative and positive NaN have * different bits representation, they may not be moved to be close to each other after sorted. */ struct has_negative_nans_dispatch { template <typename Type, std::enable_if_t<cuda::std::is_floating_point_v<Type>>* = nullptr> bool operator()(column_view const& lists_entries, rmm::cuda_stream_view stream) const noexcept { auto const d_entries = column_device_view::create(lists_entries, stream); return thrust::count_if( rmm::exec_policy(stream), thrust::make_counting_iterator(0), thrust::make_counting_iterator(lists_entries.size()), detail::has_negative_nans_fn<Type>{*d_entries, lists_entries.has_nulls()}); } template <typename Type, std::enable_if_t<std::is_same_v<Type, cudf::struct_view>>* = nullptr> bool operator()(column_view const& lists_entries, rmm::cuda_stream_view stream) const { // Recursively check negative NaN on the children columns. 
return std::any_of( thrust::make_counting_iterator(0), thrust::make_counting_iterator(lists_entries.num_children()), [structs_view = structs_column_view{lists_entries}, stream](auto const child_idx) { auto const col = structs_view.get_sliced_child(child_idx); return type_dispatcher(col.type(), detail::has_negative_nans_dispatch{}, col, stream); }); } template <typename Type, std::enable_if_t<!cuda::std::is_floating_point_v<Type> && !std::is_same_v<Type, cudf::struct_view>>* = nullptr> bool operator()(column_view const&, rmm::cuda_stream_view) const { // Columns of non floating-point data will never contain NaN. return false; } }; template <typename Type> struct replace_negative_nans_fn { __device__ Type operator()(Type val) const noexcept { return std::isnan(val) ? std::numeric_limits<Type>::quiet_NaN() : val; } }; /** * @brief A structure to be used along with type_dispatcher to replace -NaN by NaN for all rows * in a floating-point data column. */ struct replace_negative_nans_dispatch { template <typename Type, std::enable_if_t<!cuda::std::is_floating_point_v<Type> && !std::is_same_v<Type, cudf::struct_view>>* = nullptr> std::unique_ptr<column> operator()(column_view const& lists_entries, rmm::cuda_stream_view) const noexcept { // For non floating point type and non struct, just return a copy of the input. return std::make_unique<column>(lists_entries); } template <typename Type, std::enable_if_t<cuda::std::is_floating_point_v<Type>>* = nullptr> std::unique_ptr<column> operator()(column_view const& lists_entries, rmm::cuda_stream_view stream) const noexcept { auto new_entries = cudf::detail::allocate_like( lists_entries, lists_entries.size(), cudf::mask_allocation_policy::NEVER, stream); new_entries->set_null_mask(cudf::detail::copy_bitmask(lists_entries, stream), lists_entries.null_count()); // Replace all negative NaN values. thrust::transform(rmm::exec_policy(stream), lists_entries.template begin<Type>(), lists_entries.template end<Type>(), new_entries->mutable_view().template begin<Type>(), detail::replace_negative_nans_fn<Type>{}); return new_entries; } template <typename Type, std::enable_if_t<std::is_same_v<Type, cudf::struct_view>>* = nullptr> std::unique_ptr<column> operator()(column_view const& lists_entries, rmm::cuda_stream_view stream) const noexcept { std::vector<std::unique_ptr<cudf::column>> output_struct_members; std::transform( thrust::make_counting_iterator(0), thrust::make_counting_iterator(lists_entries.num_children()), std::back_inserter(output_struct_members), [structs_view = structs_column_view{lists_entries}, stream](auto const child_idx) { auto const col = structs_view.get_sliced_child(child_idx); return type_dispatcher(col.type(), detail::replace_negative_nans_dispatch{}, col, stream); }); return cudf::make_structs_column(lists_entries.size(), std::move(output_struct_members), lists_entries.null_count(), cudf::detail::copy_bitmask(lists_entries, stream), stream); } }; /** * @brief Generate a 0-based offset column for a lists column. * * Given a lists_column_view, which may have a non-zero offset, generate a new column containing * 0-based list offsets. This is done by subtracting each of the input list offset by the first * offset. * * @code{.pseudo} * Given a list column having offsets = { 3, 7, 9, 13 }, * then output_offsets = { 0, 4, 6, 10 } * @endcode * * @param lists_column The input lists column. * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Device resource used to allocate memory. 
* @return A column containing 0-based list offsets. */ std::unique_ptr<column> generate_clean_offsets(lists_column_view const& lists_column, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto output_offsets = make_numeric_column(data_type{type_to_id<offset_type>()}, lists_column.size() + 1, mask_state::UNALLOCATED, stream, mr); thrust::transform( rmm::exec_policy(stream), lists_column.offsets_begin(), lists_column.offsets_end(), output_offsets->mutable_view().begin<offset_type>(), [first = lists_column.offsets_begin()] __device__(auto offset) { return offset - *first; }); return output_offsets; } /** * @brief Transform a given lists column to a new lists column in which all the list entries holding * -NaN value are replaced by (positive) NaN. * * Replacing -NaN by NaN is necessary before sorting (individual) lists because the sorting API is * using radix sort, which compares bits of the number thus it may separate -NaN by NaN to the two * ends of the result column. */ std::unique_ptr<column> replace_negative_nans_entries(column_view const& lists_entries, lists_column_view const& lists_column, rmm::cuda_stream_view stream) { // We need to copy the offsets column of the input lists_column. Since the input lists_column may // be sliced, we need to generate clean offsets (i.e., offsets starting from zero). auto new_offsets = generate_clean_offsets(lists_column, stream, rmm::mr::get_current_device_resource()); auto new_entries = type_dispatcher( lists_entries.type(), detail::replace_negative_nans_dispatch{}, lists_entries, stream); return make_lists_column( lists_column.size(), std::move(new_offsets), std::move(new_entries), lists_column.null_count(), cudf::detail::copy_bitmask( lists_column.parent(), stream, rmm::mr::get_current_device_resource())); } /** * @brief Populate list offsets for all list entries. * * Given an `offsets` column_view containing offsets of a lists column and a number of all list * entries in the column, generate an array that maps from each list entry to the offset of the list * containing that entry. * * @code{.pseudo} * num_entries = 10, offsets = { 0, 4, 6, 10 } * output = { 1, 1, 1, 1, 2, 2, 3, 3, 3, 3 } * @endcode * * @param num_entries The number of list entries. * @param offsets Column view to the list offsets. * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Device resource used to allocate memory. * @return A column containing entry list offsets. */ std::unique_ptr<column> generate_entry_list_offsets(size_type num_entries, column_view const& offsets, rmm::cuda_stream_view stream) { auto entry_list_offsets = make_numeric_column(offsets.type(), num_entries, mask_state::UNALLOCATED, stream, rmm::mr::get_current_device_resource()); thrust::upper_bound(rmm::exec_policy(stream), offsets.begin<offset_type>(), offsets.end<offset_type>(), thrust::make_counting_iterator<offset_type>(0), thrust::make_counting_iterator<offset_type>(num_entries), entry_list_offsets->mutable_view().begin<offset_type>()); return entry_list_offsets; } /** * @brief Performs an equality comparison between two entries in a lists column. * * For the two elements that are NOT in the same list in the lists column, they will always be * considered as different. If they are from the same list and their type is not floating point, * this functor will return the same comparison result as `cudf::element_equality_comparator`. 
* * For floating-point types, entries holding NaN value can be considered as different values or the * same value depending on the `nans_equal` parameter. * * @tparam Type The data type of entries * @tparam nans_equal Flag to specify whether NaN entries should be considered as equal value (only * applicable for floating-point data column) */ template <class Type> struct column_row_comparator_fn { offset_type const* const list_offsets; column_device_view const lhs; column_device_view const rhs; null_equality const nulls_equal; bool const has_nulls; bool const nans_equal; __host__ __device__ column_row_comparator_fn(offset_type const* const list_offsets, column_device_view const& lhs, column_device_view const& rhs, null_equality const nulls_equal, bool const has_nulls, bool const nans_equal) : list_offsets(list_offsets), lhs(lhs), rhs(rhs), nulls_equal(nulls_equal), has_nulls(has_nulls), nans_equal(nans_equal) { } template <typename T, std::enable_if_t<!cuda::std::is_floating_point_v<T>>* = nullptr> bool __device__ compare(T const& lhs_val, T const& rhs_val) const noexcept { return lhs_val == rhs_val; } template <typename T, std::enable_if_t<cuda::std::is_floating_point_v<T>>* = nullptr> bool __device__ compare(T const& lhs_val, T const& rhs_val) const noexcept { // If both element(i) and element(j) are NaNs and nans are considered as equal value then this // comparison will return `true`. This is the desired behavior in Pandas. if (nans_equal && std::isnan(lhs_val) && std::isnan(rhs_val)) { return true; } // If nans are considered as NOT equal, even both element(i) and element(j) are NaNs this // comparison will still return `false`. This is the desired behavior in Apache Spark. return lhs_val == rhs_val; } bool __device__ operator()(size_type i, size_type j) const noexcept { // Two entries are not considered for equality if they belong to different lists. if (list_offsets[i] != list_offsets[j]) { return false; } if (has_nulls) { bool const lhs_is_null{lhs.nullable() && lhs.is_null_nocheck(i)}; bool const rhs_is_null{rhs.nullable() && rhs.is_null_nocheck(j)}; if (lhs_is_null && rhs_is_null) { return nulls_equal == null_equality::EQUAL; } else if (lhs_is_null != rhs_is_null) { return false; } } return compare<Type>(lhs.element<Type>(i), lhs.element<Type>(j)); } }; /** * @brief Struct used in type_dispatcher for comparing two entries in a lists column. 
*/ struct column_row_comparator_dispatch { offset_type const* const list_offsets; column_device_view const lhs; column_device_view const rhs; null_equality const nulls_equal; bool const has_nulls; bool const nans_equal; __device__ column_row_comparator_dispatch(offset_type const* const list_offsets, column_device_view const& lhs, column_device_view const& rhs, null_equality const nulls_equal, bool const has_nulls, bool const nans_equal) : list_offsets(list_offsets), lhs(lhs), rhs(rhs), nulls_equal(nulls_equal), has_nulls(has_nulls), nans_equal(nans_equal) { } template <class Type, std::enable_if_t<cudf::is_equality_comparable<Type, Type>()>* = nullptr> bool __device__ operator()(size_type i, size_type j) const noexcept { return column_row_comparator_fn<Type>{ list_offsets, lhs, rhs, nulls_equal, has_nulls, nans_equal}(i, j); } template <class Type, std::enable_if_t<!cudf::is_equality_comparable<Type, Type>()>* = nullptr> bool operator()(size_type i, size_type j) const { CUDF_FAIL( "`column_row_comparator_dispatch` cannot operate on types that are not equally comparable."); } }; /** * @brief Performs an equality comparison between rows of two tables using `column_row_comparator` * to compare rows of their corresponding columns. */ struct table_row_comparator_fn { offset_type const* const list_offsets; table_device_view const lhs; table_device_view const rhs; null_equality const nulls_equal; bool const has_nulls; bool const nans_equal; table_row_comparator_fn(offset_type const* const list_offsets, table_device_view const& lhs, table_device_view const& rhs, null_equality const nulls_equal, bool const has_nulls, bool const nans_equal) : list_offsets(list_offsets), lhs(lhs), rhs(rhs), nulls_equal(nulls_equal), has_nulls(has_nulls), nans_equal(nans_equal) { } bool __device__ operator()(size_type i, size_type j) const noexcept { auto column_comp = [=](column_device_view const& lhs, column_device_view const& rhs) { return type_dispatcher( lhs.type(), column_row_comparator_dispatch{list_offsets, lhs, rhs, nulls_equal, has_nulls, nans_equal}, i, j); }; return thrust::equal(thrust::seq, lhs.begin(), lhs.end(), rhs.begin(), column_comp); } }; /** * @brief Struct used in type_dispatcher for copying indices of the list entries ignoring * duplicates. 
*/ struct get_unique_entries_dispatch { template <class Type, std::enable_if_t<!cudf::is_equality_comparable<Type, Type>() && !std::is_same_v<Type, cudf::struct_view>>* = nullptr> offset_type* operator()(offset_type const*, column_view const&, size_type, offset_type*, null_equality, nan_equality, bool, rmm::cuda_stream_view) const { CUDF_FAIL( "`get_unique_entries_dispatch` cannot operate on types that are not equally comparable."); } template <class Type, std::enable_if_t<cudf::is_equality_comparable<Type, Type>()>* = nullptr> offset_type* operator()(offset_type const* list_offsets, column_view const& all_lists_entries, size_type num_entries, offset_type* output_begin, null_equality nulls_equal, nan_equality nans_equal, bool has_nulls, rmm::cuda_stream_view stream) const noexcept { auto const d_view = column_device_view::create(all_lists_entries, stream); auto const comp = column_row_comparator_fn<Type>{list_offsets, *d_view, *d_view, nulls_equal, has_nulls, nans_equal == nan_equality::ALL_EQUAL}; return thrust::unique_copy(rmm::exec_policy(stream), thrust::make_counting_iterator(0), thrust::make_counting_iterator(num_entries), output_begin, comp); } template <class Type, std::enable_if_t<std::is_same_v<Type, cudf::struct_view>>* = nullptr> offset_type* operator()(offset_type const* list_offsets, column_view const& all_lists_entries, size_type num_entries, offset_type* output_begin, null_equality nulls_equal, nan_equality nans_equal, bool has_nulls, rmm::cuda_stream_view stream) const noexcept { auto const entries_tview = table_view{{all_lists_entries}}; auto const flatten_nullability = has_nested_nulls(entries_tview) ? structs::detail::column_nullability::FORCE : structs::detail::column_nullability::MATCH_INCOMING; auto const entries_flattened = cudf::structs::detail::flatten_nested_columns( entries_tview, {order::ASCENDING}, {null_order::AFTER}, flatten_nullability); auto const d_view = table_device_view::create(std::get<0>(entries_flattened), stream); auto const comp = table_row_comparator_fn{list_offsets, *d_view, *d_view, nulls_equal, has_nulls, nans_equal == nan_equality::ALL_EQUAL}; return thrust::unique_copy(rmm::exec_policy(stream), thrust::make_counting_iterator(0), thrust::make_counting_iterator(num_entries), output_begin, comp); } }; /** * @brief Copy list entries and entry list offsets ignoring duplicates. * * Given an array of all entries flattened from a list column and an array that maps each entry to * the offset of the list containing that entry, those entries and list offsets are copied into * new arrays such that the duplicated entries within each list will be ignored. * * @param all_lists_entries The input array containing all list entries. * @param entries_list_offsets A map from list entries to their corresponding list offsets. * @param nulls_equal Flag to specify whether null entries should be considered equal. * @param nans_equal Flag to specify whether NaN entries should be considered equal * (only applicable for floating-point data column). * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Device resource used to allocate memory. * @return A pair of columns, the first one contains unique list entries and the second one * contains their corresponding list offsets. 
*/ std::vector<std::unique_ptr<column>> get_unique_entries_and_list_offsets( column_view const& all_lists_entries, column_view const& entries_list_offsets, null_equality nulls_equal, nan_equality nans_equal, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto const num_entries = all_lists_entries.size(); // Allocate memory to store the indices of the unique entries. auto unique_indices = rmm::device_uvector<offset_type>(num_entries, stream); auto const output_begin = unique_indices.begin(); auto const output_end = type_dispatcher(all_lists_entries.type(), get_unique_entries_dispatch{}, entries_list_offsets.begin<offset_type>(), all_lists_entries, num_entries, output_begin, nulls_equal, nans_equal, all_lists_entries.has_nulls(), stream); // Collect unique entries and entry list offsets. // The new null_count and bitmask of the unique entries will also be generated // by the gather function. return cudf::detail::gather(table_view{{all_lists_entries, entries_list_offsets}}, output_begin, output_end, cudf::out_of_bounds_policy::DONT_CHECK, stream, mr) ->release(); } /** * @brief Generate list offsets from entry offsets. * * Generate an array of list offsets for the final result lists column. The list offsets of the * original lists column are also taken into account to make sure the result lists column will have * the same empty list rows (if any) as in the original lists column. * * @param num_entries The number of unique entries after removing duplicates. * @param entries_list_offsets The mapping from list entries to their list offsets. * @param original_offsets The list offsets of the original lists column, which will also be used to * store the new list offsets. * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Device resource used to allocate memory. */ void generate_offsets(size_type num_entries, column_view const& entries_list_offsets, mutable_column_view const& original_offsets, rmm::cuda_stream_view stream) { // Firstly, generate temporary list offsets for the unique entries, ignoring empty lists (if any). // If entries_list_offsets = {1, 1, 1, 1, 2, 3, 3, 3, 4, 4 }, num_entries = 10, // then new_offsets = { 0, 4, 5, 8, 10 }. auto const new_offsets = allocate_like( original_offsets, mask_allocation_policy::NEVER, rmm::mr::get_current_device_resource()); thrust::copy_if(rmm::exec_policy(stream), thrust::make_counting_iterator<offset_type>(0), thrust::make_counting_iterator<offset_type>(num_entries + 1), new_offsets->mutable_view().begin<offset_type>(), [num_entries, offsets_ptr = entries_list_offsets.begin<offset_type>()] __device__( auto i) -> bool { return i == 0 || i == num_entries || offsets_ptr[i] != offsets_ptr[i - 1]; }); // Generate a prefix sum of number of empty lists, storing inplace to the original lists // offsets. // If the original list offsets is { 0, 0, 5, 5, 6, 6 } (there are 2 empty lists), // and new_offsets = { 0, 4, 6 }, then output = { 0, 1, 1, 2, 2, 3}. auto const iter_trans_begin = cudf::detail::make_counting_transform_iterator( 0, [offsets = original_offsets.begin<offset_type>()] __device__(auto i) { return (i > 0 && offsets[i] == offsets[i - 1]) ? 1 : 0; }); thrust::inclusive_scan(rmm::exec_policy(stream), iter_trans_begin, iter_trans_begin + original_offsets.size(), original_offsets.begin<offset_type>()); // Generate the final list offsets. 
// If the original list offsets are { 0, 0, 5, 5, 6, 6 }, the new offsets are { 0, 4, 6 }, // and the prefix sums of empty lists are { 0, 1, 1, 2, 2, 3 }, // then output = { 0, 0, 4, 4, 5, 5 }. thrust::transform(rmm::exec_policy(stream), thrust::make_counting_iterator<offset_type>(0), thrust::make_counting_iterator<offset_type>(original_offsets.size()), original_offsets.begin<offset_type>(), [prefix_sum_empty_lists = original_offsets.begin<offset_type>(), offsets = new_offsets->view().begin<offset_type>()] __device__(auto i) { return offsets[i - prefix_sum_empty_lists[i]]; }); } } // anonymous namespace /** * @copydoc cudf::lists::drop_list_duplicates * * @param stream CUDA stream used for device memory operations and kernel launches. */ std::unique_ptr<column> drop_list_duplicates(lists_column_view const& lists_column, null_equality nulls_equal, nan_equality nans_equal, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { if (lists_column.is_empty()) return cudf::empty_like(lists_column.parent()); if (auto const child_type = lists_column.child().type(); cudf::is_nested(child_type) && child_type.id() != type_id::STRUCT) { CUDF_FAIL("Nested types other than STRUCT are not supported in `drop_list_duplicates`."); } // Flatten all entries (depth = 1) of the lists column. auto const lists_entries = lists_column.get_sliced_child(stream); // sorted_lists will store the results of the original lists after calling segmented_sort. auto const sorted_lists = [&]() { // If nans_equal == ALL_EQUAL and the column contains lists of floating-point data type, // we need to replace -NaN by NaN before sorting. auto const replace_negative_nan = nans_equal == nan_equality::ALL_EQUAL && type_dispatcher( lists_entries.type(), detail::has_negative_nans_dispatch{}, lists_entries, stream); if (replace_negative_nan) { auto const new_lists_column = detail::replace_negative_nans_entries(lists_entries, lists_column, stream); return detail::sort_lists( lists_column_view(new_lists_column->view()), order::ASCENDING, null_order::AFTER, stream); } else { return detail::sort_lists(lists_column, order::ASCENDING, null_order::AFTER, stream); } }(); auto const sorted_lists_entries = lists_column_view(sorted_lists->view()).get_sliced_child(stream); // Generate a 0-based offset column. auto lists_offsets = detail::generate_clean_offsets(lists_column, stream, mr); // Generate a mapping from list entries to offsets of the lists containing those entries. auto const entries_list_offsets = detail::generate_entry_list_offsets(sorted_lists_entries.size(), lists_offsets->view(), stream); // Copy non-duplicated entries (along with their list offsets) to new arrays. auto unique_entries_and_list_offsets = detail::get_unique_entries_and_list_offsets( sorted_lists_entries, entries_list_offsets->view(), nulls_equal, nans_equal, stream, mr); // Generate offsets for the new lists column. detail::generate_offsets(unique_entries_and_list_offsets.front()->size(), unique_entries_and_list_offsets.back()->view(), lists_offsets->mutable_view(), stream); // Construct a new lists column without duplicated entries. // Reuse the null_count and bitmask of the lists_column: those are the null information for // the list elements (rows). // For the entries of those lists (rows), their null_count and bitmask were generated separately // during the step `get_unique_entries_and_list_offsets` above. 
return make_lists_column(lists_column.size(), std::move(lists_offsets), std::move(unique_entries_and_list_offsets.front()), lists_column.null_count(), cudf::detail::copy_bitmask(lists_column.parent(), stream, mr)); } } // namespace detail /** * @copydoc cudf::lists::drop_list_duplicates */ std::unique_ptr<column> drop_list_duplicates(lists_column_view const& lists_column, null_equality nulls_equal, nan_equality nans_equal, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::drop_list_duplicates( lists_column, nulls_equal, nans_equal, rmm::cuda_stream_default, mr); } } // namespace lists } // namespace cudf
71921a03f87158d12a6474616a3292d11f56ce9d.cu
/* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <structs/utilities.hpp> #include <cudf/column/column_factories.hpp> #include <cudf/detail/copy.hpp> #include <cudf/detail/gather.cuh> #include <cudf/detail/iterator.cuh> #include <cudf/detail/null_mask.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/lists/detail/sorting.hpp> #include <cudf/lists/drop_list_duplicates.hpp> #include <cudf/structs/struct_view.hpp> #include <cudf/table/table_view.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <thrust/binary_search.h> #include <thrust/transform.h> namespace cudf { namespace lists { namespace detail { namespace { template <typename Type> struct has_negative_nans_fn { column_device_view const d_entries; bool const has_nulls; has_negative_nans_fn(column_device_view const d_entries, bool const has_nulls) : d_entries(d_entries), has_nulls(has_nulls) { } __device__ Type operator()(size_type idx) const noexcept { if (has_nulls && d_entries.is_null_nocheck(idx)) { return false; } auto const val = d_entries.element<Type>(idx); return std::isnan(val) && std::signbit(val); // std::signbit(x) == true if x is negative } }; /** * @brief A structure to be used along with type_dispatcher to check if a column has any * negative NaN value. * * This functor is used to check for replacing negative NaN if there exists one. It is neccessary * because when calling to `lists::detail::sort_lists`, the negative NaN and positive NaN values (if * both exist) are separated to the two ends of the output column. This is due to the API * `lists::detail::sort_lists` internally calls `cub::DeviceSegmentedRadixSort`, which performs * sorting by comparing bits of the input numbers. Since negative and positive NaN have * different bits representation, they may not be moved to be close to each other after sorted. */ struct has_negative_nans_dispatch { template <typename Type, std::enable_if_t<cuda::std::is_floating_point_v<Type>>* = nullptr> bool operator()(column_view const& lists_entries, rmm::cuda_stream_view stream) const noexcept { auto const d_entries = column_device_view::create(lists_entries, stream); return thrust::count_if( rmm::exec_policy(stream), thrust::make_counting_iterator(0), thrust::make_counting_iterator(lists_entries.size()), detail::has_negative_nans_fn<Type>{*d_entries, lists_entries.has_nulls()}); } template <typename Type, std::enable_if_t<std::is_same_v<Type, cudf::struct_view>>* = nullptr> bool operator()(column_view const& lists_entries, rmm::cuda_stream_view stream) const { // Recursively check negative NaN on the children columns. 
return std::any_of( thrust::make_counting_iterator(0), thrust::make_counting_iterator(lists_entries.num_children()), [structs_view = structs_column_view{lists_entries}, stream](auto const child_idx) { auto const col = structs_view.get_sliced_child(child_idx); return type_dispatcher(col.type(), detail::has_negative_nans_dispatch{}, col, stream); }); } template <typename Type, std::enable_if_t<!cuda::std::is_floating_point_v<Type> && !std::is_same_v<Type, cudf::struct_view>>* = nullptr> bool operator()(column_view const&, rmm::cuda_stream_view) const { // Columns of non floating-point data will never contain NaN. return false; } }; template <typename Type> struct replace_negative_nans_fn { __device__ Type operator()(Type val) const noexcept { return std::isnan(val) ? std::numeric_limits<Type>::quiet_NaN() : val; } }; /** * @brief A structure to be used along with type_dispatcher to replace -NaN by NaN for all rows * in a floating-point data column. */ struct replace_negative_nans_dispatch { template <typename Type, std::enable_if_t<!cuda::std::is_floating_point_v<Type> && !std::is_same_v<Type, cudf::struct_view>>* = nullptr> std::unique_ptr<column> operator()(column_view const& lists_entries, rmm::cuda_stream_view) const noexcept { // For non floating point type and non struct, just return a copy of the input. return std::make_unique<column>(lists_entries); } template <typename Type, std::enable_if_t<cuda::std::is_floating_point_v<Type>>* = nullptr> std::unique_ptr<column> operator()(column_view const& lists_entries, rmm::cuda_stream_view stream) const noexcept { auto new_entries = cudf::detail::allocate_like( lists_entries, lists_entries.size(), cudf::mask_allocation_policy::NEVER, stream); new_entries->set_null_mask(cudf::detail::copy_bitmask(lists_entries, stream), lists_entries.null_count()); // Replace all negative NaN values. thrust::transform(rmm::exec_policy(stream), lists_entries.template begin<Type>(), lists_entries.template end<Type>(), new_entries->mutable_view().template begin<Type>(), detail::replace_negative_nans_fn<Type>{}); return new_entries; } template <typename Type, std::enable_if_t<std::is_same_v<Type, cudf::struct_view>>* = nullptr> std::unique_ptr<column> operator()(column_view const& lists_entries, rmm::cuda_stream_view stream) const noexcept { std::vector<std::unique_ptr<cudf::column>> output_struct_members; std::transform( thrust::make_counting_iterator(0), thrust::make_counting_iterator(lists_entries.num_children()), std::back_inserter(output_struct_members), [structs_view = structs_column_view{lists_entries}, stream](auto const child_idx) { auto const col = structs_view.get_sliced_child(child_idx); return type_dispatcher(col.type(), detail::replace_negative_nans_dispatch{}, col, stream); }); return cudf::make_structs_column(lists_entries.size(), std::move(output_struct_members), lists_entries.null_count(), cudf::detail::copy_bitmask(lists_entries, stream), stream); } }; /** * @brief Generate a 0-based offset column for a lists column. * * Given a lists_column_view, which may have a non-zero offset, generate a new column containing * 0-based list offsets. This is done by subtracting each of the input list offset by the first * offset. * * @code{.pseudo} * Given a list column having offsets = { 3, 7, 9, 13 }, * then output_offsets = { 0, 4, 6, 10 } * @endcode * * @param lists_column The input lists column. * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Device resource used to allocate memory. 
* @return A column containing 0-based list offsets. */ std::unique_ptr<column> generate_clean_offsets(lists_column_view const& lists_column, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto output_offsets = make_numeric_column(data_type{type_to_id<offset_type>()}, lists_column.size() + 1, mask_state::UNALLOCATED, stream, mr); thrust::transform( rmm::exec_policy(stream), lists_column.offsets_begin(), lists_column.offsets_end(), output_offsets->mutable_view().begin<offset_type>(), [first = lists_column.offsets_begin()] __device__(auto offset) { return offset - *first; }); return output_offsets; } /** * @brief Transform a given lists column to a new lists column in which all the list entries holding * -NaN value are replaced by (positive) NaN. * * Replacing -NaN by NaN is necessary before sorting (individual) lists because the sorting API is * using radix sort, which compares bits of the number thus it may separate -NaN by NaN to the two * ends of the result column. */ std::unique_ptr<column> replace_negative_nans_entries(column_view const& lists_entries, lists_column_view const& lists_column, rmm::cuda_stream_view stream) { // We need to copy the offsets column of the input lists_column. Since the input lists_column may // be sliced, we need to generate clean offsets (i.e., offsets starting from zero). auto new_offsets = generate_clean_offsets(lists_column, stream, rmm::mr::get_current_device_resource()); auto new_entries = type_dispatcher( lists_entries.type(), detail::replace_negative_nans_dispatch{}, lists_entries, stream); return make_lists_column( lists_column.size(), std::move(new_offsets), std::move(new_entries), lists_column.null_count(), cudf::detail::copy_bitmask( lists_column.parent(), stream, rmm::mr::get_current_device_resource())); } /** * @brief Populate list offsets for all list entries. * * Given an `offsets` column_view containing offsets of a lists column and a number of all list * entries in the column, generate an array that maps from each list entry to the offset of the list * containing that entry. * * @code{.pseudo} * num_entries = 10, offsets = { 0, 4, 6, 10 } * output = { 1, 1, 1, 1, 2, 2, 3, 3, 3, 3 } * @endcode * * @param num_entries The number of list entries. * @param offsets Column view to the list offsets. * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Device resource used to allocate memory. * @return A column containing entry list offsets. */ std::unique_ptr<column> generate_entry_list_offsets(size_type num_entries, column_view const& offsets, rmm::cuda_stream_view stream) { auto entry_list_offsets = make_numeric_column(offsets.type(), num_entries, mask_state::UNALLOCATED, stream, rmm::mr::get_current_device_resource()); thrust::upper_bound(rmm::exec_policy(stream), offsets.begin<offset_type>(), offsets.end<offset_type>(), thrust::make_counting_iterator<offset_type>(0), thrust::make_counting_iterator<offset_type>(num_entries), entry_list_offsets->mutable_view().begin<offset_type>()); return entry_list_offsets; } /** * @brief Performs an equality comparison between two entries in a lists column. * * For the two elements that are NOT in the same list in the lists column, they will always be * considered as different. If they are from the same list and their type is not floating point, * this functor will return the same comparison result as `cudf::element_equality_comparator`. 
* * For floating-point types, entries holding NaN value can be considered as different values or the * same value depending on the `nans_equal` parameter. * * @tparam Type The data type of entries * @tparam nans_equal Flag to specify whether NaN entries should be considered as equal value (only * applicable for floating-point data column) */ template <class Type> struct column_row_comparator_fn { offset_type const* const list_offsets; column_device_view const lhs; column_device_view const rhs; null_equality const nulls_equal; bool const has_nulls; bool const nans_equal; __host__ __device__ column_row_comparator_fn(offset_type const* const list_offsets, column_device_view const& lhs, column_device_view const& rhs, null_equality const nulls_equal, bool const has_nulls, bool const nans_equal) : list_offsets(list_offsets), lhs(lhs), rhs(rhs), nulls_equal(nulls_equal), has_nulls(has_nulls), nans_equal(nans_equal) { } template <typename T, std::enable_if_t<!cuda::std::is_floating_point_v<T>>* = nullptr> bool __device__ compare(T const& lhs_val, T const& rhs_val) const noexcept { return lhs_val == rhs_val; } template <typename T, std::enable_if_t<cuda::std::is_floating_point_v<T>>* = nullptr> bool __device__ compare(T const& lhs_val, T const& rhs_val) const noexcept { // If both element(i) and element(j) are NaNs and nans are considered as equal value then this // comparison will return `true`. This is the desired behavior in Pandas. if (nans_equal && std::isnan(lhs_val) && std::isnan(rhs_val)) { return true; } // If nans are considered as NOT equal, even both element(i) and element(j) are NaNs this // comparison will still return `false`. This is the desired behavior in Apache Spark. return lhs_val == rhs_val; } bool __device__ operator()(size_type i, size_type j) const noexcept { // Two entries are not considered for equality if they belong to different lists. if (list_offsets[i] != list_offsets[j]) { return false; } if (has_nulls) { bool const lhs_is_null{lhs.nullable() && lhs.is_null_nocheck(i)}; bool const rhs_is_null{rhs.nullable() && rhs.is_null_nocheck(j)}; if (lhs_is_null && rhs_is_null) { return nulls_equal == null_equality::EQUAL; } else if (lhs_is_null != rhs_is_null) { return false; } } return compare<Type>(lhs.element<Type>(i), lhs.element<Type>(j)); } }; /** * @brief Struct used in type_dispatcher for comparing two entries in a lists column. 
*/ struct column_row_comparator_dispatch { offset_type const* const list_offsets; column_device_view const lhs; column_device_view const rhs; null_equality const nulls_equal; bool const has_nulls; bool const nans_equal; __device__ column_row_comparator_dispatch(offset_type const* const list_offsets, column_device_view const& lhs, column_device_view const& rhs, null_equality const nulls_equal, bool const has_nulls, bool const nans_equal) : list_offsets(list_offsets), lhs(lhs), rhs(rhs), nulls_equal(nulls_equal), has_nulls(has_nulls), nans_equal(nans_equal) { } template <class Type, std::enable_if_t<cudf::is_equality_comparable<Type, Type>()>* = nullptr> bool __device__ operator()(size_type i, size_type j) const noexcept { return column_row_comparator_fn<Type>{ list_offsets, lhs, rhs, nulls_equal, has_nulls, nans_equal}(i, j); } template <class Type, std::enable_if_t<!cudf::is_equality_comparable<Type, Type>()>* = nullptr> bool operator()(size_type i, size_type j) const { CUDF_FAIL( "`column_row_comparator_dispatch` cannot operate on types that are not equally comparable."); } }; /** * @brief Performs an equality comparison between rows of two tables using `column_row_comparator` * to compare rows of their corresponding columns. */ struct table_row_comparator_fn { offset_type const* const list_offsets; table_device_view const lhs; table_device_view const rhs; null_equality const nulls_equal; bool const has_nulls; bool const nans_equal; table_row_comparator_fn(offset_type const* const list_offsets, table_device_view const& lhs, table_device_view const& rhs, null_equality const nulls_equal, bool const has_nulls, bool const nans_equal) : list_offsets(list_offsets), lhs(lhs), rhs(rhs), nulls_equal(nulls_equal), has_nulls(has_nulls), nans_equal(nans_equal) { } bool __device__ operator()(size_type i, size_type j) const noexcept { auto column_comp = [=](column_device_view const& lhs, column_device_view const& rhs) { return type_dispatcher( lhs.type(), column_row_comparator_dispatch{list_offsets, lhs, rhs, nulls_equal, has_nulls, nans_equal}, i, j); }; return thrust::equal(thrust::seq, lhs.begin(), lhs.end(), rhs.begin(), column_comp); } }; /** * @brief Struct used in type_dispatcher for copying indices of the list entries ignoring * duplicates. 
*/ struct get_unique_entries_dispatch { template <class Type, std::enable_if_t<!cudf::is_equality_comparable<Type, Type>() && !std::is_same_v<Type, cudf::struct_view>>* = nullptr> offset_type* operator()(offset_type const*, column_view const&, size_type, offset_type*, null_equality, nan_equality, bool, rmm::cuda_stream_view) const { CUDF_FAIL( "`get_unique_entries_dispatch` cannot operate on types that are not equally comparable."); } template <class Type, std::enable_if_t<cudf::is_equality_comparable<Type, Type>()>* = nullptr> offset_type* operator()(offset_type const* list_offsets, column_view const& all_lists_entries, size_type num_entries, offset_type* output_begin, null_equality nulls_equal, nan_equality nans_equal, bool has_nulls, rmm::cuda_stream_view stream) const noexcept { auto const d_view = column_device_view::create(all_lists_entries, stream); auto const comp = column_row_comparator_fn<Type>{list_offsets, *d_view, *d_view, nulls_equal, has_nulls, nans_equal == nan_equality::ALL_EQUAL}; return thrust::unique_copy(rmm::exec_policy(stream), thrust::make_counting_iterator(0), thrust::make_counting_iterator(num_entries), output_begin, comp); } template <class Type, std::enable_if_t<std::is_same_v<Type, cudf::struct_view>>* = nullptr> offset_type* operator()(offset_type const* list_offsets, column_view const& all_lists_entries, size_type num_entries, offset_type* output_begin, null_equality nulls_equal, nan_equality nans_equal, bool has_nulls, rmm::cuda_stream_view stream) const noexcept { auto const entries_tview = table_view{{all_lists_entries}}; auto const flatten_nullability = has_nested_nulls(entries_tview) ? structs::detail::column_nullability::FORCE : structs::detail::column_nullability::MATCH_INCOMING; auto const entries_flattened = cudf::structs::detail::flatten_nested_columns( entries_tview, {order::ASCENDING}, {null_order::AFTER}, flatten_nullability); auto const d_view = table_device_view::create(std::get<0>(entries_flattened), stream); auto const comp = table_row_comparator_fn{list_offsets, *d_view, *d_view, nulls_equal, has_nulls, nans_equal == nan_equality::ALL_EQUAL}; return thrust::unique_copy(rmm::exec_policy(stream), thrust::make_counting_iterator(0), thrust::make_counting_iterator(num_entries), output_begin, comp); } }; /** * @brief Copy list entries and entry list offsets ignoring duplicates. * * Given an array of all entries flattened from a list column and an array that maps each entry to * the offset of the list containing that entry, those entries and list offsets are copied into * new arrays such that the duplicated entries within each list will be ignored. * * @param all_lists_entries The input array containing all list entries. * @param entries_list_offsets A map from list entries to their corresponding list offsets. * @param nulls_equal Flag to specify whether null entries should be considered equal. * @param nans_equal Flag to specify whether NaN entries should be considered equal * (only applicable for floating-point data column). * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Device resource used to allocate memory. * @return A pair of columns, the first one contains unique list entries and the second one * contains their corresponding list offsets. 
*/ std::vector<std::unique_ptr<column>> get_unique_entries_and_list_offsets( column_view const& all_lists_entries, column_view const& entries_list_offsets, null_equality nulls_equal, nan_equality nans_equal, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto const num_entries = all_lists_entries.size(); // Allocate memory to store the indices of the unique entries. auto unique_indices = rmm::device_uvector<offset_type>(num_entries, stream); auto const output_begin = unique_indices.begin(); auto const output_end = type_dispatcher(all_lists_entries.type(), get_unique_entries_dispatch{}, entries_list_offsets.begin<offset_type>(), all_lists_entries, num_entries, output_begin, nulls_equal, nans_equal, all_lists_entries.has_nulls(), stream); // Collect unique entries and entry list offsets. // The new null_count and bitmask of the unique entries will also be generated // by the gather function. return cudf::detail::gather(table_view{{all_lists_entries, entries_list_offsets}}, output_begin, output_end, cudf::out_of_bounds_policy::DONT_CHECK, stream, mr) ->release(); } /** * @brief Generate list offsets from entry offsets. * * Generate an array of list offsets for the final result lists column. The list offsets of the * original lists column are also taken into account to make sure the result lists column will have * the same empty list rows (if any) as in the original lists column. * * @param num_entries The number of unique entries after removing duplicates. * @param entries_list_offsets The mapping from list entries to their list offsets. * @param original_offsets The list offsets of the original lists column, which will also be used to * store the new list offsets. * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Device resource used to allocate memory. */ void generate_offsets(size_type num_entries, column_view const& entries_list_offsets, mutable_column_view const& original_offsets, rmm::cuda_stream_view stream) { // Firstly, generate temporary list offsets for the unique entries, ignoring empty lists (if any). // If entries_list_offsets = {1, 1, 1, 1, 2, 3, 3, 3, 4, 4 }, num_entries = 10, // then new_offsets = { 0, 4, 5, 8, 10 }. auto const new_offsets = allocate_like( original_offsets, mask_allocation_policy::NEVER, rmm::mr::get_current_device_resource()); thrust::copy_if(rmm::exec_policy(stream), thrust::make_counting_iterator<offset_type>(0), thrust::make_counting_iterator<offset_type>(num_entries + 1), new_offsets->mutable_view().begin<offset_type>(), [num_entries, offsets_ptr = entries_list_offsets.begin<offset_type>()] __device__( auto i) -> bool { return i == 0 || i == num_entries || offsets_ptr[i] != offsets_ptr[i - 1]; }); // Generate a prefix sum of number of empty lists, storing inplace to the original lists // offsets. // If the original list offsets is { 0, 0, 5, 5, 6, 6 } (there are 2 empty lists), // and new_offsets = { 0, 4, 6 }, then output = { 0, 1, 1, 2, 2, 3}. auto const iter_trans_begin = cudf::detail::make_counting_transform_iterator( 0, [offsets = original_offsets.begin<offset_type>()] __device__(auto i) { return (i > 0 && offsets[i] == offsets[i - 1]) ? 1 : 0; }); thrust::inclusive_scan(rmm::exec_policy(stream), iter_trans_begin, iter_trans_begin + original_offsets.size(), original_offsets.begin<offset_type>()); // Generate the final list offsets. 
// If the original list offsets are { 0, 0, 5, 5, 6, 6 }, the new offsets are { 0, 4, 6 }, // and the prefix sums of empty lists are { 0, 1, 1, 2, 2, 3 }, // then output = { 0, 0, 4, 4, 5, 5 }. thrust::transform(rmm::exec_policy(stream), thrust::make_counting_iterator<offset_type>(0), thrust::make_counting_iterator<offset_type>(original_offsets.size()), original_offsets.begin<offset_type>(), [prefix_sum_empty_lists = original_offsets.begin<offset_type>(), offsets = new_offsets->view().begin<offset_type>()] __device__(auto i) { return offsets[i - prefix_sum_empty_lists[i]]; }); } } // anonymous namespace /** * @copydoc cudf::lists::drop_list_duplicates * * @param stream CUDA stream used for device memory operations and kernel launches. */ std::unique_ptr<column> drop_list_duplicates(lists_column_view const& lists_column, null_equality nulls_equal, nan_equality nans_equal, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { if (lists_column.is_empty()) return cudf::empty_like(lists_column.parent()); if (auto const child_type = lists_column.child().type(); cudf::is_nested(child_type) && child_type.id() != type_id::STRUCT) { CUDF_FAIL("Nested types other than STRUCT are not supported in `drop_list_duplicates`."); } // Flatten all entries (depth = 1) of the lists column. auto const lists_entries = lists_column.get_sliced_child(stream); // sorted_lists will store the results of the original lists after calling segmented_sort. auto const sorted_lists = [&]() { // If nans_equal == ALL_EQUAL and the column contains lists of floating-point data type, // we need to replace -NaN by NaN before sorting. auto const replace_negative_nan = nans_equal == nan_equality::ALL_EQUAL && type_dispatcher( lists_entries.type(), detail::has_negative_nans_dispatch{}, lists_entries, stream); if (replace_negative_nan) { auto const new_lists_column = detail::replace_negative_nans_entries(lists_entries, lists_column, stream); return detail::sort_lists( lists_column_view(new_lists_column->view()), order::ASCENDING, null_order::AFTER, stream); } else { return detail::sort_lists(lists_column, order::ASCENDING, null_order::AFTER, stream); } }(); auto const sorted_lists_entries = lists_column_view(sorted_lists->view()).get_sliced_child(stream); // Generate a 0-based offset column. auto lists_offsets = detail::generate_clean_offsets(lists_column, stream, mr); // Generate a mapping from list entries to offsets of the lists containing those entries. auto const entries_list_offsets = detail::generate_entry_list_offsets(sorted_lists_entries.size(), lists_offsets->view(), stream); // Copy non-duplicated entries (along with their list offsets) to new arrays. auto unique_entries_and_list_offsets = detail::get_unique_entries_and_list_offsets( sorted_lists_entries, entries_list_offsets->view(), nulls_equal, nans_equal, stream, mr); // Generate offsets for the new lists column. detail::generate_offsets(unique_entries_and_list_offsets.front()->size(), unique_entries_and_list_offsets.back()->view(), lists_offsets->mutable_view(), stream); // Construct a new lists column without duplicated entries. // Reuse the null_count and bitmask of the lists_column: those are the null information for // the list elements (rows). // For the entries of those lists (rows), their null_count and bitmask were generated separately // during the step `get_unique_entries_and_list_offsets` above. 
return make_lists_column(lists_column.size(), std::move(lists_offsets), std::move(unique_entries_and_list_offsets.front()), lists_column.null_count(), cudf::detail::copy_bitmask(lists_column.parent(), stream, mr)); } } // namespace detail /** * @copydoc cudf::lists::drop_list_duplicates */ std::unique_ptr<column> drop_list_duplicates(lists_column_view const& lists_column, null_equality nulls_equal, nan_equality nans_equal, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::drop_list_duplicates( lists_column, nulls_equal, nans_equal, rmm::cuda_stream_default, mr); } } // namespace lists } // namespace cudf
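// Illustrative behaviour sketch for drop_list_duplicates as implemented above (the input
// values here are made up for illustration; entries come back sorted because each list is
// segment-sorted ascending with nulls after before unique_copy, and empty lists are kept
// by generate_offsets):
//
// @code{.pseudo}
// input  = { {3, 2, 2, 3, 1}, {}, {4, 4, 4}, {5, null, 5} }   // nulls_equal == EQUAL
// output = { {1, 2, 3},       {}, {4},       {5, null} }
// @endcode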
6f3cff19baf7493daf67a98d2687e37012da3da9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <torch/serialize/tensor.h>
#include <ATen/ATen.h>
#include "ATen/NativeFunctions.h"
#include <ATen/hip/HIPContext.h>

template <typename scalar>
__device__ __forceinline__ scalar fmin(scalar a, scalar b)
{
  return a > b ? b : a;
}

template <typename scalar>
__device__ __forceinline__ scalar fmax(scalar a, scalar b)
{
  return a > b ? a : b;
}

template <typename scalar>
__device__ __forceinline__ scalar IoU(const scalar* box_x, const scalar* box_y)
{
  // Calculate IoU between the boxes.
  scalar rightmost_l = fmax(box_x[0], box_y[0]);
  scalar leftmost_r = fmin(box_x[0] + box_x[2], box_y[0] + box_y[2]);
  scalar delta_x = fmax((scalar)0., leftmost_r - rightmost_l);
  scalar bottommost_tp = fmax(box_x[1], box_y[1]);
  scalar topmost_b = fmin(box_x[1] + box_x[3], box_y[1] + box_y[3]);
  scalar delta_y = fmax((scalar)0., topmost_b - bottommost_tp);
  scalar uni = box_x[2] * box_x[3] + box_y[2] * box_y[3];
  return delta_x * delta_y / (uni - delta_x * delta_y);
}

template <typename scalar>
__global__ void nms_kernel(unsigned char* mask,
                           const scalar* boxes,
                           const int64_t* inds,
                           const int64_t num_boxes,
                           double thresh)
{
  //A pretty straightforward implementation, analogous to the standard serial
  //version but with the IoUs computed and mask updated in parallel. We access
  //the box data through an array of sorted indices rather than physically
  //sorting it: unless one has an inordinate number of boxes (O(10^5), whereas
  //for example in the faster rcnn paper they feed 6000 per batch) the
  //data will fit in L2 so sorting it won't actually reduce the number of
  //messy reads from global.
  int col = 0;
  while(col < num_boxes-1)
  {
    for(int i = threadIdx.x; i < num_boxes-1; i+=blockDim.x)
      if(i >= col)
      {
        scalar iou = IoU(&boxes[4*inds[i+1+num_boxes*blockIdx.x] + 4*num_boxes*blockIdx.x],
                         &boxes[4*inds[col+num_boxes*blockIdx.x] + 4*num_boxes*blockIdx.x]);
        mask[i+1+blockIdx.x*num_boxes] *= (iou>thresh) ? 0 : 1;
      }
    __syncthreads();
    ++col;
    while((col < num_boxes - 1) && (mask[col+blockIdx.x*num_boxes]==0))
      ++col;
  }
}

std::vector<at::Tensor> Non_Max_Suppression_CUDA(
  const at::Tensor& input,
  const at::Tensor& scores,
  double thresh)
{
  AT_ASSERT(input.ndimension() == 3);
  AT_ASSERT(scores.ndimension() == 2);
  AT_ASSERT(input.size(0) == scores.size(0));
  AT_ASSERT(input.size(1) == scores.size(1));
  AT_ASSERT(input.size(2) == 4);
  AT_ASSERT(input.is_contiguous());
  AT_ASSERT(scores.is_contiguous());
  AT_ASSERT(input.type().scalarType() == at::kFloat || input.type().scalarType() == at::kDouble);
  AT_ASSERT(scores.type().scalarType() == at::kFloat || scores.type().scalarType() == at::kDouble);

  auto num_boxes = input.size(1);
  auto batch_size = input.size(0);
  //auto mask = input.type().toScalarType(at::kByte).tensor({batch_size, num_boxes});
  auto mask = torch::zeros({batch_size, num_boxes}, input.type().toScalarType(at::kByte));
  mask.fill_(1);

  //need the indices of the boxes sorted by score.
  at::Tensor sorted_inds = std::get<1>(scores.sort(-1, true));

  dim3 mask_block(512); //would be nice to have 1024 here for gpus that support it,
                        //but not sure how to do this cleanly without calling
                        //hipGetDeviceProperties in the function body...
  dim3 mask_grid(batch_size);
  if(input.type().scalarType() == at::kFloat)
  {
    hipLaunchKernelGGL(( nms_kernel), dim3(mask_grid), dim3(mask_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
                       mask.data<unsigned char>(),
                       input.data<float>(),
                       sorted_inds.data<int64_t>(),
                       num_boxes,
                       thresh);
    AT_ASSERT(hipGetLastError() == hipSuccess);
  }
  else
  {
    hipLaunchKernelGGL(( nms_kernel), dim3(mask_grid), dim3(mask_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
                       mask.data<unsigned char>(),
                       input.data<double>(),
                       sorted_inds.data<int64_t>(),
                       num_boxes,
                       thresh);
    AT_ASSERT(hipGetLastError() == hipSuccess);
  }

  //It's not entirely clear what the best thing to return is here. The algorithm will
  //produce a different number of boxes for each batch, so there is no obvious way of
  //returning the surviving boxes/indices as a tensor. Returning a mask on the
  //sorted boxes together with the sorted indices seems reasonable; that way, the user
  //can easily take the N highest-scoring surviving boxes to form a tensor if they wish.
  return {mask, sorted_inds};
}
6f3cff19baf7493daf67a98d2687e37012da3da9.cu
#include <torch/serialize/tensor.h>
#include <ATen/ATen.h>
#include "ATen/NativeFunctions.h"
#include <ATen/cuda/CUDAContext.h>

template <typename scalar>
__device__ __forceinline__ scalar fmin(scalar a, scalar b)
{
  return a > b ? b : a;
}

template <typename scalar>
__device__ __forceinline__ scalar fmax(scalar a, scalar b)
{
  return a > b ? a : b;
}

template <typename scalar>
__device__ __forceinline__ scalar IoU(const scalar* box_x, const scalar* box_y)
{
  // Calculate IoU between the boxes.
  scalar rightmost_l = fmax(box_x[0], box_y[0]);
  scalar leftmost_r = fmin(box_x[0] + box_x[2], box_y[0] + box_y[2]);
  scalar delta_x = fmax((scalar)0., leftmost_r - rightmost_l);
  scalar bottommost_tp = fmax(box_x[1], box_y[1]);
  scalar topmost_b = fmin(box_x[1] + box_x[3], box_y[1] + box_y[3]);
  scalar delta_y = fmax((scalar)0., topmost_b - bottommost_tp);
  scalar uni = box_x[2] * box_x[3] + box_y[2] * box_y[3];
  return delta_x * delta_y / (uni - delta_x * delta_y);
}

template <typename scalar>
__global__ void nms_kernel(unsigned char* mask,
                           const scalar* boxes,
                           const int64_t* inds,
                           const int64_t num_boxes,
                           double thresh)
{
  //A pretty straightforward implementation, analogous to the standard serial
  //version but with the IoUs computed and mask updated in parallel. We access
  //the box data through an array of sorted indices rather than physically
  //sorting it: unless one has an inordinate number of boxes (O(10^5), whereas
  //for example in the faster rcnn paper they feed 6000 per batch) the
  //data will fit in L2 so sorting it won't actually reduce the number of
  //messy reads from global.
  int col = 0;
  while(col < num_boxes-1)
  {
    for(int i = threadIdx.x; i < num_boxes-1; i+=blockDim.x)
      if(i >= col)
      {
        scalar iou = IoU(&boxes[4*inds[i+1+num_boxes*blockIdx.x] + 4*num_boxes*blockIdx.x],
                         &boxes[4*inds[col+num_boxes*blockIdx.x] + 4*num_boxes*blockIdx.x]);
        mask[i+1+blockIdx.x*num_boxes] *= (iou>thresh) ? 0 : 1;
      }
    __syncthreads();
    ++col;
    while((col < num_boxes - 1) && (mask[col+blockIdx.x*num_boxes]==0))
      ++col;
  }
}

std::vector<at::Tensor> Non_Max_Suppression_CUDA(
  const at::Tensor& input,
  const at::Tensor& scores,
  double thresh)
{
  AT_ASSERT(input.ndimension() == 3);
  AT_ASSERT(scores.ndimension() == 2);
  AT_ASSERT(input.size(0) == scores.size(0));
  AT_ASSERT(input.size(1) == scores.size(1));
  AT_ASSERT(input.size(2) == 4);
  AT_ASSERT(input.is_contiguous());
  AT_ASSERT(scores.is_contiguous());
  AT_ASSERT(input.type().scalarType() == at::kFloat || input.type().scalarType() == at::kDouble);
  AT_ASSERT(scores.type().scalarType() == at::kFloat || scores.type().scalarType() == at::kDouble);

  auto num_boxes = input.size(1);
  auto batch_size = input.size(0);
  //auto mask = input.type().toScalarType(at::kByte).tensor({batch_size, num_boxes});
  auto mask = torch::zeros({batch_size, num_boxes}, input.type().toScalarType(at::kByte));
  mask.fill_(1);

  //need the indices of the boxes sorted by score.
  at::Tensor sorted_inds = std::get<1>(scores.sort(-1, true));

  dim3 mask_block(512); //would be nice to have 1024 here for gpus that support it,
                        //but not sure how to do this cleanly without calling
                        //cudaGetDeviceProperties in the function body...
  dim3 mask_grid(batch_size);
  if(input.type().scalarType() == at::kFloat)
  {
    nms_kernel<<<mask_grid, mask_block, 0, at::cuda::getCurrentCUDAStream()>>>(
                       mask.data<unsigned char>(),
                       input.data<float>(),
                       sorted_inds.data<int64_t>(),
                       num_boxes,
                       thresh);
    AT_ASSERT(cudaGetLastError() == cudaSuccess);
  }
  else
  {
    nms_kernel<<<mask_grid, mask_block, 0, at::cuda::getCurrentCUDAStream()>>>(
                       mask.data<unsigned char>(),
                       input.data<double>(),
                       sorted_inds.data<int64_t>(),
                       num_boxes,
                       thresh);
    AT_ASSERT(cudaGetLastError() == cudaSuccess);
  }

  //It's not entirely clear what the best thing to return is here. The algorithm will
  //produce a different number of boxes for each batch, so there is no obvious way of
  //returning the surviving boxes/indices as a tensor. Returning a mask on the
  //sorted boxes together with the sorted indices seems reasonable; that way, the user
  //can easily take the N highest-scoring surviving boxes to form a tensor if they wish.
  return {mask, sorted_inds};
}
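// Illustrative interpretation of the outputs (the numbers below are made up; batch size 1):
//   scores              = {0.9, 0.8, 0.7}            -> sorted_inds = {0, 1, 2}
//   IoU(box0, box1) = 0.8, IoU(box0, box2) = 0.1, thresh = 0.5
//   mask (sorted order) = {1, 0, 1}                  -> boxes 0 and 2 survive, box 1 is suppressed
// A caller could then gather the survivors, e.g. (sketch, assuming the targeted libtorch
// version still accepts a byte mask in masked_select):
//   auto out  = Non_Max_Suppression_CUDA(boxes, scores, 0.5);
//   auto keep = out[1].masked_select(out[0]);        // original indices of survivors, best first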
2d33881be5bbc06457de2d5bcde373a8eabd9eb8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* compile command compile : nvcc -arch=sm_35 a.cu -o a.out profile : nvprof -o log1.o ./a.out view : nvvp log1.o */ #include <cstdio> __global__ void init_data_kernel( int n, double* x){ int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < n ) { x[i] = n - i; } } __global__ void daxpy_kernel(int n, double a, double * x, double * y){ int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) { y[i] = a*x[i] + y[i]; } } __global__ void check_results_kernel( int n, double correctvalue, double * x ){ int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) { if ( x[i] != correctvalue ) { printf("ERROR at index = %d, expected = %f, actual: %f\n",i,correctvalue,x[i]); } } } void init_host_data( int n, double * x ){ for (int i=0; i<n; ++i) { x[i] = i; } } void init_data(int n, double* x, double* x_d, double* y_d){ hipStream_t copy_stream; hipStream_t compute_stream; hipStreamCreate(&copy_stream); hipStreamCreate(&compute_stream); hipMemcpyAsync( x_d, x, n*sizeof(double), hipMemcpyDefault, copy_stream ); hipLaunchKernelGGL(( init_data_kernel), dim3(ceil(n/256)),dim3(256),0,compute_stream, n, y_d); hipStreamSynchronize(copy_stream); hipStreamSynchronize(compute_stream); hipStreamDestroy(compute_stream); hipStreamDestroy(copy_stream); } void daxpy(int n, double a, double* x_d, double* y_d){ hipLaunchKernelGGL(( daxpy_kernel), dim3(ceil(n/256)),dim3(256), 0, 0, n,a,x_d,y_d); hipDeviceSynchronize(); } void check_results( int n, double correctvalue, double* x_d ){ hipLaunchKernelGGL(( check_results_kernel), dim3(ceil(n/256)),dim3(256), 0, 0, n,correctvalue,x_d); } void run_test(int n){ double* x; double* x_d; double* y_d; hipSetDevice(0); hipHostMalloc((void**) &x, n*sizeof(double)); hipMalloc((void**)&x_d,n*sizeof(double)); hipMalloc((void**)&y_d,n*sizeof(double)); init_host_data(n, x); init_data(n,x,x_d,y_d); daxpy(n,1.0,x_d,y_d); check_results(n, n, y_d); hipFree(y_d); hipFree(x_d); hipHostFree(x); hipDeviceSynchronize(); } int main(){ int n = 1<<22; run_test(n); return 0; }
2d33881be5bbc06457de2d5bcde373a8eabd9eb8.cu
/* compile command compile : nvcc -arch=sm_35 a.cu -o a.out profile : nvprof -o log1.o ./a.out view : nvvp log1.o */ #include <cstdio> __global__ void init_data_kernel( int n, double* x){ int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < n ) { x[i] = n - i; } } __global__ void daxpy_kernel(int n, double a, double * x, double * y){ int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) { y[i] = a*x[i] + y[i]; } } __global__ void check_results_kernel( int n, double correctvalue, double * x ){ int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) { if ( x[i] != correctvalue ) { printf("ERROR at index = %d, expected = %f, actual: %f\n",i,correctvalue,x[i]); } } } void init_host_data( int n, double * x ){ for (int i=0; i<n; ++i) { x[i] = i; } } void init_data(int n, double* x, double* x_d, double* y_d){ cudaStream_t copy_stream; cudaStream_t compute_stream; cudaStreamCreate(&copy_stream); cudaStreamCreate(&compute_stream); cudaMemcpyAsync( x_d, x, n*sizeof(double), cudaMemcpyDefault, copy_stream ); init_data_kernel<<<ceil(n/256),256,0,compute_stream>>>(n, y_d); cudaStreamSynchronize(copy_stream); cudaStreamSynchronize(compute_stream); cudaStreamDestroy(compute_stream); cudaStreamDestroy(copy_stream); } void daxpy(int n, double a, double* x_d, double* y_d){ daxpy_kernel<<<ceil(n/256),256>>>(n,a,x_d,y_d); cudaDeviceSynchronize(); } void check_results( int n, double correctvalue, double* x_d ){ check_results_kernel<<<ceil(n/256),256>>>(n,correctvalue,x_d); } void run_test(int n){ double* x; double* x_d; double* y_d; cudaSetDevice(0); cudaMallocHost((void**) &x, n*sizeof(double)); cudaMalloc((void**)&x_d,n*sizeof(double)); cudaMalloc((void**)&y_d,n*sizeof(double)); init_host_data(n, x); init_data(n,x,x_d,y_d); daxpy(n,1.0,x_d,y_d); check_results(n, n, y_d); cudaFree(y_d); cudaFree(x_d); cudaFreeHost(x); cudaDeviceSynchronize(); } int main(){ int n = 1<<22; run_test(n); return 0; }
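// Grid-size note for the launches above: ceil(n/256) performs integer division before the
// ceil, so it only covers every element because n = 1<<22 happens to be a multiple of 256.
// A minimal sketch of the usual integer ceiling division (div_up is a hypothetical helper,
// not part of the original test):
inline unsigned int div_up(unsigned int n, unsigned int block)
{
  return (n + block - 1) / block;  // rounds up so the tail is covered when n % block != 0
}
// e.g. daxpy_kernel<<<div_up(n, 256), 256>>>(n, a, x_d, y_d);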
ae9eae474669ea2af3ad5c4c1926f6f43e1116d6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common.h" #include "common_cuda.h" #include "chemistry.h" #include "equilibrium_solver/minimizer_options.h" #include "equilibrium_solver/equilibrium_state.h" #include "equilibrium_solver/minimization_result_info_cuda.h" namespace equilibrium_solver { // make variables on the device visible #ifdef __CUDA_ARCH__ using namespace common_device; #else using namespace common; #endif using chemistry::ThermodynamicProperties; __global__ void minimization_assembly_kernel_fused(ThermodynamicProperties thermo_props, MinimizerOptions options, MinimizationResultInfoCuda& info, Vector<numeric_t, common::num_species>* xs, Vector<numeric_t, common::num_components>* ys, Vector<numeric_t, common::num_species>* zs, numeric_t* bs_ptr, numeric_t* Js, numeric_t* Fs) { int idx = threadIdx.x + blockIdx.x * blockDim.x; constexpr size_t m = formula_matrix_t::RowsAtCompileTime; constexpr size_t n = formula_matrix_t::ColsAtCompileTime; constexpr size_t p = m + n; constexpr size_t t = m + 2 * n; const numeric_t mu = options.mu; if (idx < info.n_active) { int cidx = info.active_indices[idx]; // initialize variables Map<component_amounts_t> b(bs_ptr+cidx*num_components); auto& A = formula_matrix; auto& x = xs[cidx]; auto& y = ys[cidx]; auto& z = zs[cidx]; // compute gibbs energy ObjectiveResult obj_res = gibbs_energy(thermo_props, x); auto& g = obj_res.g; auto& H = obj_res.H; // assemble vector Map<Vector<numeric_t, t>> F(Fs+cidx*t); F.head(n) = g - A.transpose()*y - z; F.segment(n, m) = A*x - b; F.tail(n) = (x.array() * z.array()).matrix() - mu*Vector<numeric_t, common::num_species>::Ones(); F = -F; // calculate error numeric_t error = F.template lpNorm<Infinity>(); info.error[cidx] = error; info.converged[cidx] = error < options.tol; // assemble matrix Map<Matrix<numeric_t, t, t>> J(Js+cidx*t*t); if (!info.converged[cidx]) { J.setConstant(0); J.block(0, 0, n, n) = H; J.block(0, n, n, p-n) = -A.transpose(); J.block(0, t-n, n, n).diagonal().setConstant(-1); J.block(n, 0, p-n, n) = A; J.block(t-n, 0, n, n).diagonal() = z; J.block(t-n, t-n, n, n).diagonal() = x; ++info.iterations[cidx]; } } } }
ae9eae474669ea2af3ad5c4c1926f6f43e1116d6.cu
#include "common.h" #include "common_cuda.h" #include "chemistry.h" #include "equilibrium_solver/minimizer_options.h" #include "equilibrium_solver/equilibrium_state.h" #include "equilibrium_solver/minimization_result_info_cuda.h" namespace equilibrium_solver { // make variables on the device visible #ifdef __CUDA_ARCH__ using namespace common_device; #else using namespace common; #endif using chemistry::ThermodynamicProperties; __global__ void minimization_assembly_kernel_fused(ThermodynamicProperties thermo_props, MinimizerOptions options, MinimizationResultInfoCuda& info, Vector<numeric_t, common::num_species>* xs, Vector<numeric_t, common::num_components>* ys, Vector<numeric_t, common::num_species>* zs, numeric_t* bs_ptr, numeric_t* Js, numeric_t* Fs) { int idx = threadIdx.x + blockIdx.x * blockDim.x; constexpr size_t m = formula_matrix_t::RowsAtCompileTime; constexpr size_t n = formula_matrix_t::ColsAtCompileTime; constexpr size_t p = m + n; constexpr size_t t = m + 2 * n; const numeric_t mu = options.mu; if (idx < info.n_active) { int cidx = info.active_indices[idx]; // initialize variables Map<component_amounts_t> b(bs_ptr+cidx*num_components); auto& A = formula_matrix; auto& x = xs[cidx]; auto& y = ys[cidx]; auto& z = zs[cidx]; // compute gibbs energy ObjectiveResult obj_res = gibbs_energy(thermo_props, x); auto& g = obj_res.g; auto& H = obj_res.H; // assemble vector Map<Vector<numeric_t, t>> F(Fs+cidx*t); F.head(n) = g - A.transpose()*y - z; F.segment(n, m) = A*x - b; F.tail(n) = (x.array() * z.array()).matrix() - mu*Vector<numeric_t, common::num_species>::Ones(); F = -F; // calculate error numeric_t error = F.template lpNorm<Infinity>(); info.error[cidx] = error; info.converged[cidx] = error < options.tol; // assemble matrix Map<Matrix<numeric_t, t, t>> J(Js+cidx*t*t); if (!info.converged[cidx]) { J.setConstant(0); J.block(0, 0, n, n) = H; J.block(0, n, n, p-n) = -A.transpose(); J.block(0, t-n, n, n).diagonal().setConstant(-1); J.block(n, 0, p-n, n) = A; J.block(t-n, 0, n, n).diagonal() = z; J.block(t-n, t-n, n, n).diagonal() = x; ++info.iterations[cidx]; } } } }
3e566ec6afded5080b8e19f8dd29422ec6b3c922.hip
// !!! This is a file automatically generated by hipify!!! #define CUB_STDERR // print CUDA runtime errors to console #include <stdio.h> #include <hipcub/hipcub.hpp> #include <hipcub/hipcub.hpp> #include "test/test_util.h" using namespace cub; CachingDeviceAllocator g_allocator(true); // Caching allocator for device memory // CustomMin functor struct CustomMin { template <typename T> __host__ __device__ __forceinline__ T operator()(const T& a, const T& b) const { return (b < a) ? b : a; } }; int main(int argc, char** argv) { const int num_items = 10; // host-side data int h_keys_in[num_items] = { 0, 2, 2, 2, 10, 10, 4, 4, 4, 4 }; int h_values_in[num_items] = { -2, 4, 5, 2, 1, -1, 0, 2, -1, -1 }; // input data, on the device int* d_keys_in = NULL; int* d_values_in = NULL; // set up device input arrays CubDebugExit(g_allocator.DeviceAllocate((void**)& d_keys_in, sizeof(int) * num_items)); CubDebugExit(g_allocator.DeviceAllocate((void**)& d_values_in, sizeof(int) * num_items)); // set up data on the device CubDebugExit(hipMemcpy(d_keys_in, h_keys_in, sizeof(int) * num_items, hipMemcpyHostToDevice)); CubDebugExit(hipMemcpy(d_values_in, h_values_in, sizeof(int) * num_items, hipMemcpyHostToDevice)); // allocate device output arrays int* d_unique_out = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)& d_unique_out, sizeof(int) * num_items)); int* d_aggregates_out = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)& d_aggregates_out, sizeof(int) * num_items)); int* d_num_runs_out = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)& d_num_runs_out, sizeof(int))); CustomMin reduction_op; // get temporary device storage requirements void* d_temp_storage = NULL; size_t temp_storage_bytes = 0; hipcub::DeviceReduce::ReduceByKey(d_temp_storage, temp_storage_bytes, d_keys_in, d_unique_out, d_values_in, d_aggregates_out, d_num_runs_out, reduction_op, num_items); // allocate temporary storage CubDebugExit(g_allocator.DeviceAllocate((void**)& d_temp_storage, temp_storage_bytes)); // run the reduce-by-key operation hipcub::DeviceReduce::ReduceByKey(d_temp_storage, temp_storage_bytes, d_keys_in, d_unique_out, d_values_in, d_aggregates_out, d_num_runs_out, reduction_op, num_items); // get data back on the host; print out results int h_unique_out[num_items]; int h_aggregates_out[num_items]; int h_num_runs_out; CubDebugExit(hipMemcpy(&h_num_runs_out, d_num_runs_out, sizeof(int), hipMemcpyDeviceToHost)); CubDebugExit(hipMemcpy(h_unique_out, d_unique_out, sizeof(int) * h_num_runs_out, hipMemcpyDeviceToHost)); CubDebugExit(hipMemcpy(h_aggregates_out, d_aggregates_out, sizeof(int) * h_num_runs_out, hipMemcpyDeviceToHost)); for (int i = 0; i < h_num_runs_out; i++) std::cout << "i: " << i << "\tKey: " << h_unique_out[i] << "\tAggregate Value: " << h_aggregates_out[i] << std::endl; // cleanup if (d_keys_in) CubDebugExit(g_allocator.DeviceFree(d_keys_in)); if (d_values_in) CubDebugExit(g_allocator.DeviceFree(d_values_in)); if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage)); if (d_unique_out) CubDebugExit(g_allocator.DeviceFree(d_unique_out)); if (d_aggregates_out) CubDebugExit(g_allocator.DeviceFree(d_aggregates_out)); if (d_num_runs_out) CubDebugExit(g_allocator.DeviceFree(d_num_runs_out)); return 0; }
3e566ec6afded5080b8e19f8dd29422ec6b3c922.cu
#define CUB_STDERR // print CUDA runtime errors to console #include <stdio.h> #include <cub/util_allocator.cuh> #include <cub/device/device_reduce.cuh> #include "test/test_util.h" using namespace cub; CachingDeviceAllocator g_allocator(true); // Caching allocator for device memory // CustomMin functor struct CustomMin { template <typename T> __host__ __device__ __forceinline__ T operator()(const T& a, const T& b) const { return (b < a) ? b : a; } }; int main(int argc, char** argv) { const int num_items = 10; // host-side data int h_keys_in[num_items] = { 0, 2, 2, 2, 10, 10, 4, 4, 4, 4 }; int h_values_in[num_items] = { -2, 4, 5, 2, 1, -1, 0, 2, -1, -1 }; // input data, on the device int* d_keys_in = NULL; int* d_values_in = NULL; // set up device input arrays CubDebugExit(g_allocator.DeviceAllocate((void**)& d_keys_in, sizeof(int) * num_items)); CubDebugExit(g_allocator.DeviceAllocate((void**)& d_values_in, sizeof(int) * num_items)); // set up data on the device CubDebugExit(cudaMemcpy(d_keys_in, h_keys_in, sizeof(int) * num_items, cudaMemcpyHostToDevice)); CubDebugExit(cudaMemcpy(d_values_in, h_values_in, sizeof(int) * num_items, cudaMemcpyHostToDevice)); // allocate device output arrays int* d_unique_out = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)& d_unique_out, sizeof(int) * num_items)); int* d_aggregates_out = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)& d_aggregates_out, sizeof(int) * num_items)); int* d_num_runs_out = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)& d_num_runs_out, sizeof(int))); CustomMin reduction_op; // get temporary device storage requirements void* d_temp_storage = NULL; size_t temp_storage_bytes = 0; cub::DeviceReduce::ReduceByKey(d_temp_storage, temp_storage_bytes, d_keys_in, d_unique_out, d_values_in, d_aggregates_out, d_num_runs_out, reduction_op, num_items); // allocate temporary storage CubDebugExit(g_allocator.DeviceAllocate((void**)& d_temp_storage, temp_storage_bytes)); // run the reduce-by-key operation cub::DeviceReduce::ReduceByKey(d_temp_storage, temp_storage_bytes, d_keys_in, d_unique_out, d_values_in, d_aggregates_out, d_num_runs_out, reduction_op, num_items); // get data back on the host; print out results int h_unique_out[num_items]; int h_aggregates_out[num_items]; int h_num_runs_out; CubDebugExit(cudaMemcpy(&h_num_runs_out, d_num_runs_out, sizeof(int), cudaMemcpyDeviceToHost)); CubDebugExit(cudaMemcpy(h_unique_out, d_unique_out, sizeof(int) * h_num_runs_out, cudaMemcpyDeviceToHost)); CubDebugExit(cudaMemcpy(h_aggregates_out, d_aggregates_out, sizeof(int) * h_num_runs_out, cudaMemcpyDeviceToHost)); for (int i = 0; i < h_num_runs_out; i++) std::cout << "i: " << i << "\tKey: " << h_unique_out[i] << "\tAggregate Value: " << h_aggregates_out[i] << std::endl; // cleanup if (d_keys_in) CubDebugExit(g_allocator.DeviceFree(d_keys_in)); if (d_values_in) CubDebugExit(g_allocator.DeviceFree(d_values_in)); if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage)); if (d_unique_out) CubDebugExit(g_allocator.DeviceFree(d_unique_out)); if (d_aggregates_out) CubDebugExit(g_allocator.DeviceFree(d_aggregates_out)); if (d_num_runs_out) CubDebugExit(g_allocator.DeviceFree(d_num_runs_out)); return 0; }
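// Expected console output for the inputs above: CustomMin reduces each run of equal keys,
//   keys   {0} {2, 2, 2} {10, 10} {4, 4, 4, 4}
//   values {-2} {4, 5, 2} {1, -1} {0, 2, -1, -1}
// so h_num_runs_out = 4 and the program prints
//   i: 0    Key: 0     Aggregate Value: -2
//   i: 1    Key: 2     Aggregate Value: 2
//   i: 2    Key: 10    Aggregate Value: -1
//   i: 3    Key: 4     Aggregate Value: -1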
3c26aaa2de18b15245c21c22736a0176108a8e0d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<cuda_runtime.h> #include<device_launch_parameters.h> __global__ void add(int *A, int *B, int *C, int ha, int wa, int wb) { // Get the 1D Array index of the matrix int id = threadIdx.x; int sum; for (int i = 0; i < ha; ++i) { sum = 0; for (int j = 0; j < wa; ++j){ sum += (A[i*wa + j] * B[j*wb + id]); } C[i*wb + id] = sum; } } int main(){ int a[100], b[100], c[100], n1, m1, n2, m2; printf("Enter m1: "); scanf("%d",&m1); printf("Enter n1: "); scanf("%d",&n1); printf("Enter Matrix 1:\n"); for(int i=0;i<n1*m1;i++) scanf("%d",&a[i]); printf("Enter m2: "); scanf("%d",&m2); if (m2 != n1){ printf("cannot be multiplied\n"); exit(0); } printf("Enter n2: "); scanf("%d",&n2); printf("Enter Matrix 2:\n"); for(int i=0;i<n2*m2;i++) scanf("%d",&b[i]); int *d_a,*d_b,*d_c; hipMalloc((void**)&d_a,sizeof(int)*n1*m1); hipMalloc((void**)&d_b,sizeof(int)*n2*m2); hipMalloc((void**)&d_c,sizeof(int)*m1*n2); hipMemcpy(d_a,&a,sizeof(int)*n1*m1,hipMemcpyHostToDevice); hipMemcpy(d_b,&b,sizeof(int)*n2*m2,hipMemcpyHostToDevice); hipLaunchKernelGGL(( add), dim3(1), dim3(n2), 0, 0, d_a, d_b, d_c, m1, n1, n2); hipMemcpy(&c,d_c,sizeof(int)*n2*m1,hipMemcpyDeviceToHost); for(int i=0;i<m1*n2;i++){ if (i % n2 == 0) printf("\n"); printf("%d ",c[i]); } printf("\n"); hipFree(d_a); hipFree(d_b); }
3c26aaa2de18b15245c21c22736a0176108a8e0d.cu
#include<stdio.h> #include<cuda_runtime.h> #include<device_launch_parameters.h> __global__ void add(int *A, int *B, int *C, int ha, int wa, int wb) { // Get the 1D Array index of the matrix int id = threadIdx.x; int sum; for (int i = 0; i < ha; ++i) { sum = 0; for (int j = 0; j < wa; ++j){ sum += (A[i*wa + j] * B[j*wb + id]); } C[i*wb + id] = sum; } } int main(){ int a[100], b[100], c[100], n1, m1, n2, m2; printf("Enter m1: "); scanf("%d",&m1); printf("Enter n1: "); scanf("%d",&n1); printf("Enter Matrix 1:\n"); for(int i=0;i<n1*m1;i++) scanf("%d",&a[i]); printf("Enter m2: "); scanf("%d",&m2); if (m2 != n1){ printf("cannot be multiplied\n"); exit(0); } printf("Enter n2: "); scanf("%d",&n2); printf("Enter Matrix 2:\n"); for(int i=0;i<n2*m2;i++) scanf("%d",&b[i]); int *d_a,*d_b,*d_c; cudaMalloc((void**)&d_a,sizeof(int)*n1*m1); cudaMalloc((void**)&d_b,sizeof(int)*n2*m2); cudaMalloc((void**)&d_c,sizeof(int)*m1*n2); cudaMemcpy(d_a,&a,sizeof(int)*n1*m1,cudaMemcpyHostToDevice); cudaMemcpy(d_b,&b,sizeof(int)*n2*m2,cudaMemcpyHostToDevice); add<<<1, n2>>>(d_a, d_b, d_c, m1, n1, n2); cudaMemcpy(&c,d_c,sizeof(int)*n2*m1,cudaMemcpyDeviceToHost); for(int i=0;i<m1*n2;i++){ if (i % n2 == 0) printf("\n"); printf("%d ",c[i]); } printf("\n"); cudaFree(d_a); cudaFree(d_b); }
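// Worked example for the program above (2x2 case): with m1 = n1 = m2 = n2 = 2,
//   A = | 1 2 |   B = | 5 6 |   C = A x B = | 1*5+2*7  1*6+2*8 | = | 19 22 |
//       | 3 4 |       | 7 8 |               | 3*5+4*7  3*6+4*8 |   | 43 50 |
// The kernel is launched with <<<1, n2>>>, so each of the n2 threads owns one output column
// and loops over the m1 rows of A to fill it.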
fcc1bc8e857cb404f4b53173abe86ec33f5f7427.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // BlockDim = 16x16 //GridDim = w/16*h/16 extern "C" __global__ void InterleaveUV( unsigned char *yuv_cb, unsigned char *yuv_cr, unsigned char *nv12_chroma, int chroma_width, int chroma_height, int cb_pitch, int cr_pitch, int nv12_pitch ) { int x,y; unsigned char *pCb; unsigned char *pCr; unsigned char *pDst; x = blockIdx.x*blockDim.x+threadIdx.x; y = blockIdx.y*blockDim.y+threadIdx.y; if ((x < chroma_width) && (y < chroma_height)) { pCb = yuv_cb + (y*cb_pitch); pCr = yuv_cr + (y*cr_pitch); pDst = nv12_chroma + y*nv12_pitch; pDst[x << 1] = pCb[x]; pDst[(x << 1) + 1] = pCr[x]; } }
fcc1bc8e857cb404f4b53173abe86ec33f5f7427.cu
// BlockDim = 16x16 //GridDim = w/16*h/16 extern "C" __global__ void InterleaveUV( unsigned char *yuv_cb, unsigned char *yuv_cr, unsigned char *nv12_chroma, int chroma_width, int chroma_height, int cb_pitch, int cr_pitch, int nv12_pitch ) { int x,y; unsigned char *pCb; unsigned char *pCr; unsigned char *pDst; x = blockIdx.x*blockDim.x+threadIdx.x; y = blockIdx.y*blockDim.y+threadIdx.y; if ((x < chroma_width) && (y < chroma_height)) { pCb = yuv_cb + (y*cb_pitch); pCr = yuv_cr + (y*cr_pitch); pDst = nv12_chroma + y*nv12_pitch; pDst[x << 1] = pCb[x]; pDst[(x << 1) + 1] = pCr[x]; } }
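// Host-side launch sketch matching the "BlockDim = 16x16" comment at the top (the device
// pointer names below are placeholders for buffers the caller has already allocated and filled):
//
//   dim3 block(16, 16);
//   dim3 grid((chroma_width  + block.x - 1) / block.x,
//             (chroma_height + block.y - 1) / block.y);
//   InterleaveUV<<<grid, block>>>(d_cb, d_cr, d_nv12_chroma,
//                                 chroma_width, chroma_height,
//                                 cb_pitch, cr_pitch, nv12_pitch);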
8b1bde8a47ebefa988afa53f8fda8169b7678928.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* function for projecting lidar points * */ #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/sort.h> #include "../common.h" __global__ void LinearInterpolateKernel(const float* const imageIn, float* const out, const size_t height, const size_t width, const float* const x, const float* const y, const size_t numPoints){ unsigned int i = blockDim.x * blockIdx.x + threadIdx.x; if(i >= numPoints){ return; } if((x[i] < 0) || (y[i] < 0) || (x[i] >= (width-1)) || (y[i] >= (height-1))){ out[i] = 0; return; } int xF = (int)x[i]; int yF = (int)y[i]; float xD = x[i] - (float)xF; float yD = y[i] - (float)yF; //linear interpolate out[i] = (1-yD)*(1-xD)*imageIn[yF + xF*height] + (1-yD)*xD*imageIn[yF + (xF+1)*height] + yD*(1-xD)*imageIn[yF+1 + xF*height] + yD*xD*imageIn[yF+1 + (xF+1)*height]; } void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { //initialize the MathWorks GPU API. mxInitGPU(); //read data mxGPUArray const * image = mxGPUCreateFromMxArray(prhs[0]); mxGPUArray const * points = mxGPUCreateFromMxArray(prhs[1]); size_t imageWidth = mxGPUGetDimensions(image)[1]; size_t imageHeight = mxGPUGetDimensions(image)[0]; size_t numPoints = mxGPUGetDimensions(points)[0]; size_t imageDepth = 1; if(mxGPUGetNumberOfDimensions(image) > 2){ imageDepth = mxGPUGetDimensions(image)[2]; } //create pointers from data float* imagePtr = (float*)(mxGPUGetDataReadOnly(image)); float* xPtr = (float*)(mxGPUGetDataReadOnly(points)); float* yPtr = &(xPtr[numPoints]); //create output mwSize outSize[] = {numPoints,imageDepth}; mxGPUArray* out = mxGPUCreateGPUArray(2, outSize, mxSINGLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); float* outPtr = (float*)(mxGPUGetDataReadOnly(out)); //run and get ouputs for(size_t i = 0; i < imageDepth; i++){ float* imageLayerPtr = &(imagePtr[imageHeight*imageWidth*i]); float* outLayerPtr = &(outPtr[numPoints*i]); hipLaunchKernelGGL(( LinearInterpolateKernel), dim3(gridSize(numPoints)), dim3(BLOCK_SIZE), 0, 0, imageLayerPtr, outLayerPtr, imageHeight, imageWidth, xPtr, yPtr, numPoints); CudaCheckError(); } plhs[0] = mxGPUCreateMxArrayOnGPU(out); //destroy reference structures mxGPUDestroyGPUArray(points); mxGPUDestroyGPUArray(image); mxGPUDestroyGPUArray(out); }
8b1bde8a47ebefa988afa53f8fda8169b7678928.cu
/* function for projecting lidar points * */ #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/sort.h> #include "../common.h" __global__ void LinearInterpolateKernel(const float* const imageIn, float* const out, const size_t height, const size_t width, const float* const x, const float* const y, const size_t numPoints){ unsigned int i = blockDim.x * blockIdx.x + threadIdx.x; if(i >= numPoints){ return; } if((x[i] < 0) || (y[i] < 0) || (x[i] >= (width-1)) || (y[i] >= (height-1))){ out[i] = 0; return; } int xF = (int)x[i]; int yF = (int)y[i]; float xD = x[i] - (float)xF; float yD = y[i] - (float)yF; //linear interpolate out[i] = (1-yD)*(1-xD)*imageIn[yF + xF*height] + (1-yD)*xD*imageIn[yF + (xF+1)*height] + yD*(1-xD)*imageIn[yF+1 + xF*height] + yD*xD*imageIn[yF+1 + (xF+1)*height]; } void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { //initialize the MathWorks GPU API. mxInitGPU(); //read data mxGPUArray const * image = mxGPUCreateFromMxArray(prhs[0]); mxGPUArray const * points = mxGPUCreateFromMxArray(prhs[1]); size_t imageWidth = mxGPUGetDimensions(image)[1]; size_t imageHeight = mxGPUGetDimensions(image)[0]; size_t numPoints = mxGPUGetDimensions(points)[0]; size_t imageDepth = 1; if(mxGPUGetNumberOfDimensions(image) > 2){ imageDepth = mxGPUGetDimensions(image)[2]; } //create pointers from data float* imagePtr = (float*)(mxGPUGetDataReadOnly(image)); float* xPtr = (float*)(mxGPUGetDataReadOnly(points)); float* yPtr = &(xPtr[numPoints]); //create output mwSize outSize[] = {numPoints,imageDepth}; mxGPUArray* out = mxGPUCreateGPUArray(2, outSize, mxSINGLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); float* outPtr = (float*)(mxGPUGetDataReadOnly(out)); //run and get ouputs for(size_t i = 0; i < imageDepth; i++){ float* imageLayerPtr = &(imagePtr[imageHeight*imageWidth*i]); float* outLayerPtr = &(outPtr[numPoints*i]); LinearInterpolateKernel<<<gridSize(numPoints), BLOCK_SIZE>>>(imageLayerPtr, outLayerPtr, imageHeight, imageWidth, xPtr, yPtr, numPoints); CudaCheckError(); } plhs[0] = mxGPUCreateMxArrayOnGPU(out); //destroy reference structures mxGPUDestroyGPUArray(points); mxGPUDestroyGPUArray(image); mxGPUDestroyGPUArray(out); }
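// MATLAB-side usage sketch (the compiled mex name is a placeholder; both inputs must be
// single-precision gpuArrays, and the second argument is numPoints-by-2 laid out as
// [x-coords, y-coords], matching how xPtr/yPtr are derived above):
//
//   vals = linearInterpolateMex(gpuArray(single(img)), gpuArray(single([x(:), y(:)])));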
bea534751f8bfa4a8e7d68e104a7ec849474a60e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" texture<int, 1, hipReadModeElementType> tex_1d; __global__ void read_texture_1d(int nx){ int x = threadIdx.x + blockDim.x * blockIdx.x; if (x < nx){ int value = tex1Dfetch(tex_1d, x); printf("my id is %d, my value is %d\n", x, value); } }
bea534751f8bfa4a8e7d68e104a7ec849474a60e.cu
texture<int, 1, cudaReadModeElementType> tex_1d; __global__ void read_texture_1d(int nx){ int x = threadIdx.x + blockDim.x * blockIdx.x; if (x < nx){ int value = tex1Dfetch(tex_1d, x); printf("my id is %d, my value is %d\n", x, value); } }
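// Host-side sketch for driving the legacy texture reference above (buffer name and launch
// shape are placeholders; cudaBindTexture/cudaUnbindTexture are the pre-texture-object API
// that a texture<int, 1, cudaReadModeElementType> reference is used with):
//
//   int* d_data;                                        // nx ints on the device
//   cudaMalloc(&d_data, nx * sizeof(int));
//   cudaMemcpy(d_data, h_data, nx * sizeof(int), cudaMemcpyHostToDevice);
//   cudaBindTexture(nullptr, tex_1d, d_data, nx * sizeof(int));
//   read_texture_1d<<<(nx + 127) / 128, 128>>>(nx);
//   cudaUnbindTexture(tex_1d);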
d1c6cda9dc7d1ff1d8d9e14bcf7993e887c2a623.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "THHUNN.h" #include "common.h" #include "THHHalf.h" #include "THHHalfAutoNumerics.cuh" #include "THHTensor.hpp" #include "THHDeviceTensor.cuh" #include "THHDeviceTensorUtils.cuh" #include "THHDeviceUtils.cuh" const int WARP_SIZE = 32; // The maximum number of threads in a block const int MAX_BLOCK_SIZE = 512; // Number of threads in a block given an input size up to MAX_BLOCK_SIZE static int getNumThreads(int nElem) { int threadSizes[5] = { 32, 64, 128, 256, MAX_BLOCK_SIZE }; for (int i = 0; i != 5; ++i) { if (nElem <= threadSizes[i]) { return threadSizes[i]; } } return MAX_BLOCK_SIZE; } // Returns the index of the most significant 1 bit in `val`. __device__ __forceinline__ int getMSB(int val) { return 31 - __clz(val); } template <typename Dtype, typename Acctype> struct Float2 { Acctype v1, v2; __device__ Float2() {} __device__ Float2(Dtype v1, Dtype v2) : v1(ScalarConvert<Dtype, Acctype>::to(v1)), v2(ScalarConvert<Dtype, Acctype>::to(v2)) {} __device__ Float2(Dtype v) : v1(ScalarConvert<Dtype, Acctype>::to(v)), v2(ScalarConvert<Dtype, Acctype>::to(v)) {} __device__ Float2(int v) : v1(ScalarConvert<int, Acctype>::to(v)), v2(ScalarConvert<int, Acctype>::to(v)) {} __device__ Float2& operator+=(const Float2& a) { v1 += a.v1; v2 += a.v2; return *this; } }; template <typename Dtype, typename Acctype, typename DeviceTensor3> struct SumOp { __device__ SumOp(const DeviceTensor3 t) : tensor(t) {} __device__ __forceinline__ Acctype operator()(int batch, int plane, int n) { return ScalarConvert<Dtype, Acctype>::to(tensor[batch][plane][n]); } const DeviceTensor3 tensor; }; template <typename Dtype, typename Acctype, typename DeviceTensor3> struct VarOp { __device__ VarOp(Acctype m, const DeviceTensor3 t) : mean(m), tensor(t) {} __device__ __forceinline__ Acctype operator()(int batch, int plane, int n) { Dtype val = tensor[batch][plane][n]; return (val - mean) * (val - mean); } const Acctype mean; const DeviceTensor3 tensor; }; template <typename Dtype, typename Acctype, typename DeviceTensor3> struct GradOp { __device__ GradOp(Acctype m, const DeviceTensor3 i, const DeviceTensor3 g) : mean(m), input(i), gradOutput(g) {} __device__ __forceinline__ Float2<Dtype, Acctype> operator()(int batch, int plane, int n) { Dtype g = gradOutput[batch][plane][n]; Dtype c = ScalarConvert<Acctype, Dtype>::to(input[batch][plane][n] - mean); return Float2<Dtype, Acctype>(g, g * c); } const Acctype mean; const DeviceTensor3 input; const DeviceTensor3 gradOutput; }; // Sum across all threads within a warp template <typename T> static __device__ __forceinline__ T warpSum(T val) { #if __CUDA_ARCH__ >= 300 for (int i = 0; i < getMSB(WARP_SIZE); ++i) { val += WARP_SHFL_XOR(val, 1 << i, WARP_SIZE); } #else __shared__ T values[MAX_BLOCK_SIZE]; values[threadIdx.x] = val; __threadfence_block(); const int base = (threadIdx.x / WARP_SIZE) * WARP_SIZE; for (int i = 1; i < WARP_SIZE; i++) { val += values[base + ((i + threadIdx.x) % WARP_SIZE)]; } #endif return val; } template <typename Dtype, typename Acctype> static __device__ __forceinline__ Float2<Dtype, Acctype> warpSum(Float2<Dtype, Acctype> value) { value.v1 = warpSum(value.v1); value.v2 = warpSum(value.v2); return value; } // Sum across (batch, x/y/z) applying Op() pointwise template<typename T, typename Op, typename DeviceTensor3> __device__ T reduce(Op op, DeviceTensor3 tensor, int plane) { T sum = (T)0; for (int batch = 0; batch < tensor.getSize(0); ++batch) { for (int x = 
threadIdx.x; x < tensor.getSize(2); x += blockDim.x) { sum += op(batch, plane, x); } } // sum over NumThreads within a warp sum = warpSum(sum); // 'transpose', and reduce within warp again __shared__ T shared[32]; __syncthreads(); if (threadIdx.x % WARP_SIZE == 0) { shared[threadIdx.x / WARP_SIZE] = sum; } if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { // zero out the other entries in shared shared[threadIdx.x] = (T)0; } __syncthreads(); if (threadIdx.x / WARP_SIZE == 0) { sum = warpSum(shared[threadIdx.x]); if (threadIdx.x == 0) { shared[0] = sum; } } __syncthreads(); // Everyone picks it up, should be broadcast into the whole gradInput return shared[0]; } template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3> __global__ void BatchNormalizationUpdateOutputInference_kernel( const DeviceTensor3 input, DeviceTensor3 output, const DeviceTensor1 runningMean, const DeviceTensor1 runningVar, const DeviceTensor1 weight, const DeviceTensor1 bias, Acctype epsilon) { int plane = blockIdx.x; Acctype invstd = Acctype(1) / sqrt(runningVar[plane].ldg() + epsilon); Acctype mean = ScalarConvert<Dtype, Acctype>::to(runningMean[plane].ldg()); Acctype gamma = weight.numElements() > 0 ? ScalarConvert<Dtype, Acctype>::to(weight[plane].ldg()) : Acctype(1); Acctype beta = bias.numElements() > 0 ? ScalarConvert<Dtype, Acctype>::to(bias[plane].ldg()) : Acctype(0); // Write normalized and update the output for (int batch = 0; batch < input.getSize(0); batch++) { for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) { Dtype inp = input[batch][plane][x].ldg(); output[batch][plane][x] = ScalarConvert<Acctype, Dtype>::to(gamma * (inp - mean) * invstd + beta); } } } template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3> __global__ void BatchNormalizationUpdateOutput_kernel( const DeviceTensor3 input, DeviceTensor3 output, const DeviceTensor1 weight, const DeviceTensor1 bias, const Acctype epsilon, const Acctype momentum, DeviceTensor1 runningMean, DeviceTensor1 runningVar, DeviceTensor1 saveMean, DeviceTensor1 saveStd) { int plane = blockIdx.x; int N = input.getSize(0) * input.getSize(2); Acctype norm = Acctype(1) / N; // Compute the mean and variance across (batch, x/y/z) Acctype mean = reduce<Acctype>(SumOp<Dtype, Acctype, DeviceTensor3>(input), input, plane) * norm; __syncthreads(); Acctype varN = reduce<Acctype>(VarOp<Dtype, Acctype, DeviceTensor3>(mean, input), input, plane); Acctype invStd = 0; if (varN != Acctype(0) || epsilon != Acctype(0)) { invStd = 1 / sqrt(varN * norm + epsilon); } // Save the mean, variance, and moving averages if (threadIdx.x == 0) { // Momentum based writeback Acctype unbiasedVar = varN / (N - 1); saveMean[plane] = ScalarConvert<Acctype, Dtype>::to(mean); saveStd[plane] = ScalarConvert<Acctype, Dtype>::to(invStd); if (runningMean.data() != NULL) { runningMean[plane] = ScalarConvert<Acctype, Dtype>::to((1 - momentum) * runningMean[plane] + momentum * mean); } if (runningVar.data() != NULL) { runningVar[plane] = ScalarConvert<Acctype, Dtype>::to((1 - momentum) * runningVar[plane] + momentum * unbiasedVar); } } // Write normalized and update the output Acctype gamma = weight.numElements() > 0 ? ScalarConvert<Dtype, Acctype>::to(weight[plane]) : ScalarConvert<int, Acctype>::to(1); Acctype beta = bias.numElements() > 0 ? 
ScalarConvert<Dtype, Acctype>::to(bias[plane]) : ScalarConvert<int, Acctype>::to(0); for (int batch = 0; batch < input.getSize(0); ++batch) { for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) { Dtype inp = input[batch][plane][x].ldg(); output[batch][plane][x] = ScalarConvert<Acctype, Dtype>::to(gamma * (inp - mean) * invStd + beta); } } } template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3> __global__ void BatchNormalizationBackward_kernel( const DeviceTensor3 input, const DeviceTensor3 gradOutput, DeviceTensor3 gradInput, DeviceTensor1 gradWeight, DeviceTensor1 gradBias, const DeviceTensor1 weight, const DeviceTensor1 runningMean, const DeviceTensor1 runningVar, const DeviceTensor1 saveMean, const DeviceTensor1 saveStd, bool train, Acctype scale, double eps) { int plane = blockIdx.x; int N = gradOutput.getSize(0) * gradOutput.getSize(2); Acctype mean, stdVal; if (train) { mean = ScalarConvert<Dtype, Acctype>::to(saveMean[plane]); stdVal = ScalarConvert<Dtype, Acctype>::to(saveStd[plane]); } else { mean = ScalarConvert<Dtype, Acctype>::to(runningMean[plane]); stdVal = 1 / sqrt(runningVar[plane] + eps); } Acctype weightVal = weight.numElements() > 0 ? ScalarConvert<Dtype, Acctype>::to(weight[plane]) : Acctype(1); Acctype norm = Acctype(1) / N; // Compute two values across (batch, x/y/z) in one pass: // 1. Sum(gradOutput) // 2. DotProduct(input - mean, gradOutput) GradOp<Dtype, Acctype, DeviceTensor3> g(mean, input, gradOutput); Float2<Dtype, Acctype> res = reduce<Float2<Dtype, Acctype>, GradOp<Dtype, Acctype, DeviceTensor3>, DeviceTensor3>(g, gradOutput, plane); Acctype gradOutputSum = res.v1; Acctype dotP = res.v2; Acctype gradMean = gradOutputSum * norm; Acctype projScale = dotP * norm * stdVal * stdVal; Acctype gradScale = stdVal * weightVal; if (gradInput.numElements() > 0) { for (int batch = 0; batch < gradOutput.getSize(0); ++batch) { for (int x = threadIdx.x; x < gradOutput.getSize(2); x += blockDim.x) { Dtype gradOut = gradOutput[batch][plane][x]; if (train) { Dtype inp = input[batch][plane][x]; Acctype proj = (inp - mean) * projScale; gradInput[batch][plane][x] = ScalarConvert<Acctype, Dtype>::to((gradOut - proj - gradMean) * gradScale); } else { gradInput[batch][plane][x] = ScalarConvert<Acctype, Dtype>::to(gradOut * gradScale); } } } } if (gradWeight.numElements() > 0) { if (threadIdx.x == 0) { gradWeight[plane] += ScalarConvert<Acctype, Dtype>::to(scale * dotP * stdVal); } } if (gradBias.numElements() > 0) { if (threadIdx.x == 0) { gradBias[plane] += ScalarConvert<Acctype, Dtype>::to(scale * gradOutputSum); } } } #include "generic/BatchNormalization.cu" #include "THHGenerateFloatTypes.h"
d1c6cda9dc7d1ff1d8d9e14bcf7993e887c2a623.cu
#include "THCUNN.h" #include "common.h" #include "THCHalf.h" #include "THCHalfAutoNumerics.cuh" #include "THCTensor.hpp" #include "THCDeviceTensor.cuh" #include "THCDeviceTensorUtils.cuh" #include "THCDeviceUtils.cuh" const int WARP_SIZE = 32; // The maximum number of threads in a block const int MAX_BLOCK_SIZE = 512; // Number of threads in a block given an input size up to MAX_BLOCK_SIZE static int getNumThreads(int nElem) { int threadSizes[5] = { 32, 64, 128, 256, MAX_BLOCK_SIZE }; for (int i = 0; i != 5; ++i) { if (nElem <= threadSizes[i]) { return threadSizes[i]; } } return MAX_BLOCK_SIZE; } // Returns the index of the most significant 1 bit in `val`. __device__ __forceinline__ int getMSB(int val) { return 31 - __clz(val); } template <typename Dtype, typename Acctype> struct Float2 { Acctype v1, v2; __device__ Float2() {} __device__ Float2(Dtype v1, Dtype v2) : v1(ScalarConvert<Dtype, Acctype>::to(v1)), v2(ScalarConvert<Dtype, Acctype>::to(v2)) {} __device__ Float2(Dtype v) : v1(ScalarConvert<Dtype, Acctype>::to(v)), v2(ScalarConvert<Dtype, Acctype>::to(v)) {} __device__ Float2(int v) : v1(ScalarConvert<int, Acctype>::to(v)), v2(ScalarConvert<int, Acctype>::to(v)) {} __device__ Float2& operator+=(const Float2& a) { v1 += a.v1; v2 += a.v2; return *this; } }; template <typename Dtype, typename Acctype, typename DeviceTensor3> struct SumOp { __device__ SumOp(const DeviceTensor3 t) : tensor(t) {} __device__ __forceinline__ Acctype operator()(int batch, int plane, int n) { return ScalarConvert<Dtype, Acctype>::to(tensor[batch][plane][n]); } const DeviceTensor3 tensor; }; template <typename Dtype, typename Acctype, typename DeviceTensor3> struct VarOp { __device__ VarOp(Acctype m, const DeviceTensor3 t) : mean(m), tensor(t) {} __device__ __forceinline__ Acctype operator()(int batch, int plane, int n) { Dtype val = tensor[batch][plane][n]; return (val - mean) * (val - mean); } const Acctype mean; const DeviceTensor3 tensor; }; template <typename Dtype, typename Acctype, typename DeviceTensor3> struct GradOp { __device__ GradOp(Acctype m, const DeviceTensor3 i, const DeviceTensor3 g) : mean(m), input(i), gradOutput(g) {} __device__ __forceinline__ Float2<Dtype, Acctype> operator()(int batch, int plane, int n) { Dtype g = gradOutput[batch][plane][n]; Dtype c = ScalarConvert<Acctype, Dtype>::to(input[batch][plane][n] - mean); return Float2<Dtype, Acctype>(g, g * c); } const Acctype mean; const DeviceTensor3 input; const DeviceTensor3 gradOutput; }; // Sum across all threads within a warp template <typename T> static __device__ __forceinline__ T warpSum(T val) { #if __CUDA_ARCH__ >= 300 for (int i = 0; i < getMSB(WARP_SIZE); ++i) { val += WARP_SHFL_XOR(val, 1 << i, WARP_SIZE); } #else __shared__ T values[MAX_BLOCK_SIZE]; values[threadIdx.x] = val; __threadfence_block(); const int base = (threadIdx.x / WARP_SIZE) * WARP_SIZE; for (int i = 1; i < WARP_SIZE; i++) { val += values[base + ((i + threadIdx.x) % WARP_SIZE)]; } #endif return val; } template <typename Dtype, typename Acctype> static __device__ __forceinline__ Float2<Dtype, Acctype> warpSum(Float2<Dtype, Acctype> value) { value.v1 = warpSum(value.v1); value.v2 = warpSum(value.v2); return value; } // Sum across (batch, x/y/z) applying Op() pointwise template<typename T, typename Op, typename DeviceTensor3> __device__ T reduce(Op op, DeviceTensor3 tensor, int plane) { T sum = (T)0; for (int batch = 0; batch < tensor.getSize(0); ++batch) { for (int x = threadIdx.x; x < tensor.getSize(2); x += blockDim.x) { sum += op(batch, plane, x); } } // 
sum over NumThreads within a warp sum = warpSum(sum); // 'transpose', and reduce within warp again __shared__ T shared[32]; __syncthreads(); if (threadIdx.x % WARP_SIZE == 0) { shared[threadIdx.x / WARP_SIZE] = sum; } if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { // zero out the other entries in shared shared[threadIdx.x] = (T)0; } __syncthreads(); if (threadIdx.x / WARP_SIZE == 0) { sum = warpSum(shared[threadIdx.x]); if (threadIdx.x == 0) { shared[0] = sum; } } __syncthreads(); // Everyone picks it up, should be broadcast into the whole gradInput return shared[0]; } template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3> __global__ void BatchNormalizationUpdateOutputInference_kernel( const DeviceTensor3 input, DeviceTensor3 output, const DeviceTensor1 runningMean, const DeviceTensor1 runningVar, const DeviceTensor1 weight, const DeviceTensor1 bias, Acctype epsilon) { int plane = blockIdx.x; Acctype invstd = Acctype(1) / sqrt(runningVar[plane].ldg() + epsilon); Acctype mean = ScalarConvert<Dtype, Acctype>::to(runningMean[plane].ldg()); Acctype gamma = weight.numElements() > 0 ? ScalarConvert<Dtype, Acctype>::to(weight[plane].ldg()) : Acctype(1); Acctype beta = bias.numElements() > 0 ? ScalarConvert<Dtype, Acctype>::to(bias[plane].ldg()) : Acctype(0); // Write normalized and update the output for (int batch = 0; batch < input.getSize(0); batch++) { for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) { Dtype inp = input[batch][plane][x].ldg(); output[batch][plane][x] = ScalarConvert<Acctype, Dtype>::to(gamma * (inp - mean) * invstd + beta); } } } template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3> __global__ void BatchNormalizationUpdateOutput_kernel( const DeviceTensor3 input, DeviceTensor3 output, const DeviceTensor1 weight, const DeviceTensor1 bias, const Acctype epsilon, const Acctype momentum, DeviceTensor1 runningMean, DeviceTensor1 runningVar, DeviceTensor1 saveMean, DeviceTensor1 saveStd) { int plane = blockIdx.x; int N = input.getSize(0) * input.getSize(2); Acctype norm = Acctype(1) / N; // Compute the mean and variance across (batch, x/y/z) Acctype mean = reduce<Acctype>(SumOp<Dtype, Acctype, DeviceTensor3>(input), input, plane) * norm; __syncthreads(); Acctype varN = reduce<Acctype>(VarOp<Dtype, Acctype, DeviceTensor3>(mean, input), input, plane); Acctype invStd = 0; if (varN != Acctype(0) || epsilon != Acctype(0)) { invStd = 1 / sqrt(varN * norm + epsilon); } // Save the mean, variance, and moving averages if (threadIdx.x == 0) { // Momentum based writeback Acctype unbiasedVar = varN / (N - 1); saveMean[plane] = ScalarConvert<Acctype, Dtype>::to(mean); saveStd[plane] = ScalarConvert<Acctype, Dtype>::to(invStd); if (runningMean.data() != NULL) { runningMean[plane] = ScalarConvert<Acctype, Dtype>::to((1 - momentum) * runningMean[plane] + momentum * mean); } if (runningVar.data() != NULL) { runningVar[plane] = ScalarConvert<Acctype, Dtype>::to((1 - momentum) * runningVar[plane] + momentum * unbiasedVar); } } // Write normalized and update the output Acctype gamma = weight.numElements() > 0 ? ScalarConvert<Dtype, Acctype>::to(weight[plane]) : ScalarConvert<int, Acctype>::to(1); Acctype beta = bias.numElements() > 0 ? 
ScalarConvert<Dtype, Acctype>::to(bias[plane]) : ScalarConvert<int, Acctype>::to(0); for (int batch = 0; batch < input.getSize(0); ++batch) { for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) { Dtype inp = input[batch][plane][x].ldg(); output[batch][plane][x] = ScalarConvert<Acctype, Dtype>::to(gamma * (inp - mean) * invStd + beta); } } } template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3> __global__ void BatchNormalizationBackward_kernel( const DeviceTensor3 input, const DeviceTensor3 gradOutput, DeviceTensor3 gradInput, DeviceTensor1 gradWeight, DeviceTensor1 gradBias, const DeviceTensor1 weight, const DeviceTensor1 runningMean, const DeviceTensor1 runningVar, const DeviceTensor1 saveMean, const DeviceTensor1 saveStd, bool train, Acctype scale, double eps) { int plane = blockIdx.x; int N = gradOutput.getSize(0) * gradOutput.getSize(2); Acctype mean, stdVal; if (train) { mean = ScalarConvert<Dtype, Acctype>::to(saveMean[plane]); stdVal = ScalarConvert<Dtype, Acctype>::to(saveStd[plane]); } else { mean = ScalarConvert<Dtype, Acctype>::to(runningMean[plane]); stdVal = 1 / sqrt(runningVar[plane] + eps); } Acctype weightVal = weight.numElements() > 0 ? ScalarConvert<Dtype, Acctype>::to(weight[plane]) : Acctype(1); Acctype norm = Acctype(1) / N; // Compute two values across (batch, x/y/z) in one pass: // 1. Sum(gradOutput) // 2. DotProduct(input - mean, gradOutput) GradOp<Dtype, Acctype, DeviceTensor3> g(mean, input, gradOutput); Float2<Dtype, Acctype> res = reduce<Float2<Dtype, Acctype>, GradOp<Dtype, Acctype, DeviceTensor3>, DeviceTensor3>(g, gradOutput, plane); Acctype gradOutputSum = res.v1; Acctype dotP = res.v2; Acctype gradMean = gradOutputSum * norm; Acctype projScale = dotP * norm * stdVal * stdVal; Acctype gradScale = stdVal * weightVal; if (gradInput.numElements() > 0) { for (int batch = 0; batch < gradOutput.getSize(0); ++batch) { for (int x = threadIdx.x; x < gradOutput.getSize(2); x += blockDim.x) { Dtype gradOut = gradOutput[batch][plane][x]; if (train) { Dtype inp = input[batch][plane][x]; Acctype proj = (inp - mean) * projScale; gradInput[batch][plane][x] = ScalarConvert<Acctype, Dtype>::to((gradOut - proj - gradMean) * gradScale); } else { gradInput[batch][plane][x] = ScalarConvert<Acctype, Dtype>::to(gradOut * gradScale); } } } } if (gradWeight.numElements() > 0) { if (threadIdx.x == 0) { gradWeight[plane] += ScalarConvert<Acctype, Dtype>::to(scale * dotP * stdVal); } } if (gradBias.numElements() > 0) { if (threadIdx.x == 0) { gradBias[plane] += ScalarConvert<Acctype, Dtype>::to(scale * gradOutputSum); } } } #include "generic/BatchNormalization.cu" #include "THCGenerateFloatTypes.h"
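A minimal standalone CUDA sketch (not part of either file in this pair) of the two-stage reduction that warpSum()/reduce() implement above: an XOR-shuffle butterfly inside each warp, then the first warp combines the per-warp partials staged in shared memory. The names blockSum and warpReduceSum are illustrative; it assumes CUDA 9+ (__shfl_xor_sync) and a block size that is a multiple of 32.

#include <cstdio>
#include <cuda_runtime.h>

#define WARP_SZ 32

__device__ float warpReduceSum(float val) {
    // XOR butterfly: after log2(32) = 5 steps every lane holds the warp-wide sum
    for (int offset = WARP_SZ / 2; offset > 0; offset >>= 1)
        val += __shfl_xor_sync(0xffffffffu, val, offset);
    return val;
}

__global__ void blockSum(const float *in, float *out, int n) {
    __shared__ float partial[WARP_SZ];              // one slot per warp (max 32 warps/block)
    float sum = 0.f;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += gridDim.x * blockDim.x)
        sum += in[i];                               // grid-stride accumulation
    sum = warpReduceSum(sum);                       // stage 1: reduce within each warp
    if (threadIdx.x % WARP_SZ == 0)
        partial[threadIdx.x / WARP_SZ] = sum;       // lane 0 of each warp publishes its partial
    __syncthreads();
    if (threadIdx.x < WARP_SZ) {                    // stage 2: first warp reduces the partials
        int nWarps = blockDim.x / WARP_SZ;
        float v = (threadIdx.x < nWarps) ? partial[threadIdx.x] : 0.f;
        v = warpReduceSum(v);
        if (threadIdx.x == 0) atomicAdd(out, v);    // one atomic per block
    }
}

int main() {
    const int n = 1 << 20;
    float *h_in = new float[n];
    for (int i = 0; i < n; ++i) h_in[i] = 1.f;      // expected sum == n
    float *d_in, *d_out, h_out = 0.f;
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_out, sizeof(float));
    cudaMemset(d_out, 0, sizeof(float));
    cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);
    blockSum<<<64, 256>>>(d_in, d_out, n);
    cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    printf("sum = %.0f (expected %d)\n", h_out, n);
    cudaFree(d_in); cudaFree(d_out); delete[] h_in;
    return 0;
}

The kernels in this pair use the same idea but keep a shared-memory fallback for __CUDA_ARCH__ < 300, where warp shuffles are unavailable.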
f1b75523cc3d45ecfa5874e2ac7b5182b64fa8ec.hip
// !!! This is a file automatically generated by hipify!!! #include "additionally.h" // some definitions from: im2col.h, blas.h, list.h, utils.h, activations.h, tree.h, layer.h, network.h // softmax_layer.h, reorg_layer.h, route_layer.h, region_layer.h, maxpool_layer.h, convolutional_layer.h #include "gpu.h" /* // from: box.h typedef struct { float x, y, w, h; } box; */ // ------------- GPU cuDNN --------------- #define checkCUDNN(status) { \ if (status != CUDNN_STATUS_SUCCESS) { \ printf("CUDNN failure\nError: %d - %s \n", status, cudnnGetErrorString(status)); \ getchar(); \ } \ } // 4 layers in 1: convolution, batch-normalization, BIAS and activation void forward_convolutional_layer_gpu_cudnn(layer l, network_state state) { // XNOR-net if (l.xnor) { if (l.align_bit_weights_gpu && l.c >= 32) { //return; hipError_t status = hipSuccess; int input_size = l.c*l.h*l.w*l.batch; int m = l.n; int k = l.size*l.size*l.c; int n = l.out_w*l.out_h; //float * a = l.weights_gpu; int ldb_align = l.lda_align; size_t new_ldb = k + (ldb_align - k%ldb_align); // (k / 8 + 1) * 8; size_t t_intput_size = new_ldb * n; size_t t_bit_input_size = t_intput_size / 8;// +1; if (l.c % 32 == 0) { //printf("\n\n l.index = %d, l.w = %d, l.c = %d, l.n = %d, l.stride = %d, l.pad = %d - new XNOR \n", l.index, l.w, l.c, l.n, l.stride, l.pad); //printf("l.align_workspace_size = %d, (l.c * l.w * l.h) = %d \n", l.align_workspace_size, (l.c * l.w * l.h)); int ldb_align = l.lda_align; size_t new_ldb = k + (ldb_align - k%ldb_align); // (k / 8 + 1) * 8; size_t t_intput_size = new_ldb * l.bit_align;// n; size_t t_bit_input_size = t_intput_size / 8;// +1; const int new_c = l.c / 32; repack_input_gpu_bin(state.input, (uint32_t *)l.align_workspace_gpu, l.w, l.h, l.c); im2col_ongpu(l.align_workspace_gpu, new_c, l.h, l.w, l.size, l.stride, l.pad, state.workspace); int new_k = l.size*l.size*l.c / 32; transpose_uint32_gpu((uint32_t *)state.workspace, (uint32_t *)l.transposed_align_workspace_gpu, new_k, n, n, new_ldb); gemm_nn_custom_bin_mean_transposed_gpu(m, n, k, (unsigned char *)l.align_bit_weights_gpu, new_ldb, (unsigned char *)l.transposed_align_workspace_gpu, new_ldb, l.output_gpu, n, l.mean_arr_gpu, l.biases_gpu, l.activation == LEAKY, l.bin_conv_shortcut_in_gpu, l.bin_conv_shortcut_out_gpu); } else { //printf("\n\n l.index = %d, l.w = %d, l.c = %d, l.n = %d, l.stride = %d, l.pad = %d - old XNOR \n", l.index, l.w, l.c, l.n, l.stride, l.pad); int i = 0; im2col_align_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, l.align_workspace_gpu, l.bit_align); float_to_bit_gpu(l.align_workspace_gpu, (unsigned char *)state.workspace, l.align_workspace_size); transpose_bin_gpu((unsigned char *)state.workspace, (unsigned char *)l.transposed_align_workspace_gpu, k, n, l.bit_align, new_ldb, 8); gemm_nn_custom_bin_mean_transposed_gpu(m, n, k, (unsigned char *)l.align_bit_weights_gpu, new_ldb, (unsigned char *)l.transposed_align_workspace_gpu, new_ldb, l.output_gpu, n, l.mean_arr_gpu, l.biases_gpu, l.activation == LEAKY, l.bin_conv_shortcut_in_gpu, l.bin_conv_shortcut_out_gpu); } //add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h); if (l.activation != LINEAR && l.activation != LEAKY) activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation); return; } if (!l.align_bit_weights_gpu) { binarize_weights_gpu(l.weights_gpu, l.n, l.c*l.size*l.size, l.binary_weights_gpu); } l.weights_gpu = l.binary_weights_gpu; binarize_gpu(state.input, l.c*l.h*l.w*l.batch, l.binary_input_gpu); state.input = 
l.binary_input_gpu; } // blas_kernels.cu //fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1); int size = l.inputs * l.batch; float one = 1; float zero = 0; // cuDNN >= v5.1 cudnnConvolutionForward(cudnn_handle(), &one, l.srcTensorDesc, state.input, l.weightDesc, l.weights_gpu, l.convDesc, l.fw_algo, state.workspace, l.workspace_size, &zero, l.dstTensorDesc, l.output_gpu); if (l.batch_normalize) { // blas_kernels.cu //normalize_gpu(l.output_gpu, l.rolling_mean_gpu, l.rolling_variance_gpu, l.batch, l.out_c, l.out_h*l.out_w); //scale_bias_gpu(l.output_gpu, l.scales_gpu, l.batch, l.out_c, l.out_h*l.out_w); } // blas_kernels.cu add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h); // blas_kernels.cu if (l.activation != LINEAR) activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation); } // 4 layers in 1: convolution, batch-normalization, BIAS and activation void forward_convolutional_layer_gpu_cudnn_quantized(layer l, network_state state) { int i; // blas_kernels.cu //fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1); int size = l.inputs * l.batch; hipError_t status; /* static int once = 1; if (once) { //printf(" l.input_quant_multipler = %f \n", l.input_quant_multipler); once = 0; cuda_convert_f32_to_int8(state.input, size, state.input_int8, l.input_quant_multipler, (256 / 2 - 1)); // 7-bit (1-bit sign) //cuda_convert_int8_to_f32(state.input_int8, size, state.input, 1.0F / l.input_quant_multipler); } else { //printf(" NEXT!!! \n"); //cuda_convert_f32_to_int8(state.input, size, state.input_int8, l.input_quant_multipler, (256 / 2 - 1)); // 7-bit (1-bit sign) cuda_convert_f32_to_int8_nomax(state.input, size, state.input_int8, l.input_quant_multipler); // 7-bit (1-bit sign) } */ //#if(CUDNN_MAJOR >= 7 ) #define INT8CONV //#endif // #if(CUDNN_MAJOR >= 7 ) #ifdef INT8CONV { float one = 1; float zero = 0; // input cudnnTensorDescriptor_t srcTransformDesc; cudnnCreateTensorDescriptor(&srcTransformDesc); cudnnSetTensor4dDescriptor(srcTransformDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_INT8, l.batch, l.c, l.h, l.w); cuda_convert_f32_to_int8(state.input, size, state.input_int8, l.input_quant_multipler, (256 / 2 - 1)); // 7-bit (1-bit sign) //cuda_convert_f32_to_int8_nomax(state.input, size, state.input_int8, l.input_quant_multipler); // 7-bit (1-bit sign) //printf("\n l.input_quant_multipler = %f \n", l.input_quant_multipler); cudnnStatus_t transform_status = cudnnTransformTensor( cudnn_handle(), &one, srcTransformDesc, state.input_int8, //input_init_int8, &zero, l.srcTensorDesc, state.input); checkCUDNN(transform_status); //float ALPHA1 = l.output_multipler / R_MULT; float ALPHA1 = 1 / (l.input_quant_multipler * l.weights_quant_multipler); //float ALPHA2 = 0; //printf(" ALPHA1 = %f \n", ALPHA1); // x w y and z bias alpha1/alpha2 // X_INT8 X_INT8 X_INT8 X_FLOAT X_FLOAT // y = act ( alpha1 * conv(x) + alpha2 * z + bias ) cudnnStatus_t cudnnstat = cudnnConvolutionBiasActivationForward(cudnn_handle(), &ALPHA1, // ALPHA l.srcTensorDesc, state.input, l.weightDesc, l.weights_int8_int8x4_gpu, //l.weights_gpu, l.convDesc, l.fw_algo, state.workspace, l.workspace_size, &zero, // ALPHA2 l.dstTensorDesc, l.output_gpu, l.biasTensorDesc, l.biases_gpu, l.activationDesc, l.dstTensorDesc, l.output_gpu); /* // cuDNN >= v5.1 cudnnStatus_t cudnnstat = cudnnConvolutionForward(cudnn_handle(), &ALPHA1,//&one, l.srcTensorDesc, state.input, //state.input_int8, // state.input, l.weightDesc, l.weights_int8_int8x4_gpu, //l.weights_int8_gpu, //l.weights_gpu, l.convDesc, l.fw_algo, state.workspace, 
l.workspace_size, &zero, l.dstTensorDesc, l.output_gpu); */ //printf(" l.w = %d, l.h = %d, l.c = %d, l.n = %d \n", l.w, l.h, l.c, l.n); if (cudnnstat != CUDNN_STATUS_SUCCESS) { if (cudnnstat == CUDNN_STATUS_ARCH_MISMATCH) { printf("\n Error: CUDNN_STATUS_ARCH_MISMATCH - This GPU doesn't support DP4A (INT8 weights and input) \n"); } else if (cudnnstat == CUDNN_STATUS_NOT_SUPPORTED) { printf("\n Error: CUDNN_STATUS_NOT_SUPPORTED (INT8 weights and input) \n"); } else if (cudnnstat == CUDNN_STATUS_BAD_PARAM) { printf("\n Error: CUDNN_STATUS_BAD_PARAM (INT8 weights and input) \n"); } printf("\n cudnnstat = %d \n", cudnnstat); getchar(); } else { //printf("\n cudnnstat == CUDNN_STATUS_SUCCESS \n"); } //status = hipMemcpy(l.output, l.output_gpu, sizeof(float)*l.outputs*l.batch, hipMemcpyDeviceToHost); //for (i = 0; i < l.outputs && i < 100; ++i) printf(" %f, ", l.output[i]); //draw_distribution(l.output, l.outputs*l.batch, "Output"); //add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h); } #else // INT8CONV float one = 1; float zero = 0; // cuDNN >= v5.1 cudnnConvolutionForward(cudnn_handle(), &one, l.srcTensorDesc, state.input, l.weightDesc, l.weights_gpu, l.convDesc, l.fw_algo, state.workspace, l.workspace_size, &zero, l.dstTensorDesc, l.output_gpu); if (l.batch_normalize) { // blas_kernels.cu //normalize_gpu(l.output_gpu, l.rolling_mean_gpu, l.rolling_variance_gpu, l.batch, l.out_c, l.out_h*l.out_w); //scale_bias_gpu(l.output_gpu, l.scales_gpu, l.batch, l.out_c, l.out_h*l.out_w); } // blas_kernels.cu add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h); #endif // INT8CONV // blas_kernels.cu activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation); } // MAX pooling layer void forward_maxpool_layer_gpu_cuda(const layer l, network_state state) { // maxpool_layer_kernels.cu forward_maxpool_layer_gpu(l, state); } // route layer void forward_route_layer_gpu_cuda(const layer l, network_state state) { int i, j; int offset = 0; for (i = 0; i < l.n; ++i) { int index = l.input_layers[i]; float *input = state.net.layers[index].output_gpu; int input_size = l.input_sizes[i]; for (j = 0; j < l.batch; ++j) { // CUDA hipMemcpy(l.output_gpu + offset + j*l.outputs, input + j*input_size, sizeof(float)*input_size, hipMemcpyDeviceToDevice); } offset += input_size; } } // reorg layer void forward_reorg_layer_gpu_cuda(layer l, network_state state) { // blas_kernels.cu //reorg_ongpu(state.input, l.w, l.h, l.c, l.batch, l.stride, 1, l.output_gpu); reorg_ongpu(state.input, l.out_w, l.out_h, l.out_c, l.batch, l.stride, 0, l.output_gpu); } // upsample_layer.c void forward_upsample_layer_cuda(const layer l, network_state state) { fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1); //printf(" l.reverse = %d \n", l.reverse); if (l.reverse) { upsample_gpu(l.output_gpu, l.out_w, l.out_h, l.c, l.batch, l.stride, 0, l.scale, state.input); } else { upsample_gpu(state.input, l.w, l.h, l.c, l.batch, l.stride, 1, l.scale, l.output_gpu); } } // shortcut_layer.c void forward_shortcut_layer_cuda(const layer l, network_state state) { //copy_ongpu(l.outputs*l.batch, state.input, 1, l.output_gpu, 1); //shortcut_gpu(l.batch, l.w, l.h, l.c, state.net.layers[l.index].output_gpu, l.out_w, l.out_h, l.out_c, l.output_gpu); //activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation); input_shortcut_gpu(state.input, l.batch, l.w, l.h, l.c, state.net.layers[l.index].output_gpu, l.out_w, l.out_h, l.out_c, l.output_gpu); activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation); } 
// region layer void forward_region_layer_gpu_cuda(const layer l, network_state state) { // blas_kernels.cu flatten_ongpu(state.input, l.h*l.w, l.n*(l.coords + l.classes + 1), l.batch, 1, l.output_gpu); if (l.softmax_tree) { // Yolo 9000 int i; int count = 5; for (i = 0; i < l.softmax_tree->groups; ++i) { int group_size = l.softmax_tree->group_size[i]; // blas_kernels.cu softmax_gpu(l.output_gpu + count, group_size, l.classes + 5, l.w*l.h*l.n*l.batch, 1, l.output_gpu + count); count += group_size; } } else if (l.softmax) { // Yolo v2 // blas_kernels.cu softmax_gpu(l.output_gpu + 5, l.classes, l.classes + 5, l.w*l.h*l.n*l.batch, 1, l.output_gpu + 5); } float *in_cpu = (float *)calloc(l.batch*l.inputs, sizeof(float)); float *truth_cpu = 0; if (state.truth) { int num_truth = l.batch*l.truths; truth_cpu = (float *)calloc(num_truth, sizeof(float)); hipError_t status = hipMemcpy(state.truth, truth_cpu, num_truth * sizeof(float), hipMemcpyDeviceToHost); } hipError_t status = hipMemcpy(in_cpu, l.output_gpu, l.batch*l.inputs * sizeof(float), hipMemcpyDeviceToHost); network_state cpu_state = state; cpu_state.train = state.train; cpu_state.truth = truth_cpu; cpu_state.input = in_cpu; int i, b; int size = l.coords + l.classes + 1; memcpy(l.output, cpu_state.input, l.outputs*l.batch * sizeof(float)); for (b = 0; b < l.batch; ++b) { for (i = 0; i < l.h*l.w*l.n; ++i) { int index = size*i + b*l.outputs; float x = l.output[index + 4]; l.output[index + 4] = 1.0F / (1.0F + expf(-x)); // logistic_activate_cpu(l.output[index + 4]); } } free(cpu_state.input); } // yolo_layer.c Yolo v3 void forward_yolo_layer_cuda(const layer l, network_state state) { copy_ongpu(l.batch*l.inputs, state.input, 1, l.output_gpu, 1); int b, n; for (b = 0; b < l.batch; ++b) { for (n = 0; n < l.n; ++n) { int index = entry_index(l, b, n*l.w*l.h, 0); activate_array_ongpu(l.output_gpu + index, 2 * l.w*l.h, LOGISTIC); index = entry_index(l, b, n*l.w*l.h, 4); activate_array_ongpu(l.output_gpu + index, (1 + l.classes)*l.w*l.h, LOGISTIC); } } cuda_pull_array(l.output_gpu, l.output, l.batch*l.outputs); //return; } void forward_network_gpu_cudnn(network net, network_state state) { state.workspace = net.workspace; int i; for (i = 0; i < net.n; ++i) { state.index = i; layer l = net.layers[i]; if (l.type == CONVOLUTIONAL) { forward_convolutional_layer_gpu_cudnn(l, state); //printf("\n CONVOLUTIONAL \t\t l.size = %d \n", l.size); } else if (l.type == MAXPOOL) { forward_maxpool_layer_gpu_cuda(l, state); //printf("\n MAXPOOL \t\t l.size = %d \n", l.size); } else if (l.type == ROUTE) { forward_route_layer_gpu_cuda(l, state); //printf("\n ROUTE \n"); } else if (l.type == REORG) { forward_reorg_layer_gpu_cuda(l, state); //printf("\n REORG \n"); } else if (l.type == UPSAMPLE) { forward_upsample_layer_cuda(l, state); //printf("\n UPSAMPLE \n"); } else if (l.type == SHORTCUT) { forward_shortcut_layer_cuda(l, state); //printf("\n SHORTCUT \n"); } else if (l.type == YOLO) { forward_yolo_layer_cuda(l, state); //printf("\n YOLO \n"); } else if (l.type == REGION) { forward_region_layer_gpu_cuda(l, state); //printf("\n REGION \n"); } else if (l.type == BLANK) { //printf("\n layer: BLANK - %d \n", i); } else { printf("\n layer: %d \n", l.type); } state.input = l.output_gpu; } } void forward_network_gpu_cudnn_quantized(network net, network_state state) { state.workspace = net.workspace; int i; for (i = 0; i < net.n; ++i) { state.index = i; layer l = net.layers[i]; if (l.type == CONVOLUTIONAL) { //printf("\n %d - CONVOLUTIONAL \t\t l.size = %d \n", i, l.size); 
//if (l.quantized && i != 80 && i != 92 && i != 104) forward_convolutional_layer_gpu_cudnn_quantized(l, state); // mAP = 0, very strange if (l.quantized) forward_convolutional_layer_gpu_cudnn_quantized(l, state); else forward_convolutional_layer_gpu_cudnn(l, state); } else if (l.type == MAXPOOL) { forward_maxpool_layer_gpu_cuda(l, state); //printf("\n MAXPOOL \t\t l.size = %d \n", l.size); } else if (l.type == ROUTE) { forward_route_layer_gpu_cuda(l, state); //printf("\n ROUTE \n"); } else if (l.type == REORG) { forward_reorg_layer_gpu_cuda(l, state); //printf("\n REORG \n"); } else if (l.type == UPSAMPLE) { forward_upsample_layer_cuda(l, state); //printf("\n UPSAMPLE \n"); } else if (l.type == SHORTCUT) { forward_shortcut_layer_cuda(l, state); //printf("\n SHORTCUT \n"); } else if (l.type == YOLO) { forward_yolo_layer_cuda(l, state); //printf("\n YOLO \n"); } else if (l.type == REGION) { forward_region_layer_gpu_cuda(l, state); //printf("\n REGION \n"); } else { printf("\n layer: %d \n", l.type); } state.input = l.output_gpu; state.input_int8 = l.output_gpu_int8; } } // detect on GPU float *network_predict_gpu_cudnn(network net, float *input) { hipError_t status = hipSetDevice(net.gpu_index); //check_error(status); int size = net.layers[0].inputs * net.batch; network_state state; state.index = 0; state.net = net; //status = hipMalloc((void **)&(state.input), sizeof(float)*size); state.input = net.input_state_gpu; memcpy(net.input_pinned_cpu, input, size * sizeof(float)); status = hipMemcpy(state.input, net.input_pinned_cpu, sizeof(float)*size, hipMemcpyHostToDevice); state.truth = 0; state.train = 0; state.delta = 0; forward_network_gpu_cudnn(net, state); // network on GPU //status = hipFree(state.input); //status = hipFree(state.input_int8); //float *out = get_network_output_gpu(net); int i; for (i = net.n - 1; i > 0; --i) if (net.layers[i].type != COST) break; layer l = net.layers[i]; if (l.type != REGION && l.type != YOLO) status = hipMemcpy(l.output, l.output_gpu, l.outputs*l.batch * sizeof(float), hipMemcpyDeviceToHost); return l.output; } // detect on GPU float *network_predict_gpu_cudnn_quantized(network net, float *input) { hipError_t status = hipSetDevice(net.gpu_index); //check_error(status); int size = net.layers[0].inputs * net.batch; network_state state; state.index = 0; state.net = net; status = hipMalloc((void **)&(state.input), sizeof(float)*size); memcpy(net.input_pinned_cpu, input, size * sizeof(float)); status = hipMemcpy(state.input, net.input_pinned_cpu, sizeof(float)*size, hipMemcpyHostToDevice); state.truth = 0; state.train = 0; state.delta = 0; forward_network_gpu_cudnn_quantized(net, state); // network on GPU status = hipFree(state.input); //status = hipFree(state.input_int8); //float *out = get_network_output_gpu(net); int i; for (i = net.n - 1; i > 0; --i) if (net.layers[i].type != COST) break; layer l = net.layers[i]; if (l.type != REGION && l.type != YOLO) status = hipMemcpy(l.output, l.output_gpu, l.outputs*l.batch * sizeof(float), hipMemcpyDeviceToHost); return l.output; } // init weights and cuDNN for quantized IINT8x4 void init_gpu_int8x4(network net) { hipError_t status = hipSetDevice(net.gpu_index); int k; for (k = 0; k < net.n; ++k) { layer &l = net.layers[k]; if (l.type == CONVOLUTIONAL && k > 0) { if (l.weights_int8_gpu == NULL) { size_t const weights_size = l.size*l.size*l.c*l.n; status = hipMalloc((void **)&(l.weights_int8_gpu), sizeof(int8_t)*weights_size); status = hipMalloc((void **)&(l.weights_int8_int8x4_gpu), sizeof(int8_t)*weights_size); 
status = hipMemcpy(l.weights_int8_gpu, l.weights_int8, sizeof(int8_t)*weights_size, hipMemcpyHostToDevice); // convert weights CUDNN_TENSOR_NCHW -> CUDNN_TENSOR_NCHW_VECT_C cudnnTensorDescriptor_t src_weights_desc; cudnnCreateTensorDescriptor(&src_weights_desc); cudnnSetTensor4dDescriptor(src_weights_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_INT8, l.n, l.c, l.size, l.size); cudnnDataType_t cudnn_data_type = CUDNN_DATA_INT8x4; #if((CUDNN_MAJOR*10 + CUDNN_MINOR) >= 72) //if (l.c % 32 == 0) cudnn_data_type = CUDNN_DATA_INT8x32; // Tensor Cores for INT8 #endif //(CUDNN_MAJOR >= 7.2) cudnnTensorDescriptor_t dst_weights_desc; cudnnCreateTensorDescriptor(&dst_weights_desc); cudnnSetTensor4dDescriptor(dst_weights_desc, CUDNN_TENSOR_NCHW_VECT_C, cudnn_data_type, l.n, l.c, l.size, l.size); float one = 1; float zero = 0; cudnnStatus_t transform_status; transform_status = cudnnTransformTensor( cudnn_handle(), &one, src_weights_desc, l.weights_int8_gpu, &zero, dst_weights_desc, l.weights_int8_int8x4_gpu); checkCUDNN(transform_status); cudnnDestroyTensorDescriptor(src_weights_desc); cudnnDestroyTensorDescriptor(dst_weights_desc); status = hipMalloc((void **)&(l.biases_quant_gpu), sizeof(float)*l.n); status = hipMemcpy(l.biases_quant_gpu, l.biases_quant, sizeof(float)*l.n, hipMemcpyHostToDevice); } } } }
f1b75523cc3d45ecfa5874e2ac7b5182b64fa8ec.cu
#include "additionally.h" // some definitions from: im2col.h, blas.h, list.h, utils.h, activations.h, tree.h, layer.h, network.h // softmax_layer.h, reorg_layer.h, route_layer.h, region_layer.h, maxpool_layer.h, convolutional_layer.h #include "gpu.h" /* // from: box.h typedef struct { float x, y, w, h; } box; */ // ------------- GPU cuDNN --------------- #define checkCUDNN(status) { \ if (status != CUDNN_STATUS_SUCCESS) { \ printf("CUDNN failure\nError: %d - %s \n", status, cudnnGetErrorString(status)); \ getchar(); \ } \ } // 4 layers in 1: convolution, batch-normalization, BIAS and activation void forward_convolutional_layer_gpu_cudnn(layer l, network_state state) { // XNOR-net if (l.xnor) { if (l.align_bit_weights_gpu && l.c >= 32) { //return; cudaError_t status = cudaSuccess; int input_size = l.c*l.h*l.w*l.batch; int m = l.n; int k = l.size*l.size*l.c; int n = l.out_w*l.out_h; //float * a = l.weights_gpu; int ldb_align = l.lda_align; size_t new_ldb = k + (ldb_align - k%ldb_align); // (k / 8 + 1) * 8; size_t t_intput_size = new_ldb * n; size_t t_bit_input_size = t_intput_size / 8;// +1; if (l.c % 32 == 0) { //printf("\n\n l.index = %d, l.w = %d, l.c = %d, l.n = %d, l.stride = %d, l.pad = %d - new XNOR \n", l.index, l.w, l.c, l.n, l.stride, l.pad); //printf("l.align_workspace_size = %d, (l.c * l.w * l.h) = %d \n", l.align_workspace_size, (l.c * l.w * l.h)); int ldb_align = l.lda_align; size_t new_ldb = k + (ldb_align - k%ldb_align); // (k / 8 + 1) * 8; size_t t_intput_size = new_ldb * l.bit_align;// n; size_t t_bit_input_size = t_intput_size / 8;// +1; const int new_c = l.c / 32; repack_input_gpu_bin(state.input, (uint32_t *)l.align_workspace_gpu, l.w, l.h, l.c); im2col_ongpu(l.align_workspace_gpu, new_c, l.h, l.w, l.size, l.stride, l.pad, state.workspace); int new_k = l.size*l.size*l.c / 32; transpose_uint32_gpu((uint32_t *)state.workspace, (uint32_t *)l.transposed_align_workspace_gpu, new_k, n, n, new_ldb); gemm_nn_custom_bin_mean_transposed_gpu(m, n, k, (unsigned char *)l.align_bit_weights_gpu, new_ldb, (unsigned char *)l.transposed_align_workspace_gpu, new_ldb, l.output_gpu, n, l.mean_arr_gpu, l.biases_gpu, l.activation == LEAKY, l.bin_conv_shortcut_in_gpu, l.bin_conv_shortcut_out_gpu); } else { //printf("\n\n l.index = %d, l.w = %d, l.c = %d, l.n = %d, l.stride = %d, l.pad = %d - old XNOR \n", l.index, l.w, l.c, l.n, l.stride, l.pad); int i = 0; im2col_align_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, l.align_workspace_gpu, l.bit_align); float_to_bit_gpu(l.align_workspace_gpu, (unsigned char *)state.workspace, l.align_workspace_size); transpose_bin_gpu((unsigned char *)state.workspace, (unsigned char *)l.transposed_align_workspace_gpu, k, n, l.bit_align, new_ldb, 8); gemm_nn_custom_bin_mean_transposed_gpu(m, n, k, (unsigned char *)l.align_bit_weights_gpu, new_ldb, (unsigned char *)l.transposed_align_workspace_gpu, new_ldb, l.output_gpu, n, l.mean_arr_gpu, l.biases_gpu, l.activation == LEAKY, l.bin_conv_shortcut_in_gpu, l.bin_conv_shortcut_out_gpu); } //add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h); if (l.activation != LINEAR && l.activation != LEAKY) activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation); return; } if (!l.align_bit_weights_gpu) { binarize_weights_gpu(l.weights_gpu, l.n, l.c*l.size*l.size, l.binary_weights_gpu); } l.weights_gpu = l.binary_weights_gpu; binarize_gpu(state.input, l.c*l.h*l.w*l.batch, l.binary_input_gpu); state.input = l.binary_input_gpu; } // blas_kernels.cu 
//fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1); int size = l.inputs * l.batch; float one = 1; float zero = 0; // cuDNN >= v5.1 cudnnConvolutionForward(cudnn_handle(), &one, l.srcTensorDesc, state.input, l.weightDesc, l.weights_gpu, l.convDesc, l.fw_algo, state.workspace, l.workspace_size, &zero, l.dstTensorDesc, l.output_gpu); if (l.batch_normalize) { // blas_kernels.cu //normalize_gpu(l.output_gpu, l.rolling_mean_gpu, l.rolling_variance_gpu, l.batch, l.out_c, l.out_h*l.out_w); //scale_bias_gpu(l.output_gpu, l.scales_gpu, l.batch, l.out_c, l.out_h*l.out_w); } // blas_kernels.cu add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h); // blas_kernels.cu if (l.activation != LINEAR) activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation); } // 4 layers in 1: convolution, batch-normalization, BIAS and activation void forward_convolutional_layer_gpu_cudnn_quantized(layer l, network_state state) { int i; // blas_kernels.cu //fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1); int size = l.inputs * l.batch; cudaError_t status; /* static int once = 1; if (once) { //printf(" l.input_quant_multipler = %f \n", l.input_quant_multipler); once = 0; cuda_convert_f32_to_int8(state.input, size, state.input_int8, l.input_quant_multipler, (256 / 2 - 1)); // 7-bit (1-bit sign) //cuda_convert_int8_to_f32(state.input_int8, size, state.input, 1.0F / l.input_quant_multipler); } else { //printf(" NEXT!!! \n"); //cuda_convert_f32_to_int8(state.input, size, state.input_int8, l.input_quant_multipler, (256 / 2 - 1)); // 7-bit (1-bit sign) cuda_convert_f32_to_int8_nomax(state.input, size, state.input_int8, l.input_quant_multipler); // 7-bit (1-bit sign) } */ //#if(CUDNN_MAJOR >= 7 ) #define INT8CONV //#endif // #if(CUDNN_MAJOR >= 7 ) #ifdef INT8CONV { float one = 1; float zero = 0; // input cudnnTensorDescriptor_t srcTransformDesc; cudnnCreateTensorDescriptor(&srcTransformDesc); cudnnSetTensor4dDescriptor(srcTransformDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_INT8, l.batch, l.c, l.h, l.w); cuda_convert_f32_to_int8(state.input, size, state.input_int8, l.input_quant_multipler, (256 / 2 - 1)); // 7-bit (1-bit sign) //cuda_convert_f32_to_int8_nomax(state.input, size, state.input_int8, l.input_quant_multipler); // 7-bit (1-bit sign) //printf("\n l.input_quant_multipler = %f \n", l.input_quant_multipler); cudnnStatus_t transform_status = cudnnTransformTensor( cudnn_handle(), &one, srcTransformDesc, state.input_int8, //input_init_int8, &zero, l.srcTensorDesc, state.input); checkCUDNN(transform_status); //float ALPHA1 = l.output_multipler / R_MULT; float ALPHA1 = 1 / (l.input_quant_multipler * l.weights_quant_multipler); //float ALPHA2 = 0; //printf(" ALPHA1 = %f \n", ALPHA1); // x w y and z bias alpha1/alpha2 // X_INT8 X_INT8 X_INT8 X_FLOAT X_FLOAT // y = act ( alpha1 * conv(x) + alpha2 * z + bias ) cudnnStatus_t cudnnstat = cudnnConvolutionBiasActivationForward(cudnn_handle(), &ALPHA1, // ALPHA l.srcTensorDesc, state.input, l.weightDesc, l.weights_int8_int8x4_gpu, //l.weights_gpu, l.convDesc, l.fw_algo, state.workspace, l.workspace_size, &zero, // ALPHA2 l.dstTensorDesc, l.output_gpu, l.biasTensorDesc, l.biases_gpu, l.activationDesc, l.dstTensorDesc, l.output_gpu); /* // cuDNN >= v5.1 cudnnStatus_t cudnnstat = cudnnConvolutionForward(cudnn_handle(), &ALPHA1,//&one, l.srcTensorDesc, state.input, //state.input_int8, // state.input, l.weightDesc, l.weights_int8_int8x4_gpu, //l.weights_int8_gpu, //l.weights_gpu, l.convDesc, l.fw_algo, state.workspace, l.workspace_size, &zero, l.dstTensorDesc, 
l.output_gpu); */ //printf(" l.w = %d, l.h = %d, l.c = %d, l.n = %d \n", l.w, l.h, l.c, l.n); if (cudnnstat != CUDNN_STATUS_SUCCESS) { if (cudnnstat == CUDNN_STATUS_ARCH_MISMATCH) { printf("\n Error: CUDNN_STATUS_ARCH_MISMATCH - This GPU doesn't support DP4A (INT8 weights and input) \n"); } else if (cudnnstat == CUDNN_STATUS_NOT_SUPPORTED) { printf("\n Error: CUDNN_STATUS_NOT_SUPPORTED (INT8 weights and input) \n"); } else if (cudnnstat == CUDNN_STATUS_BAD_PARAM) { printf("\n Error: CUDNN_STATUS_BAD_PARAM (INT8 weights and input) \n"); } printf("\n cudnnstat = %d \n", cudnnstat); getchar(); } else { //printf("\n cudnnstat == CUDNN_STATUS_SUCCESS \n"); } //status = cudaMemcpy(l.output, l.output_gpu, sizeof(float)*l.outputs*l.batch, cudaMemcpyDeviceToHost); //for (i = 0; i < l.outputs && i < 100; ++i) printf(" %f, ", l.output[i]); //draw_distribution(l.output, l.outputs*l.batch, "Output"); //add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h); } #else // INT8CONV float one = 1; float zero = 0; // cuDNN >= v5.1 cudnnConvolutionForward(cudnn_handle(), &one, l.srcTensorDesc, state.input, l.weightDesc, l.weights_gpu, l.convDesc, l.fw_algo, state.workspace, l.workspace_size, &zero, l.dstTensorDesc, l.output_gpu); if (l.batch_normalize) { // blas_kernels.cu //normalize_gpu(l.output_gpu, l.rolling_mean_gpu, l.rolling_variance_gpu, l.batch, l.out_c, l.out_h*l.out_w); //scale_bias_gpu(l.output_gpu, l.scales_gpu, l.batch, l.out_c, l.out_h*l.out_w); } // blas_kernels.cu add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h); #endif // INT8CONV // blas_kernels.cu activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation); } // MAX pooling layer void forward_maxpool_layer_gpu_cuda(const layer l, network_state state) { // maxpool_layer_kernels.cu forward_maxpool_layer_gpu(l, state); } // route layer void forward_route_layer_gpu_cuda(const layer l, network_state state) { int i, j; int offset = 0; for (i = 0; i < l.n; ++i) { int index = l.input_layers[i]; float *input = state.net.layers[index].output_gpu; int input_size = l.input_sizes[i]; for (j = 0; j < l.batch; ++j) { // CUDA cudaMemcpy(l.output_gpu + offset + j*l.outputs, input + j*input_size, sizeof(float)*input_size, cudaMemcpyDeviceToDevice); } offset += input_size; } } // reorg layer void forward_reorg_layer_gpu_cuda(layer l, network_state state) { // blas_kernels.cu //reorg_ongpu(state.input, l.w, l.h, l.c, l.batch, l.stride, 1, l.output_gpu); reorg_ongpu(state.input, l.out_w, l.out_h, l.out_c, l.batch, l.stride, 0, l.output_gpu); } // upsample_layer.c void forward_upsample_layer_cuda(const layer l, network_state state) { fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1); //printf(" l.reverse = %d \n", l.reverse); if (l.reverse) { upsample_gpu(l.output_gpu, l.out_w, l.out_h, l.c, l.batch, l.stride, 0, l.scale, state.input); } else { upsample_gpu(state.input, l.w, l.h, l.c, l.batch, l.stride, 1, l.scale, l.output_gpu); } } // shortcut_layer.c void forward_shortcut_layer_cuda(const layer l, network_state state) { //copy_ongpu(l.outputs*l.batch, state.input, 1, l.output_gpu, 1); //shortcut_gpu(l.batch, l.w, l.h, l.c, state.net.layers[l.index].output_gpu, l.out_w, l.out_h, l.out_c, l.output_gpu); //activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation); input_shortcut_gpu(state.input, l.batch, l.w, l.h, l.c, state.net.layers[l.index].output_gpu, l.out_w, l.out_h, l.out_c, l.output_gpu); activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation); } // region layer void 
forward_region_layer_gpu_cuda(const layer l, network_state state) { // blas_kernels.cu flatten_ongpu(state.input, l.h*l.w, l.n*(l.coords + l.classes + 1), l.batch, 1, l.output_gpu); if (l.softmax_tree) { // Yolo 9000 int i; int count = 5; for (i = 0; i < l.softmax_tree->groups; ++i) { int group_size = l.softmax_tree->group_size[i]; // blas_kernels.cu softmax_gpu(l.output_gpu + count, group_size, l.classes + 5, l.w*l.h*l.n*l.batch, 1, l.output_gpu + count); count += group_size; } } else if (l.softmax) { // Yolo v2 // blas_kernels.cu softmax_gpu(l.output_gpu + 5, l.classes, l.classes + 5, l.w*l.h*l.n*l.batch, 1, l.output_gpu + 5); } float *in_cpu = (float *)calloc(l.batch*l.inputs, sizeof(float)); float *truth_cpu = 0; if (state.truth) { int num_truth = l.batch*l.truths; truth_cpu = (float *)calloc(num_truth, sizeof(float)); cudaError_t status = cudaMemcpy(state.truth, truth_cpu, num_truth * sizeof(float), cudaMemcpyDeviceToHost); } cudaError_t status = cudaMemcpy(in_cpu, l.output_gpu, l.batch*l.inputs * sizeof(float), cudaMemcpyDeviceToHost); network_state cpu_state = state; cpu_state.train = state.train; cpu_state.truth = truth_cpu; cpu_state.input = in_cpu; int i, b; int size = l.coords + l.classes + 1; memcpy(l.output, cpu_state.input, l.outputs*l.batch * sizeof(float)); for (b = 0; b < l.batch; ++b) { for (i = 0; i < l.h*l.w*l.n; ++i) { int index = size*i + b*l.outputs; float x = l.output[index + 4]; l.output[index + 4] = 1.0F / (1.0F + expf(-x)); // logistic_activate_cpu(l.output[index + 4]); } } free(cpu_state.input); } // yolo_layer.c Yolo v3 void forward_yolo_layer_cuda(const layer l, network_state state) { copy_ongpu(l.batch*l.inputs, state.input, 1, l.output_gpu, 1); int b, n; for (b = 0; b < l.batch; ++b) { for (n = 0; n < l.n; ++n) { int index = entry_index(l, b, n*l.w*l.h, 0); activate_array_ongpu(l.output_gpu + index, 2 * l.w*l.h, LOGISTIC); index = entry_index(l, b, n*l.w*l.h, 4); activate_array_ongpu(l.output_gpu + index, (1 + l.classes)*l.w*l.h, LOGISTIC); } } cuda_pull_array(l.output_gpu, l.output, l.batch*l.outputs); //return; } void forward_network_gpu_cudnn(network net, network_state state) { state.workspace = net.workspace; int i; for (i = 0; i < net.n; ++i) { state.index = i; layer l = net.layers[i]; if (l.type == CONVOLUTIONAL) { forward_convolutional_layer_gpu_cudnn(l, state); //printf("\n CONVOLUTIONAL \t\t l.size = %d \n", l.size); } else if (l.type == MAXPOOL) { forward_maxpool_layer_gpu_cuda(l, state); //printf("\n MAXPOOL \t\t l.size = %d \n", l.size); } else if (l.type == ROUTE) { forward_route_layer_gpu_cuda(l, state); //printf("\n ROUTE \n"); } else if (l.type == REORG) { forward_reorg_layer_gpu_cuda(l, state); //printf("\n REORG \n"); } else if (l.type == UPSAMPLE) { forward_upsample_layer_cuda(l, state); //printf("\n UPSAMPLE \n"); } else if (l.type == SHORTCUT) { forward_shortcut_layer_cuda(l, state); //printf("\n SHORTCUT \n"); } else if (l.type == YOLO) { forward_yolo_layer_cuda(l, state); //printf("\n YOLO \n"); } else if (l.type == REGION) { forward_region_layer_gpu_cuda(l, state); //printf("\n REGION \n"); } else if (l.type == BLANK) { //printf("\n layer: BLANK - %d \n", i); } else { printf("\n layer: %d \n", l.type); } state.input = l.output_gpu; } } void forward_network_gpu_cudnn_quantized(network net, network_state state) { state.workspace = net.workspace; int i; for (i = 0; i < net.n; ++i) { state.index = i; layer l = net.layers[i]; if (l.type == CONVOLUTIONAL) { //printf("\n %d - CONVOLUTIONAL \t\t l.size = %d \n", i, l.size); //if (l.quantized 
&& i != 80 && i != 92 && i != 104) forward_convolutional_layer_gpu_cudnn_quantized(l, state); // mAP = 0, very strange if (l.quantized) forward_convolutional_layer_gpu_cudnn_quantized(l, state); else forward_convolutional_layer_gpu_cudnn(l, state); } else if (l.type == MAXPOOL) { forward_maxpool_layer_gpu_cuda(l, state); //printf("\n MAXPOOL \t\t l.size = %d \n", l.size); } else if (l.type == ROUTE) { forward_route_layer_gpu_cuda(l, state); //printf("\n ROUTE \n"); } else if (l.type == REORG) { forward_reorg_layer_gpu_cuda(l, state); //printf("\n REORG \n"); } else if (l.type == UPSAMPLE) { forward_upsample_layer_cuda(l, state); //printf("\n UPSAMPLE \n"); } else if (l.type == SHORTCUT) { forward_shortcut_layer_cuda(l, state); //printf("\n SHORTCUT \n"); } else if (l.type == YOLO) { forward_yolo_layer_cuda(l, state); //printf("\n YOLO \n"); } else if (l.type == REGION) { forward_region_layer_gpu_cuda(l, state); //printf("\n REGION \n"); } else { printf("\n layer: %d \n", l.type); } state.input = l.output_gpu; state.input_int8 = l.output_gpu_int8; } } // detect on GPU float *network_predict_gpu_cudnn(network net, float *input) { cudaError_t status = cudaSetDevice(net.gpu_index); //check_error(status); int size = net.layers[0].inputs * net.batch; network_state state; state.index = 0; state.net = net; //status = cudaMalloc((void **)&(state.input), sizeof(float)*size); state.input = net.input_state_gpu; memcpy(net.input_pinned_cpu, input, size * sizeof(float)); status = cudaMemcpy(state.input, net.input_pinned_cpu, sizeof(float)*size, cudaMemcpyHostToDevice); state.truth = 0; state.train = 0; state.delta = 0; forward_network_gpu_cudnn(net, state); // network on GPU //status = cudaFree(state.input); //status = cudaFree(state.input_int8); //float *out = get_network_output_gpu(net); int i; for (i = net.n - 1; i > 0; --i) if (net.layers[i].type != COST) break; layer l = net.layers[i]; if (l.type != REGION && l.type != YOLO) status = cudaMemcpy(l.output, l.output_gpu, l.outputs*l.batch * sizeof(float), cudaMemcpyDeviceToHost); return l.output; } // detect on GPU float *network_predict_gpu_cudnn_quantized(network net, float *input) { cudaError_t status = cudaSetDevice(net.gpu_index); //check_error(status); int size = net.layers[0].inputs * net.batch; network_state state; state.index = 0; state.net = net; status = cudaMalloc((void **)&(state.input), sizeof(float)*size); memcpy(net.input_pinned_cpu, input, size * sizeof(float)); status = cudaMemcpy(state.input, net.input_pinned_cpu, sizeof(float)*size, cudaMemcpyHostToDevice); state.truth = 0; state.train = 0; state.delta = 0; forward_network_gpu_cudnn_quantized(net, state); // network on GPU status = cudaFree(state.input); //status = cudaFree(state.input_int8); //float *out = get_network_output_gpu(net); int i; for (i = net.n - 1; i > 0; --i) if (net.layers[i].type != COST) break; layer l = net.layers[i]; if (l.type != REGION && l.type != YOLO) status = cudaMemcpy(l.output, l.output_gpu, l.outputs*l.batch * sizeof(float), cudaMemcpyDeviceToHost); return l.output; } // init weights and cuDNN for quantized IINT8x4 void init_gpu_int8x4(network net) { cudaError_t status = cudaSetDevice(net.gpu_index); int k; for (k = 0; k < net.n; ++k) { layer &l = net.layers[k]; if (l.type == CONVOLUTIONAL && k > 0) { if (l.weights_int8_gpu == NULL) { size_t const weights_size = l.size*l.size*l.c*l.n; status = cudaMalloc((void **)&(l.weights_int8_gpu), sizeof(int8_t)*weights_size); status = cudaMalloc((void **)&(l.weights_int8_int8x4_gpu), sizeof(int8_t)*weights_size); 
status = cudaMemcpy(l.weights_int8_gpu, l.weights_int8, sizeof(int8_t)*weights_size, cudaMemcpyHostToDevice); // convert weights CUDNN_TENSOR_NCHW -> CUDNN_TENSOR_NCHW_VECT_C cudnnTensorDescriptor_t src_weights_desc; cudnnCreateTensorDescriptor(&src_weights_desc); cudnnSetTensor4dDescriptor(src_weights_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_INT8, l.n, l.c, l.size, l.size); cudnnDataType_t cudnn_data_type = CUDNN_DATA_INT8x4; #if((CUDNN_MAJOR*10 + CUDNN_MINOR) >= 72) //if (l.c % 32 == 0) cudnn_data_type = CUDNN_DATA_INT8x32; // Tensor Cores for INT8 #endif //(CUDNN_MAJOR >= 7.2) cudnnTensorDescriptor_t dst_weights_desc; cudnnCreateTensorDescriptor(&dst_weights_desc); cudnnSetTensor4dDescriptor(dst_weights_desc, CUDNN_TENSOR_NCHW_VECT_C, cudnn_data_type, l.n, l.c, l.size, l.size); float one = 1; float zero = 0; cudnnStatus_t transform_status; transform_status = cudnnTransformTensor( cudnn_handle(), &one, src_weights_desc, l.weights_int8_gpu, &zero, dst_weights_desc, l.weights_int8_int8x4_gpu); checkCUDNN(transform_status); cudnnDestroyTensorDescriptor(src_weights_desc); cudnnDestroyTensorDescriptor(dst_weights_desc); status = cudaMalloc((void **)&(l.biases_quant_gpu), sizeof(float)*l.n); status = cudaMemcpy(l.biases_quant_gpu, l.biases_quant, sizeof(float)*l.n, cudaMemcpyHostToDevice); } } } }
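The darknet helpers called above (cuda_convert_f32_to_int8 and friends) are not included in this record, so the following is only a hedged sketch of the symmetric scale-and-clamp quantization their call sites imply: values are multiplied by l.input_quant_multipler, rounded, and clamped to +/-(256/2 - 1) = 127, while ALPHA1 = 1 / (input multiplier * weights multiplier) rescales the INT8 convolution result back to float. The kernel name f32_to_int8_sym and the multiplier value are ours, not darknet's.

#include <cstdio>
#include <cstdint>
#include <cuda_runtime.h>

__global__ void f32_to_int8_sym(const float *in, int8_t *out, int n,
                                float multiplier, int max_val) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    float q = rintf(in[i] * multiplier);                      // scale, round to nearest
    q = fminf(fmaxf(q, (float)-max_val), (float)max_val);     // clamp to the signed range
    out[i] = (int8_t)q;
}

int main() {
    const int n = 8;
    float h_in[n] = {-2.f, -0.5f, -0.1f, 0.f, 0.1f, 0.5f, 1.f, 2.f};
    const float multiplier = 64.f;   // illustrative quantization multiplier
    int8_t h_out[n];
    float *d_in; int8_t *d_out;
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_out, n * sizeof(int8_t));
    cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);
    f32_to_int8_sym<<<1, 32>>>(d_in, d_out, n, multiplier, 127);
    cudaMemcpy(h_out, d_out, n * sizeof(int8_t), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; ++i)
        printf("%6.2f -> %4d (dequantized %.4f)\n",
               h_in[i], (int)h_out[i], h_out[i] / multiplier);
    cudaFree(d_in); cudaFree(d_out);
    return 0;
}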
c8afef07c63074ab2551b8ae42da19c0255b44ab.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <iostream> #include <device_launch_parameters.h> #include <opencv2/core.hpp> #include <opencv2/opencv.hpp> #include <opencv2/imgproc.hpp> #include <opencv2/highgui.hpp> #include <utilities.hpp> __global__ void median_filter(float *a,float *b, int N, int M, int win_size) { const int row = blockIdx.x * blockDim.x + threadIdx.x; const int col = blockIdx.y * blockDim.y + threadIdx.y; const int factor = win_size / 2; const float elems = win_size * win_size; if (row < (N - factor) && col < (M - factor) && row >= factor && col >= factor) { float sum = 0.f; for (int i = row - factor; i <= row + factor; ++i) { for (int j = col - factor; j <= col + factor; ++j) { sum += a[i*N + j]; } } b[row * N + col] = sum/ elems; } } __global__ void shared_median_filter(float *a, float *b, int N, int M, int win_size) { __shared__ float img_copy[32][32]; const int row = blockIdx.x * blockDim.x + threadIdx.x; const int col = blockIdx.y * blockDim.y + threadIdx.y; const int grid_i = threadIdx.x; const int grid_j = threadIdx.y; const int factor = win_size / 2; const float elems = win_size * win_size; if (row < N && col < M) { img_copy[grid_i][grid_j] = a[row*N + col]; __syncthreads(); if (grid_i < (32 - factor) && grid_j < (32 - factor) && grid_i >= factor && grid_j >= factor) { float sum = 0.f; for (int i = grid_i - factor; i <= grid_i + factor; ++i) { for (int j = grid_j - factor; j <= grid_j + factor; ++j) { sum += img_copy[i][j]; } } b[row * N + col] = sum / elems; } } } // main routine that executes on the host int main(int argc, char* argv[]) { int filter_size; cv::Mat input_img, filtered_img; if (argc > 2) { int size = atoi(argv[1]); if (size % 2) { filter_size = size; } input_img = cv::imread(argv[2], cv::IMREAD_GRAYSCALE); } else { input_img = cv::imread("../../res/ex_sp.png", cv::IMREAD_GRAYSCALE); filter_size = 3; } float *a_h, *a_d, *b_d; cv::imshow("image unfiltered", input_img); input_img.convertTo(input_img, CV_32FC1); filtered_img.create(cv::Size(input_img.rows, input_img.cols), CV_32FC1); size_t size = input_img.rows * input_img.cols * sizeof(float); //alocare host a_h = (float*)malloc(size); for (int i = 0; i < input_img.rows; ++i) { for (int j = 0; j < input_img.cols; ++j) { a_h[i*input_img.rows + j] = input_img.at<float>(i,j); } } //alocare device hipMalloc((void**)&a_d, size); hipMalloc((void**)&b_d, size); //copiere date pe device hipMemcpy(a_d, a_h, size, hipMemcpyHostToDevice); //dimensiuni grid si threads const int thread_x = 32; const int thread_y = 32; const int grid_x = input_img.rows / thread_x + input_img.rows % thread_x; const int grid_y = input_img.cols / thread_y + input_img.cols % thread_y; dim3 grid(grid_x,grid_y,1); dim3 thread(thread_y, thread_y,1); //utilities::timeit([&] {median_filter <<< grid, thread >>> (a_d, b_d, input_img.rows, input_img.cols, filter_size); }); utilities::timeit([&] {shared_median_filter << < grid, thread >> > (a_d, b_d, input_img.rows, input_img.cols, filter_size); }); //copiere data pe host hipMemcpy(a_h, b_d, size, hipMemcpyDeviceToHost); for (int i = 0; i < input_img.rows; ++i) { for (int j = 0; j < input_img.cols; ++j) { filtered_img.at<float>(i, j) = a_h[i*input_img.rows + j]; } } filtered_img.convertTo(filtered_img, CV_8UC1); cv::imshow("image filtered", filtered_img); std::stringstream sstream; sstream << "filter_" << filter_size << ".jpg"; cv::imwrite(sstream.str(), filtered_img); cv::waitKey(0); //cuda cleanup free(a_h); 
hipFree(a_d); hipFree(b_d); return 0; }
c8afef07c63074ab2551b8ae42da19c0255b44ab.cu
#include <cuda.h> #include <cuda_runtime.h> #include <iostream> #include <device_launch_parameters.h> #include <opencv2/core.hpp> #include <opencv2/opencv.hpp> #include <opencv2/imgproc.hpp> #include <opencv2/highgui.hpp> #include <utilities.hpp> __global__ void median_filter(float *a,float *b, int N, int M, int win_size) { const int row = blockIdx.x * blockDim.x + threadIdx.x; const int col = blockIdx.y * blockDim.y + threadIdx.y; const int factor = win_size / 2; const float elems = win_size * win_size; if (row < (N - factor) && col < (M - factor) && row >= factor && col >= factor) { float sum = 0.f; for (int i = row - factor; i <= row + factor; ++i) { for (int j = col - factor; j <= col + factor; ++j) { sum += a[i*N + j]; } } b[row * N + col] = sum/ elems; } } __global__ void shared_median_filter(float *a, float *b, int N, int M, int win_size) { __shared__ float img_copy[32][32]; const int row = blockIdx.x * blockDim.x + threadIdx.x; const int col = blockIdx.y * blockDim.y + threadIdx.y; const int grid_i = threadIdx.x; const int grid_j = threadIdx.y; const int factor = win_size / 2; const float elems = win_size * win_size; if (row < N && col < M) { img_copy[grid_i][grid_j] = a[row*N + col]; __syncthreads(); if (grid_i < (32 - factor) && grid_j < (32 - factor) && grid_i >= factor && grid_j >= factor) { float sum = 0.f; for (int i = grid_i - factor; i <= grid_i + factor; ++i) { for (int j = grid_j - factor; j <= grid_j + factor; ++j) { sum += img_copy[i][j]; } } b[row * N + col] = sum / elems; } } } // main routine that executes on the host int main(int argc, char* argv[]) { int filter_size; cv::Mat input_img, filtered_img; if (argc > 2) { int size = atoi(argv[1]); if (size % 2) { filter_size = size; } input_img = cv::imread(argv[2], cv::IMREAD_GRAYSCALE); } else { input_img = cv::imread("../../res/ex_sp.png", cv::IMREAD_GRAYSCALE); filter_size = 3; } float *a_h, *a_d, *b_d; cv::imshow("image unfiltered", input_img); input_img.convertTo(input_img, CV_32FC1); filtered_img.create(cv::Size(input_img.rows, input_img.cols), CV_32FC1); size_t size = input_img.rows * input_img.cols * sizeof(float); //alocare host a_h = (float*)malloc(size); for (int i = 0; i < input_img.rows; ++i) { for (int j = 0; j < input_img.cols; ++j) { a_h[i*input_img.rows + j] = input_img.at<float>(i,j); } } //alocare device cudaMalloc((void**)&a_d, size); cudaMalloc((void**)&b_d, size); //copiere date pe device cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice); //dimensiuni grid si threads const int thread_x = 32; const int thread_y = 32; const int grid_x = input_img.rows / thread_x + input_img.rows % thread_x; const int grid_y = input_img.cols / thread_y + input_img.cols % thread_y; dim3 grid(grid_x,grid_y,1); dim3 thread(thread_y, thread_y,1); //utilities::timeit([&] {median_filter <<< grid, thread >>> (a_d, b_d, input_img.rows, input_img.cols, filter_size); }); utilities::timeit([&] {shared_median_filter << < grid, thread >> > (a_d, b_d, input_img.rows, input_img.cols, filter_size); }); //copiere data pe host cudaMemcpy(a_h, b_d, size, cudaMemcpyDeviceToHost); for (int i = 0; i < input_img.rows; ++i) { for (int j = 0; j < input_img.cols; ++j) { filtered_img.at<float>(i, j) = a_h[i*input_img.rows + j]; } } filtered_img.convertTo(filtered_img, CV_8UC1); cv::imshow("image filtered", filtered_img); std::stringstream sstream; sstream << "filter_" << filter_size << ".jpg"; cv::imwrite(sstream.str(), filtered_img); cv::waitKey(0); //cuda cleanup free(a_h); cudaFree(a_d); cudaFree(b_d); return 0; }
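An editorial note on this pair: despite their names, median_filter and shared_median_filter above average the window (sum / win_size^2), i.e. they are box/mean filters, and they index with the single stride N (the row count), so they implicitly assume a square image. A true median filter sorts the window and takes the middle element. The sketch below is our own illustration for a fixed 3x3 window (row-major rows x cols layout), not code from this pair.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void median3x3(const float *in, float *out, int rows, int cols) {
    int r = blockIdx.y * blockDim.y + threadIdx.y;
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (r < 1 || c < 1 || r >= rows - 1 || c >= cols - 1) return;   // leave the border untouched
    float w[9];
    int k = 0;
    for (int dr = -1; dr <= 1; ++dr)                                // gather the 3x3 window
        for (int dc = -1; dc <= 1; ++dc)
            w[k++] = in[(r + dr) * cols + (c + dc)];
    for (int i = 1; i < 9; ++i) {                                   // insertion sort, 9 elements
        float v = w[i];
        int j = i - 1;
        while (j >= 0 && w[j] > v) { w[j + 1] = w[j]; --j; }
        w[j + 1] = v;
    }
    out[r * cols + c] = w[4];                                       // median = 5th smallest
}

int main() {
    const int rows = 8, cols = 8;
    float h_in[rows * cols], h_out[rows * cols] = {0};
    for (int i = 0; i < rows * cols; ++i)
        h_in[i] = (i % 7 == 0) ? 255.f : 10.f;                      // sparse impulse "noise"
    float *d_in, *d_out;
    cudaMalloc(&d_in, sizeof(h_in));
    cudaMalloc(&d_out, sizeof(h_out));
    cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);
    cudaMemcpy(d_out, h_out, sizeof(h_out), cudaMemcpyHostToDevice);
    dim3 block(8, 8), grid(1, 1);
    median3x3<<<grid, block>>>(d_in, d_out, rows, cols);
    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
    printf("center pixel after 3x3 median: %.1f (impulses suppressed)\n", h_out[4 * cols + 4]);
    cudaFree(d_in); cudaFree(d_out);
    return 0;
}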
ff19125b74c1478aea6c42999522765cd158b64f.hip
// !!! This is a file automatically generated by hipify!!! #include <math.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <hip/hip_runtime.h> #include <cusparse_v2.h> #include "rocblas.h" #include <hiprand/hiprand.h> #include <helper_functions.h> #include <helper_cuda.h> #include "mex.h" #include "kcDefs.h" //see for info on anything starting with KC_ #include "kcArrayFunctions.h" //rough summing kernel (does not need to be efficient) __global__ void kcSumLangevinVars(KC_FP_TYPE * der, KC_FP_TYPE * der_sum, KC_FP_TYPE * G, KC_FP_TYPE * G_sum, KC_FP_TYPE * ll, KC_FP_TYPE * ll_sum, int * mBlkIdx, int NT, int NH, KC_FP_TYPE * gPrior, KC_FP_TYPE * lPrior) { int nsum = blockIdx.x*blockDim.x+threadIdx.x; if(nsum == 0) { for(int idx = 0; idx < NH+2; idx++){ der_sum[idx]=lPrior[idx]; } for(int jj = 0; jj < NH+2; jj++){ for(int idx = 0; idx<NT; idx++){ der_sum[jj] += der[jj*NT+idx]; } } } else if(nsum == 1) { for(int idx = 0; idx < NH+2; idx++) { for(int idx2 = 0; idx2 < NH+2; idx2++) { G_sum[idx+idx2*(NH+2)] = 0; G_sum[idx+idx2*(NH+2)] = gPrior[idx*(NH+2)+idx2]; } } for(int jj = 0; jj < NH+2; jj++) { for(int kk = 0; kk < NH+2; kk++) { for(int idx =0; idx < NT; idx++) { G_sum[jj*(NH+2)+kk] -= G[idx+(jj*(NH+2)+kk)*NT]; } } } } else if(nsum == 2) { ll_sum[0] = 0; for(int idx = 0; idx < NT; idx++) { ll_sum[0] += ll[idx]; } } } //derivates of firing rate function w.r.t. gamma (assuming fixed latent variables) __device__ KC_FP_TYPE h(KC_FP_TYPE lambda, KC_FP_TYPE gamma, KC_FP_TYPE dt, KC_FP_TYPE sh, KC_FP_TYPE bias, KC_FP_TYPE log_power) { KC_FP_TYPE logex = KC_MAX(KC_MINN,(gamma*lambda>100)?(gamma*lambda):KC_MIN(log1p(exp(lambda*gamma)),KC_MAXN)); return KC_MIN((KC_POW(logex*1.00000,log_power)+bias)*KC_EXP(sh)*dt,KC_MAXN); } __device__ KC_FP_TYPE dhg(KC_FP_TYPE lambda, KC_FP_TYPE gamma, KC_FP_TYPE dt, KC_FP_TYPE sh, KC_FP_TYPE yh, KC_FP_TYPE log_power) { KC_FP_TYPE logex = KC_MAX(KC_MINN,(gamma*lambda>100)?(gamma*lambda):KC_MIN(log1p(exp(lambda*gamma)),KC_MAXN)); KC_FP_TYPE log_der = lambda/(1+KC_MIN(KC_MAXN,KC_MAX(exp(-lambda*gamma),KC_MINN))); KC_FP_TYPE der = log_power*KC_POW(logex*1.00000,log_power-1.00)*log_der; return der*dt*KC_EXP(sh); } __device__ KC_FP_TYPE dhs(KC_FP_TYPE lambda, KC_FP_TYPE gamma, KC_FP_TYPE dt, KC_FP_TYPE sh, KC_FP_TYPE yh, KC_FP_TYPE bias, KC_FP_TYPE log_power) { KC_FP_TYPE logex = KC_MAX(KC_MINN,(gamma*lambda>100)?(gamma*lambda):KC_MIN(log1p(exp(lambda*gamma)),KC_MAXN)); KC_FP_TYPE fr = KC_MIN((KC_POW(logex*1.00000,log_power)+bias),KC_MAXN); return dt*yh*KC_EXP(sh)*fr; } // computes log p(single trial | gamma, fixed lambdas, spike history) __global__ void kcBoundaryLikelihoodTrialHist(KC_FP_TYPE * y, KC_FP_TYPE * spe, KC_FP_TYPE * lambdas, int * crossingTimes, int * mBlkIdx, KC_FP_TYPE g, KC_FP_TYPE dt, int NT, KC_FP_TYPE * llSum, KC_FP_TYPE * trialSum, KC_FP_TYPE * trialSumRiemann, KC_FP_TYPE * h_filt, KC_FP_TYPE * y_hist, int NH, KC_FP_TYPE bias, KC_FP_TYPE log_power) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if(idx < NT) { for(int jj = 0; jj<NH+2; jj++){ trialSum[idx+jj*NT]=0; for(int kk = 0; kk<NH+2; kk++){ trialSumRiemann[idx+(jj*(NH+2)+kk)*NT]=0; } } llSum[idx] = 0; for(int ii = mBlkIdx[idx]; ii < mBlkIdx[idx+1]; ii++) { KC_FP_TYPE trueLambda = fmin(1, ((ii-mBlkIdx[idx]) < crossingTimes[idx])?lambdas[ii]:1); //KC_FP_TYPE trueLambda = fmin(1, lambdas[ii]); KC_FP_TYPE sh = spe[ii]; KC_FP_TYPE r = h(trueLambda,g,1,sh,bias,log_power); llSum[idx] += y[ii]*(KC_LOG(r)+KC_LOG(dt)) - dt*r - KC_GAMMALN(y[ii]+1.0); for(int jj = 0; jj < NH+2 ; jj++) { 
KC_FP_TYPE yh1 = 0; KC_FP_TYPE dr = 0; // if index is one of the first NH indices of y, the spike history depends on spikes in the time before the analyzed spike train y // in that case, we want the ii - jj spike of the y history if(jj < NH && ii<(mBlkIdx[idx]+jj+1)) { yh1 = y_hist[NH*(idx+1) + (ii-mBlkIdx[idx]) - jj-1]; dr = dhs(trueLambda,g,1,sh,yh1,bias,log_power); } else if(jj < NH) { yh1 = y[ii-jj-1]; dr = dhs(trueLambda,g,1,sh,yh1,bias,log_power); } else if(jj == NH) { yh1 = trueLambda; dr = dhg(trueLambda,g,1,sh,yh1,log_power); } else if(jj == NH+1) { dr = KC_EXP(sh); } trialSum[idx+jj*NT] += (y[ii]/r-dt)*dr; //for(int kk = jj+1; kk < NH+2; kk++) for(int kk = 0; kk < NH+2; kk++) { KC_FP_TYPE yh2 = 0; KC_FP_TYPE dr2 = 0; // if index is one of the first NH indices of y, the spike history depends on spikes in the time before the analyzed spike train y // in that case, we want the ii - jj spike of the y history if(kk < NH && ii<(mBlkIdx[idx]+kk+1)) { yh2 = y_hist[NH*(idx+1) + (ii-mBlkIdx[idx]) - kk -1]; dr2 = dhs(trueLambda,g,1,sh,yh2,bias,log_power); } else if(kk < NH) { yh2 = y[ii-kk-1]; dr2 = dhs(trueLambda,g,1,sh,yh2,bias,log_power); } else if(kk == NH) { yh2 = trueLambda; dr2 = dhg(trueLambda,g,1,sh,yh2,log_power); } else if(kk == NH+1) { dr2 = KC_EXP(sh); } trialSumRiemann[idx+(NH+2)*NT*jj+NT*kk] += -1*dt*dr*dr2/r; } } } } } //Computes the the log probability of a set of spike trains under the ramping model given a fixed set of latent variable // as a function of \gamma (the bound height) along with first/second derivates w.r.t. \gamma //args // 0 = lambda (latent variables, on GPU. Same size as y) // 1 = auxillary variable - threshold crossing time (latent variable boundary crossing time, on GPU. vector length number of trials: NT) // 2 = y (observations, on GPU) // 3 = trIdx (array that accesses the beta value used at each timepoint, y being indexed at 0. 
Includes final value that should be length of y) // 4 = g (absorbing boundary effective height) // 5 = spe (spike history effect, TT x 1) // 6 = dt (bin size in seconds) // 7 = gPrior (Fisher information of log prior probability of filters and gamma) // 8 = spike history filters // 9 = spike history (spikes before start of trials, NH*NT x 1) // 10 = lPrior (derivative of log prior probability of filters and gamma) // 11 = bias // 12 = power // //outputs (left-hand side) // 0 = log p(y|lambdas,gamma) // 1 = d/dg log p(y|lambdas,gamma) // 2 = d^2/d^2g log p(y|lambdas,gamma) void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { hipError_t ce; //loads up trial information unsigned int TT = kcGetArrayNumEl(prhs[0]); int * crossingTimes = kcGetArrayDataInt(prhs[1]); KC_FP_TYPE * y = kcGetArrayData(prhs[2],TT); int * trIdx = kcGetArrayDataInt(prhs[3]); unsigned int NT = kcGetArrayNumEl(prhs[3])-1; KC_FP_TYPE dt = mxGetScalar(prhs[6]); //loads gamma and latent variables KC_FP_TYPE g = mxGetScalar(prhs[4]); KC_FP_TYPE * lambda = kcGetArrayData(prhs[0]); //loads spike history effect KC_FP_TYPE * spe = kcGetArrayData(prhs[5],TT); int NH = mxGetNumberOfElements(prhs[8]); KC_FP_TYPE bias = mxGetScalar(prhs[11]); KC_FP_TYPE log_power = mxGetScalar(prhs[12]); //loads Fisher information prior if(mxGetClassID(prhs[7]) != KC_FP_TYPE_MATLAB) { mexErrMsgTxt("Prior matrix input wrong floating point type (kcLangevinStep)!"); } KC_FP_TYPE * gPrior; checkCudaErrors(hipMalloc((void**)&gPrior,sizeof(KC_FP_TYPE)*(NH+2)*(NH+2))); checkCudaErrors(hipMemcpy(gPrior,(KC_FP_TYPE*)mxGetPr(prhs[7]),sizeof(KC_FP_TYPE)*((NH+2)*(NH+2)),hipMemcpyHostToDevice)); //loads derivative of log prior probability of parameters if(mxGetClassID(prhs[10]) != KC_FP_TYPE_MATLAB) { mexErrMsgTxt("Prior matrix input wrong floating point type (kcLangevinStep)!"); } KC_FP_TYPE * lPrior; checkCudaErrors(hipMalloc((void**)&lPrior,sizeof(KC_FP_TYPE)*(NH+2))); checkCudaErrors(hipMemcpy(lPrior,(KC_FP_TYPE*)mxGetPr(prhs[10]),sizeof(KC_FP_TYPE)*(NH+2),hipMemcpyHostToDevice)); //loads filter values KC_FP_TYPE * h_filt; checkCudaErrors(hipMalloc((void**)&h_filt,sizeof(KC_FP_TYPE)*NH)); checkCudaErrors(hipMemcpy(h_filt,(KC_FP_TYPE*)mxGetPr(prhs[8]),sizeof(KC_FP_TYPE)*NH,hipMemcpyHostToDevice)); //loads spike history before trials KC_FP_TYPE * y_hist = kcGetArrayData(prhs[9],NH*NT); //sets up space for computations on GPU KC_FP_TYPE * der_log_p_y; checkCudaErrors(hipMalloc((void**)&der_log_p_y,sizeof(KC_FP_TYPE)*(NT)*(NH+2))); KC_FP_TYPE * der_log_p_y_sum; checkCudaErrors(hipMalloc((void**)&der_log_p_y_sum,sizeof(KC_FP_TYPE)*(NH+2)*1)); KC_FP_TYPE * log_p_y; checkCudaErrors(hipMalloc((void**)&log_p_y,sizeof(KC_FP_TYPE)*NT)); KC_FP_TYPE * log_p_y_sum; checkCudaErrors(hipMalloc((void**)&log_p_y_sum,sizeof(KC_FP_TYPE)*1)); KC_FP_TYPE * G_log_p_y1; checkCudaErrors(hipMalloc((void**)&G_log_p_y1,sizeof(KC_FP_TYPE)*(NT)*(NH+2)*(NH+2))); KC_FP_TYPE * G_log_p_y_sum; checkCudaErrors(hipMalloc((void**)&G_log_p_y_sum,sizeof(KC_FP_TYPE)*(NH+2)*(NH+2))); //sets up CUDA variables int blockSize = 2; int numBlocks = (int)NT/(int)blockSize + ((NT%blockSize==0)?0:1); //gets each trials likelihood + derivatives of filter hipLaunchKernelGGL(( kcBoundaryLikelihoodTrialHist), dim3(numBlocks),dim3(blockSize) , 0, 0, y,spe,lambda,crossingTimes,trIdx,g,dt, NT,log_p_y,der_log_p_y,G_log_p_y1,h_filt,y_hist,NH,bias,log_power); checkCudaErrors(hipDeviceSynchronize()); //sums up all the trials' likelihoods and derivatives with respect to gamma int nBlocksC = 3; 
int blockSizeC = 1; hipLaunchKernelGGL(( kcSumLangevinVars) , dim3(nBlocksC),dim3(blockSizeC) , 0, 0, der_log_p_y, der_log_p_y_sum, G_log_p_y1, G_log_p_y_sum, log_p_y, log_p_y_sum, trIdx, NT, NH, gPrior, lPrior); checkCudaErrors(hipDeviceSynchronize()); //pushes answers back to MATLAB if(nlhs > 0) { plhs[0] = mxCreateNumericMatrix(1,1,KC_FP_TYPE_MATLAB,mxREAL); checkCudaErrors(hipMemcpy((KC_FP_TYPE*)mxGetPr(plhs[0]),log_p_y_sum,sizeof(KC_FP_TYPE)*1,hipMemcpyDeviceToHost)); } if(nlhs > 1) { plhs[1] = mxCreateNumericMatrix(NH+2,1,KC_FP_TYPE_MATLAB,mxREAL); checkCudaErrors(hipMemcpy((KC_FP_TYPE*)mxGetPr(plhs[1]),der_log_p_y_sum,sizeof(KC_FP_TYPE)*(NH+2)*(1),hipMemcpyDeviceToHost)); } if(nlhs > 2) { plhs[2] = mxCreateNumericMatrix(NH+2,NH+2,KC_FP_TYPE_MATLAB,mxREAL); checkCudaErrors(hipMemcpy((KC_FP_TYPE*)mxGetPr(plhs[2]),G_log_p_y_sum,sizeof(KC_FP_TYPE)*(NH+2)*(NH+2),hipMemcpyDeviceToHost)); } //clears up GPU variables checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipFree(log_p_y)); checkCudaErrors(hipFree(log_p_y_sum)); checkCudaErrors(hipFree(der_log_p_y)); checkCudaErrors(hipFree(der_log_p_y_sum)); checkCudaErrors(hipFree(G_log_p_y1)); checkCudaErrors(hipFree(G_log_p_y_sum)); checkCudaErrors(hipFree(h_filt)); checkCudaErrors(hipFree(lPrior)); checkCudaErrors(hipFree(gPrior)); ce = hipDeviceSynchronize(); if(ce != hipSuccess) { mexPrintf("Error at the end of kcLangevinStep.cu "); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA errors"); } }
ff19125b74c1478aea6c42999522765cd158b64f.cu
#include <math.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <cuda_runtime.h> #include <cusparse_v2.h> #include "cublas_v2.h" #include <curand.h> #include <helper_functions.h> #include <helper_cuda.h> #include "mex.h" #include "kcDefs.h" //see for info on anything starting with KC_ #include "kcArrayFunctions.h" //rough summing kernel (does not need to be efficient) __global__ void kcSumLangevinVars(KC_FP_TYPE * der, KC_FP_TYPE * der_sum, KC_FP_TYPE * G, KC_FP_TYPE * G_sum, KC_FP_TYPE * ll, KC_FP_TYPE * ll_sum, int * mBlkIdx, int NT, int NH, KC_FP_TYPE * gPrior, KC_FP_TYPE * lPrior) { int nsum = blockIdx.x*blockDim.x+threadIdx.x; if(nsum == 0) { for(int idx = 0; idx < NH+2; idx++){ der_sum[idx]=lPrior[idx]; } for(int jj = 0; jj < NH+2; jj++){ for(int idx = 0; idx<NT; idx++){ der_sum[jj] += der[jj*NT+idx]; } } } else if(nsum == 1) { for(int idx = 0; idx < NH+2; idx++) { for(int idx2 = 0; idx2 < NH+2; idx2++) { G_sum[idx+idx2*(NH+2)] = 0; G_sum[idx+idx2*(NH+2)] = gPrior[idx*(NH+2)+idx2]; } } for(int jj = 0; jj < NH+2; jj++) { for(int kk = 0; kk < NH+2; kk++) { for(int idx =0; idx < NT; idx++) { G_sum[jj*(NH+2)+kk] -= G[idx+(jj*(NH+2)+kk)*NT]; } } } } else if(nsum == 2) { ll_sum[0] = 0; for(int idx = 0; idx < NT; idx++) { ll_sum[0] += ll[idx]; } } } //derivates of firing rate function w.r.t. gamma (assuming fixed latent variables) __device__ KC_FP_TYPE h(KC_FP_TYPE lambda, KC_FP_TYPE gamma, KC_FP_TYPE dt, KC_FP_TYPE sh, KC_FP_TYPE bias, KC_FP_TYPE log_power) { KC_FP_TYPE logex = KC_MAX(KC_MINN,(gamma*lambda>100)?(gamma*lambda):KC_MIN(log1p(exp(lambda*gamma)),KC_MAXN)); return KC_MIN((KC_POW(logex*1.00000,log_power)+bias)*KC_EXP(sh)*dt,KC_MAXN); } __device__ KC_FP_TYPE dhg(KC_FP_TYPE lambda, KC_FP_TYPE gamma, KC_FP_TYPE dt, KC_FP_TYPE sh, KC_FP_TYPE yh, KC_FP_TYPE log_power) { KC_FP_TYPE logex = KC_MAX(KC_MINN,(gamma*lambda>100)?(gamma*lambda):KC_MIN(log1p(exp(lambda*gamma)),KC_MAXN)); KC_FP_TYPE log_der = lambda/(1+KC_MIN(KC_MAXN,KC_MAX(exp(-lambda*gamma),KC_MINN))); KC_FP_TYPE der = log_power*KC_POW(logex*1.00000,log_power-1.00)*log_der; return der*dt*KC_EXP(sh); } __device__ KC_FP_TYPE dhs(KC_FP_TYPE lambda, KC_FP_TYPE gamma, KC_FP_TYPE dt, KC_FP_TYPE sh, KC_FP_TYPE yh, KC_FP_TYPE bias, KC_FP_TYPE log_power) { KC_FP_TYPE logex = KC_MAX(KC_MINN,(gamma*lambda>100)?(gamma*lambda):KC_MIN(log1p(exp(lambda*gamma)),KC_MAXN)); KC_FP_TYPE fr = KC_MIN((KC_POW(logex*1.00000,log_power)+bias),KC_MAXN); return dt*yh*KC_EXP(sh)*fr; } // computes log p(single trial | gamma, fixed lambdas, spike history) __global__ void kcBoundaryLikelihoodTrialHist(KC_FP_TYPE * y, KC_FP_TYPE * spe, KC_FP_TYPE * lambdas, int * crossingTimes, int * mBlkIdx, KC_FP_TYPE g, KC_FP_TYPE dt, int NT, KC_FP_TYPE * llSum, KC_FP_TYPE * trialSum, KC_FP_TYPE * trialSumRiemann, KC_FP_TYPE * h_filt, KC_FP_TYPE * y_hist, int NH, KC_FP_TYPE bias, KC_FP_TYPE log_power) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if(idx < NT) { for(int jj = 0; jj<NH+2; jj++){ trialSum[idx+jj*NT]=0; for(int kk = 0; kk<NH+2; kk++){ trialSumRiemann[idx+(jj*(NH+2)+kk)*NT]=0; } } llSum[idx] = 0; for(int ii = mBlkIdx[idx]; ii < mBlkIdx[idx+1]; ii++) { KC_FP_TYPE trueLambda = fmin(1, ((ii-mBlkIdx[idx]) < crossingTimes[idx])?lambdas[ii]:1); //KC_FP_TYPE trueLambda = fmin(1, lambdas[ii]); KC_FP_TYPE sh = spe[ii]; KC_FP_TYPE r = h(trueLambda,g,1,sh,bias,log_power); llSum[idx] += y[ii]*(KC_LOG(r)+KC_LOG(dt)) - dt*r - KC_GAMMALN(y[ii]+1.0); for(int jj = 0; jj < NH+2 ; jj++) { KC_FP_TYPE yh1 = 0; KC_FP_TYPE dr = 0; // if index is one of the 
first NH indices of y, the spike history depends on spikes in the time before the analyzed spike train y // in that case, we want the ii - jj spike of the y history if(jj < NH && ii<(mBlkIdx[idx]+jj+1)) { yh1 = y_hist[NH*(idx+1) + (ii-mBlkIdx[idx]) - jj-1]; dr = dhs(trueLambda,g,1,sh,yh1,bias,log_power); } else if(jj < NH) { yh1 = y[ii-jj-1]; dr = dhs(trueLambda,g,1,sh,yh1,bias,log_power); } else if(jj == NH) { yh1 = trueLambda; dr = dhg(trueLambda,g,1,sh,yh1,log_power); } else if(jj == NH+1) { dr = KC_EXP(sh); } trialSum[idx+jj*NT] += (y[ii]/r-dt)*dr; //for(int kk = jj+1; kk < NH+2; kk++) for(int kk = 0; kk < NH+2; kk++) { KC_FP_TYPE yh2 = 0; KC_FP_TYPE dr2 = 0; // if index is one of the first NH indices of y, the spike history depends on spikes in the time before the analyzed spike train y // in that case, we want the ii - jj spike of the y history if(kk < NH && ii<(mBlkIdx[idx]+kk+1)) { yh2 = y_hist[NH*(idx+1) + (ii-mBlkIdx[idx]) - kk -1]; dr2 = dhs(trueLambda,g,1,sh,yh2,bias,log_power); } else if(kk < NH) { yh2 = y[ii-kk-1]; dr2 = dhs(trueLambda,g,1,sh,yh2,bias,log_power); } else if(kk == NH) { yh2 = trueLambda; dr2 = dhg(trueLambda,g,1,sh,yh2,log_power); } else if(kk == NH+1) { dr2 = KC_EXP(sh); } trialSumRiemann[idx+(NH+2)*NT*jj+NT*kk] += -1*dt*dr*dr2/r; } } } } } //Computes the the log probability of a set of spike trains under the ramping model given a fixed set of latent variable // as a function of \gamma (the bound height) along with first/second derivates w.r.t. \gamma //args // 0 = lambda (latent variables, on GPU. Same size as y) // 1 = auxillary variable - threshold crossing time (latent variable boundary crossing time, on GPU. vector length number of trials: NT) // 2 = y (observations, on GPU) // 3 = trIdx (array that accesses the beta value used at each timepoint, y being indexed at 0. 
Includes final value that should be length of y) // 4 = g (absorbing boundary effective height) // 5 = spe (spike history effect, TT x 1) // 6 = dt (bin size in seconds) // 7 = gPrior (Fisher information of log prior probability of filters and gamma) // 8 = spike history filters // 9 = spike history (spikes before start of trials, NH*NT x 1) // 10 = lPrior (derivative of log prior probability of filters and gamma) // 11 = bias // 12 = power // //outputs (left-hand side) // 0 = log p(y|lambdas,gamma) // 1 = d/dg log p(y|lambdas,gamma) // 2 = d^2/d^2g log p(y|lambdas,gamma) void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { cudaError_t ce; //loads up trial information unsigned int TT = kcGetArrayNumEl(prhs[0]); int * crossingTimes = kcGetArrayDataInt(prhs[1]); KC_FP_TYPE * y = kcGetArrayData(prhs[2],TT); int * trIdx = kcGetArrayDataInt(prhs[3]); unsigned int NT = kcGetArrayNumEl(prhs[3])-1; KC_FP_TYPE dt = mxGetScalar(prhs[6]); //loads gamma and latent variables KC_FP_TYPE g = mxGetScalar(prhs[4]); KC_FP_TYPE * lambda = kcGetArrayData(prhs[0]); //loads spike history effect KC_FP_TYPE * spe = kcGetArrayData(prhs[5],TT); int NH = mxGetNumberOfElements(prhs[8]); KC_FP_TYPE bias = mxGetScalar(prhs[11]); KC_FP_TYPE log_power = mxGetScalar(prhs[12]); //loads Fisher information prior if(mxGetClassID(prhs[7]) != KC_FP_TYPE_MATLAB) { mexErrMsgTxt("Prior matrix input wrong floating point type (kcLangevinStep)!"); } KC_FP_TYPE * gPrior; checkCudaErrors(cudaMalloc((void**)&gPrior,sizeof(KC_FP_TYPE)*(NH+2)*(NH+2))); checkCudaErrors(cudaMemcpy(gPrior,(KC_FP_TYPE*)mxGetPr(prhs[7]),sizeof(KC_FP_TYPE)*((NH+2)*(NH+2)),cudaMemcpyHostToDevice)); //loads derivative of log prior probability of parameters if(mxGetClassID(prhs[10]) != KC_FP_TYPE_MATLAB) { mexErrMsgTxt("Prior matrix input wrong floating point type (kcLangevinStep)!"); } KC_FP_TYPE * lPrior; checkCudaErrors(cudaMalloc((void**)&lPrior,sizeof(KC_FP_TYPE)*(NH+2))); checkCudaErrors(cudaMemcpy(lPrior,(KC_FP_TYPE*)mxGetPr(prhs[10]),sizeof(KC_FP_TYPE)*(NH+2),cudaMemcpyHostToDevice)); //loads filter values KC_FP_TYPE * h_filt; checkCudaErrors(cudaMalloc((void**)&h_filt,sizeof(KC_FP_TYPE)*NH)); checkCudaErrors(cudaMemcpy(h_filt,(KC_FP_TYPE*)mxGetPr(prhs[8]),sizeof(KC_FP_TYPE)*NH,cudaMemcpyHostToDevice)); //loads spike history before trials KC_FP_TYPE * y_hist = kcGetArrayData(prhs[9],NH*NT); //sets up space for computations on GPU KC_FP_TYPE * der_log_p_y; checkCudaErrors(cudaMalloc((void**)&der_log_p_y,sizeof(KC_FP_TYPE)*(NT)*(NH+2))); KC_FP_TYPE * der_log_p_y_sum; checkCudaErrors(cudaMalloc((void**)&der_log_p_y_sum,sizeof(KC_FP_TYPE)*(NH+2)*1)); KC_FP_TYPE * log_p_y; checkCudaErrors(cudaMalloc((void**)&log_p_y,sizeof(KC_FP_TYPE)*NT)); KC_FP_TYPE * log_p_y_sum; checkCudaErrors(cudaMalloc((void**)&log_p_y_sum,sizeof(KC_FP_TYPE)*1)); KC_FP_TYPE * G_log_p_y1; checkCudaErrors(cudaMalloc((void**)&G_log_p_y1,sizeof(KC_FP_TYPE)*(NT)*(NH+2)*(NH+2))); KC_FP_TYPE * G_log_p_y_sum; checkCudaErrors(cudaMalloc((void**)&G_log_p_y_sum,sizeof(KC_FP_TYPE)*(NH+2)*(NH+2))); //sets up CUDA variables int blockSize = 2; int numBlocks = (int)NT/(int)blockSize + ((NT%blockSize==0)?0:1); //gets each trials likelihood + derivatives of filter kcBoundaryLikelihoodTrialHist<<< numBlocks,blockSize >>>(y,spe,lambda,crossingTimes,trIdx,g,dt, NT,log_p_y,der_log_p_y,G_log_p_y1,h_filt,y_hist,NH,bias,log_power); checkCudaErrors(cudaDeviceSynchronize()); //sums up all the trials' likelihoods and derivatives with respect to gamma int nBlocksC = 3; int blockSizeC = 1; 
kcSumLangevinVars <<< nBlocksC,blockSizeC >>> (der_log_p_y, der_log_p_y_sum, G_log_p_y1, G_log_p_y_sum, log_p_y, log_p_y_sum, trIdx, NT, NH, gPrior, lPrior); checkCudaErrors(cudaDeviceSynchronize()); //pushes answers back to MATLAB if(nlhs > 0) { plhs[0] = mxCreateNumericMatrix(1,1,KC_FP_TYPE_MATLAB,mxREAL); checkCudaErrors(cudaMemcpy((KC_FP_TYPE*)mxGetPr(plhs[0]),log_p_y_sum,sizeof(KC_FP_TYPE)*1,cudaMemcpyDeviceToHost)); } if(nlhs > 1) { plhs[1] = mxCreateNumericMatrix(NH+2,1,KC_FP_TYPE_MATLAB,mxREAL); checkCudaErrors(cudaMemcpy((KC_FP_TYPE*)mxGetPr(plhs[1]),der_log_p_y_sum,sizeof(KC_FP_TYPE)*(NH+2)*(1),cudaMemcpyDeviceToHost)); } if(nlhs > 2) { plhs[2] = mxCreateNumericMatrix(NH+2,NH+2,KC_FP_TYPE_MATLAB,mxREAL); checkCudaErrors(cudaMemcpy((KC_FP_TYPE*)mxGetPr(plhs[2]),G_log_p_y_sum,sizeof(KC_FP_TYPE)*(NH+2)*(NH+2),cudaMemcpyDeviceToHost)); } //clears up GPU variables checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaFree(log_p_y)); checkCudaErrors(cudaFree(log_p_y_sum)); checkCudaErrors(cudaFree(der_log_p_y)); checkCudaErrors(cudaFree(der_log_p_y_sum)); checkCudaErrors(cudaFree(G_log_p_y1)); checkCudaErrors(cudaFree(G_log_p_y_sum)); checkCudaErrors(cudaFree(h_filt)); checkCudaErrors(cudaFree(lPrior)); checkCudaErrors(cudaFree(gPrior)); ce = cudaDeviceSynchronize(); if(ce != cudaSuccess) { mexPrintf("Error at the end of kcLangevinStep.cu "); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA errors"); } }
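The host stub in both versions above sizes its grid as numBlocks = (int)NT/(int)blockSize + ((NT%blockSize==0)?0:1), i.e. one block per blockSize trials rounded up, and relies on the if(idx < NT) guard inside kcBoundaryLikelihoodTrialHist to mask the surplus threads in the last block. A minimal standalone sketch of that ceil-division launch calculation follows; the helper name ceilDiv is illustrative only and not part of the original source.

#include <cstdio>

// Round-up integer division: number of fixed-size blocks needed to cover n items.
static int ceilDiv(int n, int blockSize)
{
    return n / blockSize + ((n % blockSize == 0) ? 0 : 1); // equivalent to (n + blockSize - 1) / blockSize
}

int main()
{
    int NT = 37;       // e.g. an odd number of trials (illustrative value)
    int blockSize = 2; // threads per block, as in the host stub above
    printf("NT=%d, blockSize=%d -> numBlocks=%d (covers %d slots)\n",
           NT, blockSize, ceilDiv(NT, blockSize), ceilDiv(NT, blockSize) * blockSize);
    return 0;
}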
aa2de045f4cb478b151fa29eb906d6bfbef367d0.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>

void checkCUDAError(const char*);

// Assumes N is a power of two.
#define N 64
#define ARR_SIZE (N*N)
#define NUM_BLOCKS (N/4)
#define THREADS_PER_BLOCK (N*2)

// The matrix dimension is passed as "n" so it does not collide with the N macro above.
__global__ void matrix_mult(int* M_d, int* N_d, int* P_d, int n)
{
    int idx = threadIdx.x + blockIdx.x*blockDim.x;
    int idy = threadIdx.y + blockIdx.y*blockDim.y;
    int aux = 0;

    if (idx < n && idy < n) {
        for (int k = 0; k < n; k++) {
            int m_element = M_d[idy*n + k]; // one thread per output element, hence idy*n selects the row
            int n_element = N_d[k*n + idx]; // elements are stored row-major, hence k*n + idx walks the column
            aux += n_element*m_element;
        }
        P_d[idy*n + idx] = aux;
    }
}

int main(int argc, char *argv[])
{
    hipEvent_t start, stop;
    float time;
    int *M_h, *N_h, *P_h; // Matrices on the host.
    int *M_d, *N_d, *P_d; // Matrices on the device.

    // Size of one matrix in bytes.
    size_t sz = N * N * sizeof(int);

    // Allocate space on the host.
    M_h = (int *) malloc(sz);
    N_h = (int *) malloc(sz);
    P_h = (int *) malloc(sz);

    // Allocate space on the device.
    hipMalloc((void**) &M_d, sz);
    hipMalloc((void**) &N_d, sz);
    hipMalloc((void**) &P_d, sz);

    // Create timer for timing CUDA calculation
    //PPunsigned int timer = 0;
    //PPcutCreateTimer( &timer );
    hipEventCreate(&start);
    hipEventCreate(&stop);

    // Initial values for the matrices.
    for (int i = 0; i < ARR_SIZE; i++) {
        M_h[i] = rand()%255;
        N_h[i] = rand()%255;
        P_h[i] = 0;
    }

    // Copy from CPU to GPU.
    hipMemcpy(M_d, M_h, sz, hipMemcpyHostToDevice);
    hipMemcpy(N_d, N_h, sz, hipMemcpyHostToDevice);
    hipMemcpy(P_d, P_h, sz, hipMemcpyHostToDevice);

    // Launch dimensions: one thread per output element.
    // (THREADS_PER_BLOCK x THREADS_PER_BLOCK = 128x128 threads would exceed the
    // per-block limit, so a 16x16 tile is used instead.)
    dim3 dimBlock(16, 16);
    dim3 dimGrid(N / dimBlock.x, N / dimBlock.y);

    hipEventRecord(start,0);

    // Launch the kernel.
    hipLaunchKernelGGL(( matrix_mult), dim3(dimGrid), dim3(dimBlock), 0, 0, M_d, N_d, P_d, N);
    hipDeviceSynchronize();
    checkCUDAError("kernel invocation");

    // Copy from GPU to CPU.
    hipMemcpy(P_h, P_d, sz, hipMemcpyDeviceToHost);
    checkCUDAError("memcpy");

    hipEventRecord(stop,0);
    hipEventSynchronize(stop);
    hipEventElapsedTime( &time, start, stop );
    printf("\nEXECUTION TIME: %f ms\n\n", time);

    // Free GPU memory.
    hipFree(M_d);
    hipFree(N_d);
    hipFree(P_d);

    // Free host memory.
    free(M_h);
    free(N_h);
    free(P_h);

    hipEventDestroy(start);
    hipEventDestroy(stop);

    return 0;
}

/* Utility function to check for and report CUDA errors */
void checkCUDAError(const char *msg)
{
    hipError_t err = hipGetLastError();
    if( hipSuccess != err) {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
        exit(EXIT_FAILURE);
    }
}
aa2de045f4cb478b151fa29eb906d6bfbef367d0.cu
#include <stdio.h>
#include <stdlib.h>

void checkCUDAError(const char*);

// Assumes N is a power of two.
#define N 64
#define ARR_SIZE (N*N)
#define NUM_BLOCKS (N/4)
#define THREADS_PER_BLOCK (N*2)

// The matrix dimension is passed as "n" so it does not collide with the N macro above.
__global__ void matrix_mult(int* M_d, int* N_d, int* P_d, int n)
{
    int idx = threadIdx.x + blockIdx.x*blockDim.x;
    int idy = threadIdx.y + blockIdx.y*blockDim.y;
    int aux = 0;

    if (idx < n && idy < n) {
        for (int k = 0; k < n; k++) {
            int m_element = M_d[idy*n + k]; // one thread per output element, hence idy*n selects the row
            int n_element = N_d[k*n + idx]; // elements are stored row-major, hence k*n + idx walks the column
            aux += n_element*m_element;
        }
        P_d[idy*n + idx] = aux;
    }
}

int main(int argc, char *argv[])
{
    cudaEvent_t start, stop;
    float time;
    int *M_h, *N_h, *P_h; // Matrices on the host.
    int *M_d, *N_d, *P_d; // Matrices on the device.

    // Size of one matrix in bytes.
    size_t sz = N * N * sizeof(int);

    // Allocate space on the host.
    M_h = (int *) malloc(sz);
    N_h = (int *) malloc(sz);
    P_h = (int *) malloc(sz);

    // Allocate space on the device.
    cudaMalloc((void**) &M_d, sz);
    cudaMalloc((void**) &N_d, sz);
    cudaMalloc((void**) &P_d, sz);

    // Create timer for timing CUDA calculation
    //PPunsigned int timer = 0;
    //PPcutCreateTimer( &timer );
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Initial values for the matrices.
    for (int i = 0; i < ARR_SIZE; i++) {
        M_h[i] = rand()%255;
        N_h[i] = rand()%255;
        P_h[i] = 0;
    }

    // Copy from CPU to GPU.
    cudaMemcpy(M_d, M_h, sz, cudaMemcpyHostToDevice);
    cudaMemcpy(N_d, N_h, sz, cudaMemcpyHostToDevice);
    cudaMemcpy(P_d, P_h, sz, cudaMemcpyHostToDevice);

    // Launch dimensions: one thread per output element.
    // (THREADS_PER_BLOCK x THREADS_PER_BLOCK = 128x128 threads would exceed the
    // per-block limit, so a 16x16 tile is used instead.)
    dim3 dimBlock(16, 16);
    dim3 dimGrid(N / dimBlock.x, N / dimBlock.y);

    cudaEventRecord(start,0);

    // Launch the kernel.
    matrix_mult<<<dimGrid,dimBlock>>>(M_d, N_d, P_d, N);
    cudaDeviceSynchronize();
    checkCUDAError("kernel invocation");

    // Copy from GPU to CPU.
    cudaMemcpy(P_h, P_d, sz, cudaMemcpyDeviceToHost);
    checkCUDAError("memcpy");

    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime( &time, start, stop );
    printf("\nEXECUTION TIME: %f ms\n\n", time);

    // Free GPU memory.
    cudaFree(M_d);
    cudaFree(N_d);
    cudaFree(P_d);

    // Free host memory.
    free(M_h);
    free(N_h);
    free(P_h);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    return 0;
}

/* Utility function to check for and report CUDA errors */
void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if( cudaSuccess != err) {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
        exit(EXIT_FAILURE);
    }
}
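The kernel in the pair above computes a plain dense product P = M x N over row-major int matrices, so the device result can be sanity-checked against a host-side triple loop. A minimal sketch of such a check; the names matmul_ref and check_equal are illustrative and not part of the original files.

// Host reference: P_ref[i*n + j] = sum_k M[i*n + k] * Nmat[k*n + j], row-major layout.
void matmul_ref(const int *M, const int *Nmat, int *P_ref, int n)
{
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++) {
            int acc = 0;
            for (int k = 0; k < n; k++)
                acc += M[i*n + k] * Nmat[k*n + j];
            P_ref[i*n + j] = acc;
        }
}

// Integer arithmetic on both sides, so the GPU result should match exactly.
int check_equal(const int *P_gpu, const int *P_ref, int n)
{
    for (int i = 0; i < n*n; i++)
        if (P_gpu[i] != P_ref[i])
            return 0;
    return 1;
}

Calling matmul_ref(M_h, N_h, P_ref, N) after the device-to-host copy and comparing with check_equal(P_h, P_ref, N) confirms the indexing used in matrix_mult.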
db9eecf1486237a854f130a514fcb13b960064a4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int dims_advec_mom_kernel_z3 [4][2]; static int dims_advec_mom_kernel_z3_h [4][2] = {0}; //user function __device__ inline void advec_mom_kernel_z3_gpu(ACC<double> &pre_vol, ACC<double> &post_vol, const ACC<double> &volume, const ACC<double> &vol_flux_z) { post_vol(0,0,0) = volume(0,0,0); pre_vol(0,0,0) = post_vol(0,0,0) + vol_flux_z(0,0,1) - vol_flux_z(0,0,0); } __global__ void ops_advec_mom_kernel_z3( double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_z3[0][0] + idx_z * 1*1 * dims_advec_mom_kernel_z3[0][0] * dims_advec_mom_kernel_z3[0][1]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_z3[1][0] + idx_z * 1*1 * dims_advec_mom_kernel_z3[1][0] * dims_advec_mom_kernel_z3[1][1]; arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_z3[2][0] + idx_z * 1*1 * dims_advec_mom_kernel_z3[2][0] * dims_advec_mom_kernel_z3[2][1]; arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_z3[3][0] + idx_z * 1*1 * dims_advec_mom_kernel_z3[3][0] * dims_advec_mom_kernel_z3[3][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ACC<double> argp0(dims_advec_mom_kernel_z3[0][0], dims_advec_mom_kernel_z3[0][1], arg0); ACC<double> argp1(dims_advec_mom_kernel_z3[1][0], dims_advec_mom_kernel_z3[1][1], arg1); const ACC<double> argp2(dims_advec_mom_kernel_z3[2][0], dims_advec_mom_kernel_z3[2][1], arg2); const ACC<double> argp3(dims_advec_mom_kernel_z3[3][0], dims_advec_mom_kernel_z3[3][1], arg3); advec_mom_kernel_z3_gpu(argp0, argp1, argp2, argp3); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_advec_mom_kernel_z3(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { #else void ops_par_loop_advec_mom_kernel_z3_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; #endif //Timing double t1,t2,c1,c2; ops_arg args[4] = { arg0, arg1, arg2, arg3}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,4,range,125)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(125,"advec_mom_kernel_z3"); OPS_kernels[125].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[3]; #endif #ifdef OPS_MPI if (compute_ranges(args, 4,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; if (xdim0 != dims_advec_mom_kernel_z3_h[0][0] || ydim0 != dims_advec_mom_kernel_z3_h[0][1] || xdim1 != dims_advec_mom_kernel_z3_h[1][0] || ydim1 != dims_advec_mom_kernel_z3_h[1][1] || xdim2 != 
dims_advec_mom_kernel_z3_h[2][0] || ydim2 != dims_advec_mom_kernel_z3_h[2][1] || xdim3 != dims_advec_mom_kernel_z3_h[3][0] || ydim3 != dims_advec_mom_kernel_z3_h[3][1]) { dims_advec_mom_kernel_z3_h[0][0] = xdim0; dims_advec_mom_kernel_z3_h[0][1] = ydim0; dims_advec_mom_kernel_z3_h[1][0] = xdim1; dims_advec_mom_kernel_z3_h[1][1] = ydim1; dims_advec_mom_kernel_z3_h[2][0] = xdim2; dims_advec_mom_kernel_z3_h[2][1] = ydim2; dims_advec_mom_kernel_z3_h[3][0] = xdim3; dims_advec_mom_kernel_z3_h[3][1] = ydim3; cutilSafeCall(hipMemcpyToSymbol( dims_advec_mom_kernel_z3, dims_advec_mom_kernel_z3_h, sizeof(dims_advec_mom_kernel_z3))); } int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); char *p_a[4]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 4); ops_halo_exchanges(args,4,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[125].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_advec_mom_kernel_z3), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[125].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 4); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[125].mpi_time += t2-t1; OPS_kernels[125].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[125].transfer += ops_compute_transfer(dim, start, end, 
&arg1); OPS_kernels[125].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[125].transfer += ops_compute_transfer(dim, start, end, &arg3); } } #ifdef OPS_LAZY void ops_par_loop_advec_mom_kernel_z3(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 125; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 125; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 4; desc->args = (ops_arg*)malloc(4*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->function = ops_par_loop_advec_mom_kernel_z3_execute; if (OPS_diags > 1) { ops_timing_realloc(125,"advec_mom_kernel_z3"); } ops_enqueue_kernel(desc); } #endif
db9eecf1486237a854f130a514fcb13b960064a4.cu
// // auto-generated by ops.py // __constant__ int dims_advec_mom_kernel_z3 [4][2]; static int dims_advec_mom_kernel_z3_h [4][2] = {0}; //user function __device__ inline void advec_mom_kernel_z3_gpu(ACC<double> &pre_vol, ACC<double> &post_vol, const ACC<double> &volume, const ACC<double> &vol_flux_z) { post_vol(0,0,0) = volume(0,0,0); pre_vol(0,0,0) = post_vol(0,0,0) + vol_flux_z(0,0,1) - vol_flux_z(0,0,0); } __global__ void ops_advec_mom_kernel_z3( double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_z3[0][0] + idx_z * 1*1 * dims_advec_mom_kernel_z3[0][0] * dims_advec_mom_kernel_z3[0][1]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_z3[1][0] + idx_z * 1*1 * dims_advec_mom_kernel_z3[1][0] * dims_advec_mom_kernel_z3[1][1]; arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_z3[2][0] + idx_z * 1*1 * dims_advec_mom_kernel_z3[2][0] * dims_advec_mom_kernel_z3[2][1]; arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_z3[3][0] + idx_z * 1*1 * dims_advec_mom_kernel_z3[3][0] * dims_advec_mom_kernel_z3[3][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ACC<double> argp0(dims_advec_mom_kernel_z3[0][0], dims_advec_mom_kernel_z3[0][1], arg0); ACC<double> argp1(dims_advec_mom_kernel_z3[1][0], dims_advec_mom_kernel_z3[1][1], arg1); const ACC<double> argp2(dims_advec_mom_kernel_z3[2][0], dims_advec_mom_kernel_z3[2][1], arg2); const ACC<double> argp3(dims_advec_mom_kernel_z3[3][0], dims_advec_mom_kernel_z3[3][1], arg3); advec_mom_kernel_z3_gpu(argp0, argp1, argp2, argp3); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_advec_mom_kernel_z3(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { #else void ops_par_loop_advec_mom_kernel_z3_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; #endif //Timing double t1,t2,c1,c2; ops_arg args[4] = { arg0, arg1, arg2, arg3}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,4,range,125)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(125,"advec_mom_kernel_z3"); OPS_kernels[125].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[3]; #endif #ifdef OPS_MPI if (compute_ranges(args, 4,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; if (xdim0 != dims_advec_mom_kernel_z3_h[0][0] || ydim0 != dims_advec_mom_kernel_z3_h[0][1] || xdim1 != dims_advec_mom_kernel_z3_h[1][0] || ydim1 != dims_advec_mom_kernel_z3_h[1][1] || xdim2 != dims_advec_mom_kernel_z3_h[2][0] || ydim2 != dims_advec_mom_kernel_z3_h[2][1] || xdim3 
!= dims_advec_mom_kernel_z3_h[3][0] || ydim3 != dims_advec_mom_kernel_z3_h[3][1]) { dims_advec_mom_kernel_z3_h[0][0] = xdim0; dims_advec_mom_kernel_z3_h[0][1] = ydim0; dims_advec_mom_kernel_z3_h[1][0] = xdim1; dims_advec_mom_kernel_z3_h[1][1] = ydim1; dims_advec_mom_kernel_z3_h[2][0] = xdim2; dims_advec_mom_kernel_z3_h[2][1] = ydim2; dims_advec_mom_kernel_z3_h[3][0] = xdim3; dims_advec_mom_kernel_z3_h[3][1] = ydim3; cutilSafeCall(cudaMemcpyToSymbol( dims_advec_mom_kernel_z3, dims_advec_mom_kernel_z3_h, sizeof(dims_advec_mom_kernel_z3))); } int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); char *p_a[4]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 4); ops_halo_exchanges(args,4,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[125].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_advec_mom_kernel_z3<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[125].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 4); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[125].mpi_time += t2-t1; OPS_kernels[125].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[125].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[125].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[125].transfer += 
ops_compute_transfer(dim, start, end, &arg3); } } #ifdef OPS_LAZY void ops_par_loop_advec_mom_kernel_z3(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 125; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 125; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 4; desc->args = (ops_arg*)malloc(4*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->function = ops_par_loop_advec_mom_kernel_z3_execute; if (OPS_diags > 1) { ops_timing_realloc(125,"advec_mom_kernel_z3"); } ops_enqueue_kernel(desc); } #endif
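In both versions of this generated loop, the wrapper kernel first offsets each raw pointer to the thread's (idx_x, idx_y, idx_z) element using the x/y extents held in __constant__ memory, and then wraps it in an ACC<double> so that advec_mom_kernel_z3_gpu can address neighbours with relative stencil offsets such as vol_flux_z(0,0,1). A minimal sketch of what such an accessor boils down to for this indexing scheme; this is an illustration only, not OPS's actual ACC class.

// Relative 3D accessor over a row-major block; (i,j,k) are offsets from the wrapped base pointer,
// which has already been advanced to the current thread's element.
template <typename T>
struct RelAcc3D {
    int xdim, ydim; // extents of the two fastest-varying dimensions
    T *base;
    __host__ __device__ RelAcc3D(int xd, int yd, T *p) : xdim(xd), ydim(yd), base(p) {}
    __host__ __device__ T &operator()(int i, int j, int k)
    {
        return base[i + j * xdim + k * xdim * ydim]; // same stride arithmetic as the arg0..arg3 offsets above
    }
};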
f1b88e7d283f3670faedf58d11e2dcddece2eafd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (C) 2016 Yusuke Suzuki <[email protected]> Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <gloop/statistics.h> #include "mergesort_inlines.cuh" #include "mergesort.cuh" static __global__ void mergepackKernel(float* orig, float* result) { int idx = (blockIdx.x * blockDim.x) + threadIdx.x; int division = blockIdx.y; if ((finalStartAddr[division] + idx) >= finalStartAddr[division + 1]) return; result[finalStartAddr[division] + idx] = orig[constStartAddr[division] * 4 + nullElems[division] + idx]; } void mergepack(Context*, dim3 grid, dim3 threads, float* d_resultList, float* d_origList) { gloop::Statistics::Scope<gloop::Statistics::Type::Kernel> scope; hipLaunchKernelGGL(( mergepackKernel), dim3(grid), dim3(threads), 0, 0, d_resultList, d_origList); hipDeviceSynchronize(); }
f1b88e7d283f3670faedf58d11e2dcddece2eafd.cu
/* Copyright (C) 2016 Yusuke Suzuki <[email protected]> Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <gloop/statistics.h> #include "mergesort_inlines.cuh" #include "mergesort.cuh" static __global__ void mergepackKernel(float* orig, float* result) { int idx = (blockIdx.x * blockDim.x) + threadIdx.x; int division = blockIdx.y; if ((finalStartAddr[division] + idx) >= finalStartAddr[division + 1]) return; result[finalStartAddr[division] + idx] = orig[constStartAddr[division] * 4 + nullElems[division] + idx]; } void mergepack(Context*, dim3 grid, dim3 threads, float* d_resultList, float* d_origList) { gloop::Statistics::Scope<gloop::Statistics::Type::Kernel> scope; mergepackKernel<<<grid, threads>>>(d_resultList, d_origList); cudaThreadSynchronize(); }
ca4c7c898c4db9b591a23f222f479d0336d0c970.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <hip/hip_runtime.h> #define DATATYPE int #define SMEMSIZE 1024 #define REP 128 //#define conflictnum 32 __global__ void global_broadcast(double *time,const DATATYPE *in1,const DATATYPE *in2,DATATYPE *out,int its, int conflictnum) { DATATYPE p,q=(threadIdx.x/conflictnum*conflictnum); double time_tmp=0.0; unsigned int start_time=0,stop_time=0; unsigned int i,j; for (i=0;i<its;i++) { __syncthreads(); start_time=clock(); #pragma unroll for (j=0;j<REP;j++) { p=in1[q]; q=in2[p]; } stop_time=clock(); time_tmp+=(stop_time-start_time); } time_tmp=time_tmp/REP/its; out[blockDim.x*blockIdx.x+threadIdx.x] = p+q; time[blockDim.x*blockIdx.x+threadIdx.x] = time_tmp; } int main_test(int blocks,int threads,DATATYPE *h_in1,DATATYPE *h_in2, int conflictnum) { int its=30; DATATYPE *d_in1,*d_in2; hipMalloc((void**)&d_in1,sizeof(DATATYPE)*SMEMSIZE); hipMalloc((void**)&d_in2,sizeof(DATATYPE)*SMEMSIZE); hipMemcpy(d_in1,h_in1,sizeof(DATATYPE)*SMEMSIZE,hipMemcpyHostToDevice); hipMemcpy(d_in2,h_in2,sizeof(DATATYPE)*SMEMSIZE,hipMemcpyHostToDevice); double *h_time,*d_time; DATATYPE *d_out; h_time=(double*)malloc(sizeof(double)*blocks*threads); hipMalloc((void**)&d_time,sizeof(double)*blocks*threads); hipMalloc((void**)&d_out,sizeof(DATATYPE)*blocks*threads); hipLaunchKernelGGL(( global_broadcast), dim3(blocks),dim3(threads), 0, 0, d_time,d_in1,d_in1,d_out,its, conflictnum); hipMemcpy(h_time,d_time,sizeof(double)*blocks*threads,hipMemcpyDeviceToHost); double avert=0.0,maxt=0.0,mint=99999.9; int nn=0; for (int i=0;i<blocks;i++) { for (int j=0;j<threads;j+=32) { avert+=h_time[i*threads+j]; nn++; if (maxt<h_time[i*threads+j]) { maxt=h_time[i*threads+j]; } if (mint>h_time[i*threads+j]) { mint=h_time[i*threads+j]; } } } avert/=nn; printf("%d\t%d\t\t%f\t%f\t%f\n", blocks,threads,avert,mint,maxt); hipFree(d_time); hipFree(d_out); hipFree(d_in1); hipFree(d_in2); free(h_time); return 0; } void init_order(DATATYPE *a,int n) { for (int i=0;i<n;i++) { a[i]=i; } } int main(int argc, char* argv[]) { if (argc != 2) { printf("%s <conflictnum> \n", argv[0]); } else { int value = atoi(argv[1]); DATATYPE *h_in1; h_in1 = (DATATYPE *) malloc(sizeof(DATATYPE) * SMEMSIZE); init_order(h_in1, SMEMSIZE); printf("blocks\t threads\t aver \t min \t max \t(clocks)\n"); int blocks = 1; for (int j = 0; j <= 512; j += 32) { int threads = (j == 0 ? 1 : j); main_test(blocks, threads, h_in1, h_in1, value); } free(h_in1); } return 0; }
ca4c7c898c4db9b591a23f222f479d0336d0c970.cu
#include <stdlib.h> #include <stdio.h> #include <cuda_runtime.h> #define DATATYPE int #define SMEMSIZE 1024 #define REP 128 //#define conflictnum 32 __global__ void global_broadcast(double *time,const DATATYPE *in1,const DATATYPE *in2,DATATYPE *out,int its, int conflictnum) { DATATYPE p,q=(threadIdx.x/conflictnum*conflictnum); double time_tmp=0.0; unsigned int start_time=0,stop_time=0; unsigned int i,j; for (i=0;i<its;i++) { __syncthreads(); start_time=clock(); #pragma unroll for (j=0;j<REP;j++) { p=in1[q]; q=in2[p]; } stop_time=clock(); time_tmp+=(stop_time-start_time); } time_tmp=time_tmp/REP/its; out[blockDim.x*blockIdx.x+threadIdx.x] = p+q; time[blockDim.x*blockIdx.x+threadIdx.x] = time_tmp; } int main_test(int blocks,int threads,DATATYPE *h_in1,DATATYPE *h_in2, int conflictnum) { int its=30; DATATYPE *d_in1,*d_in2; cudaMalloc((void**)&d_in1,sizeof(DATATYPE)*SMEMSIZE); cudaMalloc((void**)&d_in2,sizeof(DATATYPE)*SMEMSIZE); cudaMemcpy(d_in1,h_in1,sizeof(DATATYPE)*SMEMSIZE,cudaMemcpyHostToDevice); cudaMemcpy(d_in2,h_in2,sizeof(DATATYPE)*SMEMSIZE,cudaMemcpyHostToDevice); double *h_time,*d_time; DATATYPE *d_out; h_time=(double*)malloc(sizeof(double)*blocks*threads); cudaMalloc((void**)&d_time,sizeof(double)*blocks*threads); cudaMalloc((void**)&d_out,sizeof(DATATYPE)*blocks*threads); global_broadcast<<<blocks,threads>>>(d_time,d_in1,d_in1,d_out,its, conflictnum); cudaMemcpy(h_time,d_time,sizeof(double)*blocks*threads,cudaMemcpyDeviceToHost); double avert=0.0,maxt=0.0,mint=99999.9; int nn=0; for (int i=0;i<blocks;i++) { for (int j=0;j<threads;j+=32) { avert+=h_time[i*threads+j]; nn++; if (maxt<h_time[i*threads+j]) { maxt=h_time[i*threads+j]; } if (mint>h_time[i*threads+j]) { mint=h_time[i*threads+j]; } } } avert/=nn; printf("%d\t%d\t\t%f\t%f\t%f\n", blocks,threads,avert,mint,maxt); cudaFree(d_time); cudaFree(d_out); cudaFree(d_in1); cudaFree(d_in2); free(h_time); return 0; } void init_order(DATATYPE *a,int n) { for (int i=0;i<n;i++) { a[i]=i; } } int main(int argc, char* argv[]) { if (argc != 2) { printf("%s <conflictnum> \n", argv[0]); } else { int value = atoi(argv[1]); DATATYPE *h_in1; h_in1 = (DATATYPE *) malloc(sizeof(DATATYPE) * SMEMSIZE); init_order(h_in1, SMEMSIZE); printf("blocks\t threads\t aver \t min \t max \t(clocks)\n"); int blocks = 1; for (int j = 0; j <= 512; j += 32) { int threads = (j == 0 ? 1 : j); main_test(blocks, threads, h_in1, h_in1, value); } free(h_in1); } return 0; }
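In the benchmark above, q = threadIdx.x/conflictnum*conflictnum rounds each thread's starting index down to a multiple of conflictnum, so every group of conflictnum consecutive threads chases the same pointer chain (a broadcast access pattern), and the host averages the clock() counts of one lane per warp via the j += 32 stride. A small host-side illustration of that grouping arithmetic, with illustrative values only.

#include <stdio.h>

int main(void)
{
    int conflictnum = 8; // e.g. 8 consecutive threads share one starting index
    for (int tid = 0; tid < 16; tid++) {
        int q = tid / conflictnum * conflictnum; // integer division truncates, multiplication scales back
        printf("thread %2d starts at index %d\n", tid, q);
    }
    return 0;
}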
5cf06c4560a9e959177ce650401541b44894e3dd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #define NUM_BLOCKS 1 #define BLOCK_WIDTH 256 __global__ void hello() { printf("Hello world! I'm thread %d\n", threadIdx.x); } int main(int argc,char **argv) { // launch the kernel hipLaunchKernelGGL(( hello), dim3(NUM_BLOCKS), dim3(BLOCK_WIDTH), 0, 0, ); // force the printf()s to flush hipDeviceSynchronize(); printf("That's all!\n"); return 0; }
5cf06c4560a9e959177ce650401541b44894e3dd.cu
#include <stdio.h> #define NUM_BLOCKS 1 #define BLOCK_WIDTH 256 __global__ void hello() { printf("Hello world! I'm thread %d\n", threadIdx.x); } int main(int argc,char **argv) { // launch the kernel hello<<<NUM_BLOCKS, BLOCK_WIDTH>>>(); // force the printf()s to flush cudaDeviceSynchronize(); printf("That's all!\n"); return 0; }
9f0b12592f5801cdff92c32f66b0142dd8724578.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common.h" #include "crypto.h" __device__ void * mymemset ( void * ptr, int value, size_t num ) { size_t i; for (i = 0; i < num; i++) ((char*) ptr)[i] = value; return ptr; } #define memset mymemset __device__ void * mymemcpy ( void * destination, const void * source, size_t num ) { size_t i; for (i = 0; i < num; i++) ((char*) destination)[i] = ((char*) source)[i]; return destination; } #define memcpy mymemcpy __device__ void crypto_open (PCRYPTO_INFO cryptoInfo) { memset (cryptoInfo, 0, sizeof (CRYPTO_INFO)); cryptoInfo->ea = -1; } __device__ void crypto_loadkey (PKEY_INFO keyInfo, char *lpszUserKey, int nUserKeyLen) { keyInfo->keyLength = nUserKeyLen; burn (keyInfo->userKey, sizeof (keyInfo->userKey)); memcpy (keyInfo->userKey, lpszUserKey, nUserKeyLen); } __device__ int get_pkcs5_iteration_count (int pkcs5_prf_id, BOOL bBoot) { switch (pkcs5_prf_id) { case RIPEMD160: return (bBoot ? 1000 : 2000); case SHA512: return 1000; case SHA1: // Deprecated/legacy return 2000; case WHIRLPOOL: return 1000; default: TC_THROW_FATAL_EXCEPTION; // Unknown/wrong ID } return 0; } __device__ void RMD160Init (RMD160_CTX *ctx) { ctx->count = 0; ctx->state[0] = 0x67452301; ctx->state[1] = 0xefcdab89; ctx->state[2] = 0x98badcfe; ctx->state[3] = 0x10325476; ctx->state[4] = 0xc3d2e1f0; } __device__ word32 rotlFixed (word32 x, unsigned int y) { return (word32)((x<<y) | (x>>(sizeof(word32)*8-y))); } #define F(x, y, z) (x ^ y ^ z) #define G(x, y, z) (z ^ (x & (y^z))) #define H(x, y, z) (z ^ (x | ~y)) #define I(x, y, z) (y ^ (z & (x^y))) #define J(x, y, z) (x ^ (y | ~z)) #define k0 0UL #define k1 0x5a827999UL #define k2 0x6ed9eba1UL #define k3 0x8f1bbcdcUL #define k4 0xa953fd4eUL #define k5 0x50a28be6UL #define k6 0x5c4dd124UL #define k7 0x6d703ef3UL #define k8 0x7a6d76e9UL #define k9 0UL #define Subround(f, a, b, c, d, e, x, s, k) \ a += f(b, c, d) + x + k; \ a = rotlFixed((word32)a, s) + e;\ c = rotlFixed((word32)c, 10U) __device__ void RMD160Transform (u32 *digest, const u32 *data) { const word32 *X = data; word32 a1, b1, c1, d1, e1, a2, b2, c2, d2, e2; a1 = a2 = digest[0]; b1 = b2 = digest[1]; c1 = c2 = digest[2]; d1 = d2 = digest[3]; e1 = e2 = digest[4]; Subround(F, a1, b1, c1, d1, e1, X[ 0], 11, k0); Subround(F, e1, a1, b1, c1, d1, X[ 1], 14, k0); Subround(F, d1, e1, a1, b1, c1, X[ 2], 15, k0); Subround(F, c1, d1, e1, a1, b1, X[ 3], 12, k0); Subround(F, b1, c1, d1, e1, a1, X[ 4], 5, k0); Subround(F, a1, b1, c1, d1, e1, X[ 5], 8, k0); Subround(F, e1, a1, b1, c1, d1, X[ 6], 7, k0); Subround(F, d1, e1, a1, b1, c1, X[ 7], 9, k0); Subround(F, c1, d1, e1, a1, b1, X[ 8], 11, k0); Subround(F, b1, c1, d1, e1, a1, X[ 9], 13, k0); Subround(F, a1, b1, c1, d1, e1, X[10], 14, k0); Subround(F, e1, a1, b1, c1, d1, X[11], 15, k0); Subround(F, d1, e1, a1, b1, c1, X[12], 6, k0); Subround(F, c1, d1, e1, a1, b1, X[13], 7, k0); Subround(F, b1, c1, d1, e1, a1, X[14], 9, k0); Subround(F, a1, b1, c1, d1, e1, X[15], 8, k0); Subround(G, e1, a1, b1, c1, d1, X[ 7], 7, k1); Subround(G, d1, e1, a1, b1, c1, X[ 4], 6, k1); Subround(G, c1, d1, e1, a1, b1, X[13], 8, k1); Subround(G, b1, c1, d1, e1, a1, X[ 1], 13, k1); Subround(G, a1, b1, c1, d1, e1, X[10], 11, k1); Subround(G, e1, a1, b1, c1, d1, X[ 6], 9, k1); Subround(G, d1, e1, a1, b1, c1, X[15], 7, k1); Subround(G, c1, d1, e1, a1, b1, X[ 3], 15, k1); Subround(G, b1, c1, d1, e1, a1, X[12], 7, k1); Subround(G, a1, b1, c1, d1, e1, X[ 0], 12, k1); Subround(G, e1, a1, b1, c1, d1, X[ 9], 15, k1); 
Subround(G, d1, e1, a1, b1, c1, X[ 5], 9, k1); Subround(G, c1, d1, e1, a1, b1, X[ 2], 11, k1); Subround(G, b1, c1, d1, e1, a1, X[14], 7, k1); Subround(G, a1, b1, c1, d1, e1, X[11], 13, k1); Subround(G, e1, a1, b1, c1, d1, X[ 8], 12, k1); Subround(H, d1, e1, a1, b1, c1, X[ 3], 11, k2); Subround(H, c1, d1, e1, a1, b1, X[10], 13, k2); Subround(H, b1, c1, d1, e1, a1, X[14], 6, k2); Subround(H, a1, b1, c1, d1, e1, X[ 4], 7, k2); Subround(H, e1, a1, b1, c1, d1, X[ 9], 14, k2); Subround(H, d1, e1, a1, b1, c1, X[15], 9, k2); Subround(H, c1, d1, e1, a1, b1, X[ 8], 13, k2); Subround(H, b1, c1, d1, e1, a1, X[ 1], 15, k2); Subround(H, a1, b1, c1, d1, e1, X[ 2], 14, k2); Subround(H, e1, a1, b1, c1, d1, X[ 7], 8, k2); Subround(H, d1, e1, a1, b1, c1, X[ 0], 13, k2); Subround(H, c1, d1, e1, a1, b1, X[ 6], 6, k2); Subround(H, b1, c1, d1, e1, a1, X[13], 5, k2); Subround(H, a1, b1, c1, d1, e1, X[11], 12, k2); Subround(H, e1, a1, b1, c1, d1, X[ 5], 7, k2); Subround(H, d1, e1, a1, b1, c1, X[12], 5, k2); Subround(I, c1, d1, e1, a1, b1, X[ 1], 11, k3); Subround(I, b1, c1, d1, e1, a1, X[ 9], 12, k3); Subround(I, a1, b1, c1, d1, e1, X[11], 14, k3); Subround(I, e1, a1, b1, c1, d1, X[10], 15, k3); Subround(I, d1, e1, a1, b1, c1, X[ 0], 14, k3); Subround(I, c1, d1, e1, a1, b1, X[ 8], 15, k3); Subround(I, b1, c1, d1, e1, a1, X[12], 9, k3); Subround(I, a1, b1, c1, d1, e1, X[ 4], 8, k3); Subround(I, e1, a1, b1, c1, d1, X[13], 9, k3); Subround(I, d1, e1, a1, b1, c1, X[ 3], 14, k3); Subround(I, c1, d1, e1, a1, b1, X[ 7], 5, k3); Subround(I, b1, c1, d1, e1, a1, X[15], 6, k3); Subround(I, a1, b1, c1, d1, e1, X[14], 8, k3); Subround(I, e1, a1, b1, c1, d1, X[ 5], 6, k3); Subround(I, d1, e1, a1, b1, c1, X[ 6], 5, k3); Subround(I, c1, d1, e1, a1, b1, X[ 2], 12, k3); Subround(J, b1, c1, d1, e1, a1, X[ 4], 9, k4); Subround(J, a1, b1, c1, d1, e1, X[ 0], 15, k4); Subround(J, e1, a1, b1, c1, d1, X[ 5], 5, k4); Subround(J, d1, e1, a1, b1, c1, X[ 9], 11, k4); Subround(J, c1, d1, e1, a1, b1, X[ 7], 6, k4); Subround(J, b1, c1, d1, e1, a1, X[12], 8, k4); Subround(J, a1, b1, c1, d1, e1, X[ 2], 13, k4); Subround(J, e1, a1, b1, c1, d1, X[10], 12, k4); Subround(J, d1, e1, a1, b1, c1, X[14], 5, k4); Subround(J, c1, d1, e1, a1, b1, X[ 1], 12, k4); Subround(J, b1, c1, d1, e1, a1, X[ 3], 13, k4); Subround(J, a1, b1, c1, d1, e1, X[ 8], 14, k4); Subround(J, e1, a1, b1, c1, d1, X[11], 11, k4); Subround(J, d1, e1, a1, b1, c1, X[ 6], 8, k4); Subround(J, c1, d1, e1, a1, b1, X[15], 5, k4); Subround(J, b1, c1, d1, e1, a1, X[13], 6, k4); Subround(J, a2, b2, c2, d2, e2, X[ 5], 8, k5); Subround(J, e2, a2, b2, c2, d2, X[14], 9, k5); Subround(J, d2, e2, a2, b2, c2, X[ 7], 9, k5); Subround(J, c2, d2, e2, a2, b2, X[ 0], 11, k5); Subround(J, b2, c2, d2, e2, a2, X[ 9], 13, k5); Subround(J, a2, b2, c2, d2, e2, X[ 2], 15, k5); Subround(J, e2, a2, b2, c2, d2, X[11], 15, k5); Subround(J, d2, e2, a2, b2, c2, X[ 4], 5, k5); Subround(J, c2, d2, e2, a2, b2, X[13], 7, k5); Subround(J, b2, c2, d2, e2, a2, X[ 6], 7, k5); Subround(J, a2, b2, c2, d2, e2, X[15], 8, k5); Subround(J, e2, a2, b2, c2, d2, X[ 8], 11, k5); Subround(J, d2, e2, a2, b2, c2, X[ 1], 14, k5); Subround(J, c2, d2, e2, a2, b2, X[10], 14, k5); Subround(J, b2, c2, d2, e2, a2, X[ 3], 12, k5); Subround(J, a2, b2, c2, d2, e2, X[12], 6, k5); Subround(I, e2, a2, b2, c2, d2, X[ 6], 9, k6); Subround(I, d2, e2, a2, b2, c2, X[11], 13, k6); Subround(I, c2, d2, e2, a2, b2, X[ 3], 15, k6); Subround(I, b2, c2, d2, e2, a2, X[ 7], 7, k6); Subround(I, a2, b2, c2, d2, e2, X[ 0], 12, k6); Subround(I, e2, a2, b2, c2, d2, X[13], 
8, k6); Subround(I, d2, e2, a2, b2, c2, X[ 5], 9, k6); Subround(I, c2, d2, e2, a2, b2, X[10], 11, k6); Subround(I, b2, c2, d2, e2, a2, X[14], 7, k6); Subround(I, a2, b2, c2, d2, e2, X[15], 7, k6); Subround(I, e2, a2, b2, c2, d2, X[ 8], 12, k6); Subround(I, d2, e2, a2, b2, c2, X[12], 7, k6); Subround(I, c2, d2, e2, a2, b2, X[ 4], 6, k6); Subround(I, b2, c2, d2, e2, a2, X[ 9], 15, k6); Subround(I, a2, b2, c2, d2, e2, X[ 1], 13, k6); Subround(I, e2, a2, b2, c2, d2, X[ 2], 11, k6); Subround(H, d2, e2, a2, b2, c2, X[15], 9, k7); Subround(H, c2, d2, e2, a2, b2, X[ 5], 7, k7); Subround(H, b2, c2, d2, e2, a2, X[ 1], 15, k7); Subround(H, a2, b2, c2, d2, e2, X[ 3], 11, k7); Subround(H, e2, a2, b2, c2, d2, X[ 7], 8, k7); Subround(H, d2, e2, a2, b2, c2, X[14], 6, k7); Subround(H, c2, d2, e2, a2, b2, X[ 6], 6, k7); Subround(H, b2, c2, d2, e2, a2, X[ 9], 14, k7); Subround(H, a2, b2, c2, d2, e2, X[11], 12, k7); Subround(H, e2, a2, b2, c2, d2, X[ 8], 13, k7); Subround(H, d2, e2, a2, b2, c2, X[12], 5, k7); Subround(H, c2, d2, e2, a2, b2, X[ 2], 14, k7); Subround(H, b2, c2, d2, e2, a2, X[10], 13, k7); Subround(H, a2, b2, c2, d2, e2, X[ 0], 13, k7); Subround(H, e2, a2, b2, c2, d2, X[ 4], 7, k7); Subround(H, d2, e2, a2, b2, c2, X[13], 5, k7); Subround(G, c2, d2, e2, a2, b2, X[ 8], 15, k8); Subround(G, b2, c2, d2, e2, a2, X[ 6], 5, k8); Subround(G, a2, b2, c2, d2, e2, X[ 4], 8, k8); Subround(G, e2, a2, b2, c2, d2, X[ 1], 11, k8); Subround(G, d2, e2, a2, b2, c2, X[ 3], 14, k8); Subround(G, c2, d2, e2, a2, b2, X[11], 14, k8); Subround(G, b2, c2, d2, e2, a2, X[15], 6, k8); Subround(G, a2, b2, c2, d2, e2, X[ 0], 14, k8); Subround(G, e2, a2, b2, c2, d2, X[ 5], 6, k8); Subround(G, d2, e2, a2, b2, c2, X[12], 9, k8); Subround(G, c2, d2, e2, a2, b2, X[ 2], 12, k8); Subround(G, b2, c2, d2, e2, a2, X[13], 9, k8); Subround(G, a2, b2, c2, d2, e2, X[ 9], 12, k8); Subround(G, e2, a2, b2, c2, d2, X[ 7], 5, k8); Subround(G, d2, e2, a2, b2, c2, X[10], 15, k8); Subround(G, c2, d2, e2, a2, b2, X[14], 8, k8); Subround(F, b2, c2, d2, e2, a2, X[12], 8, k9); Subround(F, a2, b2, c2, d2, e2, X[15], 5, k9); Subround(F, e2, a2, b2, c2, d2, X[10], 12, k9); Subround(F, d2, e2, a2, b2, c2, X[ 4], 9, k9); Subround(F, c2, d2, e2, a2, b2, X[ 1], 12, k9); Subround(F, b2, c2, d2, e2, a2, X[ 5], 5, k9); Subround(F, a2, b2, c2, d2, e2, X[ 8], 14, k9); Subround(F, e2, a2, b2, c2, d2, X[ 7], 6, k9); Subround(F, d2, e2, a2, b2, c2, X[ 6], 8, k9); Subround(F, c2, d2, e2, a2, b2, X[ 2], 13, k9); Subround(F, b2, c2, d2, e2, a2, X[13], 6, k9); Subround(F, a2, b2, c2, d2, e2, X[14], 5, k9); Subround(F, e2, a2, b2, c2, d2, X[ 0], 15, k9); Subround(F, d2, e2, a2, b2, c2, X[ 3], 13, k9); Subround(F, c2, d2, e2, a2, b2, X[ 9], 11, k9); Subround(F, b2, c2, d2, e2, a2, X[11], 11, k9); c1 = digest[1] + c1 + d2; digest[1] = digest[2] + d1 + e2; digest[2] = digest[3] + e1 + a2; digest[3] = digest[4] + a1 + b2; digest[4] = digest[0] + b1 + c2; digest[0] = c1; } #undef k1 #undef k2 #undef k3 // Update context to reflect the concatenation of another buffer full // of bytes. __device__ void RMD160Update (RMD160_CTX *ctx, const unsigned char *input, u32 lenArg) { uint64 len = lenArg, have, need; // Check how many bytes we already have and how many more we need. have = ctx->count >> 3; have &= (RIPEMD160_BLOCK_LENGTH - 1); need = RIPEMD160_BLOCK_LENGTH - have; // Update bitcount. 
	ctx->count += len << 3;

	if (len >= need) {
		if (have != 0) {
			memcpy (ctx->buffer + have, input, (size_t) need);
			RMD160Transform ((uint32 *) ctx->state, (const uint32 *) ctx->buffer);
			input += need;
			len -= need;
			have = 0;
		}

		// Process data in RIPEMD160_BLOCK_LENGTH-byte chunks.
		while (len >= RIPEMD160_BLOCK_LENGTH) {
			RMD160Transform ((uint32 *) ctx->state, (const uint32 *) input);
			input += RIPEMD160_BLOCK_LENGTH;
			len -= RIPEMD160_BLOCK_LENGTH;
		}
	}

	// Handle any remaining bytes of data.
	if (len != 0)
		memcpy (ctx->buffer + have, input, (size_t) len);
}

// Pad out to a 64-byte boundary with the bit pattern 1 0*,
// followed by the 64-bit count of bits processed (stored little-endian, see PUT_64BIT_LE below).
__device__ void RMD160Pad(RMD160_CTX *ctx)
{
	byte count[8];
	uint32 padlen;

	// Convert count to 8 bytes in little endian order.
	PUT_64BIT_LE(count, ctx->count);

	// Pad out to 56 mod 64.
	padlen = RIPEMD160_BLOCK_LENGTH - (uint32)((ctx->count >> 3) & (RIPEMD160_BLOCK_LENGTH - 1));
	if (padlen < 1 + 8)
		padlen += RIPEMD160_BLOCK_LENGTH;
	RMD160Update(ctx, PADDING, padlen - 8);	// padlen - 8 <= 64
	RMD160Update(ctx, count, 8);
}

// Final wrapup: call RMD160Pad, fill in the digest and zero out ctx.
__device__ void RMD160Final(unsigned char *digest, RMD160_CTX *ctx)
{
	int i;

	RMD160Pad(ctx);
	if (digest) {
		for (i = 0; i < 5; i++)
			PUT_32BIT_LE(digest + i * 4, ctx->state[i]);
		memset (ctx, 0, sizeof(*ctx));
	}
}

#define k_ipad td->k_ipad
#define k_opad td->k_opad

__device__ void hmac_ripemd160 (char *key, int keylen, char *input, int len, char *digest, PTHREAD_DATA td)
{
	RMD160_CTX context;
	unsigned char tk[RIPEMD160_DIGESTSIZE];
	int i;

	// If the key is longer than the hash algorithm block size,
	// let key = ripemd160(key), as per HMAC specifications.
	if (keylen > RIPEMD160_BLOCKSIZE)
	{
		RMD160_CTX tctx;

		RMD160Init(&tctx);
		RMD160Update(&tctx, (const unsigned char *) key, keylen);
		RMD160Final(tk, &tctx);

		key = (char *) tk;
		keylen = RIPEMD160_DIGESTSIZE;

		burn (&tctx, sizeof(tctx));	// Prevent leaks
	}

	// RMD160(K XOR opad, RMD160(K XOR ipad, text))
	// where K is an n byte key,
	// ipad is the byte 0x36 repeated RIPEMD160_BLOCKSIZE times,
	// opad is the byte 0x5c repeated RIPEMD160_BLOCKSIZE times,
	// and text is the data being protected.

	// start out by storing the key in the pads (0x36 == 54, 0x5c == 92)
	memset(k_ipad, 54U, 64);
	memset(k_opad, 92U, 64);

	// XOR key with ipad and opad values
	for (i=0; i<keylen; i++)
	{
		k_ipad[i] ^= (unsigned char)key[i];
		k_opad[i] ^= (unsigned char)key[i];
	}

	// perform inner RIPEMD-160
	RMD160Init(&context);					// init context for 1st pass
	RMD160Update(&context, k_ipad, RIPEMD160_BLOCKSIZE);	// start with inner pad
	RMD160Update(&context, (const unsigned char *) input, len);	// then text of datagram
	RMD160Final((unsigned char *) digest, &context);	// finish up 1st pass

	// perform outer RIPEMD-160
	RMD160Init(&context);					// init context for 2nd pass
	RMD160Update(&context, k_opad, RIPEMD160_BLOCKSIZE);	// start with outer pad
	// results of 1st hash
	RMD160Update(&context, (const unsigned char *) digest, RIPEMD160_DIGESTSIZE);
	RMD160Final((unsigned char *) digest, &context);	// finish up 2nd pass

	// Prevent possible leaks.
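	// tk only holds data when the key was longer than one block (it then
	// contains RIPEMD-160 of the key), but it is wiped unconditionally,
	// together with the local hash context, so no key-derived material is
	// left behind in local memory.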
burn (tk, sizeof(tk)); burn (&context, sizeof(context)); } #undef k_ipad #undef k_opad __device__ void derive_u_ripemd160 (char *pwd, int pwd_len, char *salt, int salt_len, int iterations, char *u, int b, PTHREAD_DATA td) { char j[RIPEMD160_DIGESTSIZE], k[RIPEMD160_DIGESTSIZE]; char init[128]; char counter[4]; int c, i; // iteration 1 memset (counter, 0, 4); counter[3] = (char) b; memcpy (init, salt, salt_len); // salt memcpy (&init[salt_len], counter, 4); // big-endian block number // remaining iterations for (c = 0; c < iterations; c++) { hmac_ripemd160 (pwd, pwd_len, !c ? init : j, !c ? salt_len + 4 : RIPEMD160_DIGESTSIZE, !c ? j : k, td); if (!c) memcpy (u, j, RIPEMD160_DIGESTSIZE); else for (i = 0; i < RIPEMD160_DIGESTSIZE; i++) { u[i] ^= k[i]; j[i] = k[i]; } } // Prevent possible leaks. burn (j, sizeof(j)); burn (k, sizeof(k)); } __device__ void derive_key_ripemd160 (char *pwd, int pwd_len, char *salt, int salt_len, int iterations, char *dk, int dklen, PTHREAD_DATA td) { char u[RIPEMD160_DIGESTSIZE]; int b, l, r; if (dklen % RIPEMD160_DIGESTSIZE) { l = 1 + dklen / RIPEMD160_DIGESTSIZE; } else { l = dklen / RIPEMD160_DIGESTSIZE; } r = dklen - (l - 1) * RIPEMD160_DIGESTSIZE; // first l - 1 blocks for (b = 1; b <= l; b++) { derive_u_ripemd160 (pwd, pwd_len, salt, salt_len, iterations, u, b, td); if (b < l) { memcpy (dk, u, RIPEMD160_DIGESTSIZE); dk += RIPEMD160_DIGESTSIZE; } } // last block memcpy (dk, u, r); // Prevent possible leaks. burn (u, sizeof(u)); } __device__ int EAGetFirst () { return 1; } __device__ int EAGetNext (int previousEA) { int id = previousEA + 1; if (EncryptionAlgorithms[id].Ciphers[0] != 0) return id; return 0; } // Returns the first mode of operation of EA __device__ int EAGetFirstMode (int ea) { return (EncryptionAlgorithms[ea].Modes[0]); } __device__ int EAGetNextMode (int ea, int previousModeId) { int c, i = 0; while (c = EncryptionAlgorithms[ea].Modes[i++]) { if (c == previousModeId) return EncryptionAlgorithms[ea].Modes[i]; } return 0; } // Returns TRUE if the mode of operation is supported for the encryption algorithm __device__ BOOL EAIsModeSupported (int ea, int testedMode) { int mode; for (mode = EAGetFirstMode (ea); mode != 0; mode = EAGetNextMode (ea, mode)) { if (mode == testedMode) return TRUE; } return FALSE; } __device__ int EAGetFirstCipher (int ea) { return EncryptionAlgorithms[ea].Ciphers[0]; } __device__ int EAGetNextCipher (int ea, int previousCipherId) { int c, i = 0; while (c = EncryptionAlgorithms[ea].Ciphers[i++]) { if (c == previousCipherId) return EncryptionAlgorithms[ea].Ciphers[i]; } return 0; } // Returns number of ciphers in EA __device__ int EAGetCipherCount (int ea) { int i = 0; while (EncryptionAlgorithms[ea].Ciphers[i++]); return i - 1; } __device__ int EAGetLastCipher (int ea) { int i = 0; while (EncryptionAlgorithms[ea].Ciphers[i++]); return EncryptionAlgorithms[ea].Ciphers[i - 2]; } __device__ int EAGetPreviousCipher (int ea, int previousCipherId) { int c, i = 0; if (EncryptionAlgorithms[ea].Ciphers[i++] == previousCipherId) return 0; while (c = EncryptionAlgorithms[ea].Ciphers[i++]) { if (c == previousCipherId) return EncryptionAlgorithms[ea].Ciphers[i - 2]; } return 0; } __device__ const Cipher *CipherGet (int id) { int i; for (i = 0; Ciphers[i].Id != 0; i++) if (Ciphers[i].Id == id) return &Ciphers[i]; return NULL; } __device__ int CipherGetKeySize (int cipherId) { return CipherGet (cipherId) -> KeySize; } // Returns sum of key sizes of all ciphers of the EA (in bytes) __device__ int EAGetKeySize (int ea) { int i = 
EAGetFirstCipher (ea); int size = CipherGetKeySize (i); while (i = EAGetNextCipher (ea, i)) { size += CipherGetKeySize (i); } return size; } // Returns the largest key size needed by an EA for the specified mode of operation __device__ int EAGetLargestKeyForMode (int mode) { int ea, key = 0; for (ea = EAGetFirst (); ea != 0; ea = EAGetNext (ea)) { if (!EAIsModeSupported (ea, mode)) continue; if (EAGetKeySize (ea) >= key) key = EAGetKeySize (ea); } return key; } __device__ int GetMaxPkcs5OutSize (void) { int size = 32; size = max (size, EAGetLargestKeyForMode (XTS) * 2); // Sizes of primary + secondary keys size = max (size, LEGACY_VOL_IV_SIZE + EAGetLargestKeyForMode (LRW)); // Deprecated/legacy size = max (size, LEGACY_VOL_IV_SIZE + EAGetLargestKeyForMode (CBC)); // Deprecated/legacy size = max (size, LEGACY_VOL_IV_SIZE + EAGetLargestKeyForMode (OUTER_CBC)); // Deprecated/legacy size = max (size, LEGACY_VOL_IV_SIZE + EAGetLargestKeyForMode (INNER_CBC)); // Deprecated/legacy return size; } #define rotl32(x,n) (((x) << n) | ((x) >> (32 - n))) #define rotr32(x,n) (((x) >> n) | ((x) << (32 - n))) #define rotr64(x,n) (((x) >> n) | ((x) << (64 - n))) #define bswap_32(x) ((rotr32((x), 24) & 0x00ff00ff) | (rotr32((x), 8) & 0xff00ff00)) #define bswap_64(x) (((uint_64t)(bswap_32((uint_32t)(x)))) << 32 | bswap_32((uint_32t)((x) >> 32))) #define bsw_32(p,n) \ { int _i = (n); while(_i--) ((sha1_32t*)p)[_i] = bswap_32(((sha1_32t*)p)[_i]); } #define bsw_64(p,n) \ { int _i = (n); while(_i--) ((uint_64t*)p)[_i] = bswap_64(((uint_64t*)p)[_i]); } #define s_0(x) (rotr64((x), 28) ^ rotr64((x), 34) ^ rotr64((x), 39)) #define s_1(x) (rotr64((x), 14) ^ rotr64((x), 18) ^ rotr64((x), 41)) #define g_0(x) (rotr64((x), 1) ^ rotr64((x), 8) ^ ((x) >> 7)) #define g_1(x) (rotr64((x), 19) ^ rotr64((x), 61) ^ ((x) >> 6)) #define k_0 k512 #define ch(x,y,z) ((z) ^ ((x) & ((y) ^ (z)))) #define parity(x,y,z) ((x) ^ (y) ^ (z)) #define maj(x,y,z) (((x) & (y)) | ((z) & ((x) ^ (y)))) // round transforms for SHA256 and SHA512 compression functions #define vf(n,i) v[(n - i) & 7] #define hf(i) (p[i & 15] += \ g_1(p[(i + 14) & 15]) + p[(i + 9) & 15] + g_0(p[(i + 1) & 15])) #define v_cycle(i,j) \ vf(7,i) += (j ? hf(i) : p[i]) + k_0[i+j] \ + s_1(vf(4,i)) + ch(vf(4,i),vf(5,i),vf(6,i)); \ vf(3,i) += vf(7,i); \ vf(7,i) += s_0(vf(0,i))+ maj(vf(0,i),vf(1,i),vf(2,i)) __device__ VOID_RETURN sha512_compile(sha512_ctx ctx[1]) { uint_64t v[8], *p = ctx->wbuf; uint_32t j; memcpy(v, ctx->hash, 8 * sizeof(uint_64t)); for(j = 0; j < 80; j += 16) { v_cycle( 0, j); v_cycle( 1, j); v_cycle( 2, j); v_cycle( 3, j); v_cycle( 4, j); v_cycle( 5, j); v_cycle( 6, j); v_cycle( 7, j); v_cycle( 8, j); v_cycle( 9, j); v_cycle(10, j); v_cycle(11, j); v_cycle(12, j); v_cycle(13, j); v_cycle(14, j); v_cycle(15, j); } ctx->hash[0] += v[0]; ctx->hash[1] += v[1]; ctx->hash[2] += v[2]; ctx->hash[3] += v[3]; ctx->hash[4] += v[4]; ctx->hash[5] += v[5]; ctx->hash[6] += v[6]; ctx->hash[7] += v[7]; } __device__ void sha_end2(unsigned char hval[], sha512_ctx ctx[1], const unsigned int hlen) { uint_32t i = (uint_32t)(ctx->count[0] & SHA512_MASK); /* put bytes in the buffer in an order in which references to */ /* 32-bit words will put bytes with lower addresses into the */ /* top of 32 bit words on BOTH big and little endian machines */ bsw_64(ctx->wbuf, (i + 7) >> 3); /* we now need to mask valid bytes and add the padding which is */ /* a single 1 bit and as many zero bits as necessary. 
	   Note that */
	/* we can always add the first padding byte here because the   */
	/* buffer always has at least one empty slot                   */
	ctx->wbuf[i >> 3] &= li_64(ffffffffffffff00) << 8 * (~i & 7);
	ctx->wbuf[i >> 3] |= li_64(0000000000000080) << 8 * (~i & 7);

	/* we need 17 or more empty byte positions, one for the padding */
	/* byte (above) and sixteen for the length count. If there is   */
	/* not enough space, pad and empty the buffer                   */
	if(i > SHA512_BLOCK_SIZE - 17)
	{
		if(i < 120) ctx->wbuf[15] = 0;
		sha512_compile(ctx);
		i = 0;
	}
	else
		i = (i >> 3) + 1;

	while(i < 14)
		ctx->wbuf[i++] = 0;

	/* the following 64-bit length fields are assembled in the      */
	/* wrong byte order on little endian machines but this is       */
	/* corrected later since they are only ever used as 64-bit      */
	/* word values.                                                 */
	ctx->wbuf[14] = (ctx->count[1] << 3) | (ctx->count[0] >> 61);
	ctx->wbuf[15] = ctx->count[0] << 3;
	sha512_compile(ctx);

	/* extract the hash value as bytes in case the hash buffer is   */
	/* misaligned for 32-bit words                                  */
	for(i = 0; i < hlen; ++i)
		hval[i] = (unsigned char)(ctx->hash[i >> 3] >> (8 * (~i & 7)));
}

__device__ VOID_RETURN sha512_begin(sha512_ctx ctx[1])
{
	ctx->count[0] = ctx->count[1] = 0;
	memcpy(ctx->hash, i512, 8 * sizeof(uint_64t));
}

__device__ VOID_RETURN sha512_end(unsigned char hval[], sha512_ctx ctx[1])
{
	sha_end2(hval, ctx, SHA512_DIGEST_SIZE);
}

/* Compile 128 bytes of hash data into the SHA512 digest value   */
/* NOTE: this routine assumes that the byte order in the         */
/* ctx->wbuf[] at this point is in such an order that low        */
/* address bytes in the ORIGINAL byte stream placed in this      */
/* buffer will now go to the high end of words on BOTH big       */
/* and little endian systems                                     */
__device__ VOID_RETURN sha512_hash(const unsigned char data[], unsigned long len, sha512_ctx ctx[1])
{
	uint_32t pos = (uint_32t)(ctx->count[0] & SHA512_MASK),
		 space = SHA512_BLOCK_SIZE - pos;
	const unsigned char *sp = data;

	if((ctx->count[0] += len) < len)
		++(ctx->count[1]);

	while(len >= space)	/* transfer whole blocks while possible */
	{
		memcpy(((unsigned char*)ctx->wbuf) + pos, sp, space);
		sp += space;
		len -= space;
		space = SHA512_BLOCK_SIZE;
		pos = 0;
		bsw_64(ctx->wbuf, SHA512_BLOCK_SIZE >> 3);
		sha512_compile(ctx);
	}

	memcpy(((unsigned char*)ctx->wbuf) + pos, sp, len);
}

__device__ void hmac_truncate
(
	char *d1,	/* data to be truncated */
	char *d2,	/* truncated data */
	int len		/* length in bytes to keep */
)
{
	int i;
	for (i = 0; i < len; i++)
		d2[i] = d1[i];
}

__device__ void hmac_sha512
(
	char *k,	/* secret key */
	int lk,		/* length of the key in bytes */
	char *d,	/* data */
	int ld,		/* length of data in bytes */
	char *out,	/* output buffer, at least "t" bytes */
	int t
)
{
	sha512_ctx ictx, octx;
	char isha[SHA512_DIGESTSIZE], osha[SHA512_DIGESTSIZE];
	char key[SHA512_DIGESTSIZE];
	char buf[SHA512_BLOCKSIZE];
	int i;

	/* If the key is longer than the hash algorithm block size,
	   let key = sha512(key), as per HMAC specifications.
*/ if (lk > SHA512_BLOCKSIZE) { sha512_ctx tctx; sha512_begin (&tctx); sha512_hash ((unsigned char *) k, lk, &tctx); sha512_end ((unsigned char *) key, &tctx); k = key; lk = SHA512_DIGESTSIZE; burn (&tctx, sizeof(tctx)); // Prevent leaks } /**** Inner Digest ****/ sha512_begin (&ictx); /* Pad the key for inner digest */ for (i = 0; i < lk; ++i) buf[i] = (char) (k[i] ^ 0x36); for (i = lk; i < SHA512_BLOCKSIZE; ++i) buf[i] = 0x36; sha512_hash ((unsigned char *) buf, SHA512_BLOCKSIZE, &ictx); sha512_hash ((unsigned char *) d, ld, &ictx); sha512_end ((unsigned char *) isha, &ictx); /**** Outer Digest ****/ sha512_begin (&octx); for (i = 0; i < lk; ++i) buf[i] = (char) (k[i] ^ 0x5C); for (i = lk; i < SHA512_BLOCKSIZE; ++i) buf[i] = 0x5C; sha512_hash ((unsigned char *) buf, SHA512_BLOCKSIZE, &octx); sha512_hash ((unsigned char *) isha, SHA512_DIGESTSIZE, &octx); sha512_end ((unsigned char *) osha, &octx); /* truncate and print the results */ t = t > SHA512_DIGESTSIZE ? SHA512_DIGESTSIZE : t; hmac_truncate (osha, out, t); /* Prevent leaks */ burn (&ictx, sizeof(ictx)); burn (&octx, sizeof(octx)); burn (isha, sizeof(isha)); burn (osha, sizeof(osha)); burn (buf, sizeof(buf)); burn (key, sizeof(key)); } __device__ void derive_u_sha512 (char *pwd, int pwd_len, char *salt, int salt_len, int iterations, char *u, int b) { char j[SHA512_DIGESTSIZE], k[SHA512_DIGESTSIZE]; char init[128]; char counter[4]; int c, i; /* iteration 1 */ memset (counter, 0, 4); counter[3] = (char) b; memcpy (init, salt, salt_len); /* salt */ memcpy (&init[salt_len], counter, 4); /* big-endian block number */ /* remaining iterations */ for (c = 0; c < iterations; c++) { hmac_sha512 (pwd, pwd_len, !c ? init : j, !c ? salt_len + 4 : SHA512_DIGESTSIZE, !c ? j : k, SHA512_DIGESTSIZE); if (!c) memcpy (u, j, SHA512_DIGESTSIZE); else for (i = 0; i < SHA512_DIGESTSIZE; i++) { u[i] ^= k[i]; j[i] = k[i]; } } /* Prevent possible leaks. */ burn (j, sizeof(j)); burn (k, sizeof(k)); } __device__ void derive_key_sha512 (char *pwd, int pwd_len, char *salt, int salt_len, int iterations, char *dk, int dklen) { char u[SHA512_DIGESTSIZE]; int b, l, r; if (dklen % SHA512_DIGESTSIZE) { l = 1 + dklen / SHA512_DIGESTSIZE; } else { l = dklen / SHA512_DIGESTSIZE; } r = dklen - (l - 1) * SHA512_DIGESTSIZE; /* first l - 1 blocks */ for (b = 1; b <= l; b++) { derive_u_sha512 (pwd, pwd_len, salt, salt_len, iterations, u, b); if (b < l) { memcpy (dk, u, SHA512_DIGESTSIZE); dk += SHA512_DIGESTSIZE; } } /* last block */ memcpy (dk, u, r); /* Prevent possible leaks. 
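	   The scratch buffer u still holds the last derived key block at this
	   point, which is why it is burned before the function returns.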
*/ burn (u, sizeof(u)); } __device__ void sha1_begin(sha1_ctx ctx[1]) { ctx->count[0] = ctx->count[1] = 0; ctx->hash[0] = 0x67452301; ctx->hash[1] = 0xefcdab89; ctx->hash[2] = 0x98badcfe; ctx->hash[3] = 0x10325476; ctx->hash[4] = 0xc3d2e1f0; } #define q(v,n) v##n #define one_cycle(v,a,b,c,d,e,f,k,h) \ q(v,e) += rotr32(q(v,a),27) + \ f(q(v,b),q(v,c),q(v,d)) + k + h; \ q(v,b) = rotr32(q(v,b), 2) #define five_cycle(v,f,k,i) \ one_cycle(v, 0,1,2,3,4, f,k,hf(i )); \ one_cycle(v, 4,0,1,2,3, f,k,hf(i+1)); \ one_cycle(v, 3,4,0,1,2, f,k,hf(i+2)); \ one_cycle(v, 2,3,4,0,1, f,k,hf(i+3)); \ one_cycle(v, 1,2,3,4,0, f,k,hf(i+4)) __device__ void sha1_compile(sha1_ctx ctx[1]) { sha1_32t *w = ctx->wbuf; sha1_32t v0, v1, v2, v3, v4; v0 = ctx->hash[0]; v1 = ctx->hash[1]; v2 = ctx->hash[2]; v3 = ctx->hash[3]; v4 = ctx->hash[4]; #undef hf #define hf(i) w[i] five_cycle(v, ch, 0x5a827999, 0); five_cycle(v, ch, 0x5a827999, 5); five_cycle(v, ch, 0x5a827999, 10); one_cycle(v,0,1,2,3,4, ch, 0x5a827999, hf(15)); \ #undef hf #define hf(i) (w[(i) & 15] = rotl32( \ w[((i) + 13) & 15] ^ w[((i) + 8) & 15] \ ^ w[((i) + 2) & 15] ^ w[(i) & 15], 1)) one_cycle(v,4,0,1,2,3, ch, 0x5a827999, hf(16)); one_cycle(v,3,4,0,1,2, ch, 0x5a827999, hf(17)); one_cycle(v,2,3,4,0,1, ch, 0x5a827999, hf(18)); one_cycle(v,1,2,3,4,0, ch, 0x5a827999, hf(19)); five_cycle(v, parity, 0x6ed9eba1, 20); five_cycle(v, parity, 0x6ed9eba1, 25); five_cycle(v, parity, 0x6ed9eba1, 30); five_cycle(v, parity, 0x6ed9eba1, 35); five_cycle(v, maj, 0x8f1bbcdc, 40); five_cycle(v, maj, 0x8f1bbcdc, 45); five_cycle(v, maj, 0x8f1bbcdc, 50); five_cycle(v, maj, 0x8f1bbcdc, 55); five_cycle(v, parity, 0xca62c1d6, 60); five_cycle(v, parity, 0xca62c1d6, 65); five_cycle(v, parity, 0xca62c1d6, 70); five_cycle(v, parity, 0xca62c1d6, 75); ctx->hash[0] += v0; ctx->hash[1] += v1; ctx->hash[2] += v2; ctx->hash[3] += v3; ctx->hash[4] += v4; } /* SHA1 hash data in an array of bytes into hash buffer and */ /* call the hash_compile function as required. */ __device__ void sha1_hash(const unsigned char data[], u32 len, sha1_ctx ctx[1]) { sha1_32t pos = (sha1_32t)(ctx->count[0] & SHA1_MASK), space = SHA1_BLOCK_SIZE - pos; const unsigned char *sp = data; if((ctx->count[0] += len) < len) ++(ctx->count[1]); while(len >= space) /* tranfer whole blocks if possible */ { memcpy(((unsigned char*)ctx->wbuf) + pos, sp, space); sp += space; len -= space; space = SHA1_BLOCK_SIZE; pos = 0; bsw_32(ctx->wbuf, SHA1_BLOCK_SIZE >> 2); sha1_compile(ctx); } memcpy(((unsigned char*)ctx->wbuf) + pos, sp, len); } /* SHA1 final padding and digest calculation */ __device__ void sha1_end(unsigned char hval[], sha1_ctx ctx[1]) { sha1_32t i = (sha1_32t)(ctx->count[0] & SHA1_MASK); /* put bytes in the buffer in an order in which references to */ /* 32-bit words will put bytes with lower addresses into the */ /* top of 32 bit words on BOTH big and little endian machines */ bsw_32(ctx->wbuf, (i + 3) >> 2); /* we now need to mask valid bytes and add the padding which is */ /* a single 1 bit and as many zero bits as necessary. Note that */ /* we can always add the first padding byte here because the */ /* buffer always has at least one empty slot */ ctx->wbuf[i >> 2] &= 0xffffff80 << 8 * (~i & 3); ctx->wbuf[i >> 2] |= 0x00000080 << 8 * (~i & 3); /* we need 9 or more empty positions, one for the padding byte */ /* (above) and eight for the length count. 
If there is not */ /* enough space, pad and empty the buffer */ if(i > SHA1_BLOCK_SIZE - 9) { if(i < 60) ctx->wbuf[15] = 0; sha1_compile(ctx); i = 0; } else /* compute a word index for the empty buffer positions */ i = (i >> 2) + 1; while(i < 14) /* and zero pad all but last two positions */ ctx->wbuf[i++] = 0; /* the following 32-bit length fields are assembled in the */ /* wrong byte order on little endian machines but this is */ /* corrected later since they are only ever used as 32-bit */ /* word values. */ ctx->wbuf[14] = (ctx->count[1] << 3) | (ctx->count[0] >> 29); ctx->wbuf[15] = ctx->count[0] << 3; sha1_compile(ctx); /* extract the hash value as bytes in case the hash buffer is */ /* misaligned for 32-bit words */ for(i = 0; i < SHA1_DIGEST_SIZE; ++i) hval[i] = (unsigned char)(ctx->hash[i >> 2] >> (8 * (~i & 3))); } /* Deprecated/legacy */ __device__ void hmac_sha1 ( char *k, /* secret key */ int lk, /* length of the key in bytes */ char *d, /* data */ int ld, /* length of data in bytes */ char *out, /* output buffer, at least "t" bytes */ int t ) { sha1_ctx ictx, octx; char isha[SHA1_DIGESTSIZE], osha[SHA1_DIGESTSIZE]; char key[SHA1_DIGESTSIZE]; char buf[SHA1_BLOCKSIZE]; int i; /* If the key is longer than the hash algorithm block size, let key = sha1(key), as per HMAC specifications. */ if (lk > SHA1_BLOCKSIZE) { sha1_ctx tctx; sha1_begin (&tctx); sha1_hash ((unsigned char *) k, lk, &tctx); sha1_end ((unsigned char *) key, &tctx); k = key; lk = SHA1_DIGESTSIZE; burn (&tctx, sizeof(tctx)); // Prevent leaks } /**** Inner Digest ****/ sha1_begin (&ictx); /* Pad the key for inner digest */ for (i = 0; i < lk; ++i) buf[i] = (char) (k[i] ^ 0x36); for (i = lk; i < SHA1_BLOCKSIZE; ++i) buf[i] = 0x36; sha1_hash ((unsigned char *) buf, SHA1_BLOCKSIZE, &ictx); sha1_hash ((unsigned char *) d, ld, &ictx); sha1_end ((unsigned char *) isha, &ictx); /**** Outer Digest ****/ sha1_begin (&octx); for (i = 0; i < lk; ++i) buf[i] = (char) (k[i] ^ 0x5C); for (i = lk; i < SHA1_BLOCKSIZE; ++i) buf[i] = 0x5C; sha1_hash ((unsigned char *) buf, SHA1_BLOCKSIZE, &octx); sha1_hash ((unsigned char *) isha, SHA1_DIGESTSIZE, &octx); sha1_end ((unsigned char *) osha, &octx); /* truncate and print the results */ t = t > SHA1_DIGESTSIZE ? SHA1_DIGESTSIZE : t; hmac_truncate (osha, out, t); /* Prevent leaks */ burn (&ictx, sizeof(ictx)); burn (&octx, sizeof(octx)); burn (isha, sizeof(isha)); burn (osha, sizeof(osha)); burn (buf, sizeof(buf)); burn (key, sizeof(key)); } /* Deprecated/legacy */ __device__ void derive_u_sha1 (char *pwd, int pwd_len, char *salt, int salt_len, int iterations, char *u, int b) { char j[SHA1_DIGESTSIZE], k[SHA1_DIGESTSIZE]; char init[128]; char counter[4]; int c, i; /* iteration 1 */ memset (counter, 0, 4); counter[3] = (char) b; memcpy (init, salt, salt_len); /* salt */ memcpy (&init[salt_len], counter, 4); /* big-endian block number */ /* remaining iterations */ for (c = 0; c < iterations; c++) { hmac_sha1 (pwd, pwd_len, !c ? init : j, !c ? salt_len + 4 : SHA1_DIGESTSIZE, !c ? j : k, SHA1_DIGESTSIZE); if (!c) memcpy (u, j, SHA1_DIGESTSIZE); else for (i = 0; i < SHA1_DIGESTSIZE; i++) { u[i] ^= k[i]; j[i] = k[i]; } } /* Prevent possible leaks. 
*/ burn (j, sizeof(j)); burn (k, sizeof(k)); } /* Deprecated/legacy */ __device__ void derive_key_sha1 (char *pwd, int pwd_len, char *salt, int salt_len, int iterations, char *dk, int dklen) { char u[SHA1_DIGESTSIZE]; int b, l, r; if (dklen % SHA1_DIGESTSIZE) { l = 1 + dklen / SHA1_DIGESTSIZE; } else { l = dklen / SHA1_DIGESTSIZE; } r = dklen - (l - 1) * SHA1_DIGESTSIZE; /* first l - 1 blocks */ for (b = 1; b <= l; b++) { derive_u_sha1 (pwd, pwd_len, salt, salt_len, iterations, u, b); if (b < l) { memcpy (dk, u, SHA1_DIGESTSIZE); dk += SHA1_DIGESTSIZE; } } /* last block */ memcpy (dk, u, r); /* Prevent possible leaks. */ burn (u, sizeof(u)); } __device__ void WHIRLPOOL_init(struct NESSIEstruct * const structpointer) { int i; memset(structpointer->bitLength, 0, 32); structpointer->bufferBits = structpointer->bufferPos = 0; structpointer->buffer[0] = 0; /* it's only necessary to cleanup buffer[bufferPos] */ for (i = 0; i < 8; i++) { structpointer->hash[i] = 0L; /* initial value */ } } /** * The core Whirlpool transform. */ __device__ void processBuffer(struct NESSIEstruct * const structpointer, PTHREAD_DATA td) { int i, r; #define K td->K #define block td->block #define state td->state #define L td->L #define buffer td->buffer /* * map the buffer to a block: */ for (i = 0; i < 8; i++, buffer += 8) { block[i] = (((u64)buffer[0] ) << 56) ^ (((u64)buffer[1] & 0xffL) << 48) ^ (((u64)buffer[2] & 0xffL) << 40) ^ (((u64)buffer[3] & 0xffL) << 32) ^ (((u64)buffer[4] & 0xffL) << 24) ^ (((u64)buffer[5] & 0xffL) << 16) ^ (((u64)buffer[6] & 0xffL) << 8) ^ (((u64)buffer[7] & 0xffL) ); } /* * compute and apply K^0 to the cipher state: */ state[0] = block[0] ^ (K[0] = structpointer->hash[0]); state[1] = block[1] ^ (K[1] = structpointer->hash[1]); state[2] = block[2] ^ (K[2] = structpointer->hash[2]); state[3] = block[3] ^ (K[3] = structpointer->hash[3]); state[4] = block[4] ^ (K[4] = structpointer->hash[4]); state[5] = block[5] ^ (K[5] = structpointer->hash[5]); state[6] = block[6] ^ (K[6] = structpointer->hash[6]); state[7] = block[7] ^ (K[7] = structpointer->hash[7]); /* * iterate over all rounds: */ for (r = 1; r <= R; r++) { /* * compute K^r from K^{r-1}: */ L[0] = C0[(int)(K[0] >> 56) ] ^ C1[(int)(K[7] >> 48) & 0xff] ^ C2[(int)(K[6] >> 40) & 0xff] ^ C3[(int)(K[5] >> 32) & 0xff] ^ C4[(int)(K[4] >> 24) & 0xff] ^ C5[(int)(K[3] >> 16) & 0xff] ^ C6[(int)(K[2] >> 8) & 0xff] ^ C7[(int)(K[1] ) & 0xff] ^ rc[r]; L[1] = C0[(int)(K[1] >> 56) ] ^ C1[(int)(K[0] >> 48) & 0xff] ^ C2[(int)(K[7] >> 40) & 0xff] ^ C3[(int)(K[6] >> 32) & 0xff] ^ C4[(int)(K[5] >> 24) & 0xff] ^ C5[(int)(K[4] >> 16) & 0xff] ^ C6[(int)(K[3] >> 8) & 0xff] ^ C7[(int)(K[2] ) & 0xff]; L[2] = C0[(int)(K[2] >> 56) ] ^ C1[(int)(K[1] >> 48) & 0xff] ^ C2[(int)(K[0] >> 40) & 0xff] ^ C3[(int)(K[7] >> 32) & 0xff] ^ C4[(int)(K[6] >> 24) & 0xff] ^ C5[(int)(K[5] >> 16) & 0xff] ^ C6[(int)(K[4] >> 8) & 0xff] ^ C7[(int)(K[3] ) & 0xff]; L[3] = C0[(int)(K[3] >> 56) ] ^ C1[(int)(K[2] >> 48) & 0xff] ^ C2[(int)(K[1] >> 40) & 0xff] ^ C3[(int)(K[0] >> 32) & 0xff] ^ C4[(int)(K[7] >> 24) & 0xff] ^ C5[(int)(K[6] >> 16) & 0xff] ^ C6[(int)(K[5] >> 8) & 0xff] ^ C7[(int)(K[4] ) & 0xff]; L[4] = C0[(int)(K[4] >> 56) ] ^ C1[(int)(K[3] >> 48) & 0xff] ^ C2[(int)(K[2] >> 40) & 0xff] ^ C3[(int)(K[1] >> 32) & 0xff] ^ C4[(int)(K[0] >> 24) & 0xff] ^ C5[(int)(K[7] >> 16) & 0xff] ^ C6[(int)(K[6] >> 8) & 0xff] ^ C7[(int)(K[5] ) & 0xff]; L[5] = C0[(int)(K[5] >> 56) ] ^ C1[(int)(K[4] >> 48) & 0xff] ^ C2[(int)(K[3] >> 40) & 0xff] ^ C3[(int)(K[2] >> 32) & 0xff] ^ C4[(int)(K[1] >> 24) & 0xff] 
^ C5[(int)(K[0] >> 16) & 0xff] ^ C6[(int)(K[7] >> 8) & 0xff] ^ C7[(int)(K[6] ) & 0xff]; L[6] = C0[(int)(K[6] >> 56) ] ^ C1[(int)(K[5] >> 48) & 0xff] ^ C2[(int)(K[4] >> 40) & 0xff] ^ C3[(int)(K[3] >> 32) & 0xff] ^ C4[(int)(K[2] >> 24) & 0xff] ^ C5[(int)(K[1] >> 16) & 0xff] ^ C6[(int)(K[0] >> 8) & 0xff] ^ C7[(int)(K[7] ) & 0xff]; L[7] = C0[(int)(K[7] >> 56) ] ^ C1[(int)(K[6] >> 48) & 0xff] ^ C2[(int)(K[5] >> 40) & 0xff] ^ C3[(int)(K[4] >> 32) & 0xff] ^ C4[(int)(K[3] >> 24) & 0xff] ^ C5[(int)(K[2] >> 16) & 0xff] ^ C6[(int)(K[1] >> 8) & 0xff] ^ C7[(int)(K[0] ) & 0xff]; K[0] = L[0]; K[1] = L[1]; K[2] = L[2]; K[3] = L[3]; K[4] = L[4]; K[5] = L[5]; K[6] = L[6]; K[7] = L[7]; /* * apply the r-th round transformation: */ L[0] = C0[(int)(state[0] >> 56) ] ^ C1[(int)(state[7] >> 48) & 0xff] ^ C2[(int)(state[6] >> 40) & 0xff] ^ C3[(int)(state[5] >> 32) & 0xff] ^ C4[(int)(state[4] >> 24) & 0xff] ^ C5[(int)(state[3] >> 16) & 0xff] ^ C6[(int)(state[2] >> 8) & 0xff] ^ C7[(int)(state[1] ) & 0xff] ^ K[0]; L[1] = C0[(int)(state[1] >> 56) ] ^ C1[(int)(state[0] >> 48) & 0xff] ^ C2[(int)(state[7] >> 40) & 0xff] ^ C3[(int)(state[6] >> 32) & 0xff] ^ C4[(int)(state[5] >> 24) & 0xff] ^ C5[(int)(state[4] >> 16) & 0xff] ^ C6[(int)(state[3] >> 8) & 0xff] ^ C7[(int)(state[2] ) & 0xff] ^ K[1]; L[2] = C0[(int)(state[2] >> 56) ] ^ C1[(int)(state[1] >> 48) & 0xff] ^ C2[(int)(state[0] >> 40) & 0xff] ^ C3[(int)(state[7] >> 32) & 0xff] ^ C4[(int)(state[6] >> 24) & 0xff] ^ C5[(int)(state[5] >> 16) & 0xff] ^ C6[(int)(state[4] >> 8) & 0xff] ^ C7[(int)(state[3] ) & 0xff] ^ K[2]; L[3] = C0[(int)(state[3] >> 56) ] ^ C1[(int)(state[2] >> 48) & 0xff] ^ C2[(int)(state[1] >> 40) & 0xff] ^ C3[(int)(state[0] >> 32) & 0xff] ^ C4[(int)(state[7] >> 24) & 0xff] ^ C5[(int)(state[6] >> 16) & 0xff] ^ C6[(int)(state[5] >> 8) & 0xff] ^ C7[(int)(state[4] ) & 0xff] ^ K[3]; L[4] = C0[(int)(state[4] >> 56) ] ^ C1[(int)(state[3] >> 48) & 0xff] ^ C2[(int)(state[2] >> 40) & 0xff] ^ C3[(int)(state[1] >> 32) & 0xff] ^ C4[(int)(state[0] >> 24) & 0xff] ^ C5[(int)(state[7] >> 16) & 0xff] ^ C6[(int)(state[6] >> 8) & 0xff] ^ C7[(int)(state[5] ) & 0xff] ^ K[4]; L[5] = C0[(int)(state[5] >> 56) ] ^ C1[(int)(state[4] >> 48) & 0xff] ^ C2[(int)(state[3] >> 40) & 0xff] ^ C3[(int)(state[2] >> 32) & 0xff] ^ C4[(int)(state[1] >> 24) & 0xff] ^ C5[(int)(state[0] >> 16) & 0xff] ^ C6[(int)(state[7] >> 8) & 0xff] ^ C7[(int)(state[6] ) & 0xff] ^ K[5]; L[6] = C0[(int)(state[6] >> 56) ] ^ C1[(int)(state[5] >> 48) & 0xff] ^ C2[(int)(state[4] >> 40) & 0xff] ^ C3[(int)(state[3] >> 32) & 0xff] ^ C4[(int)(state[2] >> 24) & 0xff] ^ C5[(int)(state[1] >> 16) & 0xff] ^ C6[(int)(state[0] >> 8) & 0xff] ^ C7[(int)(state[7] ) & 0xff] ^ K[6]; L[7] = C0[(int)(state[7] >> 56) ] ^ C1[(int)(state[6] >> 48) & 0xff] ^ C2[(int)(state[5] >> 40) & 0xff] ^ C3[(int)(state[4] >> 32) & 0xff] ^ C4[(int)(state[3] >> 24) & 0xff] ^ C5[(int)(state[2] >> 16) & 0xff] ^ C6[(int)(state[1] >> 8) & 0xff] ^ C7[(int)(state[0] ) & 0xff] ^ K[7]; state[0] = L[0]; state[1] = L[1]; state[2] = L[2]; state[3] = L[3]; state[4] = L[4]; state[5] = L[5]; state[6] = L[6]; state[7] = L[7]; } /* * apply the Miyaguchi-Preneel compression function: */ structpointer->hash[0] ^= state[0] ^ block[0]; structpointer->hash[1] ^= state[1] ^ block[1]; structpointer->hash[2] ^= state[2] ^ block[2]; structpointer->hash[3] ^= state[3] ^ block[3]; structpointer->hash[4] ^= state[4] ^ block[4]; structpointer->hash[5] ^= state[5] ^ block[5]; structpointer->hash[6] ^= state[6] ^ block[6]; structpointer->hash[7] ^= state[7] ^ block[7]; } #undef 
buffer #undef K #undef block #undef state #undef L /** * Delivers input data to the hashing algorithm. * * @param source plaintext data to hash. * @param sourceBits how many bits of plaintext to process. * * This method maintains the invariant: bufferBits < DIGESTBITS */ __device__ void WHIRLPOOL_add(const unsigned char * const source, u32 sourceBits, struct NESSIEstruct * const structpointer, PTHREAD_DATA td) { /* sourcePos | +-------+-------+------- ||||||||||||||||||||| source +-------+-------+------- +-------+-------+-------+-------+-------+------- |||||||||||||||||||||| buffer +-------+-------+-------+-------+-------+------- | bufferPos */ int sourcePos = 0; /* index of leftmost source u8 containing data (1 to 8 bits). */ int sourceGap = (8 - ((int)sourceBits & 7)) & 7; /* space on source[sourcePos]. */ int bufferRem = structpointer->bufferBits & 7; /* occupied bits on buffer[bufferPos]. */ int i; u32 b, carry; u8 *buffer = structpointer->buffer; u8 *bitLength = structpointer->bitLength; int bufferBits = structpointer->bufferBits; int bufferPos = structpointer->bufferPos; /* * tally the length of the added data: */ u64 value = sourceBits; for (i = 31, carry = 0; i >= 0 && (carry != 0 || value != LL(0)); i--) { carry += bitLength[i] + ((u32)value & 0xff); bitLength[i] = (u8)carry; carry >>= 8; value >>= 8; } /* * process data in chunks of 8 bits (a more efficient approach would be to take whole-word chunks): */ while (sourceBits > 8) { /* N.B. at least source[sourcePos] and source[sourcePos+1] contain data. */ /* * take a byte from the source: */ b = ((source[sourcePos] << sourceGap) & 0xff) | ((source[sourcePos + 1] & 0xff) >> (8 - sourceGap)); /* * process this byte: */ buffer[bufferPos++] |= (u8)(b >> bufferRem); bufferBits += 8 - bufferRem; /* bufferBits = 8*bufferPos; */ if (bufferBits == DIGESTBITS) { /* * process data block: */ processBuffer(structpointer, td); /* * reset buffer: */ bufferBits = bufferPos = 0; } buffer[bufferPos] = (u8) (b << (8 - bufferRem)); bufferBits += bufferRem; /* * proceed to remaining data: */ sourceBits -= 8; sourcePos++; } /* now 0 <= sourceBits <= 8; * furthermore, all data (if any is left) is in source[sourcePos]. */ if (sourceBits > 0) { b = (source[sourcePos] << sourceGap) & 0xff; /* bits are left-justified on b. */ /* * process the remaining bits: */ buffer[bufferPos] |= b >> bufferRem; } else { b = 0; } if (bufferRem + sourceBits < 8) { /* * all remaining data fits on buffer[bufferPos], * and there still remains some space. */ bufferBits += sourceBits; } else { /* * buffer[bufferPos] is full: */ bufferPos++; bufferBits += 8 - bufferRem; /* bufferBits = 8*bufferPos; */ sourceBits -= 8 - bufferRem; /* now 0 <= sourceBits < 8; * furthermore, all data (if any is left) is in source[sourcePos]. */ if (bufferBits == DIGESTBITS) { /* * process data block: */ processBuffer(structpointer, td); /* * reset buffer: */ bufferBits = bufferPos = 0; } buffer[bufferPos] = (u8) (b << (8 - bufferRem)); bufferBits += (int)sourceBits; } structpointer->bufferBits = bufferBits; structpointer->bufferPos = bufferPos; } /** * Get the hash value from the hashing state. 
* * This method uses the invariant: bufferBits < DIGESTBITS */ __device__ void WHIRLPOOL_finalize(struct NESSIEstruct * const structpointer, unsigned char * const result, PTHREAD_DATA td) { int i; u8 *buffer = structpointer->buffer; u8 *bitLength = structpointer->bitLength; int bufferBits = structpointer->bufferBits; int bufferPos = structpointer->bufferPos; u8 *digest = result; /* * append a '1'-bit: */ buffer[bufferPos] |= 0x80U >> (bufferBits & 7); bufferPos++; /* all remaining bits on the current u8 are set to zero. */ /* * pad with zero bits to complete (N*WBLOCKBITS - LENGTHBITS) bits: */ if (bufferPos > WBLOCKBYTES - LENGTHBYTES) { if (bufferPos < WBLOCKBYTES) { memset(&buffer[bufferPos], 0, WBLOCKBYTES - bufferPos); } /* * process data block: */ processBuffer(structpointer, td); /* * reset buffer: */ bufferPos = 0; } if (bufferPos < WBLOCKBYTES - LENGTHBYTES) { memset(&buffer[bufferPos], 0, (WBLOCKBYTES - LENGTHBYTES) - bufferPos); } bufferPos = WBLOCKBYTES - LENGTHBYTES; /* * append bit length of hashed data: */ memcpy(&buffer[WBLOCKBYTES - LENGTHBYTES], bitLength, LENGTHBYTES); /* * process data block: */ processBuffer(structpointer, td); /* * return the completed message digest: */ for (i = 0; i < DIGESTBYTES/8; i++) { digest[0] = (u8)(structpointer->hash[i] >> 56); digest[1] = (u8)(structpointer->hash[i] >> 48); digest[2] = (u8)(structpointer->hash[i] >> 40); digest[3] = (u8)(structpointer->hash[i] >> 32); digest[4] = (u8)(structpointer->hash[i] >> 24); digest[5] = (u8)(structpointer->hash[i] >> 16); digest[6] = (u8)(structpointer->hash[i] >> 8); digest[7] = (u8)(structpointer->hash[i] ); digest += 8; } structpointer->bufferBits = bufferBits; structpointer->bufferPos = bufferPos; } __device__ void hmac_whirlpool ( char *k, /* secret key */ int lk, /* length of the key in bytes */ char *d, /* data */ int ld, /* length of data in bytes */ char *out, /* output buffer, at least "t" bytes */ int t, PTHREAD_DATA td ) { #define ictx td->ictx #define octx td->octx #define iwhi td->iwhi #define owhi td->owhi #define key td->key #define buf td->buf #define tctx td->tctx int i; /* If the key is longer than the hash algorithm block size, let key = whirlpool(key), as per HMAC specifications. */ if (lk > WHIRLPOOL_BLOCKSIZE) { WHIRLPOOL_init (&tctx); WHIRLPOOL_add ((unsigned char *) k, lk * 8, &tctx, td); WHIRLPOOL_finalize (&tctx, (unsigned char *) key, td); k = key; lk = WHIRLPOOL_DIGESTSIZE; burn (&tctx, sizeof(tctx)); // Prevent leaks } /**** Inner Digest ****/ WHIRLPOOL_init (&ictx); /* Pad the key for inner digest */ for (i = 0; i < lk; ++i) buf[i] = (char) (k[i] ^ 0x36); for (i = lk; i < WHIRLPOOL_BLOCKSIZE; ++i) buf[i] = 0x36; WHIRLPOOL_add ((unsigned char *) buf, WHIRLPOOL_BLOCKSIZE * 8, &ictx, td); WHIRLPOOL_add ((unsigned char *) d, ld * 8, &ictx, td); WHIRLPOOL_finalize (&ictx, (unsigned char *) iwhi, td); /**** Outer Digest ****/ WHIRLPOOL_init (&octx); for (i = 0; i < lk; ++i) buf[i] = (char) (k[i] ^ 0x5C); for (i = lk; i < WHIRLPOOL_BLOCKSIZE; ++i) buf[i] = 0x5C; WHIRLPOOL_add ((unsigned char *) buf, WHIRLPOOL_BLOCKSIZE * 8, &octx, td); WHIRLPOOL_add ((unsigned char *) iwhi, WHIRLPOOL_DIGESTSIZE * 8, &octx, td); WHIRLPOOL_finalize (&octx, (unsigned char *) owhi, td); /* truncate and print the results */ t = t > WHIRLPOOL_DIGESTSIZE ? WHIRLPOOL_DIGESTSIZE : t; hmac_truncate (owhi, out, t); /* Prevent possible leaks. 
*/ burn (&ictx, sizeof(ictx)); burn (&octx, sizeof(octx)); burn (owhi, sizeof(owhi)); burn (iwhi, sizeof(iwhi)); burn (buf, sizeof(buf)); burn (key, sizeof(key)); } #undef ictx #undef octx #undef tctx #undef iwhi #undef owhi #undef key #undef buf __device__ void derive_u_whirlpool (char *pwd, int pwd_len, char *salt, int salt_len, int iterations, char *u, int b, PTHREAD_DATA td) { #define j td->j #define k td->k #define init td->init char counter[4]; int c, i; /* iteration 1 */ memset (counter, 0, 4); counter[3] = (char) b; memcpy (init, salt, salt_len); /* salt */ memcpy (&init[salt_len], counter, 4); /* big-endian block number */ /* remaining iterations */ for (c = 0; c < iterations; c++) { hmac_whirlpool (pwd, pwd_len, !c ? init : j, !c ? salt_len + 4 : WHIRLPOOL_DIGESTSIZE, !c ? j : k, WHIRLPOOL_DIGESTSIZE, td); if (!c) memcpy (u, j, WHIRLPOOL_DIGESTSIZE); else for (i = 0; i < WHIRLPOOL_DIGESTSIZE; i++) { u[i] ^= k[i]; j[i] = k[i]; } } /* Prevent possible leaks. */ burn (j, sizeof(j)); burn (k, sizeof(k)); } #undef j #undef k #undef init __device__ void derive_key_whirlpool (char *pwd, int pwd_len, char *salt, int salt_len, int iterations, char *dk, int dklen, PTHREAD_DATA td) { #define u td->u int b, l, r; if (dklen % WHIRLPOOL_DIGESTSIZE) { l = 1 + dklen / WHIRLPOOL_DIGESTSIZE; } else { l = dklen / WHIRLPOOL_DIGESTSIZE; } r = dklen - (l - 1) * WHIRLPOOL_DIGESTSIZE; /* first l - 1 blocks */ for (b = 1; b <= l; b++) { derive_u_whirlpool (pwd, pwd_len, salt, salt_len, iterations, u, b, td); if (b < l) { memcpy (dk, u, WHIRLPOOL_DIGESTSIZE); dk += WHIRLPOOL_DIGESTSIZE; } } /* last block */ memcpy (dk, u, r); /* Prevent possible leaks. */ burn (u, sizeof(u)); } #undef u __device__ int CipherGetBlockSize (int cipherId) { return CipherGet (cipherId) -> BlockSize; } __device__ int CipherGetKeyScheduleSize (int cipherId) { return CipherGet (cipherId) -> KeyScheduleSize; } // Returns sum of key schedule sizes of all ciphers of the EA __device__ int EAGetKeyScheduleSize (int ea) { int i = EAGetFirstCipher(ea); int size = CipherGetKeyScheduleSize (i); while (i = EAGetNextCipher(ea, i)) { size += CipherGetKeyScheduleSize (i); } return size; } #define vf1(x,r,c) (x) #define rf1(r,c) (r) #define rf2(r,c) ((8+r-c)&3) #define bval(x,n) ((uint_8t)((x) >> (8 * (n)))) #define four_tables(x,tab,vf,rf,c) \ ( tab[0][bval(vf(x,0,c),rf(0,c))] \ ^ tab[1][bval(vf(x,1,c),rf(1,c))] \ ^ tab[2][bval(vf(x,2,c),rf(2,c))] \ ^ tab[3][bval(vf(x,3,c),rf(3,c))]) #define t_use(m,n) t_##m##n #define ls_box(x,c) four_tables(x,t_use(f,l),vf1,rf2,c) #define kef8(k,i) \ { k[8*(i)+ 8] = ss[0] ^= ls_box(ss[7],3) ^ t_use(r,c)[i]; \ k[8*(i)+ 9] = ss[1] ^= ss[0]; \ k[8*(i)+10] = ss[2] ^= ss[1]; \ k[8*(i)+11] = ss[3] ^= ss[2]; \ } #define ke8(k,i) \ { kef8(k,i); \ k[8*(i)+12] = ss[4] ^= ls_box(ss[3],0); \ k[8*(i)+13] = ss[5] ^= ss[4]; \ k[8*(i)+14] = ss[6] ^= ss[5]; \ k[8*(i)+15] = ss[7] ^= ss[6]; \ } #define word_in(x,c) (*((uint_32t*)(x)+(c))) __device__ AES_RETURN aes_encrypt_key256(const unsigned char *key, aes_encrypt_ctx cx[1]) { uint_32t ss[8]; cx->ks[0] = ss[0] = word_in(key, 0); cx->ks[1] = ss[1] = word_in(key, 1); cx->ks[2] = ss[2] = word_in(key, 2); cx->ks[3] = ss[3] = word_in(key, 3); cx->ks[4] = ss[4] = word_in(key, 4); cx->ks[5] = ss[5] = word_in(key, 5); cx->ks[6] = ss[6] = word_in(key, 6); cx->ks[7] = ss[7] = word_in(key, 7); ke8(cx->ks, 0); ke8(cx->ks, 1); ke8(cx->ks, 2); ke8(cx->ks, 3); ke8(cx->ks, 4); ke8(cx->ks, 5); kef8(cx->ks, 6); cx->inf.l = 0; cx->inf.b[0] = 14 * 16; return EXIT_SUCCESS; } #define v(n,i) ((n) 
- (i) + 2 * ((i) & 3)) #define kdf8(k,i) \ { ss[0] ^= ls_box(ss[7],3) ^ t_use(r,c)[i]; k[v(56,(8*(i))+ 8)] = ff(ss[0]); \ ss[1] ^= ss[0]; k[v(56,(8*(i))+ 9)] = ff(ss[1]); \ ss[2] ^= ss[1]; k[v(56,(8*(i))+10)] = ff(ss[2]); \ ss[3] ^= ss[2]; k[v(56,(8*(i))+11)] = ff(ss[3]); \ ss[4] ^= ls_box(ss[3],0); k[v(56,(8*(i))+12)] = ff(ss[4]); \ ss[5] ^= ss[4]; k[v(56,(8*(i))+13)] = ff(ss[5]); \ ss[6] ^= ss[5]; k[v(56,(8*(i))+14)] = ff(ss[6]); \ ss[7] ^= ss[6]; k[v(56,(8*(i))+15)] = ff(ss[7]); \ } #define kd8(k,i) \ { ss[8] = ls_box(ss[7],3) ^ t_use(r,c)[i]; \ ss[0] ^= ss[8]; ss[8] = ff(ss[8]); k[v(56,(8*(i))+ 8)] = ss[8] ^= k[v(56,(8*(i)))]; \ ss[1] ^= ss[0]; k[v(56,(8*(i))+ 9)] = ss[8] ^= k[v(56,(8*(i))+ 1)]; \ ss[2] ^= ss[1]; k[v(56,(8*(i))+10)] = ss[8] ^= k[v(56,(8*(i))+ 2)]; \ ss[3] ^= ss[2]; k[v(56,(8*(i))+11)] = ss[8] ^= k[v(56,(8*(i))+ 3)]; \ ss[8] = ls_box(ss[3],0); \ ss[4] ^= ss[8]; ss[8] = ff(ss[8]); k[v(56,(8*(i))+12)] = ss[8] ^= k[v(56,(8*(i))+ 4)]; \ ss[5] ^= ss[4]; k[v(56,(8*(i))+13)] = ss[8] ^= k[v(56,(8*(i))+ 5)]; \ ss[6] ^= ss[5]; k[v(56,(8*(i))+14)] = ss[8] ^= k[v(56,(8*(i))+ 6)]; \ ss[7] ^= ss[6]; k[v(56,(8*(i))+15)] = ss[8] ^= k[v(56,(8*(i))+ 7)]; \ } #define kdl8(k,i) \ { ss[0] ^= ls_box(ss[7],3) ^ t_use(r,c)[i]; k[v(56,(8*(i))+ 8)] = ss[0]; \ ss[1] ^= ss[0]; k[v(56,(8*(i))+ 9)] = ss[1]; \ ss[2] ^= ss[1]; k[v(56,(8*(i))+10)] = ss[2]; \ ss[3] ^= ss[2]; k[v(56,(8*(i))+11)] = ss[3]; \ } #define inv_mcol(x) four_tables(x,t_use(i,m),vf1,rf1,0) #define ff(x) inv_mcol(x) __device__ AES_RETURN aes_decrypt_key256(const unsigned char *key, aes_decrypt_ctx cx[1]) { uint_32t ss[9]; cx->ks[v(56,(0))] = ss[0] = word_in(key, 0); cx->ks[v(56,(1))] = ss[1] = word_in(key, 1); cx->ks[v(56,(2))] = ss[2] = word_in(key, 2); cx->ks[v(56,(3))] = ss[3] = word_in(key, 3); cx->ks[v(56,(4))] = ff(ss[4] = word_in(key, 4)); cx->ks[v(56,(5))] = ff(ss[5] = word_in(key, 5)); cx->ks[v(56,(6))] = ff(ss[6] = word_in(key, 6)); cx->ks[v(56,(7))] = ff(ss[7] = word_in(key, 7)); kdf8(cx->ks, 0); kd8(cx->ks, 1); kd8(cx->ks, 2); kd8(cx->ks, 3); kd8(cx->ks, 4); kd8(cx->ks, 5); kdl8(cx->ks, 6); cx->inf.l = 0; cx->inf.b[0] = 14 * 16; return EXIT_SUCCESS; } __device__ void LKf (u32 *k, unsigned int r, u32 *a, u32 *b, u32 *c, u32 *d) { *a = k[r]; *b = k[r + 1]; *c = k[r + 2]; *d = k[r + 3]; } __device__ void SKf (u32 *k, unsigned int r, u32 *a, u32 *b, u32 *c, u32 *d) { k[r + 4] = *a; k[r + 5] = *b; k[r + 6] = *c; k[r + 7] = *d; } #define rotlFixed(x,n) (((x) << (n)) | ((x) >> (32 - (n)))) #define rotrFixed(x,n) (((x) >> (n)) | ((x) << (32 - (n)))) __device__ void S0f (u32 *r0, u32 *r1, u32 *r2, u32 *r3, u32 *r4) { *r3 ^= *r0; *r4 = *r1; *r1 &= *r3; *r4 ^= *r2; *r1 ^= *r0; *r0 |= *r3; *r0 ^= *r4; *r4 ^= *r3; *r3 ^= *r2; *r2 |= *r1; *r2 ^= *r4; *r4 = ~*r4; *r4 |= *r1; *r1 ^= *r3; *r1 ^= *r4; *r3 |= *r0; *r1 ^= *r3; *r4 ^= *r3; } __device__ void S1f (u32 *r0, u32 *r1, u32 *r2, u32 *r3, u32 *r4) { *r0 = ~*r0; *r2 = ~*r2; *r4 = *r0; *r0 &= *r1; *r2 ^= *r0; *r0 |= *r3; *r3 ^= *r2; *r1 ^= *r0; *r0 ^= *r4; *r4 |= *r1; *r1 ^= *r3; *r2 |= *r0; *r2 &= *r4; *r0 ^= *r1; *r1 &= *r2; *r1 ^= *r0; *r0 &= *r2; *r0 ^= *r4; } __device__ void S2f (u32 *r0, u32 *r1, u32 *r2, u32 *r3, u32 *r4) { *r4 = *r0; *r0 &= *r2; *r0 ^= *r3; *r2 ^= *r1; *r2 ^= *r0; *r3 |= *r4; *r3 ^= *r1; *r4 ^= *r2; *r1 = *r3; *r3 |= *r4; *r3 ^= *r0; *r0 &= *r1; *r4 ^= *r0; *r1 ^= *r3; *r1 ^= *r4; *r4 = ~*r4; } __device__ void S3f (u32 *r0, u32 *r1, u32 *r2, u32 *r3, u32 *r4) { *r4 = *r0; *r0 |= *r3; *r3 ^= *r1; *r1 &= *r4; *r4 ^= *r2; *r2 ^= *r3; *r3 &= *r0; *r4 |= *r1; *r3 
^= *r4; *r0 ^= *r1; *r4 &= *r0; *r1 ^= *r3; *r4 ^= *r2; *r1 |= *r0; *r1 ^= *r2; *r0 ^= *r3; *r2 = *r1; *r1 |= *r3; *r1 ^= *r0; } __device__ void S4f (u32 *r0, u32 *r1, u32 *r2, u32 *r3, u32 *r4) { *r1 ^= *r3; *r3 = ~*r3; *r2 ^= *r3; *r3 ^= *r0; *r4 = *r1; *r1 &= *r3; *r1 ^= *r2; *r4 ^= *r3; *r0 ^= *r4; *r2 &= *r4; *r2 ^= *r0; *r0 &= *r1; *r3 ^= *r0; *r4 |= *r1; *r4 ^= *r0; *r0 |= *r3; *r0 ^= *r2; *r2 &= *r3; *r0 = ~*r0; *r4 ^= *r2; } __device__ void S5f (u32 *r0, u32 *r1, u32 *r2, u32 *r3, u32 *r4) { *r0 ^= *r1; *r1 ^= *r3; *r3 = ~*r3; *r4 = *r1; *r1 &= *r0; *r2 ^= *r3; *r1 ^= *r2; *r2 |= *r4; *r4 ^= *r3; *r3 &= *r1; *r3 ^= *r0; *r4 ^= *r1; *r4 ^= *r2; *r2 ^= *r0; *r0 &= *r3; *r2 = ~*r2; *r0 ^= *r4; *r4 |= *r3; *r2 ^= *r4; } __device__ void S6f (u32 *r0, u32 *r1, u32 *r2, u32 *r3, u32 *r4) { *r2 = ~*r2; *r4 = *r3; *r3 &= *r0; *r0 ^= *r4; *r3 ^= *r2; *r2 |= *r4; *r1 ^= *r3; *r2 ^= *r0; *r0 |= *r1; *r2 ^= *r1; *r4 ^= *r0; *r0 |= *r3; *r0 ^= *r2; *r4 ^= *r3; *r4 ^= *r0; *r3 = ~*r3; *r2 &= *r4; *r2 ^= *r3; } __device__ void S7f (u32 *r0, u32 *r1, u32 *r2, u32 *r3, u32 *r4) { *r4 = *r2; *r2 &= *r1; *r2 ^= *r3; *r3 &= *r1; *r4 ^= *r2; *r2 ^= *r1; *r1 ^= *r0; *r0 |= *r4; *r0 ^= *r2; *r3 ^= *r1; *r2 ^= *r3; *r3 &= *r0; *r3 ^= *r4; *r4 ^= *r2; *r2 &= *r0; *r4 = ~*r4; *r2 ^= *r4; *r4 &= *r0; *r1 ^= *r3; *r4 ^= *r1; } __device__ void serpent_set_key(const u8 userKey[], int keylen, u8 *ks) { u32 a,b,c,d,e; u32 *k = (u32 *)ks; u32 t; int i; for (i = 0; i < keylen / (int)sizeof(int); i++) k[i] = ((u32*)userKey)[i]; if (keylen < 32) k[keylen/4] |= (u32)1 << ((keylen%4)*8); k += 8; t = k[-1]; for (i = 0; i < 132; ++i) k[i] = t = rotlFixed(k[i-8] ^ k[i-5] ^ k[i-3] ^ t ^ 0x9e3779b9 ^ i, 11); k -= 20; for (i=0; i<4; i++) { LKf (k, 20, &a, &e, &b, &d); S3f (&a, &e, &b, &d, &c); SKf (k, 16, &e, &b, &d, &c); LKf (k, 24, &c, &b, &a, &e); S2f (&c, &b, &a, &e, &d); SKf (k, 20, &a, &e, &b, &d); LKf (k, 28, &b, &e, &c, &a); S1f (&b, &e, &c, &a, &d); SKf (k, 24, &c, &b, &a, &e); LKf (k, 32, &a, &b, &c, &d); S0f (&a, &b, &c, &d, &e); SKf (k, 28, &b, &e, &c, &a); k += 8*4; LKf (k, 4, &a, &c, &d, &b); S7f (&a, &c, &d, &b, &e); SKf (k, 0, &d, &e, &b, &a); LKf (k, 8, &a, &c, &b, &e); S6f (&a, &c, &b, &e, &d); SKf (k, 4, &a, &c, &d, &b); LKf (k, 12, &b, &a, &e, &c); S5f (&b, &a, &e, &c, &d); SKf (k, 8, &a, &c, &b, &e); LKf (k, 16, &e, &b, &d, &c); S4f (&e, &b, &d, &c, &a); SKf (k, 12, &b, &a, &e, &c); } LKf (k, 20, &a, &e, &b, &d); S3f (&a, &e, &b, &d, &c); SKf (k, 16, &e, &b, &d, &c); } #define G_MOD 0x0000014d __device__ u4byte mds_rem(u4byte p0, u4byte p1) { u4byte i, t, u; for(i = 0; i < 8; ++i) { t = p1 >> 24; // get most significant coefficient p1 = (p1 << 8) | (p0 >> 24); p0 <<= 8; // shift others up // multiply t by a (the primitive element - i.e. 
left shift) u = (t << 1); if(t & 0x80) // subtract modular polynomial on overflow u ^= G_MOD; p1 ^= t ^ (u << 16); // remove t * (a * x^2 + 1) u ^= (t >> 1); // form u = a * t + t / a = t * (a + 1 / a); if(t & 0x01) // add the modular polynomial on underflow u ^= G_MOD >> 1; p1 ^= (u << 24) | (u << 8); // remove t * (a + 1/a) * (x^3 + x) } return p1; } #define extract_byte(x,n) ((u1byte)((x) >> (8 * n))) #undef q #define q(n,x) q_tab[n][x] #define mds(n,x) m_tab[n][x] #define rotr(x,n) (((x)>>(n))|((x)<<(32-(n)))) #define rotl(x,n) (((x)<<(n))|((x)>>(32-(n)))) #define q20(x) q(0,q(0,x) ^ extract_byte(key[1],0)) ^ extract_byte(key[0],0) #define q21(x) q(0,q(1,x) ^ extract_byte(key[1],1)) ^ extract_byte(key[0],1) #define q22(x) q(1,q(0,x) ^ extract_byte(key[1],2)) ^ extract_byte(key[0],2) #define q23(x) q(1,q(1,x) ^ extract_byte(key[1],3)) ^ extract_byte(key[0],3) #define q30(x) q(0,q(0,q(1, x) ^ extract_byte(key[2],0)) ^ extract_byte(key[1],0)) ^ extract_byte(key[0],0) #define q31(x) q(0,q(1,q(1, x) ^ extract_byte(key[2],1)) ^ extract_byte(key[1],1)) ^ extract_byte(key[0],1) #define q32(x) q(1,q(0,q(0, x) ^ extract_byte(key[2],2)) ^ extract_byte(key[1],2)) ^ extract_byte(key[0],2) #define q33(x) q(1,q(1,q(0, x) ^ extract_byte(key[2],3)) ^ extract_byte(key[1],3)) ^ extract_byte(key[0],3) #define q40(x) q(0,q(0,q(1, q(1, x) ^ extract_byte(key[3],0)) ^ extract_byte(key[2],0)) ^ extract_byte(key[1],0)) ^ extract_byte(key[0],0) #define q41(x) q(0,q(1,q(1, q(0, x) ^ extract_byte(key[3],1)) ^ extract_byte(key[2],1)) ^ extract_byte(key[1],1)) ^ extract_byte(key[0],1) #define q42(x) q(1,q(0,q(0, q(0, x) ^ extract_byte(key[3],2)) ^ extract_byte(key[2],2)) ^ extract_byte(key[1],2)) ^ extract_byte(key[0],2) #define q43(x) q(1,q(1,q(0, q(1, x) ^ extract_byte(key[3],3)) ^ extract_byte(key[2],3)) ^ extract_byte(key[1],3)) ^ extract_byte(key[0],3) __device__ void gen_mk_tab(TwofishInstance *instance, u4byte key[], u1byte** q_tab, u4byte** m_tab) { u4byte i; u1byte by; u4byte *mk_tab = instance->mk_tab; switch(instance->k_len) { case 2: for(i = 0; i < 256; ++i) { by = (u1byte)i; mk_tab[0 + 4*i] = mds(0, q20(by)); mk_tab[1 + 4*i] = mds(1, q21(by)); mk_tab[2 + 4*i] = mds(2, q22(by)); mk_tab[3 + 4*i] = mds(3, q23(by)); } break; case 3: for(i = 0; i < 256; ++i) { by = (u1byte)i; mk_tab[0 + 4*i] = mds(0, q30(by)); mk_tab[1 + 4*i] = mds(1, q31(by)); mk_tab[2 + 4*i] = mds(2, q32(by)); mk_tab[3 + 4*i] = mds(3, q33(by)); } break; case 4: for(i = 0; i < 256; ++i) { by = (u1byte)i; mk_tab[0 + 4*i] = mds(0, q40(by)); mk_tab[1 + 4*i] = mds(1, q41(by)); mk_tab[2 + 4*i] = mds(2, q42(by)); mk_tab[3 + 4*i] = mds(3, q43(by)); } } } __device__ u1byte qp(const u4byte n, const u1byte x) { u1byte a0, a1, a2, a3, a4, b0, b1, b2, b3, b4; a0 = x >> 4; b0 = x & 15; a1 = a0 ^ b0; b1 = ror4[b0] ^ ashx[a0]; a2 = qt0[n][a1]; b2 = qt1[n][b1]; a3 = a2 ^ b2; b3 = ror4[b2] ^ ashx[a2]; a4 = qt2[n][a3]; b4 = qt3[n][b3]; return (b4 << 4) | a4; } __device__ u4byte h_fun(TwofishInstance *instance, const u4byte x, const u4byte key[], u1byte** q_tab, u4byte** m_tab) { u4byte b0, b1, b2, b3; b0 = extract_byte(x, 0); b1 = extract_byte(x, 1); b2 = extract_byte(x, 2); b3 = extract_byte(x, 3); switch(instance->k_len) { case 4: b0 = q(1, (u1byte) b0) ^ extract_byte(key[3],0); b1 = q(0, (u1byte) b1) ^ extract_byte(key[3],1); b2 = q(0, (u1byte) b2) ^ extract_byte(key[3],2); b3 = q(1, (u1byte) b3) ^ extract_byte(key[3],3); case 3: b0 = q(1, (u1byte) b0) ^ extract_byte(key[2],0); b1 = q(1, (u1byte) b1) ^ extract_byte(key[2],1); b2 = q(0, (u1byte) b2) ^ 
extract_byte(key[2],2); b3 = q(0, (u1byte) b3) ^ extract_byte(key[2],3); case 2: b0 = q(0, (u1byte) (q(0, (u1byte) b0) ^ extract_byte(key[1],0))) ^ extract_byte(key[0],0); b1 = q(0, (u1byte) (q(1, (u1byte) b1) ^ extract_byte(key[1],1))) ^ extract_byte(key[0],1); b2 = q(1, (u1byte) (q(0, (u1byte) b2) ^ extract_byte(key[1],2))) ^ extract_byte(key[0],2); b3 = q(1, (u1byte) (q(1, (u1byte) b3) ^ extract_byte(key[1],3))) ^ extract_byte(key[0],3); } return mds(0, b0) ^ mds(1, b1) ^ mds(2, b2) ^ mds(3, b3); } __device__ void gen_qtab(u1byte** q_tab) { u4byte i; for(i = 0; i < 256; ++i) { q(0,i) = qp(0, (u1byte)i); q(1,i) = qp(1, (u1byte)i); } } #define ffm_5b(x) ((x) ^ ((x) >> 2) ^ tab_5b[(x) & 3]) #define ffm_ef(x) ((x) ^ ((x) >> 1) ^ ((x) >> 2) ^ tab_ef[(x) & 3]) __device__ void gen_mtab(u1byte** q_tab, u4byte** m_tab) { u4byte i, f01, f5b, fef; for(i = 0; i < 256; ++i) { f01 = q(1,i); f5b = ffm_5b(f01); fef = ffm_ef(f01); m_tab[0][i] = f01 + (f5b << 8) + (fef << 16) + (fef << 24); m_tab[2][i] = f5b + (fef << 8) + (f01 << 16) + (fef << 24); f01 = q(0,i); f5b = ffm_5b(f01); fef = ffm_ef(f01); m_tab[1][i] = fef + (fef << 8) + (f5b << 16) + (f01 << 24); m_tab[3][i] = f5b + (f01 << 8) + (fef << 16) + (f5b << 24); } } /* initialise the key schedule from the user supplied key */ __device__ u4byte *twofish_set_key(TwofishInstance *instance, const u4byte in_key[], const u4byte key_len, u1byte** q_tab, u4byte** m_tab, u4byte* qt_gen, u4byte* mt_gen) { u4byte i, a, b, me_key[4], mo_key[4]; u4byte *l_key, *s_key; l_key = instance->l_key; s_key = instance->s_key; if(!*qt_gen) { gen_qtab(q_tab); *qt_gen = 1; } if(!*mt_gen) { gen_mtab(q_tab, m_tab); *mt_gen = 1; } instance->k_len = key_len / 64; /* 2, 3 or 4 */ for(i = 0; i < instance->k_len; ++i) { a = in_key[i + i]; me_key[i] = a; b = in_key[i + i + 1]; mo_key[i] = b; s_key[instance->k_len - i - 1] = mds_rem(a, b); } for(i = 0; i < 40; i += 2) { a = 0x01010101 * i; b = a + 0x01010101; a = h_fun(instance, a, me_key, q_tab, m_tab); b = rotl(h_fun(instance, b, mo_key, q_tab, m_tab), 8); l_key[i] = a + b; l_key[i + 1] = rotl(a + 2 * b, 9); } gen_mk_tab(instance, s_key, q_tab, m_tab); return l_key; } #define ROUNDS 16 #define GETBYTE(x, y) (unsigned int)(byte)((x)>>(8*(y))) __device__ void crypt_block(BF_KEY *key, const word32 in[2], word32 out[2]) { word32 left = in[0]; word32 right = in[1]; const word32 *const s=key->sbox; const word32 *p=key->pbox; unsigned i; left ^= p[0]; for (i=0; i<ROUNDS/2; i++) { right ^= (((s[GETBYTE(left,3)] + s[256+GETBYTE(left,2)]) ^ s[2*256+GETBYTE(left,1)]) + s[3*256+GETBYTE(left,0)]) ^ p[2*i+1]; left ^= (((s[GETBYTE(right,3)] + s[256+GETBYTE(right,2)]) ^ s[2*256+GETBYTE(right,1)]) + s[3*256+GETBYTE(right,0)]) ^ p[2*i+2]; } right ^= p[ROUNDS+1]; out[0] = right; out[1] = left; } __device__ void BlowfishSetKey (BF_KEY *key, int keylength, unsigned char *key_string) { unsigned i, j=0, k; word32 data, dspace[2] = {0, 0}; word32 *sbox = key->sbox; word32 *pbox = key->pbox; memcpy(pbox, p_init, sizeof(p_init)); memcpy(sbox, s_init, sizeof(s_init)); // Xor key string into encryption key vector for (i=0 ; i<ROUNDS+2 ; ++i) { data = 0 ; for (k=0 ; k<4 ; ++k ) data = (data << 8) | key_string[j++ % keylength]; pbox[i] ^= data; } crypt_block(key, dspace, pbox); for (i=0; i<ROUNDS; i+=2) crypt_block(key, pbox+i, pbox+i+2); crypt_block(key, pbox+ROUNDS, sbox); for (i=0; i<4*256-2; i+=2) crypt_block(key, sbox+i, sbox+i+2); for (i=0; i < ROUNDS+2; i++) key->pbox_dec[ROUNDS+1-i] = pbox[i]; } __device__ u32 MirrorBytes32 (u32 x) { u32 n = (u8) x; n 
<<= 8; n |= (u8) (x >> 8); n <<= 8; n |= (u8) (x >> 16); return (n << 8) | (u8) (x >> 24); } __device__ uint64 MirrorBytes64 (uint64 x) { uint64 n = (u8) x; n <<= 8; n |= (u8) (x >> 8); n <<= 8; n |= (u8) (x >> 16); n <<= 8; n |= (u8) (x >> 24); n <<= 8; n |= (u8) (x >> 32); n <<= 8; n |= (u8) (x >> 40); n <<= 8; n |= (u8) (x >> 48); return (n << 8) | (u8) (x >> 56); } #define BE32(x) MirrorBytes32(x) #define BE64(x) MirrorBytes64(x) __device__ void Cast5SetKey (CAST_KEY *key, unsigned int keylength, const byte *userKey) { unsigned int i; word32 *K = key->K; word32 X[4], Z[4]; X[0] = BE32 (((word32 *)userKey)[0]); X[1] = BE32 (((word32 *)userKey)[1]); X[2] = BE32 (((word32 *)userKey)[2]); X[3] = BE32 (((word32 *)userKey)[3]); #define x(i) GETBYTE(X[i/4], 3-i%4) #define z(i) GETBYTE(Z[i/4], 3-i%4) for (i=0; i<=16; i+=16) { // this part is copied directly from RFC 2144 (with some search and replace) by Wei Dai Z[0] = X[0] ^ S[4][x(0xD)] ^ S[5][x(0xF)] ^ S[6][x(0xC)] ^ S[7][x(0xE)] ^ S[6][x(0x8)]; Z[1] = X[2] ^ S[4][z(0x0)] ^ S[5][z(0x2)] ^ S[6][z(0x1)] ^ S[7][z(0x3)] ^ S[7][x(0xA)]; Z[2] = X[3] ^ S[4][z(0x7)] ^ S[5][z(0x6)] ^ S[6][z(0x5)] ^ S[7][z(0x4)] ^ S[4][x(0x9)]; Z[3] = X[1] ^ S[4][z(0xA)] ^ S[5][z(0x9)] ^ S[6][z(0xB)] ^ S[7][z(0x8)] ^ S[5][x(0xB)]; K[i+0] = S[4][z(0x8)] ^ S[5][z(0x9)] ^ S[6][z(0x7)] ^ S[7][z(0x6)] ^ S[4][z(0x2)]; K[i+1] = S[4][z(0xA)] ^ S[5][z(0xB)] ^ S[6][z(0x5)] ^ S[7][z(0x4)] ^ S[5][z(0x6)]; K[i+2] = S[4][z(0xC)] ^ S[5][z(0xD)] ^ S[6][z(0x3)] ^ S[7][z(0x2)] ^ S[6][z(0x9)]; K[i+3] = S[4][z(0xE)] ^ S[5][z(0xF)] ^ S[6][z(0x1)] ^ S[7][z(0x0)] ^ S[7][z(0xC)]; X[0] = Z[2] ^ S[4][z(0x5)] ^ S[5][z(0x7)] ^ S[6][z(0x4)] ^ S[7][z(0x6)] ^ S[6][z(0x0)]; X[1] = Z[0] ^ S[4][x(0x0)] ^ S[5][x(0x2)] ^ S[6][x(0x1)] ^ S[7][x(0x3)] ^ S[7][z(0x2)]; X[2] = Z[1] ^ S[4][x(0x7)] ^ S[5][x(0x6)] ^ S[6][x(0x5)] ^ S[7][x(0x4)] ^ S[4][z(0x1)]; X[3] = Z[3] ^ S[4][x(0xA)] ^ S[5][x(0x9)] ^ S[6][x(0xB)] ^ S[7][x(0x8)] ^ S[5][z(0x3)]; K[i+4] = S[4][x(0x3)] ^ S[5][x(0x2)] ^ S[6][x(0xC)] ^ S[7][x(0xD)] ^ S[4][x(0x8)]; K[i+5] = S[4][x(0x1)] ^ S[5][x(0x0)] ^ S[6][x(0xE)] ^ S[7][x(0xF)] ^ S[5][x(0xD)]; K[i+6] = S[4][x(0x7)] ^ S[5][x(0x6)] ^ S[6][x(0x8)] ^ S[7][x(0x9)] ^ S[6][x(0x3)]; K[i+7] = S[4][x(0x5)] ^ S[5][x(0x4)] ^ S[6][x(0xA)] ^ S[7][x(0xB)] ^ S[7][x(0x7)]; Z[0] = X[0] ^ S[4][x(0xD)] ^ S[5][x(0xF)] ^ S[6][x(0xC)] ^ S[7][x(0xE)] ^ S[6][x(0x8)]; Z[1] = X[2] ^ S[4][z(0x0)] ^ S[5][z(0x2)] ^ S[6][z(0x1)] ^ S[7][z(0x3)] ^ S[7][x(0xA)]; Z[2] = X[3] ^ S[4][z(0x7)] ^ S[5][z(0x6)] ^ S[6][z(0x5)] ^ S[7][z(0x4)] ^ S[4][x(0x9)]; Z[3] = X[1] ^ S[4][z(0xA)] ^ S[5][z(0x9)] ^ S[6][z(0xB)] ^ S[7][z(0x8)] ^ S[5][x(0xB)]; K[i+8] = S[4][z(0x3)] ^ S[5][z(0x2)] ^ S[6][z(0xC)] ^ S[7][z(0xD)] ^ S[4][z(0x9)]; K[i+9] = S[4][z(0x1)] ^ S[5][z(0x0)] ^ S[6][z(0xE)] ^ S[7][z(0xF)] ^ S[5][z(0xC)]; K[i+10] = S[4][z(0x7)] ^ S[5][z(0x6)] ^ S[6][z(0x8)] ^ S[7][z(0x9)] ^ S[6][z(0x2)]; K[i+11] = S[4][z(0x5)] ^ S[5][z(0x4)] ^ S[6][z(0xA)] ^ S[7][z(0xB)] ^ S[7][z(0x6)]; X[0] = Z[2] ^ S[4][z(0x5)] ^ S[5][z(0x7)] ^ S[6][z(0x4)] ^ S[7][z(0x6)] ^ S[6][z(0x0)]; X[1] = Z[0] ^ S[4][x(0x0)] ^ S[5][x(0x2)] ^ S[6][x(0x1)] ^ S[7][x(0x3)] ^ S[7][z(0x2)]; X[2] = Z[1] ^ S[4][x(0x7)] ^ S[5][x(0x6)] ^ S[6][x(0x5)] ^ S[7][x(0x4)] ^ S[4][z(0x1)]; X[3] = Z[3] ^ S[4][x(0xA)] ^ S[5][x(0x9)] ^ S[6][x(0xB)] ^ S[7][x(0x8)] ^ S[5][z(0x3)]; K[i+12] = S[4][x(0x8)] ^ S[5][x(0x9)] ^ S[6][x(0x7)] ^ S[7][x(0x6)] ^ S[4][x(0x3)]; K[i+13] = S[4][x(0xA)] ^ S[5][x(0xB)] ^ S[6][x(0x5)] ^ S[7][x(0x4)] ^ S[5][x(0x7)]; K[i+14] = S[4][x(0xC)] ^ S[5][x(0xD)] ^ S[6][x(0x3)] ^ 
S[7][x(0x2)] ^ S[6][x(0x8)]; K[i+15] = S[4][x(0xE)] ^ S[5][x(0xF)] ^ S[6][x(0x1)] ^ S[7][x(0x0)] ^ S[7][x(0xD)]; } for (i=16; i<32; i++) K[i] &= 0x1f; } /* Set key (initialize key schedule array) */ __device__ void RawSetKey (int encryption, const byte *key, word32 *scheduledKey) { byte buffer[56+56+8]; byte *const pc1m=buffer; /* place to modify pc1 into */ byte *const pcr=pc1m+56; /* place to rotate pc1 into */ byte *const ks=pcr+56; int i,j,l; int m; for (j=0; j<56; j++) { /* convert pc1 to bits of key */ l=pc1[j]-1; /* integer bit location */ m = l & 07; /* find bit */ pc1m[j]=(key[l>>3] & /* find which key byte l is in */ bytebit[m]) /* and which bit of that byte */ ? 1 : 0; /* and store 1-bit result */ } for (i=0; i<16; i++) { /* key chunk for each iteration */ memset(ks,0,8); /* Clear key schedule */ for (j=0; j<56; j++) /* rotate pc1 the right amount */ pcr[j] = pc1m[(l=j+totrot[i])<(j<28? 28 : 56) ? l: l-28]; /* rotate left and right halves independently */ for (j=0; j<48; j++){ /* select bits individually */ /* check bit that goes to ks[j] */ if (pcr[pc2[j]-1]){ /* mask it in if it's there */ l= j % 6; ks[j/6] |= bytebit[l] >> 2; } } /* Now convert to odd/even interleaved form for use in F */ scheduledKey[2*i] = ((word32)ks[0] << 24) | ((word32)ks[2] << 16) | ((word32)ks[4] << 8) | ((word32)ks[6]); scheduledKey[2*i+1] = ((word32)ks[1] << 24) | ((word32)ks[3] << 16) | ((word32)ks[5] << 8) | ((word32)ks[7]); } if (!encryption) // reverse key schedule order for (i=0; i<16; i+=2) { word32 b = scheduledKey[i]; scheduledKey[i] = scheduledKey[32-2-i]; scheduledKey[32-2-i] = b; b = scheduledKey[i+1]; scheduledKey[i+1] = scheduledKey[32-1-i]; scheduledKey[32-1-i] = b; } burn (buffer, sizeof (buffer)); } __device__ void TripleDesSetKey (const byte *userKey, unsigned int length, TDES_KEY *ks) { TDES_KEY *as = ks; RawSetKey (1, userKey + 0, as->k1); RawSetKey (1, userKey + 8, ks->k2); RawSetKey (1, userKey + 16, ks->k3); RawSetKey (0, userKey + 16, ks->k1d); RawSetKey (0, userKey + 8, ks->k2d); RawSetKey (0, userKey + 0, ks->k3d); } /* Return values: 0 = success, ERR_CIPHER_INIT_FAILURE (fatal), ERR_CIPHER_INIT_WEAK_KEY (non-fatal) */ __device__ int CipherInit (int cipher, unsigned char *key, u8 *ks, u1byte** q_tab, u4byte** m_tab, u4byte* qt_gen, u4byte* mt_gen) { int retVal = ERR_SUCCESS; switch (cipher) { case AES: if (aes_encrypt_key256 (key, (aes_encrypt_ctx *) ks) != EXIT_SUCCESS) return ERR_CIPHER_INIT_FAILURE; if (aes_decrypt_key256 (key, (aes_decrypt_ctx *) (ks + sizeof(aes_encrypt_ctx))) != EXIT_SUCCESS) return ERR_CIPHER_INIT_FAILURE; break; case SERPENT: serpent_set_key (key, CipherGetKeySize(SERPENT) * 8, ks); break; case TWOFISH: twofish_set_key ((TwofishInstance *)ks, (const u4byte *)key, CipherGetKeySize(TWOFISH) * 8, q_tab, m_tab, qt_gen, mt_gen); // FIXME: crash here break; case BLOWFISH: /* Deprecated/legacy */ BlowfishSetKey ((BF_KEY *)ks, CipherGetKeySize(BLOWFISH), key); break; case CAST: /* Deprecated/legacy */ Cast5SetKey ((CAST_KEY *) ks, CipherGetKeySize(CAST), key); break; case TRIPLEDES: /* Deprecated/legacy */ TripleDesSetKey (key, CipherGetKeySize (TRIPLEDES), (TDES_KEY *) ks); // Verify whether all three DES keys are mutually different if (((*((int64 *) key) ^ *((int64 *) key+1)) & 0xFEFEFEFEFEFEFEFEULL) == 0 || ((*((int64 *) key+1) ^ *((int64 *) key+2)) & 0xFEFEFEFEFEFEFEFEULL) == 0 || ((*((int64 *) key) ^ *((int64 *) key+2)) & 0xFEFEFEFEFEFEFEFEULL) == 0) retVal = ERR_CIPHER_INIT_WEAK_KEY; // Non-fatal error break; default: // Unknown/wrong cipher ID return 
ERR_CIPHER_INIT_FAILURE; } return retVal; } // Return values: 0 = success, ERR_CIPHER_INIT_FAILURE (fatal), ERR_CIPHER_INIT_WEAK_KEY (non-fatal) __device__ int EAInit (int ea, unsigned char *key, u8 *ks, u1byte** q_tab, u4byte** m_tab, u4byte* qt_gen, u4byte* mt_gen) { int c, retVal = ERR_SUCCESS; if (ea == 0) return ERR_CIPHER_INIT_FAILURE; for (c = EAGetFirstCipher (ea); c != 0; c = EAGetNextCipher (ea, c)) { switch (CipherInit (c, key, ks, q_tab, m_tab, qt_gen, mt_gen)) { case ERR_CIPHER_INIT_FAILURE: return ERR_CIPHER_INIT_FAILURE; case ERR_CIPHER_INIT_WEAK_KEY: retVal = ERR_CIPHER_INIT_WEAK_KEY; // Non-fatal error break; } key += CipherGetKeySize (c); ks += CipherGetKeyScheduleSize (c); } return retVal; } __device__ int IsBitSet128 (unsigned int bit, u8 *a) { return a[(127 - bit) / 8] & (0x80 >> ((127 - bit) % 8)); } __device__ int IsBitSet64 (unsigned int bit, u8 *a) { return a[(63 - bit) / 8] & (0x80 >> ((63 - bit) % 8)); } __device__ void SetBit128 (unsigned int bit, u8 *a) { a[(127 - bit) / 8] |= 0x80 >> ((127 - bit) % 8); } __device__ void SetBit64 (unsigned int bit, u8 *a) { a[(63 - bit) / 8] |= 0x80 >> ((63 - bit) % 8); } __device__ void MirrorBits128 (u8 *a) { u8 t[128 / 8]; int i; memset (t,0,16); for (i = 0; i < 128; i++) { if (IsBitSet128(i, a)) SetBit128 (127 - i, t); } memcpy (a, t, sizeof (t)); burn (t,sizeof (t)); } __device__ void MirrorBits64 (u8 *a) { u8 t[64 / 8]; int i; memset (t,0,8); for (i = 0; i < 64; i++) { if (IsBitSet64(i, a)) SetBit64 (63 - i, t); } memcpy (a, t, sizeof (t)); burn (t,sizeof (t)); } /* Multiply of a GF128 field element by x. The field element */ /* is held in an array of bytes in which field bits 8n..8n + 7 */ /* are held in byte[n], with lower indexed bits placed in the */ /* more numerically significant bit positions in bytes. */ /* This function multiples a field element x, in the polynomial */ /* field representation. It uses 32-bit word operations to gain */ /* speed but compensates for machine endianess and hence works */ /* correctly on both styles of machine */ __device__ in_line void mul_x(mode(32t) x[4]) { mode(32t) t; bsw_32(x, 4); /* at this point the filed element bits 0..127 are set out */ /* as follows in 32-bit words (where the most significant */ /* (ms) numeric bits are to the left) */ /* */ /* x[0] x[1] x[2] x[3] */ /* ms ls ms ls ms ls ms ls */ /* field: 0 ... 31 32 .. 63 64 .. 95 96 .. 127 */ t = gf_poly[x[3] & 1]; /* bit 127 of the element */ x[3] = (x[3] >> 1) | (x[2] << 31); /* shift bits up by one */ x[2] = (x[2] >> 1) | (x[1] << 31); /* position */ x[1] = (x[1] >> 1) | (x[0] << 31); /* if bit 7 is 1 xor in */ x[0] = (x[0] >> 1) ^ t; /* the field polynomial */ bsw_32(x, 4); } __device__ in_line void mul_lex8(mode(32t) x[4]) /* mutiply with long words */ { mode(32t) t = (x[3] >> 24); /* in little endian format */ x[3] = (x[3] << 8) | (x[2] >> 24); x[2] = (x[2] << 8) | (x[1] >> 24); x[1] = (x[1] << 8) | (x[0] >> 24); x[0] = (x[0] << 8) ^ gft_le[t]; } __device__ in_line void mul_x64(mode(32t) x[2]) { mode(32t) t; bsw_32(x, 2); /* at this point the filed element bits 0..127 are set out */ /* as follows in 32-bit words (where the most significant */ /* (ms) numeric bits are to the left) */ /* */ /* x[0] x[1] x[2] x[3] */ /* ms ls ms ls ms ls ms ls */ /* field: 0 ... 31 32 .. 63 64 .. 95 96 .. 
127 */ t = gf_poly64[x[1] & 1]; /* bit 127 of the element */ /* shift bits up by one */ /* position */ x[1] = (x[1] >> 1) | (x[0] << 31); /* if bit 7 is 1 xor in */ x[0] = (x[0] >> 1) ^ t; /* the field polynomial */ bsw_32(x, 2); } __device__ in_line void mul_lex8_64(mode(32t) x[2]) /* mutiply with long words */ { mode(32t) t = (x[1] >> 24); /* in little endian format */ x[1] = (x[1] << 8) | (x[0] >> 24); x[0] = (x[0] << 8) ^ gft_le64[t]; } #define mul_x8 mul_lex8 #define mul_x8_64 mul_lex8_64 __device__ void compile_8k_table(u8 *a, GfCtx8k *ctx) { int i, j, k; memset(ctx->gf_t8k, 0, 32 * 16 * 16); for(i = 0; i < 2 * CBLK_LEN; ++i) { if(i == 0) { memcpy(ctx->gf_t8k[1][8], a, CBLK_LEN); for(j = 4; j > 0; j >>= 1) { memcpy(ctx->gf_t8k[1][j], ctx->gf_t8k[1][j + j], CBLK_LEN); mul_x(ctx->gf_t8k[1][j]); } memcpy(ctx->gf_t8k[0][8], ctx->gf_t8k[1][1], CBLK_LEN); mul_x(ctx->gf_t8k[0][8]); for(j = 4; j > 0; j >>= 1) { memcpy(ctx->gf_t8k[0][j], ctx->gf_t8k[0][j + j], CBLK_LEN); mul_x(ctx->gf_t8k[0][j]); } } else if(i > 1) for(j = 8; j > 0; j >>= 1) { memcpy(ctx->gf_t8k[i][j], ctx->gf_t8k[i - 2][j], CBLK_LEN); mul_x8(ctx->gf_t8k[i][j]); } for(j = 2; j < 16; j += j) { mode(32t) *pj = ctx->gf_t8k[i][j]; mode(32t) *pk = ctx->gf_t8k[i][1]; mode(32t) *pl = ctx->gf_t8k[i][j + 1]; for(k = 1; k < j; ++k) { *pl++ = pj[0] ^ *pk++; *pl++ = pj[1] ^ *pk++; *pl++ = pj[2] ^ *pk++; *pl++ = pj[3] ^ *pk++; } } } } __device__ void compile_4k_table64(u8 *a, GfCtx4k64 *ctx) { int i, j, k; memset(ctx->gf_t4k, 0, sizeof(ctx->gf_t4k)); for(i = 0; i < 2 * CBLK_LEN8; ++i) { if(i == 0) { memcpy(ctx->gf_t4k[1][8], a, CBLK_LEN8); for(j = 4; j > 0; j >>= 1) { memcpy(ctx->gf_t4k[1][j], ctx->gf_t4k[1][j + j], CBLK_LEN8); mul_x64(ctx->gf_t4k[1][j]); } memcpy(ctx->gf_t4k[0][8], ctx->gf_t4k[1][1], CBLK_LEN8); mul_x64(ctx->gf_t4k[0][8]); for(j = 4; j > 0; j >>= 1) { memcpy(ctx->gf_t4k[0][j], ctx->gf_t4k[0][j + j], CBLK_LEN8); mul_x64(ctx->gf_t4k[0][j]); } } else if(i > 1) for(j = 8; j > 0; j >>= 1) { memcpy(ctx->gf_t4k[i][j], ctx->gf_t4k[i - 2][j], CBLK_LEN8); mul_x8_64(ctx->gf_t4k[i][j]); } for(j = 2; j < 16; j += j) { mode(32t) *pj = ctx->gf_t4k[i][j]; mode(32t) *pk = ctx->gf_t4k[i][1]; mode(32t) *pl = ctx->gf_t4k[i][j + 1]; for(k = 1; k < j; ++k) { *pl++ = pj[0] ^ *pk++; *pl++ = pj[1] ^ *pk++; *pl++ = pj[2] ^ *pk++; *pl++ = pj[3] ^ *pk++; } } } } /* Allocate and initialize speed optimization table for multiplication by 64-bit operand in MSB-first mode */ __device__ int Gf128Tab64Init (u8 *a, GfCtx *ctx) { GfCtx8k ctx8k; u8 am[16]; int i, j; memcpy (am, a, 16); MirrorBits128 (am); compile_8k_table (am, &ctx8k); /* Convert 8k LSB-first table to 4k MSB-first */ for (i = 16; i < 32; i++) { for (j = 0; j < 16; j++) { int jm = 0; jm |= (j & 0x1) << 3; jm |= (j & 0x2) << 1; jm |= (j & 0x4) >> 1; jm |= (j & 0x8) >> 3; memcpy (&ctx->gf_t128[i-16][jm], (unsigned char *)&ctx8k.gf_t8k[31-i][j], 16); MirrorBits128 ((unsigned char *)&ctx->gf_t128[i-16][jm]); } } burn (am, sizeof (am)); return TRUE; } __device__ int Gf64TabInit (u8 *a, GfCtx *ctx) { /* Deprecated/legacy */ GfCtx4k64 ctx4k; u8 am[8]; int i, j; memcpy (am, a, 8); MirrorBits64 (am); compile_4k_table64 (am, &ctx4k); /* Convert LSB-first table to MSB-first */ for (i = 0; i < 16; i++) { for (j = 0; j < 16; j++) { int jm = 0; jm |= (j & 0x1) << 3; jm |= (j & 0x2) << 1; jm |= (j & 0x4) >> 1; jm |= (j & 0x8) >> 3; memcpy (&ctx->gf_t64[i][jm], (unsigned char *)&ctx4k.gf_t4k[15-i][j], 8); MirrorBits64 ((unsigned char *)&ctx->gf_t64[i][jm]); } } burn (am, sizeof (am)); return TRUE; } 
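/*
 * Hedged illustrative sketch (not part of the original code and not called
 * anywhere): multiplication by x ("doubling") of a GF(2^128) element held as
 * two little-endian 64-bit words. This is the same in-place update that
 * DecryptBufferXTS below applies to its whitening value; the reduction
 * polynomial x^128 + x^7 + x^2 + x + 1 is what yields the 0x87 (decimal 135)
 * feedback constant. The function name is hypothetical.
 */
__device__ void Gf128DoubleLeSketch (u64 w[2])
{
	u8 carry = (w[1] & 0x8000000000000000ULL) ? 135 : 0;	/* bit 127 set? */
	w[1] <<= 1;
	if (w[0] & 0x8000000000000000ULL)
		w[1] |= 1;				/* propagate bit 63 into bit 64 */
	w[0] <<= 1;
	((u8 *) w)[0] ^= carry;			/* fold the reduction into byte 0 */
}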
__device__ BOOL EAInitMode (PCRYPTO_INFO ci, u1byte** q_tab, u4byte** m_tab, u4byte* qt_gen, u4byte* mt_gen) { switch (ci->mode) { case XTS: // Secondary key schedule if (EAInit (ci->ea, ci->k2, ci->ks2, q_tab, m_tab, qt_gen, mt_gen) != ERR_SUCCESS) return FALSE; /* Note: XTS mode could potentially be initialized with a weak key causing all blocks in one data unit on the volume to be tweaked with zero tweaks (i.e. 512 bytes of the volume would be encrypted in ECB mode). However, to create a TrueCrypt volume with such a weak key, each human being on Earth would have to create approximately 11,378,125,361,078,862 (about eleven quadrillion) TrueCrypt volumes (provided that the size of each of the volumes is 1024 terabytes). */ break; case LRW: switch (CipherGetBlockSize (EAGetFirstCipher (ci->ea))) { case 8: /* Deprecated/legacy */ return Gf64TabInit (ci->k2, &ci->gf_ctx); case 16: return Gf128Tab64Init (ci->k2, &ci->gf_ctx); default: TC_THROW_FATAL_EXCEPTION; } break; case CBC: case INNER_CBC: case OUTER_CBC: // The mode does not need to be initialized or is initialized elsewhere return TRUE; default: // Unknown/wrong ID TC_THROW_FATAL_EXCEPTION; } return TRUE; } __device__ uint32 GetHeaderField32 (byte *header, int offset) { return BE32 (*(uint32 *) (header + offset)); } #define etab_0(x) t_fn[0][x] #define etab_1(x) t_fn[1][x] #define etab_2(x) t_fn[2][x] #define etab_3(x) t_fn[3][x] #define eltab_0(x) t_fl[0][x] #define eltab_1(x) t_fl[1][x] #define eltab_2(x) t_fl[2][x] #define eltab_3(x) t_fl[3][x] #define eltab(n, x) eltab_##n(x) __device__ void enc_round_c (uint_32t* eax, uint_32t* ebx, uint_32t* ecx, uint_32t* edx, uint_32t* ebp) { uint_32t key1, key2, tmp, esi, edi; uint_8t* ax = (uint_8t*)eax, *bx = (uint_8t*)ebx, *cx = (uint_8t*)ecx, *dx = (uint_8t*)edx; // ebp - key pointer, eax, ebx, ecx, edx - each contain 4 bytes (int) of outBlock, esi - 3rd int in key, edi - 4th int in key key1 = ebp[0]; key2 = ebp[1]; esi = ebp[2]; edi = ebp[3]; /* begin rnd_fun */ *ebx = (*ebx << 16) | (*ebx >> 16); // al - 0, ah - 1, bl - 4, bh - 5, cl - 8, ch - 9, dl - 12, dh - 13 esi ^= etab_0(cx[0]); esi ^= etab_1(dx[1]); esi ^= etab_3(bx[1]); edi ^= etab_0(dx[0]); edi ^= etab_1(ax[1]); edi ^= etab_2(bx[0]); tmp = etab_0(ax[0]); // ebp (restored later) *ebx >>= 16; *eax &= 0xffff0000; *eax |= *ebx; *edx >>= 16; tmp ^= etab_1(ax[1]); tmp ^= etab_3(dx[1]); *ebx = etab_2(dx[0]); // ebx *ebx ^= etab_1(cx[1]); *ebx ^= etab_0(ax[0]); *eax >>= 16; *ecx >>= 16; tmp ^= etab_2(cx[0]); edi ^= etab_3(cx[1]); esi ^= etab_2(ax[0]); *ebx ^= etab_3(ax[1]); /* end rnd_fun */ *eax = tmp; *ecx = esi; *edx = edi; *eax ^= key1; *ebx ^= key2; } __device__ void enc_last_round_c (uint_32t* eax, uint_32t* ebx, uint_32t* ecx, uint_32t* edx, uint_32t* ebp) { uint_32t key1, key2, tmp, esi, edi; uint_8t* ax = (uint_8t*)eax, *bx = (uint_8t*)ebx, *cx = (uint_8t*)ecx, *dx = (uint_8t*)edx; // ebp - key pointer, eax, ebx, ecx, edx - each contain 4 bytes (int) of outBlock, esi - 3rd int in key, edi - 4th int in key key1 = ebp[0]; key2 = ebp[1]; esi = ebp[2]; edi = ebp[3]; /* begin rnd_fun */ *ebx = (*ebx << 16) | (*ebx >> 16); // al - 0, ah - 1, bl - 4, bh - 5, cl - 8, ch - 9, dl - 12, dh - 13 esi ^= eltab(0, cx[0]); esi ^= eltab(1, dx[1]); esi ^= eltab(3, bx[1]); edi ^= eltab(0, dx[0]); edi ^= eltab(1, ax[1]); edi ^= eltab(2, bx[0]); tmp = eltab(0, ax[0]); // ebp (restored later) *ebx >>= 16; *eax &= 0xffff0000; *eax |= *ebx; *edx >>= 16; tmp ^= eltab(1, ax[1]); tmp ^= eltab(3, dx[1]); *ebx = eltab(2, dx[0]); // ebx *ebx ^= eltab(1, 
cx[1]); *ebx ^= eltab(0, ax[0]); *eax >>= 16; *ecx >>= 16; tmp ^= eltab(2, cx[0]); edi ^= eltab(3, cx[1]); esi ^= eltab(2, ax[0]); *ebx ^= eltab(3, ax[1]); /* end rnd_fun */ *eax = tmp; *ecx = esi; *edx = edi; *eax ^= key1; *ebx ^= key2; } __device__ AES_RETURN aes_encrypt_c(const u8 *inBlock, u8 *outBlock, void *ks) { uint_32t* kp = (uint_32t *)ks; // key pointer uint_32t inf = *(kp + KS_LENGTH); uint_32t* o = (uint_32t*)outBlock; unsigned int i; // xor 4 bytes in inBlock with 4 bytes in ks, 4 times, store result in outBlock o[0] = ((uint_32t*)inBlock)[0] ^ kp[0]; o[1] = ((uint_32t*)inBlock)[1] ^ kp[1]; o[2] = ((uint_32t*)inBlock)[2] ^ kp[2]; o[3] = ((uint_32t*)inBlock)[3] ^ kp[3]; if (inf == 10 * 16 || inf == 12 * 16 || inf == 14 * 16) { for (i = 0; i < inf >> 4; i++) { kp += 4; if (i < (inf >> 4) - 1) enc_round_c(&o[0], &o[1], &o[2], &o[3], kp); else enc_last_round_c(&o[0], &o[1], &o[2], &o[3], kp); } } else { // error return EXIT_FAILURE; } return EXIT_SUCCESS; } #define g0_fun(x) ( mk_tab[0 + 4*extract_byte(x,0)] ^ mk_tab[1 + 4*extract_byte(x,1)] \ ^ mk_tab[2 + 4*extract_byte(x,2)] ^ mk_tab[3 + 4*extract_byte(x,3)] ) #define g1_fun(x) ( mk_tab[0 + 4*extract_byte(x,3)] ^ mk_tab[1 + 4*extract_byte(x,0)] \ ^ mk_tab[2 + 4*extract_byte(x,1)] ^ mk_tab[3 + 4*extract_byte(x,2)] ) #define f_rnd(i) \ t1 = g1_fun(blk[1]); t0 = g0_fun(blk[0]); \ blk[2] = rotr(blk[2] ^ (t0 + t1 + l_key[4 * (i) + 8]), 1); \ blk[3] = rotl(blk[3], 1) ^ (t0 + 2 * t1 + l_key[4 * (i) + 9]); \ t1 = g1_fun(blk[3]); t0 = g0_fun(blk[2]); \ blk[0] = rotr(blk[0] ^ (t0 + t1 + l_key[4 * (i) + 10]), 1); \ blk[1] = rotl(blk[1], 1) ^ (t0 + 2 * t1 + l_key[4 * (i) + 11]) __device__ void twofish_encrypt(TwofishInstance *instance, const u4byte in_blk[4], u4byte out_blk[]) { u4byte t0, t1, blk[4]; u4byte *l_key = instance->l_key; u4byte *mk_tab = instance->mk_tab; blk[0] = in_blk[0] ^ l_key[0]; blk[1] = in_blk[1] ^ l_key[1]; blk[2] = in_blk[2] ^ l_key[2]; blk[3] = in_blk[3] ^ l_key[3]; f_rnd(0); f_rnd(1); f_rnd(2); f_rnd(3); f_rnd(4); f_rnd(5); f_rnd(6); f_rnd(7); out_blk[0] = blk[2] ^ l_key[4]; out_blk[1] = blk[3] ^ l_key[5]; out_blk[2] = blk[0] ^ l_key[6]; out_blk[3] = blk[1] ^ l_key[7]; } __device__ void KXf (const u32 *k, unsigned int r, u32 *a, u32 *b, u32 *c, u32 *d) { *a ^= k[r]; *b ^= k[r + 1]; *c ^= k[r + 2]; *d ^= k[r + 3]; } __device__ void LTf (uint32 *a, uint32 *b, uint32 *c, uint32 *d) { *a = rotlFixed(*a, 13); *c = rotlFixed(*c, 3); *d = rotlFixed(*d ^ *c ^ (*a << 3), 7); *b = rotlFixed(*b ^ *a ^ *c, 1); *a = rotlFixed(*a ^ *b ^ *d, 5); *c = rotlFixed(*c ^ *d ^ (*b << 7), 22); } __device__ void serpent_encrypt(const u8 *inBlock, u8 *outBlock, u8 *ks) { u32 a, b, c, d, e; unsigned int i=1; const u32 *k = (u32 *)ks + 8; u32 *in = (u32 *) inBlock; u32 *out = (u32 *) outBlock; a = in[0]; b = in[1]; c = in[2]; d = in[3]; do { KXf (k, 0, &a, &b, &c, &d); S0f (&a, &b, &c, &d, &e); LTf (&b, &e, &c, &a); KXf (k, 4, &b, &e, &c, &a); S1f (&b, &e, &c, &a, &d); LTf (&c, &b, &a, &e); KXf (k, 8, &c, &b, &a, &e); S2f (&c, &b, &a, &e, &d); LTf (&a, &e, &b, &d); KXf (k, 12, &a, &e, &b, &d); S3f (&a, &e, &b, &d, &c); LTf (&e, &b, &d, &c); KXf (k, 16, &e, &b, &d, &c); S4f (&e, &b, &d, &c, &a); LTf (&b, &a, &e, &c); KXf (k, 20, &b, &a, &e, &c); S5f (&b, &a, &e, &c, &d); LTf (&a, &c, &b, &e); KXf (k, 24, &a, &c, &b, &e); S6f (&a, &c, &b, &e, &d); LTf (&a, &c, &d, &b); KXf (k, 28, &a, &c, &d, &b); S7f (&a, &c, &d, &b, &e); if (i == 4) break; ++i; c = b; b = e; e = d; d = a; a = e; k += 32; LTf (&a,&b,&c,&d); } while (1); KXf (k, 32, &d, &e, 
&b, &a); out[0] = d; out[1] = e; out[2] = b; out[3] = a; } __device__ void BlowfishEncryptLE (unsigned char *inBlock, unsigned char *outBlock, BF_KEY *key, int encrypt) { word32 left = ((word32 *) inBlock)[0]; word32 right = ((word32 *) inBlock)[1]; const word32 *const s = key->sbox; const word32 * p = encrypt ? key->pbox : key->pbox_dec; unsigned i; left ^= p[0]; for (i=0; i<ROUNDS/2; i++) { right ^= (((s[GETBYTE(left,3)] + s[256+GETBYTE(left,2)]) ^ s[2*256+GETBYTE(left,1)]) + s[3*256+GETBYTE(left,0)]) ^ p[2*i+1]; left ^= (((s[GETBYTE(right,3)] + s[256+GETBYTE(right,2)]) ^ s[2*256+GETBYTE(right,1)]) + s[3*256+GETBYTE(right,0)]) ^ p[2*i+2]; } right ^= p[ROUNDS+1]; ((word32 *) outBlock)[0] = right; ((word32 *) outBlock)[1] = left; } __device__ word32 rotlVariable (word32 x, unsigned int y) { return (word32)((x<<y) | (x>>(sizeof(word32)*8-y))); } /* Macros to access 8-bit bytes out of a 32-bit word */ #define U8a(x) GETBYTE(x,3) #define U8b(x) GETBYTE(x,2) #define U8c(x) GETBYTE(x,1) #define U8d(x) GETBYTE(x,0) /* CAST uses three different round functions */ #define f1(l, r, km, kr) \ t = rotlVariable(km + r, kr); \ l ^= ((S[0][U8a(t)] ^ S[1][U8b(t)]) - \ S[2][U8c(t)]) + S[3][U8d(t)]; #undef f2 #define f2(l, r, km, kr) \ t = rotlVariable(km ^ r, kr); \ l ^= ((S[0][U8a(t)] - S[1][U8b(t)]) + \ S[2][U8c(t)]) ^ S[3][U8d(t)]; #undef f3 #define f3(l, r, km, kr) \ t = rotlVariable(km - r, kr); \ l ^= ((S[0][U8a(t)] + S[1][U8b(t)]) ^ \ S[2][U8c(t)]) - S[3][U8d(t)]; #define F1(l, r, i, j) f1(l, r, K[i], K[i+j]) #define F2(l, r, i, j) f2(l, r, K[i], K[i+j]) #define F3(l, r, i, j) f3(l, r, K[i], K[i+j]) __device__ void Cast5Encrypt (const byte *inBlock, byte *outBlock, CAST_KEY *key) { word32 l = BE32 (((word32 *)inBlock)[0]); word32 r = BE32 (((word32 *)inBlock)[1]); word32 *K = key->K; word32 t; /* Do the work */ F1(l, r, 0, 16); F2(r, l, 1, 16); F3(l, r, 2, 16); F1(r, l, 3, 16); F2(l, r, 4, 16); F3(r, l, 5, 16); F1(l, r, 6, 16); F2(r, l, 7, 16); F3(l, r, 8, 16); F1(r, l, 9, 16); F2(l, r, 10, 16); F3(r, l, 11, 16); F1(l, r, 12, 16); F2(r, l, 13, 16); F3(l, r, 14, 16); F1(r, l, 15, 16); /* Put l,r into outblock */ ((word32 *)outBlock)[0] = BE32 (r); ((word32 *)outBlock)[1] = BE32 (l); } __device__ void RawProcessBlock(word32 *l_, word32 *r_, const word32 *k) { word32 l = *l_, r = *r_; const word32 *kptr=k; unsigned i; for (i=0; i<8; i++) { word32 work = rotrFixed(r, 4U) ^ kptr[4*i+0]; l ^= Spbox[6][(work) & 0x3f] ^ Spbox[4][(work >> 8) & 0x3f] ^ Spbox[2][(work >> 16) & 0x3f] ^ Spbox[0][(work >> 24) & 0x3f]; work = r ^ kptr[4*i+1]; l ^= Spbox[7][(work) & 0x3f] ^ Spbox[5][(work >> 8) & 0x3f] ^ Spbox[3][(work >> 16) & 0x3f] ^ Spbox[1][(work >> 24) & 0x3f]; work = rotrFixed(l, 4U) ^ kptr[4*i+2]; r ^= Spbox[6][(work) & 0x3f] ^ Spbox[4][(work >> 8) & 0x3f] ^ Spbox[2][(work >> 16) & 0x3f] ^ Spbox[0][(work >> 24) & 0x3f]; work = l ^ kptr[4*i+3]; r ^= Spbox[7][(work) & 0x3f] ^ Spbox[5][(work >> 8) & 0x3f] ^ Spbox[3][(work >> 16) & 0x3f] ^ Spbox[1][(work >> 24) & 0x3f]; } *l_ = l; *r_ = r; } __device__ void TripleDesEncrypt (byte *inBlock, byte *outBlock, TDES_KEY *key, int encrypt) { word32 left = BE32 (((word32 *)inBlock)[0]); word32 right = BE32 (((word32 *)inBlock)[1]); word32 work; right = rotlFixed(right, 4U); work = (left ^ right) & 0xf0f0f0f0; left ^= work; right = rotrFixed(right^work, 20U); work = (left ^ right) & 0xffff0000; left ^= work; right = rotrFixed(right^work, 18U); work = (left ^ right) & 0x33333333; left ^= work; right = rotrFixed(right^work, 6U); work = (left ^ right) & 0x00ff00ff; left ^= 
work; right = rotlFixed(right^work, 9U); work = (left ^ right) & 0xaaaaaaaa; left = rotlFixed(left^work, 1U); right ^= work; RawProcessBlock (&left, &right, encrypt ? key->k1 : key->k1d); RawProcessBlock (&right, &left, !encrypt ? key->k2 : key->k2d); RawProcessBlock (&left, &right, encrypt ? key->k3 : key->k3d); right = rotrFixed(right, 1U); work = (left ^ right) & 0xaaaaaaaa; right ^= work; left = rotrFixed(left^work, 9U); work = (left ^ right) & 0x00ff00ff; right ^= work; left = rotlFixed(left^work, 6U); work = (left ^ right) & 0x33333333; right ^= work; left = rotlFixed(left^work, 18U); work = (left ^ right) & 0xffff0000; right ^= work; left = rotlFixed(left^work, 20U); work = (left ^ right) & 0xf0f0f0f0; right ^= work; left = rotrFixed(left^work, 4U); ((word32 *)outBlock)[0] = BE32 (right); ((word32 *)outBlock)[1] = BE32 (left); } __device__ void EncipherBlock(int cipher, void *data, void *ks) { switch (cipher) { case AES: aes_encrypt_c ((u8*)data, (u8*)data, ks); break; case TWOFISH: twofish_encrypt ((TwofishInstance*)ks, (u4byte*)data, (u4byte*)data); break; case SERPENT: serpent_encrypt ((u8*)data, (u8*)data, (u8*)ks); break; case BLOWFISH: BlowfishEncryptLE ((unsigned char*)data, (unsigned char*)data, (BF_KEY*)ks, 1); break; // Deprecated/legacy case CAST: Cast5Encrypt ((byte*)data, (byte*)data, (CAST_KEY*)ks); break; // Deprecated/legacy case TRIPLEDES: TripleDesEncrypt ((byte*)data, (byte*)data, (TDES_KEY*)ks, 1); break; // Deprecated/legacy default: TC_THROW_FATAL_EXCEPTION; // Unknown/wrong ID } } __device__ void ILTf (uint32 *a, uint32 *b, uint32 *c, uint32 *d) { *c = rotrFixed(*c, 22); *a = rotrFixed(*a, 5); *c ^= *d ^ (*b << 7); *a ^= *b ^ *d; *b = rotrFixed(*b, 1); *d = rotrFixed(*d, 7) ^ *c ^ (*a << 3); *b ^= *a ^ *c; *c = rotrFixed(*c, 3); *a = rotrFixed(*a, 13); } // order of output from S-box functions #define beforeS0(f) f(0,a,b,c,d,e) #define afterS0(f) f(1,b,e,c,a,d) #define afterS1(f) f(2,c,b,a,e,d) #define afterS2(f) f(3,a,e,b,d,c) #define afterS3(f) f(4,e,b,d,c,a) #define afterS4(f) f(5,b,a,e,c,d) #define afterS5(f) f(6,a,c,b,e,d) #define afterS6(f) f(7,a,c,d,b,e) #define afterS7(f) f(8,d,e,b,a,c) // order of output from inverse S-box functions #define beforeI7(f) f(8,a,b,c,d,e) #define afterI7(f) f(7,d,a,b,e,c) #define afterI6(f) f(6,a,b,c,e,d) #define afterI5(f) f(5,b,d,e,c,a) #define afterI4(f) f(4,b,c,e,a,d) #define afterI3(f) f(3,a,b,e,c,d) #define afterI2(f) f(2,b,d,e,c,a) #define afterI1(f) f(1,a,b,c,e,d) #define afterI0(f) f(0,a,d,b,e,c) // inverse linear transformation #define ILT(i,a,b,c,d,e) {\ c = rotrFixed(c, 22); \ a = rotrFixed(a, 5); \ c ^= d ^ (b << 7); \ a ^= b ^ d; \ b = rotrFixed(b, 1); \ d = rotrFixed(d, 7) ^ c ^ (a << 3); \ b ^= a ^ c; \ c = rotrFixed(c, 3); \ a = rotrFixed(a, 13);} #define I0(i, r0, r1, r2, r3, r4) \ { \ r2 = ~r2; \ r4 = r1; \ r1 |= r0; \ r4 = ~r4; \ r1 ^= r2; \ r2 |= r4; \ r1 ^= r3; \ r0 ^= r4; \ r2 ^= r0; \ r0 &= r3; \ r4 ^= r0; \ r0 |= r1; \ r0 ^= r2; \ r3 ^= r4; \ r2 ^= r1; \ r3 ^= r0; \ r3 ^= r1; \ r2 &= r3; \ r4 ^= r2; \ } #define I1(i, r0, r1, r2, r3, r4) \ { \ r4 = r1; \ r1 ^= r3; \ r3 &= r1; \ r4 ^= r2; \ r3 ^= r0; \ r0 |= r1; \ r2 ^= r3; \ r0 ^= r4; \ r0 |= r2; \ r1 ^= r3; \ r0 ^= r1; \ r1 |= r3; \ r1 ^= r0; \ r4 = ~r4; \ r4 ^= r1; \ r1 |= r0; \ r1 ^= r0; \ r1 |= r4; \ r3 ^= r1; \ } #define I2(i, r0, r1, r2, r3, r4) \ { \ r2 ^= r3; \ r3 ^= r0; \ r4 = r3; \ r3 &= r2; \ r3 ^= r1; \ r1 |= r2; \ r1 ^= r4; \ r4 &= r3; \ r2 ^= r3; \ r4 &= r0; \ r4 ^= r2; \ r2 &= r1; \ r2 |= r0; \ r3 = ~r3; \ r2 ^= r3; \ r0 ^= r3; \ r0 
&= r1; \ r3 ^= r4; \ r3 ^= r0; \ } #define I3(i, r0, r1, r2, r3, r4) \ { \ r4 = r2; \ r2 ^= r1; \ r1 &= r2; \ r1 ^= r0; \ r0 &= r4; \ r4 ^= r3; \ r3 |= r1; \ r3 ^= r2; \ r0 ^= r4; \ r2 ^= r0; \ r0 |= r3; \ r0 ^= r1; \ r4 ^= r2; \ r2 &= r3; \ r1 |= r3; \ r1 ^= r2; \ r4 ^= r0; \ r2 ^= r4; \ } #define I4(i, r0, r1, r2, r3, r4) \ { \ r4 = r2; \ r2 &= r3; \ r2 ^= r1; \ r1 |= r3; \ r1 &= r0; \ r4 ^= r2; \ r4 ^= r1; \ r1 &= r2; \ r0 = ~r0; \ r3 ^= r4; \ r1 ^= r3; \ r3 &= r0; \ r3 ^= r2; \ r0 ^= r1; \ r2 &= r0; \ r3 ^= r0; \ r2 ^= r4; \ r2 |= r3; \ r3 ^= r0; \ r2 ^= r1; \ } #define I5(i, r0, r1, r2, r3, r4) \ { \ r1 = ~r1; \ r4 = r3; \ r2 ^= r1; \ r3 |= r0; \ r3 ^= r2; \ r2 |= r1; \ r2 &= r0; \ r4 ^= r3; \ r2 ^= r4; \ r4 |= r0; \ r4 ^= r1; \ r1 &= r2; \ r1 ^= r3; \ r4 ^= r2; \ r3 &= r4; \ r4 ^= r1; \ r3 ^= r0; \ r3 ^= r4; \ r4 = ~r4; \ } #define I6(i, r0, r1, r2, r3, r4) \ { \ r0 ^= r2; \ r4 = r2; \ r2 &= r0; \ r4 ^= r3; \ r2 = ~r2; \ r3 ^= r1; \ r2 ^= r3; \ r4 |= r0; \ r0 ^= r2; \ r3 ^= r4; \ r4 ^= r1; \ r1 &= r3; \ r1 ^= r0; \ r0 ^= r3; \ r0 |= r2; \ r3 ^= r1; \ r4 ^= r0; \ } #define I7(i, r0, r1, r2, r3, r4) \ { \ r4 = r2; \ r2 ^= r0; \ r0 &= r3; \ r2 = ~r2; \ r4 |= r3; \ r3 ^= r1; \ r1 |= r0; \ r0 ^= r2; \ r2 &= r4; \ r1 ^= r2; \ r2 ^= r0; \ r0 |= r2; \ r3 &= r4; \ r0 ^= r3; \ r4 ^= r1; \ r3 ^= r4; \ r4 |= r0; \ r3 ^= r2; \ r4 ^= r2; \ } __device__ void serpent_decrypt(const u8 *inBlock, u8 *outBlock, u8 *ks) { u32 a, b, c, d, e; const u32 *k = (u32 *)ks + 104; unsigned int i=4; u32 *in = (u32 *) inBlock; u32 *out = (u32 *) outBlock; a = in[0]; b = in[1]; c = in[2]; d = in[3]; KXf (k, 32, &a, &b, &c, &d); goto start; do { c = b; b = d; d = e; k -= 32; beforeI7(ILT); start: beforeI7(I7); KXf (k, 28, &d, &a, &b, &e); ILTf (&d, &a, &b, &e); afterI7(I6); KXf (k, 24, &a, &b, &c, &e); ILTf (&a, &b, &c, &e); afterI6(I5); KXf (k, 20, &b, &d, &e, &c); ILTf (&b, &d, &e, &c); afterI5(I4); KXf (k, 16, &b, &c, &e, &a); ILTf (&b, &c, &e, &a); afterI4(I3); KXf (k, 12, &a, &b, &e, &c); ILTf (&a, &b, &e, &c); afterI3(I2); KXf (k, 8, &b, &d, &e, &c); ILTf (&b, &d, &e, &c); afterI2(I1); KXf (k, 4, &a, &b, &c, &e); ILTf (&a, &b, &c, &e); afterI1(I0); KXf (k, 0, &a, &d, &b, &e); } while (--i != 0); out[0] = a; out[1] = d; out[2] = b; out[3] = e; } #define i_rnd(i) \ t1 = g1_fun(blk[1]); t0 = g0_fun(blk[0]); \ blk[2] = rotl(blk[2], 1) ^ (t0 + t1 + l_key[4 * (i) + 10]); \ blk[3] = rotr(blk[3] ^ (t0 + 2 * t1 + l_key[4 * (i) + 11]), 1); \ t1 = g1_fun(blk[3]); t0 = g0_fun(blk[2]); \ blk[0] = rotl(blk[0], 1) ^ (t0 + t1 + l_key[4 * (i) + 8]); \ blk[1] = rotr(blk[1] ^ (t0 + 2 * t1 + l_key[4 * (i) + 9]), 1) __device__ void twofish_decrypt(TwofishInstance *instance, const u4byte in_blk[4], u4byte out_blk[4]) { u4byte t0, t1, blk[4]; u4byte *l_key = instance->l_key; u4byte *mk_tab = instance->mk_tab; blk[0] = in_blk[0] ^ l_key[4]; blk[1] = in_blk[1] ^ l_key[5]; blk[2] = in_blk[2] ^ l_key[6]; blk[3] = in_blk[3] ^ l_key[7]; i_rnd(7); i_rnd(6); i_rnd(5); i_rnd(4); i_rnd(3); i_rnd(2); i_rnd(1); i_rnd(0); out_blk[0] = blk[2] ^ l_key[0]; out_blk[1] = blk[3] ^ l_key[1]; out_blk[2] = blk[0] ^ l_key[2]; out_blk[3] = blk[1] ^ l_key[3]; } #define AES_REV_DKS #define dtab_0(x) t_in[0][x] #define dtab_1(x) t_in[1][x] #define dtab_2(x) t_in[2][x] #define dtab_3(x) t_in[3][x] #define dltab_0(x) t_il[0][x] #define dltab_1(x) t_il[1][x] #define dltab_2(x) t_il[2][x] #define dltab_3(x) t_il[3][x] #define dltab(n, x) dltab_##n(x) __device__ void dec_round_c (uint_32t* eax, uint_32t* ebx, uint_32t* ecx, uint_32t* edx, uint_32t* ebp) { uint_32t 
key1, key2, tmp, esi, edi; uint_8t* ax = (uint_8t*)eax, *bx = (uint_8t*)ebx, *cx = (uint_8t*)ecx, *dx = (uint_8t*)edx; // ebp - key pointer, eax, ebx, ecx, edx - each contain 4 bytes (int) of outBlock, esi - 3rd int in key, edi - 4th int in key key1 = ebp[0]; key2 = ebp[1]; esi = ebp[2]; edi = ebp[3]; /* begin irn_fun */ *eax = (*eax << 16) | (*eax >> 16); // al - 0, ah - 1, bl - 4, bh - 5, cl - 8, ch - 9, dl - 12, dh - 13 esi ^= dtab_0(cx[0]); esi ^= dtab_1(bx[1]); esi ^= dtab_2(ax[0]); edi ^= dtab_0(dx[0]); edi ^= dtab_1(cx[1]); edi ^= dtab_3(ax[1]); tmp = dtab_0(bx[0]); // ebp (restored later) *eax >>= 16; *ebx &= 0xffff0000; *ebx |= *eax; *ecx >>= 16; tmp ^= dtab_1(bx[1]); tmp ^= dtab_3(cx[1]); *eax = dtab_2(cx[0]); // eax *eax ^= dtab_0(bx[0]); *eax ^= dtab_1(dx[1]); *ebx >>= 16; *edx >>= 16; esi ^= dtab_3(dx[1]); tmp ^= dtab_2(dx[0]); *eax ^= dtab_3(bx[1]); edi ^= dtab_2(bx[0]); /* end irn_fun */ *ebx = tmp; *ecx = esi; *edx = edi; *eax ^= key1; *ebx ^= key2; } __device__ void dec_last_round_c (uint_32t* eax, uint_32t* ebx, uint_32t* ecx, uint_32t* edx, uint_32t* ebp) { uint_32t key1, key2, tmp, esi, edi; uint_8t* ax = (uint_8t*)eax, *bx = (uint_8t*)ebx, *cx = (uint_8t*)ecx, *dx = (uint_8t*)edx; // ebp - key pointer, eax, ebx, ecx, edx - each contain 4 bytes (int) of outBlock, esi - 3rd int in key, edi - 4th int in key key1 = ebp[0]; key2 = ebp[1]; esi = ebp[2]; edi = ebp[3]; /* begin irn_fun */ *eax = (*eax << 16) | (*eax >> 16); // al - 0, ah - 1, bl - 4, bh - 5, cl - 8, ch - 9, dl - 12, dh - 13 esi ^= dltab(0, cx[0]); esi ^= dltab(1, bx[1]); esi ^= dltab(2, ax[0]); edi ^= dltab(0, dx[0]); edi ^= dltab(1, cx[1]); edi ^= dltab(3, ax[1]); tmp = dltab(0, bx[0]); // ebp (restored later) *eax >>= 16; *ebx &= 0xffff0000; *ebx |= *eax; *ecx >>= 16; tmp ^= dltab(1, bx[1]); tmp ^= dltab(3, cx[1]); *eax = dltab(2, cx[0]); // eax *eax ^= dltab(0, bx[0]); *eax ^= dltab(1, dx[1]); *ebx >>= 16; *edx >>= 16; esi ^= dltab(3, dx[1]); tmp ^= dltab(2, dx[0]); *eax ^= dltab(3, bx[1]); edi ^= dltab(2, bx[0]); /* end irn_fun */ *ebx = tmp; *ecx = esi; *edx = edi; *eax ^= key1; *ebx ^= key2; } __device__ AES_RETURN aes_decrypt_c(const u8 *inBlock, u8 *outBlock, void *ks) { uint_32t* kp = (uint_32t *)ks; // key pointer uint_32t inf = *(kp + KS_LENGTH); uint_32t* o = (uint_32t*)outBlock; unsigned int i; #ifndef AES_REV_DKS kp += inf >> 2; #endif // xor 4 bytes in inBlock with 4 bytes in ks, 4 times, store result in outBlock o[0] = ((uint_32t*)inBlock)[0] ^ kp[0]; o[1] = ((uint_32t*)inBlock)[1] ^ kp[1]; o[2] = ((uint_32t*)inBlock)[2] ^ kp[2]; o[3] = ((uint_32t*)inBlock)[3] ^ kp[3]; if (inf == 10 * 16 || inf == 12 * 16 || inf == 14 * 16) { for (i = 0; i < inf >> 4; i++) { #ifdef AES_REV_DKS kp += 4; #else kp -= 4; #endif if (i < (inf >> 4) - 1) dec_round_c(&o[0], &o[1], &o[2], &o[3], kp); else dec_last_round_c(&o[0], &o[1], &o[2], &o[3], kp); } } else { // error return EXIT_FAILURE; } return EXIT_SUCCESS; } __device__ void Cast5Decrypt (const byte *inBlock, byte *outBlock, CAST_KEY *key) { word32 r = BE32 (((word32 *)inBlock)[0]); word32 l = BE32 (((word32 *)inBlock)[1]); word32 *K = key->K; word32 t; /* Only do full 16 rounds if key length > 80 bits */ F1(r, l, 15, 16); F3(l, r, 14, 16); F2(r, l, 13, 16); F1(l, r, 12, 16); F3(r, l, 11, 16); F2(l, r, 10, 16); F1(r, l, 9, 16); F3(l, r, 8, 16); F2(r, l, 7, 16); F1(l, r, 6, 16); F3(r, l, 5, 16); F2(l, r, 4, 16); F1(r, l, 3, 16); F3(l, r, 2, 16); F2(r, l, 1, 16); F1(l, r, 0, 16); /* Put l,r into outblock */ ((word32 *)outBlock)[0] = BE32 (l); ((word32 
*)outBlock)[1] = BE32 (r); /* Wipe clean */ t = l = r = 0; } __device__ void DecipherBlock(int cipher, void *data, void *ks) { switch (cipher) { case SERPENT: serpent_decrypt ((u8*)data, (u8*)data, (u8*)ks); break; case TWOFISH: twofish_decrypt ((TwofishInstance*)ks, (u4byte*)data, (u4byte*)data); break; case AES: { aes_decrypt_c ((u8*)data, (u8*)data, (void *) ((char *) ks + sizeof(aes_encrypt_ctx))); break; } case BLOWFISH: BlowfishEncryptLE ((unsigned char*)data, (unsigned char*)data, (BF_KEY*)ks, 0); break; // Deprecated/legacy case CAST: Cast5Decrypt ((byte*)data, (byte*)data, (CAST_KEY*)ks); break; // Deprecated/legacy case TRIPLEDES: TripleDesEncrypt ((byte*)data, (byte*)data, (TDES_KEY*)ks, 0); break; // Deprecated/legacy default: TC_THROW_FATAL_EXCEPTION; // Unknown/wrong ID } } __device__ void DecryptBufferXTS (u8 *buffer, TC_LARGEST_COMPILER_UINT length, const UINT64_STRUCT *startDataUnitNo, unsigned int startCipherBlockNo, u8 *ks, u8 *ks2, int cipher) { u8 finalCarry; ALIGN(32) u8 whiteningValue [BYTES_PER_XTS_BLOCK]; ALIGN(32) u8 byteBufUnitNo [BYTES_PER_XTS_BLOCK]; u64 *whiteningValuePtr64 = (u64 *) whiteningValue; u64 *bufPtr = (u64 *) buffer; unsigned int startBlock = startCipherBlockNo, endBlock, block; TC_LARGEST_COMPILER_UINT blockCount, dataUnitNo; // Convert the 64-bit data unit number into a little-endian 16-byte array. // Note that as we are converting a 64-bit number into a 16-byte array we can always zero the last 8 bytes. dataUnitNo = startDataUnitNo->Value; *((u64 *) byteBufUnitNo) = dataUnitNo; *((u64 *) byteBufUnitNo + 1) = 0; if (length % BYTES_PER_XTS_BLOCK) TC_THROW_FATAL_EXCEPTION; blockCount = length / BYTES_PER_XTS_BLOCK; // Process all blocks in the buffer while (blockCount > 0) { if (blockCount < BLOCKS_PER_XTS_DATA_UNIT) endBlock = startBlock + (unsigned int) blockCount; else endBlock = BLOCKS_PER_XTS_DATA_UNIT; whiteningValuePtr64 = (u64 *) whiteningValue; // Encrypt the data unit number using the secondary key (in order to generate the first // whitening value for this data unit) *whiteningValuePtr64 = *((u64 *) byteBufUnitNo); *(whiteningValuePtr64 + 1) = 0; EncipherBlock (cipher, whiteningValue, ks2); // Generate (and apply) subsequent whitening values for blocks in this data unit and // decrypt all relevant blocks in this data unit for (block = 0; block < endBlock; block++) { if (block >= startBlock) { // Post-whitening *bufPtr++ ^= *whiteningValuePtr64++; *bufPtr-- ^= *whiteningValuePtr64--; // Actual decryption DecipherBlock (cipher, bufPtr, ks); // Pre-whitening *bufPtr++ ^= *whiteningValuePtr64++; *bufPtr++ ^= *whiteningValuePtr64; } else whiteningValuePtr64++; // Derive the next whitening value finalCarry = (*whiteningValuePtr64 & 0x8000000000000000) ? 
135 : 0; *whiteningValuePtr64-- <<= 1; if (*whiteningValuePtr64 & 0x8000000000000000) *(whiteningValuePtr64 + 1) |= 1; *whiteningValuePtr64 <<= 1; whiteningValue[0] ^= finalCarry; } blockCount -= endBlock - startBlock; startBlock = 0; dataUnitNo++; *((u64 *) byteBufUnitNo) = dataUnitNo; } FAST_ERASE64 (whiteningValue, sizeof (whiteningValue)); } __device__ void Xor128 (u64 *a, u64 *b) { *a++ ^= *b++; *a ^= *b; } __device__ void Xor64 (u64 *a, u64 *b) { *a ^= *b; } #define lp32(x) ((mode(32t)*)(x)) __device__ in_line void move_block_aligned( void *p, const void *q) { lp32(p)[0] = lp32(q)[0], lp32(p)[1] = lp32(q)[1], lp32(p)[2] = lp32(q)[2], lp32(p)[3] = lp32(q)[3]; } __device__ in_line void move_block_aligned64( void *p, const void *q) { lp32(p)[0] = lp32(q)[0], lp32(p)[1] = lp32(q)[1]; } __device__ in_line void xor_block_aligned( void *p, const void *q) { lp32(p)[0] ^= lp32(q)[0], lp32(p)[1] ^= lp32(q)[1], lp32(p)[2] ^= lp32(q)[2], lp32(p)[3] ^= lp32(q)[3]; } __device__ in_line void xor_block_aligned64( void *p, const void *q) { lp32(p)[0] ^= lp32(q)[0], lp32(p)[1] ^= lp32(q)[1]; } #define xor_8kt64(i) \ xor_block_aligned(r, ctx->gf_t128[i + i][a[i] & 15]); \ xor_block_aligned(r, ctx->gf_t128[i + i + 1][a[i] >> 4]) /* Multiply a 128-bit number by a 64-bit number in the finite field GF(2^128) */ __device__ void Gf128MulBy64Tab (u8 a[8], u8 p[16], GfCtx *ctx) { ALIGN(32) u32 r[CBLK_LEN >> 2]; move_block_aligned(r, ctx->gf_t128[7*2][a[7] & 15]); xor_block_aligned(r, ctx->gf_t128[7*2+1][a[7] >> 4]); if (*(u16 *)a) { xor_8kt64(0); xor_8kt64(1); } if (a[2]) { xor_8kt64(2); } xor_8kt64(3); xor_8kt64(4); xor_8kt64(5); xor_8kt64(6); move_block_aligned(p, r); } #define xor_8k64(i) \ xor_block_aligned64(r, ctx->gf_t64[i + i][a[i] & 15]); \ xor_block_aligned64(r, ctx->gf_t64[i + i + 1][a[i] >> 4]) /* Multiply two 64-bit numbers in the finite field GF(2^64) */ __device__ void Gf64MulTab (unsigned char a[8], unsigned char p[8], GfCtx *ctx) { /* Deprecated/legacy */ ALIGN(32) u32 r[CBLK_LEN8 >> 2]; move_block_aligned64(r, ctx->gf_t64[7*2][a[7] & 15]); xor_block_aligned64(r, ctx->gf_t64[7*2+1][a[7] >> 4]); if (*(u16 *)a) { xor_8k64(0); xor_8k64(1); } if (a[2]) { xor_8k64(2); } xor_8k64(3); xor_8k64(4); xor_8k64(5); xor_8k64(6); move_block_aligned64(p, r); } __device__ void DecryptBufferLRW128 (byte *buffer, uint64 length, uint64 blockIndex, PCRYPTO_INFO cryptoInfo) { /* Deprecated/legacy */ int cipher = EAGetFirstCipher (cryptoInfo->ea); int cipherCount = EAGetCipherCount (cryptoInfo->ea); u8 *p = buffer; u8 *ks = cryptoInfo->ks; ALIGN(32) u8 i[8]; ALIGN(32) u8 t[16]; u64 b; u8 j; *(u64 *)i = BE64(blockIndex); if (length % 16) TC_THROW_FATAL_EXCEPTION; // Note that the maximum supported volume size is 8589934592 GB (i.e., 2^63 bytes). 
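	/* Descriptive note: each 16-byte block is handled as LRW decryption,
	   P = D_K1(C ^ t) ^ t with tweak t = K2 (x) i, where (x) is multiplication
	   in GF(2^128), i is the big-endian block index, and t comes from the
	   precomputed table via Gf128MulBy64Tab. For cascades, every cipher of the
	   cascade is undone in reverse order between the two XORs. The index update
	   below touches only the low index byte unless it wraps, in which case a
	   full byte-swapped increment is performed. */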
for (b = 0; b < length >> 4; b++) { Gf128MulBy64Tab (i, t, &cryptoInfo->gf_ctx); Xor128 ((u64 *)p, (u64 *)t); if (cipherCount > 1) { // Cipher cascade ks = cryptoInfo->ks + EAGetKeyScheduleSize (cryptoInfo->ea); for (cipher = EAGetLastCipher (cryptoInfo->ea); cipher != 0; cipher = EAGetPreviousCipher (cryptoInfo->ea, cipher)) { ks -= CipherGetKeyScheduleSize (cipher); DecipherBlock (cipher, p, ks); } } else { DecipherBlock (cipher, p, ks); } Xor128 ((u64 *)p, (u64 *)t); p += 16; if (i[7] != 0xff) { j = i[7]; *(u64 *)i &= ~((j & 0xffffffffffffffff) << 56); j++; *(u64 *)i |= (j & 0xffffffffffffffff) << 56; } else *(u64 *)i = BE64 ( BE64(*(u64 *)i) + 1 ); } FAST_ERASE64 (t, sizeof(t)); } __device__ void DecryptBufferLRW64 (byte *buffer, uint64 length, uint64 blockIndex, PCRYPTO_INFO cryptoInfo) { /* Deprecated/legacy */ int cipher = EAGetFirstCipher (cryptoInfo->ea); u8 *p = buffer; u8 *ks = cryptoInfo->ks; ALIGN(32) u8 i[8]; ALIGN(32) u8 t[8]; u64 b; u8 j; *(u64 *)i = BE64(blockIndex); if (length % 8) TC_THROW_FATAL_EXCEPTION; for (b = 0; b < length >> 3; b++) { Gf64MulTab (i, t, &cryptoInfo->gf_ctx); Xor64 ((u64 *)p, (u64 *)t); DecipherBlock (cipher, p, ks); Xor64 ((u64 *)p, (u64 *)t); p += 8; if (i[7] != 0xff) { j = i[7]; *(u64 *)i &= ~((j & 0xffffffffffffffff) << 56); j++; *(u64 *)i |= (j & 0xffffffffffffffff) << 56; } else *(u64 *)i = BE64 ( BE64(*(u64 *)i) + 1 ); } FAST_ERASE64 (t, sizeof(t)); } __device__ void DecryptBufferCBC (u32 *data, unsigned int len, u8 *ks, u32 *iv, u32 *whitening, int ea, int cipher) { /* IMPORTANT: This function has been deprecated (legacy) */ u32 bufIV[4]; u64 i; u32 ct[4]; int blockSize = CipherGetBlockSize (ea != 0 ? EAGetFirstCipher (ea) : cipher); if (len % blockSize) TC_THROW_FATAL_EXCEPTION; // IV bufIV[0] = iv[0]; bufIV[1] = iv[1]; if (blockSize == 16) { bufIV[2] = iv[2]; bufIV[3] = iv[3]; } // Decrypt each block for (i = 0; i < len/blockSize; i++) { // Dewhitening data[0] ^= whitening[0]; data[1] ^= whitening[1]; if (blockSize == 16) { data[2] ^= whitening[0]; data[3] ^= whitening[1]; } // CBC ct[0] = data[0]; ct[1] = data[1]; if (blockSize == 16) { ct[2] = data[2]; ct[3] = data[3]; } if (ea != 0) { // Outer-CBC ks += EAGetKeyScheduleSize (ea); for (cipher = EAGetLastCipher (ea); cipher != 0; cipher = EAGetPreviousCipher (ea, cipher)) { ks -= CipherGetKeyScheduleSize (cipher); DecipherBlock (cipher, data, ks); } } else { // CBC/inner-CBC DecipherBlock (cipher, data, ks); } // CBC data[0] ^= bufIV[0]; data[1] ^= bufIV[1]; bufIV[0] = ct[0]; bufIV[1] = ct[1]; if (blockSize == 16) { data[2] ^= bufIV[2]; data[3] ^= bufIV[3]; bufIV[2] = ct[2]; bufIV[3] = ct[3]; } data += blockSize / sizeof(*data); } } // DecryptBuffer // // buf: data to be decrypted; the start of the buffer is assumed to be aligned with the start of a data unit. // len: number of bytes to decrypt; must be divisible by the block size (for cascaded ciphers, divisible // by the largest block size used within the cascade) __device__ void DecryptBuffer (u8 *buf, TC_LARGEST_COMPILER_UINT len, PCRYPTO_INFO cryptoInfo) { switch (cryptoInfo->mode) { case XTS: { u8 *ks = cryptoInfo->ks + EAGetKeyScheduleSize (cryptoInfo->ea); u8 *ks2 = cryptoInfo->ks2 + EAGetKeyScheduleSize (cryptoInfo->ea); UINT64_STRUCT dataUnitNo; int cipher; // When encrypting/decrypting a buffer (typically a volume header) the sequential number // of the first XTS data unit in the buffer is always 0 and the start of the buffer is // always assumed to be aligned with the start of the data unit 0. 
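		// Descriptive note: for cascaded encryption algorithms the loop below walks the
		// ciphers from the last (outermost) one back to the first, rewinding ks and ks2
		// by each cipher's key schedule size, so every cipher layer is undone with its
		// own primary and secondary XTS key.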
dataUnitNo.LowPart = 0; dataUnitNo.HighPart = 0; for (cipher = EAGetLastCipher (cryptoInfo->ea); cipher != 0; cipher = EAGetPreviousCipher (cryptoInfo->ea, cipher)) { ks -= CipherGetKeyScheduleSize (cipher); ks2 -= CipherGetKeyScheduleSize (cipher); DecryptBufferXTS (buf, len, &dataUnitNo, 0, ks, ks2, cipher); } } break; case LRW: /* Deprecated/legacy */ switch (CipherGetBlockSize (EAGetFirstCipher (cryptoInfo->ea))) { case 8: DecryptBufferLRW64 (buf, (u64) len, 1, cryptoInfo); break; case 16: DecryptBufferLRW128 (buf, (u64) len, 1, cryptoInfo); break; default: TC_THROW_FATAL_EXCEPTION; } break; case CBC: case INNER_CBC: { /* Deprecated/legacy */ u8 *ks = cryptoInfo->ks + EAGetKeyScheduleSize (cryptoInfo->ea); int cipher; for (cipher = EAGetLastCipher (cryptoInfo->ea); cipher != 0; cipher = EAGetPreviousCipher (cryptoInfo->ea, cipher)) { ks -= CipherGetKeyScheduleSize (cipher); DecryptBufferCBC ((u32 *) buf, (unsigned int) len, ks, (u32 *) cryptoInfo->k2, (u32 *) &cryptoInfo->k2[8], 0, cipher); } } break; case OUTER_CBC: /* Deprecated/legacy */ DecryptBufferCBC ((u32 *) buf, (unsigned int) len, cryptoInfo->ks, (u32 *) cryptoInfo->k2, (u32 *) &cryptoInfo->k2[8], cryptoInfo->ea, 0); break; default: // Unknown/wrong ID TC_THROW_FATAL_EXCEPTION; } } #define keyInfo td->keyInfo #define cryptoInfo (&td->cryptoInfo) // --opencc-options -OPT:Olimit=180335 for optimization // dk is the expected input for the next phase // the kernel was split into 5 smaller kernels, because otherwise ptxas gives a memory allocation error // x passwords are processed at a time, going through each of the five kernels, and then the process is repeated // for the next x passwords until all passwords have been processed #define dk td->dk[RIPEMD160 - 1] // 9.4 seconds __global__ static void reduceKernel_ripemd160 (char* d_EncryptedHeader, Password* d_Input, PTHREAD_DATA d_Output) { const int tid = blockIdx.x * blockDim.x + threadIdx.x; PTHREAD_DATA td = &d_Output[tid]; crypto_loadkey (&keyInfo, (char*)d_Input[tid].Text, (int) d_Input[tid].Length); // PKCS5 is used to derive the primary header key(s) and secondary header key(s) (XTS mode) from the password memcpy (keyInfo.salt, d_EncryptedHeader + HEADER_SALT_OFFSET, PKCS5_SALT_SIZE); keyInfo.noIterations = get_pkcs5_iteration_count (RIPEMD160, 0); derive_key_ripemd160 (keyInfo.userKey, keyInfo.keyLength, keyInfo.salt, PKCS5_SALT_SIZE, keyInfo.noIterations, dk, GetMaxPkcs5OutSize(), td); } #undef dk #define dk td->dk[SHA512 - 1] // ~8 seconds __global__ static void reduceKernel_sha512 (char* d_EncryptedHeader, Password* d_Input, PTHREAD_DATA d_Output) { const int tid = blockIdx.x * blockDim.x + threadIdx.x; PTHREAD_DATA td = &d_Output[tid]; crypto_loadkey (&keyInfo, (char*)d_Input[tid].Text, (int) d_Input[tid].Length); // PKCS5 is used to derive the primary header key(s) and secondary header key(s) (XTS mode) from the password memcpy (keyInfo.salt, d_EncryptedHeader + HEADER_SALT_OFFSET, PKCS5_SALT_SIZE); keyInfo.noIterations = get_pkcs5_iteration_count (SHA512, 0); derive_key_sha512 (keyInfo.userKey, keyInfo.keyLength, keyInfo.salt, PKCS5_SALT_SIZE, keyInfo.noIterations, dk, GetMaxPkcs5OutSize()); #undef dk } #define dk td->dk[SHA1 - 1] // ~10 seconds (27 seconds so far) __global__ static void reduceKernel_sha1 (char* d_EncryptedHeader, Password* d_Input, PTHREAD_DATA d_Output) { const int tid = blockIdx.x * blockDim.x + threadIdx.x; PTHREAD_DATA td = &d_Output[tid]; crypto_loadkey (&keyInfo, (char*)d_Input[tid].Text, (int) d_Input[tid].Length); // PKCS5 is used to 
derive the primary header key(s) and secondary header key(s) (XTS mode) from the password memcpy (keyInfo.salt, d_EncryptedHeader + HEADER_SALT_OFFSET, PKCS5_SALT_SIZE); keyInfo.noIterations = get_pkcs5_iteration_count (SHA1, 0); derive_key_sha1 (keyInfo.userKey, keyInfo.keyLength, keyInfo.salt, PKCS5_SALT_SIZE, keyInfo.noIterations, dk, GetMaxPkcs5OutSize()); #undef dk } #define dk td->dk[WHIRLPOOL - 1] // slowest by far: ~31 seconds, total: ~58 seconds __global__ static void reduceKernel_whirlpool (char* d_EncryptedHeader, Password* d_Input, PTHREAD_DATA d_Output) { const int tid = blockIdx.x * blockDim.x + threadIdx.x; PTHREAD_DATA td = &d_Output[tid]; crypto_loadkey (&keyInfo, (char*)d_Input[tid].Text, (int) d_Input[tid].Length); // PKCS5 is used to derive the primary header key(s) and secondary header key(s) (XTS mode) from the password memcpy (keyInfo.salt, d_EncryptedHeader + HEADER_SALT_OFFSET, PKCS5_SALT_SIZE); keyInfo.noIterations = get_pkcs5_iteration_count (WHIRLPOOL, 0); derive_key_whirlpool (keyInfo.userKey, keyInfo.keyLength, keyInfo.salt, PKCS5_SALT_SIZE, keyInfo.noIterations, dk, GetMaxPkcs5OutSize(), td); #undef dk } #define dk td->dk[enqPkcs5Prf - 1] __global__ static void reduceKernel_final (char* d_EncryptedHeader, PTHREAD_DATA d_Input, PTHREAD_RESULT d_Output) { const int tid = blockIdx.x * blockDim.x + threadIdx.x; PTHREAD_DATA td = &d_Input[tid]; ALIGN(32) char header[TC_VOLUME_HEADER_EFFECTIVE_SIZE]; int enqPkcs5Prf; int primaryKeyOffset; ALIGN(32) u1byte q_tab[2][256]; ALIGN(32) u4byte m_tab[4][256]; u4byte qt_gen = 0, mt_gen = 0; crypto_open(cryptoInfo); // Test all available PKCS5 PRFs for (enqPkcs5Prf = FIRST_PRF_ID; enqPkcs5Prf <= LAST_PRF_ID; ++enqPkcs5Prf) { BOOL lrw64InitDone = FALSE; // Deprecated/legacy BOOL lrw128InitDone = FALSE; // Deprecated/legacy if (enqPkcs5Prf == SHA512/* || enqPkcs5Prf == WHIRLPOOL*/) continue; // Test all available modes of operation for (cryptoInfo->mode = FIRST_MODE_OF_OPERATION_ID; cryptoInfo->mode <= LAST_MODE_OF_OPERATION; cryptoInfo->mode++) { switch (cryptoInfo->mode) { case LRW: case CBC: case INNER_CBC: case OUTER_CBC: // For LRW (deprecated/legacy), copy the tweak key // For CBC (deprecated/legacy), copy the IV/whitening seed memcpy (cryptoInfo->k2, dk, LEGACY_VOL_IV_SIZE); primaryKeyOffset = LEGACY_VOL_IV_SIZE; break; default: primaryKeyOffset = 0; } // Test all available encryption algorithms for (cryptoInfo->ea = EAGetFirst (); cryptoInfo->ea != 0; cryptoInfo->ea = EAGetNext (cryptoInfo->ea)) { int blockSize; if (!EAIsModeSupported (cryptoInfo->ea, cryptoInfo->mode)) continue; // This encryption algorithm has never been available with this mode of operation blockSize = CipherGetBlockSize (EAGetFirstCipher (cryptoInfo->ea)); if (EAInit (cryptoInfo->ea, (unsigned char *)(dk + primaryKeyOffset), cryptoInfo->ks, (u1byte**)q_tab, (u4byte**)m_tab, &qt_gen, &mt_gen) == ERR_CIPHER_INIT_FAILURE) goto ret; // Init objects related to the mode of operation if (cryptoInfo->mode == XTS) { // Copy the secondary key (if cascade, multiple concatenated) memcpy (cryptoInfo->k2, dk + EAGetKeySize (cryptoInfo->ea), EAGetKeySize (cryptoInfo->ea)); // Secondary key schedule if (!EAInitMode (cryptoInfo, (u1byte**)q_tab, (u4byte**)m_tab, &qt_gen, &mt_gen)) { goto ret; } } else if (cryptoInfo->mode == LRW && (blockSize == 8 && !lrw64InitDone || blockSize == 16 && !lrw128InitDone)) { // Deprecated/legacy if (!EAInitMode (cryptoInfo,(u1byte**)q_tab, (u4byte**)m_tab, &qt_gen, &mt_gen)) { goto ret; } if (blockSize == 8) lrw64InitDone = TRUE; 
else if (blockSize == 16) lrw128InitDone = TRUE; } // Copy the header for decryption memcpy (header, d_EncryptedHeader, sizeof (header)); // Try to decrypt header DecryptBuffer ((unsigned char*)(header + HEADER_ENCRYPTED_DATA_OFFSET), HEADER_ENCRYPTED_DATA_SIZE, cryptoInfo); // fixme: crash here due to twofish // Magic 'TRUE' if (GetHeaderField32 ((byte*)header, TC_HEADER_OFFSET_MAGIC) == 0x54525545){ d_Output->tid = tid; d_Output->ea = cryptoInfo->ea; d_Output->mode = cryptoInfo->mode; d_Output->prf = enqPkcs5Prf; goto ret; } } } } ret: return; } extern "C" void launch_reduceKernel_ripemd160(char* d_EncryptedHeader, Password* d_Input, PTHREAD_DATA d_Output, int BLOCK_N, int THREAD_N) { hipLaunchKernelGGL(( reduceKernel_ripemd160), dim3(BLOCK_N), dim3(THREAD_N), 0, 0, d_EncryptedHeader, d_Input, d_Output); } extern "C" void launch_reduceKernel_sha512(char* d_EncryptedHeader, Password* d_Input, PTHREAD_DATA d_Output, int BLOCK_N, int THREAD_N) { hipLaunchKernelGGL(( reduceKernel_sha512), dim3(BLOCK_N), dim3(THREAD_N), 0, 0, d_EncryptedHeader, d_Input, d_Output); } extern "C" void launch_reduceKernel_sha1(char* d_EncryptedHeader, Password* d_Input, PTHREAD_DATA d_Output, int BLOCK_N, int THREAD_N) { hipLaunchKernelGGL(( reduceKernel_sha1), dim3(BLOCK_N), dim3(THREAD_N), 0, 0, d_EncryptedHeader, d_Input, d_Output); } extern "C" void launch_reduceKernel_whirlpool(char* d_EncryptedHeader, Password* d_Input, PTHREAD_DATA d_Output, int BLOCK_N, int THREAD_N) { hipLaunchKernelGGL(( reduceKernel_whirlpool), dim3(BLOCK_N), dim3(THREAD_N), 0, 0, d_EncryptedHeader, d_Input, d_Output); } extern "C" void launch_reduceKernel_final(char *d_EncryptedHeader, PTHREAD_DATA d_Input, PTHREAD_RESULT d_Output, int BLOCK_N, int THREAD_N) { hipLaunchKernelGGL(( reduceKernel_final), dim3(BLOCK_N), dim3(THREAD_N), 0, 0, d_EncryptedHeader, d_Input, d_Output); }
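/*
 * Hedged host-side usage sketch (not in the original source): one way the five
 * launch wrappers above could be sequenced for a batch of passwords. It assumes
 * the HIP runtime declarations are visible in this translation unit and that the
 * device buffers were already allocated and populated by the caller; the function
 * name and parameter names are hypothetical. Kernels on the default stream
 * serialize anyway, so the explicit hipDeviceSynchronize() calls only mark the
 * phase boundary.
 */
extern "C" void launch_all_phases_example (char *d_EncryptedHeader, Password *d_Passwords,
	PTHREAD_DATA d_ThreadData, PTHREAD_RESULT d_Result, int blockN, int threadN)
{
	// Phase 1: derive header keys with every supported PKCS5 PRF
	launch_reduceKernel_ripemd160 (d_EncryptedHeader, d_Passwords, d_ThreadData, blockN, threadN);
	launch_reduceKernel_sha512    (d_EncryptedHeader, d_Passwords, d_ThreadData, blockN, threadN);
	launch_reduceKernel_sha1      (d_EncryptedHeader, d_Passwords, d_ThreadData, blockN, threadN);
	launch_reduceKernel_whirlpool (d_EncryptedHeader, d_Passwords, d_ThreadData, blockN, threadN);
	hipDeviceSynchronize ();

	// Phase 2: try to decrypt the volume header with each derived key
	launch_reduceKernel_final (d_EncryptedHeader, d_ThreadData, d_Result, blockN, threadN);
	hipDeviceSynchronize ();
}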
9f0b12592f5801cdff92c32f66b0142dd8724578.cu
#include "common.h" #include "crypto.h" __device__ void * mymemset ( void * ptr, int value, size_t num ) { size_t i; for (i = 0; i < num; i++) ((char*) ptr)[i] = value; return ptr; } #define memset mymemset __device__ void * mymemcpy ( void * destination, const void * source, size_t num ) { size_t i; for (i = 0; i < num; i++) ((char*) destination)[i] = ((char*) source)[i]; return destination; } #define memcpy mymemcpy __device__ void crypto_open (PCRYPTO_INFO cryptoInfo) { memset (cryptoInfo, 0, sizeof (CRYPTO_INFO)); cryptoInfo->ea = -1; } __device__ void crypto_loadkey (PKEY_INFO keyInfo, char *lpszUserKey, int nUserKeyLen) { keyInfo->keyLength = nUserKeyLen; burn (keyInfo->userKey, sizeof (keyInfo->userKey)); memcpy (keyInfo->userKey, lpszUserKey, nUserKeyLen); } __device__ int get_pkcs5_iteration_count (int pkcs5_prf_id, BOOL bBoot) { switch (pkcs5_prf_id) { case RIPEMD160: return (bBoot ? 1000 : 2000); case SHA512: return 1000; case SHA1: // Deprecated/legacy return 2000; case WHIRLPOOL: return 1000; default: TC_THROW_FATAL_EXCEPTION; // Unknown/wrong ID } return 0; } __device__ void RMD160Init (RMD160_CTX *ctx) { ctx->count = 0; ctx->state[0] = 0x67452301; ctx->state[1] = 0xefcdab89; ctx->state[2] = 0x98badcfe; ctx->state[3] = 0x10325476; ctx->state[4] = 0xc3d2e1f0; } __device__ word32 rotlFixed (word32 x, unsigned int y) { return (word32)((x<<y) | (x>>(sizeof(word32)*8-y))); } #define F(x, y, z) (x ^ y ^ z) #define G(x, y, z) (z ^ (x & (y^z))) #define H(x, y, z) (z ^ (x | ~y)) #define I(x, y, z) (y ^ (z & (x^y))) #define J(x, y, z) (x ^ (y | ~z)) #define k0 0UL #define k1 0x5a827999UL #define k2 0x6ed9eba1UL #define k3 0x8f1bbcdcUL #define k4 0xa953fd4eUL #define k5 0x50a28be6UL #define k6 0x5c4dd124UL #define k7 0x6d703ef3UL #define k8 0x7a6d76e9UL #define k9 0UL #define Subround(f, a, b, c, d, e, x, s, k) \ a += f(b, c, d) + x + k; \ a = rotlFixed((word32)a, s) + e;\ c = rotlFixed((word32)c, 10U) __device__ void RMD160Transform (u32 *digest, const u32 *data) { const word32 *X = data; word32 a1, b1, c1, d1, e1, a2, b2, c2, d2, e2; a1 = a2 = digest[0]; b1 = b2 = digest[1]; c1 = c2 = digest[2]; d1 = d2 = digest[3]; e1 = e2 = digest[4]; Subround(F, a1, b1, c1, d1, e1, X[ 0], 11, k0); Subround(F, e1, a1, b1, c1, d1, X[ 1], 14, k0); Subround(F, d1, e1, a1, b1, c1, X[ 2], 15, k0); Subround(F, c1, d1, e1, a1, b1, X[ 3], 12, k0); Subround(F, b1, c1, d1, e1, a1, X[ 4], 5, k0); Subround(F, a1, b1, c1, d1, e1, X[ 5], 8, k0); Subround(F, e1, a1, b1, c1, d1, X[ 6], 7, k0); Subround(F, d1, e1, a1, b1, c1, X[ 7], 9, k0); Subround(F, c1, d1, e1, a1, b1, X[ 8], 11, k0); Subround(F, b1, c1, d1, e1, a1, X[ 9], 13, k0); Subround(F, a1, b1, c1, d1, e1, X[10], 14, k0); Subround(F, e1, a1, b1, c1, d1, X[11], 15, k0); Subround(F, d1, e1, a1, b1, c1, X[12], 6, k0); Subround(F, c1, d1, e1, a1, b1, X[13], 7, k0); Subround(F, b1, c1, d1, e1, a1, X[14], 9, k0); Subround(F, a1, b1, c1, d1, e1, X[15], 8, k0); Subround(G, e1, a1, b1, c1, d1, X[ 7], 7, k1); Subround(G, d1, e1, a1, b1, c1, X[ 4], 6, k1); Subround(G, c1, d1, e1, a1, b1, X[13], 8, k1); Subround(G, b1, c1, d1, e1, a1, X[ 1], 13, k1); Subround(G, a1, b1, c1, d1, e1, X[10], 11, k1); Subround(G, e1, a1, b1, c1, d1, X[ 6], 9, k1); Subround(G, d1, e1, a1, b1, c1, X[15], 7, k1); Subround(G, c1, d1, e1, a1, b1, X[ 3], 15, k1); Subround(G, b1, c1, d1, e1, a1, X[12], 7, k1); Subround(G, a1, b1, c1, d1, e1, X[ 0], 12, k1); Subround(G, e1, a1, b1, c1, d1, X[ 9], 15, k1); Subround(G, d1, e1, a1, b1, c1, X[ 5], 9, k1); Subround(G, c1, d1, e1, a1, b1, X[ 2], 11, 
k1); Subround(G, b1, c1, d1, e1, a1, X[14], 7, k1); Subround(G, a1, b1, c1, d1, e1, X[11], 13, k1); Subround(G, e1, a1, b1, c1, d1, X[ 8], 12, k1); Subround(H, d1, e1, a1, b1, c1, X[ 3], 11, k2); Subround(H, c1, d1, e1, a1, b1, X[10], 13, k2); Subround(H, b1, c1, d1, e1, a1, X[14], 6, k2); Subround(H, a1, b1, c1, d1, e1, X[ 4], 7, k2); Subround(H, e1, a1, b1, c1, d1, X[ 9], 14, k2); Subround(H, d1, e1, a1, b1, c1, X[15], 9, k2); Subround(H, c1, d1, e1, a1, b1, X[ 8], 13, k2); Subround(H, b1, c1, d1, e1, a1, X[ 1], 15, k2); Subround(H, a1, b1, c1, d1, e1, X[ 2], 14, k2); Subround(H, e1, a1, b1, c1, d1, X[ 7], 8, k2); Subround(H, d1, e1, a1, b1, c1, X[ 0], 13, k2); Subround(H, c1, d1, e1, a1, b1, X[ 6], 6, k2); Subround(H, b1, c1, d1, e1, a1, X[13], 5, k2); Subround(H, a1, b1, c1, d1, e1, X[11], 12, k2); Subround(H, e1, a1, b1, c1, d1, X[ 5], 7, k2); Subround(H, d1, e1, a1, b1, c1, X[12], 5, k2); Subround(I, c1, d1, e1, a1, b1, X[ 1], 11, k3); Subround(I, b1, c1, d1, e1, a1, X[ 9], 12, k3); Subround(I, a1, b1, c1, d1, e1, X[11], 14, k3); Subround(I, e1, a1, b1, c1, d1, X[10], 15, k3); Subround(I, d1, e1, a1, b1, c1, X[ 0], 14, k3); Subround(I, c1, d1, e1, a1, b1, X[ 8], 15, k3); Subround(I, b1, c1, d1, e1, a1, X[12], 9, k3); Subround(I, a1, b1, c1, d1, e1, X[ 4], 8, k3); Subround(I, e1, a1, b1, c1, d1, X[13], 9, k3); Subround(I, d1, e1, a1, b1, c1, X[ 3], 14, k3); Subround(I, c1, d1, e1, a1, b1, X[ 7], 5, k3); Subround(I, b1, c1, d1, e1, a1, X[15], 6, k3); Subround(I, a1, b1, c1, d1, e1, X[14], 8, k3); Subround(I, e1, a1, b1, c1, d1, X[ 5], 6, k3); Subround(I, d1, e1, a1, b1, c1, X[ 6], 5, k3); Subround(I, c1, d1, e1, a1, b1, X[ 2], 12, k3); Subround(J, b1, c1, d1, e1, a1, X[ 4], 9, k4); Subround(J, a1, b1, c1, d1, e1, X[ 0], 15, k4); Subround(J, e1, a1, b1, c1, d1, X[ 5], 5, k4); Subround(J, d1, e1, a1, b1, c1, X[ 9], 11, k4); Subround(J, c1, d1, e1, a1, b1, X[ 7], 6, k4); Subround(J, b1, c1, d1, e1, a1, X[12], 8, k4); Subround(J, a1, b1, c1, d1, e1, X[ 2], 13, k4); Subround(J, e1, a1, b1, c1, d1, X[10], 12, k4); Subround(J, d1, e1, a1, b1, c1, X[14], 5, k4); Subround(J, c1, d1, e1, a1, b1, X[ 1], 12, k4); Subround(J, b1, c1, d1, e1, a1, X[ 3], 13, k4); Subround(J, a1, b1, c1, d1, e1, X[ 8], 14, k4); Subround(J, e1, a1, b1, c1, d1, X[11], 11, k4); Subround(J, d1, e1, a1, b1, c1, X[ 6], 8, k4); Subround(J, c1, d1, e1, a1, b1, X[15], 5, k4); Subround(J, b1, c1, d1, e1, a1, X[13], 6, k4); Subround(J, a2, b2, c2, d2, e2, X[ 5], 8, k5); Subround(J, e2, a2, b2, c2, d2, X[14], 9, k5); Subround(J, d2, e2, a2, b2, c2, X[ 7], 9, k5); Subround(J, c2, d2, e2, a2, b2, X[ 0], 11, k5); Subround(J, b2, c2, d2, e2, a2, X[ 9], 13, k5); Subround(J, a2, b2, c2, d2, e2, X[ 2], 15, k5); Subround(J, e2, a2, b2, c2, d2, X[11], 15, k5); Subround(J, d2, e2, a2, b2, c2, X[ 4], 5, k5); Subround(J, c2, d2, e2, a2, b2, X[13], 7, k5); Subround(J, b2, c2, d2, e2, a2, X[ 6], 7, k5); Subround(J, a2, b2, c2, d2, e2, X[15], 8, k5); Subround(J, e2, a2, b2, c2, d2, X[ 8], 11, k5); Subround(J, d2, e2, a2, b2, c2, X[ 1], 14, k5); Subround(J, c2, d2, e2, a2, b2, X[10], 14, k5); Subround(J, b2, c2, d2, e2, a2, X[ 3], 12, k5); Subround(J, a2, b2, c2, d2, e2, X[12], 6, k5); Subround(I, e2, a2, b2, c2, d2, X[ 6], 9, k6); Subround(I, d2, e2, a2, b2, c2, X[11], 13, k6); Subround(I, c2, d2, e2, a2, b2, X[ 3], 15, k6); Subround(I, b2, c2, d2, e2, a2, X[ 7], 7, k6); Subround(I, a2, b2, c2, d2, e2, X[ 0], 12, k6); Subround(I, e2, a2, b2, c2, d2, X[13], 8, k6); Subround(I, d2, e2, a2, b2, c2, X[ 5], 9, k6); Subround(I, c2, d2, e2, a2, b2, 
X[10], 11, k6); Subround(I, b2, c2, d2, e2, a2, X[14], 7, k6); Subround(I, a2, b2, c2, d2, e2, X[15], 7, k6); Subround(I, e2, a2, b2, c2, d2, X[ 8], 12, k6); Subround(I, d2, e2, a2, b2, c2, X[12], 7, k6); Subround(I, c2, d2, e2, a2, b2, X[ 4], 6, k6); Subround(I, b2, c2, d2, e2, a2, X[ 9], 15, k6); Subround(I, a2, b2, c2, d2, e2, X[ 1], 13, k6); Subround(I, e2, a2, b2, c2, d2, X[ 2], 11, k6); Subround(H, d2, e2, a2, b2, c2, X[15], 9, k7); Subround(H, c2, d2, e2, a2, b2, X[ 5], 7, k7); Subround(H, b2, c2, d2, e2, a2, X[ 1], 15, k7); Subround(H, a2, b2, c2, d2, e2, X[ 3], 11, k7); Subround(H, e2, a2, b2, c2, d2, X[ 7], 8, k7); Subround(H, d2, e2, a2, b2, c2, X[14], 6, k7); Subround(H, c2, d2, e2, a2, b2, X[ 6], 6, k7); Subround(H, b2, c2, d2, e2, a2, X[ 9], 14, k7); Subround(H, a2, b2, c2, d2, e2, X[11], 12, k7); Subround(H, e2, a2, b2, c2, d2, X[ 8], 13, k7); Subround(H, d2, e2, a2, b2, c2, X[12], 5, k7); Subround(H, c2, d2, e2, a2, b2, X[ 2], 14, k7); Subround(H, b2, c2, d2, e2, a2, X[10], 13, k7); Subround(H, a2, b2, c2, d2, e2, X[ 0], 13, k7); Subround(H, e2, a2, b2, c2, d2, X[ 4], 7, k7); Subround(H, d2, e2, a2, b2, c2, X[13], 5, k7); Subround(G, c2, d2, e2, a2, b2, X[ 8], 15, k8); Subround(G, b2, c2, d2, e2, a2, X[ 6], 5, k8); Subround(G, a2, b2, c2, d2, e2, X[ 4], 8, k8); Subround(G, e2, a2, b2, c2, d2, X[ 1], 11, k8); Subround(G, d2, e2, a2, b2, c2, X[ 3], 14, k8); Subround(G, c2, d2, e2, a2, b2, X[11], 14, k8); Subround(G, b2, c2, d2, e2, a2, X[15], 6, k8); Subround(G, a2, b2, c2, d2, e2, X[ 0], 14, k8); Subround(G, e2, a2, b2, c2, d2, X[ 5], 6, k8); Subround(G, d2, e2, a2, b2, c2, X[12], 9, k8); Subround(G, c2, d2, e2, a2, b2, X[ 2], 12, k8); Subround(G, b2, c2, d2, e2, a2, X[13], 9, k8); Subround(G, a2, b2, c2, d2, e2, X[ 9], 12, k8); Subround(G, e2, a2, b2, c2, d2, X[ 7], 5, k8); Subround(G, d2, e2, a2, b2, c2, X[10], 15, k8); Subround(G, c2, d2, e2, a2, b2, X[14], 8, k8); Subround(F, b2, c2, d2, e2, a2, X[12], 8, k9); Subround(F, a2, b2, c2, d2, e2, X[15], 5, k9); Subround(F, e2, a2, b2, c2, d2, X[10], 12, k9); Subround(F, d2, e2, a2, b2, c2, X[ 4], 9, k9); Subround(F, c2, d2, e2, a2, b2, X[ 1], 12, k9); Subround(F, b2, c2, d2, e2, a2, X[ 5], 5, k9); Subround(F, a2, b2, c2, d2, e2, X[ 8], 14, k9); Subround(F, e2, a2, b2, c2, d2, X[ 7], 6, k9); Subround(F, d2, e2, a2, b2, c2, X[ 6], 8, k9); Subround(F, c2, d2, e2, a2, b2, X[ 2], 13, k9); Subround(F, b2, c2, d2, e2, a2, X[13], 6, k9); Subround(F, a2, b2, c2, d2, e2, X[14], 5, k9); Subround(F, e2, a2, b2, c2, d2, X[ 0], 15, k9); Subround(F, d2, e2, a2, b2, c2, X[ 3], 13, k9); Subround(F, c2, d2, e2, a2, b2, X[ 9], 11, k9); Subround(F, b2, c2, d2, e2, a2, X[11], 11, k9); c1 = digest[1] + c1 + d2; digest[1] = digest[2] + d1 + e2; digest[2] = digest[3] + e1 + a2; digest[3] = digest[4] + a1 + b2; digest[4] = digest[0] + b1 + c2; digest[0] = c1; } #undef k1 #undef k2 #undef k3 // Update context to reflect the concatenation of another buffer full // of bytes. __device__ void RMD160Update (RMD160_CTX *ctx, const unsigned char *input, u32 lenArg) { uint64 len = lenArg, have, need; // Check how many bytes we already have and how many more we need. have = ctx->count >> 3; have &= (RIPEMD160_BLOCK_LENGTH - 1); need = RIPEMD160_BLOCK_LENGTH - have; // Update bitcount. ctx->count += len << 3; if (len >= need) { if (have != 0) { memcpy (ctx->buffer + have, input, (size_t) need); RMD160Transform ((uint32 *) ctx->state, (const uint32 *) ctx->buffer); input += need; len -= need; have = 0; } // Process data in RIPEMD160_BLOCK_LENGTH-byte chunks. 
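	// (All complete 64-byte blocks remaining in 'input' are compressed directly;
	// any tail bytes are stashed in ctx->buffer for the next call, and ctx->count
	// keeps the running bit length that RMD160Pad later appends.)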
while (len >= RIPEMD160_BLOCK_LENGTH) { RMD160Transform ((uint32 *) ctx->state, (const uint32 *) input); input += RIPEMD160_BLOCK_LENGTH; len -= RIPEMD160_BLOCK_LENGTH; } } // Handle any remaining bytes of data. if (len != 0) memcpy (ctx->buffer + have, input, (size_t) len); } // Pad pad to 64-byte boundary with the bit pattern // 1 0* (64-bit count of bits processed, MSB-first) __device__ void RMD160Pad(RMD160_CTX *ctx) { byte count[8]; uint32 padlen; // Convert count to 8 bytes in little endian order. PUT_64BIT_LE(count, ctx->count); // Pad out to 56 mod 64. padlen = RIPEMD160_BLOCK_LENGTH - (uint32)((ctx->count >> 3) & (RIPEMD160_BLOCK_LENGTH - 1)); if (padlen < 1 + 8) padlen += RIPEMD160_BLOCK_LENGTH; RMD160Update(ctx, PADDING, padlen - 8); // padlen - 8 <= 64 RMD160Update(ctx, count, 8); } // Final wrapup--call RMD160Pad, fill in digest and zero out ctx. __device__ void RMD160Final(unsigned char *digest, RMD160_CTX *ctx) { int i; RMD160Pad(ctx); if (digest) { for (i = 0; i < 5; i++) PUT_32BIT_LE(digest + i * 4, ctx->state[i]); memset (ctx, 0, sizeof(*ctx)); } } #define k_ipad td->k_ipad #define k_opad td->k_opad __device__ void hmac_ripemd160 (char *key, int keylen, char *input, int len, char *digest, PTHREAD_DATA td) { RMD160_CTX context; unsigned char tk[RIPEMD160_DIGESTSIZE]; int i; // If the key is longer than the hash algorithm block size, // let key = ripemd160(key), as per HMAC specifications. if (keylen > RIPEMD160_BLOCKSIZE) { RMD160_CTX tctx; RMD160Init(&tctx); RMD160Update(&tctx, (const unsigned char *) key, keylen); RMD160Final(tk, &tctx); key = (char *) tk; keylen = RIPEMD160_DIGESTSIZE; burn (&tctx, sizeof(tctx)); // Prevent leaks } // RMD160(K XOR opad, RMD160(K XOR ipad, text)) // where K is an n byte key // ipad is the byte 0x36 repeated RIPEMD160_BLOCKSIZE times // opad is the byte 0x5c repeated RIPEMD160_BLOCKSIZE times // and text is the data being protected */ // start out by storing key in pads memset(k_ipad, 54U, 64); memset(k_opad, 92U, 64); // XOR key with ipad and opad values for (i=0; i<keylen; i++) { k_ipad[i] ^= (unsigned char)key[i]; k_opad[i] ^= (unsigned char)key[i]; } // perform inner RIPEMD-160 RMD160Init(&context); // init context for 1st pass RMD160Update(&context, k_ipad, RIPEMD160_BLOCKSIZE); // start with inner pad RMD160Update(&context, (const unsigned char *) input, len); // then text of datagram RMD160Final((unsigned char *) digest, &context); // finish up 1st pass // perform outer RIPEMD-160 RMD160Init(&context); // init context for 2nd pass RMD160Update(&context, k_opad, RIPEMD160_BLOCKSIZE); // start with outer pad // results of 1st hash RMD160Update(&context, (const unsigned char *) digest, RIPEMD160_DIGESTSIZE); RMD160Final((unsigned char *) digest, &context); // finish up 2nd pass // Prevent possible leaks. burn (tk, sizeof(tk)); burn (&context, sizeof(context)); } #undef k_ipad #undef k_opad __device__ void derive_u_ripemd160 (char *pwd, int pwd_len, char *salt, int salt_len, int iterations, char *u, int b, PTHREAD_DATA td) { char j[RIPEMD160_DIGESTSIZE], k[RIPEMD160_DIGESTSIZE]; char init[128]; char counter[4]; int c, i; // iteration 1 memset (counter, 0, 4); counter[3] = (char) b; memcpy (init, salt, salt_len); // salt memcpy (&init[salt_len], counter, 4); // big-endian block number // remaining iterations for (c = 0; c < iterations; c++) { hmac_ripemd160 (pwd, pwd_len, !c ? init : j, !c ? salt_len + 4 : RIPEMD160_DIGESTSIZE, !c ? 
j : k, td); if (!c) memcpy (u, j, RIPEMD160_DIGESTSIZE); else for (i = 0; i < RIPEMD160_DIGESTSIZE; i++) { u[i] ^= k[i]; j[i] = k[i]; } } // Prevent possible leaks. burn (j, sizeof(j)); burn (k, sizeof(k)); } __device__ void derive_key_ripemd160 (char *pwd, int pwd_len, char *salt, int salt_len, int iterations, char *dk, int dklen, PTHREAD_DATA td) { char u[RIPEMD160_DIGESTSIZE]; int b, l, r; if (dklen % RIPEMD160_DIGESTSIZE) { l = 1 + dklen / RIPEMD160_DIGESTSIZE; } else { l = dklen / RIPEMD160_DIGESTSIZE; } r = dklen - (l - 1) * RIPEMD160_DIGESTSIZE; // first l - 1 blocks for (b = 1; b <= l; b++) { derive_u_ripemd160 (pwd, pwd_len, salt, salt_len, iterations, u, b, td); if (b < l) { memcpy (dk, u, RIPEMD160_DIGESTSIZE); dk += RIPEMD160_DIGESTSIZE; } } // last block memcpy (dk, u, r); // Prevent possible leaks. burn (u, sizeof(u)); } __device__ int EAGetFirst () { return 1; } __device__ int EAGetNext (int previousEA) { int id = previousEA + 1; if (EncryptionAlgorithms[id].Ciphers[0] != 0) return id; return 0; } // Returns the first mode of operation of EA __device__ int EAGetFirstMode (int ea) { return (EncryptionAlgorithms[ea].Modes[0]); } __device__ int EAGetNextMode (int ea, int previousModeId) { int c, i = 0; while (c = EncryptionAlgorithms[ea].Modes[i++]) { if (c == previousModeId) return EncryptionAlgorithms[ea].Modes[i]; } return 0; } // Returns TRUE if the mode of operation is supported for the encryption algorithm __device__ BOOL EAIsModeSupported (int ea, int testedMode) { int mode; for (mode = EAGetFirstMode (ea); mode != 0; mode = EAGetNextMode (ea, mode)) { if (mode == testedMode) return TRUE; } return FALSE; } __device__ int EAGetFirstCipher (int ea) { return EncryptionAlgorithms[ea].Ciphers[0]; } __device__ int EAGetNextCipher (int ea, int previousCipherId) { int c, i = 0; while (c = EncryptionAlgorithms[ea].Ciphers[i++]) { if (c == previousCipherId) return EncryptionAlgorithms[ea].Ciphers[i]; } return 0; } // Returns number of ciphers in EA __device__ int EAGetCipherCount (int ea) { int i = 0; while (EncryptionAlgorithms[ea].Ciphers[i++]); return i - 1; } __device__ int EAGetLastCipher (int ea) { int i = 0; while (EncryptionAlgorithms[ea].Ciphers[i++]); return EncryptionAlgorithms[ea].Ciphers[i - 2]; } __device__ int EAGetPreviousCipher (int ea, int previousCipherId) { int c, i = 0; if (EncryptionAlgorithms[ea].Ciphers[i++] == previousCipherId) return 0; while (c = EncryptionAlgorithms[ea].Ciphers[i++]) { if (c == previousCipherId) return EncryptionAlgorithms[ea].Ciphers[i - 2]; } return 0; } __device__ const Cipher *CipherGet (int id) { int i; for (i = 0; Ciphers[i].Id != 0; i++) if (Ciphers[i].Id == id) return &Ciphers[i]; return NULL; } __device__ int CipherGetKeySize (int cipherId) { return CipherGet (cipherId) -> KeySize; } // Returns sum of key sizes of all ciphers of the EA (in bytes) __device__ int EAGetKeySize (int ea) { int i = EAGetFirstCipher (ea); int size = CipherGetKeySize (i); while (i = EAGetNextCipher (ea, i)) { size += CipherGetKeySize (i); } return size; } // Returns the largest key size needed by an EA for the specified mode of operation __device__ int EAGetLargestKeyForMode (int mode) { int ea, key = 0; for (ea = EAGetFirst (); ea != 0; ea = EAGetNext (ea)) { if (!EAIsModeSupported (ea, mode)) continue; if (EAGetKeySize (ea) >= key) key = EAGetKeySize (ea); } return key; } __device__ int GetMaxPkcs5OutSize (void) { int size = 32; size = max (size, EAGetLargestKeyForMode (XTS) * 2); // Sizes of primary + secondary keys size = max (size, 
LEGACY_VOL_IV_SIZE + EAGetLargestKeyForMode (LRW)); // Deprecated/legacy size = max (size, LEGACY_VOL_IV_SIZE + EAGetLargestKeyForMode (CBC)); // Deprecated/legacy size = max (size, LEGACY_VOL_IV_SIZE + EAGetLargestKeyForMode (OUTER_CBC)); // Deprecated/legacy size = max (size, LEGACY_VOL_IV_SIZE + EAGetLargestKeyForMode (INNER_CBC)); // Deprecated/legacy return size; } #define rotl32(x,n) (((x) << n) | ((x) >> (32 - n))) #define rotr32(x,n) (((x) >> n) | ((x) << (32 - n))) #define rotr64(x,n) (((x) >> n) | ((x) << (64 - n))) #define bswap_32(x) ((rotr32((x), 24) & 0x00ff00ff) | (rotr32((x), 8) & 0xff00ff00)) #define bswap_64(x) (((uint_64t)(bswap_32((uint_32t)(x)))) << 32 | bswap_32((uint_32t)((x) >> 32))) #define bsw_32(p,n) \ { int _i = (n); while(_i--) ((sha1_32t*)p)[_i] = bswap_32(((sha1_32t*)p)[_i]); } #define bsw_64(p,n) \ { int _i = (n); while(_i--) ((uint_64t*)p)[_i] = bswap_64(((uint_64t*)p)[_i]); } #define s_0(x) (rotr64((x), 28) ^ rotr64((x), 34) ^ rotr64((x), 39)) #define s_1(x) (rotr64((x), 14) ^ rotr64((x), 18) ^ rotr64((x), 41)) #define g_0(x) (rotr64((x), 1) ^ rotr64((x), 8) ^ ((x) >> 7)) #define g_1(x) (rotr64((x), 19) ^ rotr64((x), 61) ^ ((x) >> 6)) #define k_0 k512 #define ch(x,y,z) ((z) ^ ((x) & ((y) ^ (z)))) #define parity(x,y,z) ((x) ^ (y) ^ (z)) #define maj(x,y,z) (((x) & (y)) | ((z) & ((x) ^ (y)))) // round transforms for SHA256 and SHA512 compression functions #define vf(n,i) v[(n - i) & 7] #define hf(i) (p[i & 15] += \ g_1(p[(i + 14) & 15]) + p[(i + 9) & 15] + g_0(p[(i + 1) & 15])) #define v_cycle(i,j) \ vf(7,i) += (j ? hf(i) : p[i]) + k_0[i+j] \ + s_1(vf(4,i)) + ch(vf(4,i),vf(5,i),vf(6,i)); \ vf(3,i) += vf(7,i); \ vf(7,i) += s_0(vf(0,i))+ maj(vf(0,i),vf(1,i),vf(2,i)) __device__ VOID_RETURN sha512_compile(sha512_ctx ctx[1]) { uint_64t v[8], *p = ctx->wbuf; uint_32t j; memcpy(v, ctx->hash, 8 * sizeof(uint_64t)); for(j = 0; j < 80; j += 16) { v_cycle( 0, j); v_cycle( 1, j); v_cycle( 2, j); v_cycle( 3, j); v_cycle( 4, j); v_cycle( 5, j); v_cycle( 6, j); v_cycle( 7, j); v_cycle( 8, j); v_cycle( 9, j); v_cycle(10, j); v_cycle(11, j); v_cycle(12, j); v_cycle(13, j); v_cycle(14, j); v_cycle(15, j); } ctx->hash[0] += v[0]; ctx->hash[1] += v[1]; ctx->hash[2] += v[2]; ctx->hash[3] += v[3]; ctx->hash[4] += v[4]; ctx->hash[5] += v[5]; ctx->hash[6] += v[6]; ctx->hash[7] += v[7]; } __device__ void sha_end2(unsigned char hval[], sha512_ctx ctx[1], const unsigned int hlen) { uint_32t i = (uint_32t)(ctx->count[0] & SHA512_MASK); /* put bytes in the buffer in an order in which references to */ /* 32-bit words will put bytes with lower addresses into the */ /* top of 32 bit words on BOTH big and little endian machines */ bsw_64(ctx->wbuf, (i + 7) >> 3); /* we now need to mask valid bytes and add the padding which is */ /* a single 1 bit and as many zero bits as necessary. Note that */ /* we can always add the first padding byte here because the */ /* buffer always has at least one empty slot */ ctx->wbuf[i >> 3] &= li_64(ffffffffffffff00) << 8 * (~i & 7); ctx->wbuf[i >> 3] |= li_64(0000000000000080) << 8 * (~i & 7); /* we need 17 or more empty byte positions, one for the padding */ /* byte (above) and sixteen for the length count. 
If there is */ /* not enough space pad and empty the buffer */ if(i > SHA512_BLOCK_SIZE - 17) { if(i < 120) ctx->wbuf[15] = 0; sha512_compile(ctx); i = 0; } else i = (i >> 3) + 1; while(i < 14) ctx->wbuf[i++] = 0; /* the following 64-bit length fields are assembled in the */ /* wrong byte order on little endian machines but this is */ /* corrected later since they are only ever used as 64-bit */ /* word values. */ ctx->wbuf[14] = (ctx->count[1] << 3) | (ctx->count[0] >> 61); ctx->wbuf[15] = ctx->count[0] << 3; sha512_compile(ctx); /* extract the hash value as bytes in case the hash buffer is */ /* misaligned for 32-bit words */ for(i = 0; i < hlen; ++i) hval[i] = (unsigned char)(ctx->hash[i >> 3] >> (8 * (~i & 7))); } __device__ VOID_RETURN sha512_begin(sha512_ctx ctx[1]) { ctx->count[0] = ctx->count[1] = 0; memcpy(ctx->hash, i512, 8 * sizeof(uint_64t)); } __device__ VOID_RETURN sha512_end(unsigned char hval[], sha512_ctx ctx[1]) { sha_end2(hval, ctx, SHA512_DIGEST_SIZE); } /* Compile 128 bytes of hash data into SHA256 digest value */ /* NOTE: this routine assumes that the byte order in the */ /* ctx->wbuf[] at this point is in such an order that low */ /* address bytes in the ORIGINAL byte stream placed in this */ /* buffer will now go to the high end of words on BOTH big */ /* and little endian systems */ __device__ VOID_RETURN sha512_hash(const unsigned char data[], unsigned long len, sha512_ctx ctx[1]) { uint_32t pos = (uint_32t)(ctx->count[0] & SHA512_MASK), space = SHA512_BLOCK_SIZE - pos; const unsigned char *sp = data; if((ctx->count[0] += len) < len) ++(ctx->count[1]); while(len >= space) /* tranfer whole blocks while possible */ { memcpy(((unsigned char*)ctx->wbuf) + pos, sp, space); sp += space; len -= space; space = SHA512_BLOCK_SIZE; pos = 0; bsw_64(ctx->wbuf, SHA512_BLOCK_SIZE >> 3); sha512_compile(ctx); } memcpy(((unsigned char*)ctx->wbuf) + pos, sp, len); } __device__ void hmac_truncate ( char *d1, /* data to be truncated */ char *d2, /* truncated data */ int len /* length in bytes to keep */ ) { int i; for (i = 0; i < len; i++) d2[i] = d1[i]; } __device__ void hmac_sha512 ( char *k, /* secret key */ int lk, /* length of the key in bytes */ char *d, /* data */ int ld, /* length of data in bytes */ char *out, /* output buffer, at least "t" bytes */ int t ) { sha512_ctx ictx, octx; char isha[SHA512_DIGESTSIZE], osha[SHA512_DIGESTSIZE]; char key[SHA512_DIGESTSIZE]; char buf[SHA512_BLOCKSIZE]; int i; /* If the key is longer than the hash algorithm block size, let key = sha512(key), as per HMAC specifications. */ if (lk > SHA512_BLOCKSIZE) { sha512_ctx tctx; sha512_begin (&tctx); sha512_hash ((unsigned char *) k, lk, &tctx); sha512_end ((unsigned char *) key, &tctx); k = key; lk = SHA512_DIGESTSIZE; burn (&tctx, sizeof(tctx)); // Prevent leaks } /**** Inner Digest ****/ sha512_begin (&ictx); /* Pad the key for inner digest */ for (i = 0; i < lk; ++i) buf[i] = (char) (k[i] ^ 0x36); for (i = lk; i < SHA512_BLOCKSIZE; ++i) buf[i] = 0x36; sha512_hash ((unsigned char *) buf, SHA512_BLOCKSIZE, &ictx); sha512_hash ((unsigned char *) d, ld, &ictx); sha512_end ((unsigned char *) isha, &ictx); /**** Outer Digest ****/ sha512_begin (&octx); for (i = 0; i < lk; ++i) buf[i] = (char) (k[i] ^ 0x5C); for (i = lk; i < SHA512_BLOCKSIZE; ++i) buf[i] = 0x5C; sha512_hash ((unsigned char *) buf, SHA512_BLOCKSIZE, &octx); sha512_hash ((unsigned char *) isha, SHA512_DIGESTSIZE, &octx); sha512_end ((unsigned char *) osha, &octx); /* truncate and print the results */ t = t > SHA512_DIGESTSIZE ? 
SHA512_DIGESTSIZE : t; hmac_truncate (osha, out, t); /* Prevent leaks */ burn (&ictx, sizeof(ictx)); burn (&octx, sizeof(octx)); burn (isha, sizeof(isha)); burn (osha, sizeof(osha)); burn (buf, sizeof(buf)); burn (key, sizeof(key)); } __device__ void derive_u_sha512 (char *pwd, int pwd_len, char *salt, int salt_len, int iterations, char *u, int b) { char j[SHA512_DIGESTSIZE], k[SHA512_DIGESTSIZE]; char init[128]; char counter[4]; int c, i; /* iteration 1 */ memset (counter, 0, 4); counter[3] = (char) b; memcpy (init, salt, salt_len); /* salt */ memcpy (&init[salt_len], counter, 4); /* big-endian block number */ /* remaining iterations */ for (c = 0; c < iterations; c++) { hmac_sha512 (pwd, pwd_len, !c ? init : j, !c ? salt_len + 4 : SHA512_DIGESTSIZE, !c ? j : k, SHA512_DIGESTSIZE); if (!c) memcpy (u, j, SHA512_DIGESTSIZE); else for (i = 0; i < SHA512_DIGESTSIZE; i++) { u[i] ^= k[i]; j[i] = k[i]; } } /* Prevent possible leaks. */ burn (j, sizeof(j)); burn (k, sizeof(k)); } __device__ void derive_key_sha512 (char *pwd, int pwd_len, char *salt, int salt_len, int iterations, char *dk, int dklen) { char u[SHA512_DIGESTSIZE]; int b, l, r; if (dklen % SHA512_DIGESTSIZE) { l = 1 + dklen / SHA512_DIGESTSIZE; } else { l = dklen / SHA512_DIGESTSIZE; } r = dklen - (l - 1) * SHA512_DIGESTSIZE; /* first l - 1 blocks */ for (b = 1; b <= l; b++) { derive_u_sha512 (pwd, pwd_len, salt, salt_len, iterations, u, b); if (b < l) { memcpy (dk, u, SHA512_DIGESTSIZE); dk += SHA512_DIGESTSIZE; } } /* last block */ memcpy (dk, u, r); /* Prevent possible leaks. */ burn (u, sizeof(u)); } __device__ void sha1_begin(sha1_ctx ctx[1]) { ctx->count[0] = ctx->count[1] = 0; ctx->hash[0] = 0x67452301; ctx->hash[1] = 0xefcdab89; ctx->hash[2] = 0x98badcfe; ctx->hash[3] = 0x10325476; ctx->hash[4] = 0xc3d2e1f0; } #define q(v,n) v##n #define one_cycle(v,a,b,c,d,e,f,k,h) \ q(v,e) += rotr32(q(v,a),27) + \ f(q(v,b),q(v,c),q(v,d)) + k + h; \ q(v,b) = rotr32(q(v,b), 2) #define five_cycle(v,f,k,i) \ one_cycle(v, 0,1,2,3,4, f,k,hf(i )); \ one_cycle(v, 4,0,1,2,3, f,k,hf(i+1)); \ one_cycle(v, 3,4,0,1,2, f,k,hf(i+2)); \ one_cycle(v, 2,3,4,0,1, f,k,hf(i+3)); \ one_cycle(v, 1,2,3,4,0, f,k,hf(i+4)) __device__ void sha1_compile(sha1_ctx ctx[1]) { sha1_32t *w = ctx->wbuf; sha1_32t v0, v1, v2, v3, v4; v0 = ctx->hash[0]; v1 = ctx->hash[1]; v2 = ctx->hash[2]; v3 = ctx->hash[3]; v4 = ctx->hash[4]; #undef hf #define hf(i) w[i] five_cycle(v, ch, 0x5a827999, 0); five_cycle(v, ch, 0x5a827999, 5); five_cycle(v, ch, 0x5a827999, 10); one_cycle(v,0,1,2,3,4, ch, 0x5a827999, hf(15)); \ #undef hf #define hf(i) (w[(i) & 15] = rotl32( \ w[((i) + 13) & 15] ^ w[((i) + 8) & 15] \ ^ w[((i) + 2) & 15] ^ w[(i) & 15], 1)) one_cycle(v,4,0,1,2,3, ch, 0x5a827999, hf(16)); one_cycle(v,3,4,0,1,2, ch, 0x5a827999, hf(17)); one_cycle(v,2,3,4,0,1, ch, 0x5a827999, hf(18)); one_cycle(v,1,2,3,4,0, ch, 0x5a827999, hf(19)); five_cycle(v, parity, 0x6ed9eba1, 20); five_cycle(v, parity, 0x6ed9eba1, 25); five_cycle(v, parity, 0x6ed9eba1, 30); five_cycle(v, parity, 0x6ed9eba1, 35); five_cycle(v, maj, 0x8f1bbcdc, 40); five_cycle(v, maj, 0x8f1bbcdc, 45); five_cycle(v, maj, 0x8f1bbcdc, 50); five_cycle(v, maj, 0x8f1bbcdc, 55); five_cycle(v, parity, 0xca62c1d6, 60); five_cycle(v, parity, 0xca62c1d6, 65); five_cycle(v, parity, 0xca62c1d6, 70); five_cycle(v, parity, 0xca62c1d6, 75); ctx->hash[0] += v0; ctx->hash[1] += v1; ctx->hash[2] += v2; ctx->hash[3] += v3; ctx->hash[4] += v4; } /* SHA1 hash data in an array of bytes into hash buffer and */ /* call the hash_compile function as required. 
*/ __device__ void sha1_hash(const unsigned char data[], u32 len, sha1_ctx ctx[1]) { sha1_32t pos = (sha1_32t)(ctx->count[0] & SHA1_MASK), space = SHA1_BLOCK_SIZE - pos; const unsigned char *sp = data; if((ctx->count[0] += len) < len) ++(ctx->count[1]); while(len >= space) /* tranfer whole blocks if possible */ { memcpy(((unsigned char*)ctx->wbuf) + pos, sp, space); sp += space; len -= space; space = SHA1_BLOCK_SIZE; pos = 0; bsw_32(ctx->wbuf, SHA1_BLOCK_SIZE >> 2); sha1_compile(ctx); } memcpy(((unsigned char*)ctx->wbuf) + pos, sp, len); } /* SHA1 final padding and digest calculation */ __device__ void sha1_end(unsigned char hval[], sha1_ctx ctx[1]) { sha1_32t i = (sha1_32t)(ctx->count[0] & SHA1_MASK); /* put bytes in the buffer in an order in which references to */ /* 32-bit words will put bytes with lower addresses into the */ /* top of 32 bit words on BOTH big and little endian machines */ bsw_32(ctx->wbuf, (i + 3) >> 2); /* we now need to mask valid bytes and add the padding which is */ /* a single 1 bit and as many zero bits as necessary. Note that */ /* we can always add the first padding byte here because the */ /* buffer always has at least one empty slot */ ctx->wbuf[i >> 2] &= 0xffffff80 << 8 * (~i & 3); ctx->wbuf[i >> 2] |= 0x00000080 << 8 * (~i & 3); /* we need 9 or more empty positions, one for the padding byte */ /* (above) and eight for the length count. If there is not */ /* enough space, pad and empty the buffer */ if(i > SHA1_BLOCK_SIZE - 9) { if(i < 60) ctx->wbuf[15] = 0; sha1_compile(ctx); i = 0; } else /* compute a word index for the empty buffer positions */ i = (i >> 2) + 1; while(i < 14) /* and zero pad all but last two positions */ ctx->wbuf[i++] = 0; /* the following 32-bit length fields are assembled in the */ /* wrong byte order on little endian machines but this is */ /* corrected later since they are only ever used as 32-bit */ /* word values. */ ctx->wbuf[14] = (ctx->count[1] << 3) | (ctx->count[0] >> 29); ctx->wbuf[15] = ctx->count[0] << 3; sha1_compile(ctx); /* extract the hash value as bytes in case the hash buffer is */ /* misaligned for 32-bit words */ for(i = 0; i < SHA1_DIGEST_SIZE; ++i) hval[i] = (unsigned char)(ctx->hash[i >> 2] >> (8 * (~i & 3))); } /* Deprecated/legacy */ __device__ void hmac_sha1 ( char *k, /* secret key */ int lk, /* length of the key in bytes */ char *d, /* data */ int ld, /* length of data in bytes */ char *out, /* output buffer, at least "t" bytes */ int t ) { sha1_ctx ictx, octx; char isha[SHA1_DIGESTSIZE], osha[SHA1_DIGESTSIZE]; char key[SHA1_DIGESTSIZE]; char buf[SHA1_BLOCKSIZE]; int i; /* If the key is longer than the hash algorithm block size, let key = sha1(key), as per HMAC specifications. 
*/ if (lk > SHA1_BLOCKSIZE) { sha1_ctx tctx; sha1_begin (&tctx); sha1_hash ((unsigned char *) k, lk, &tctx); sha1_end ((unsigned char *) key, &tctx); k = key; lk = SHA1_DIGESTSIZE; burn (&tctx, sizeof(tctx)); // Prevent leaks } /**** Inner Digest ****/ sha1_begin (&ictx); /* Pad the key for inner digest */ for (i = 0; i < lk; ++i) buf[i] = (char) (k[i] ^ 0x36); for (i = lk; i < SHA1_BLOCKSIZE; ++i) buf[i] = 0x36; sha1_hash ((unsigned char *) buf, SHA1_BLOCKSIZE, &ictx); sha1_hash ((unsigned char *) d, ld, &ictx); sha1_end ((unsigned char *) isha, &ictx); /**** Outer Digest ****/ sha1_begin (&octx); for (i = 0; i < lk; ++i) buf[i] = (char) (k[i] ^ 0x5C); for (i = lk; i < SHA1_BLOCKSIZE; ++i) buf[i] = 0x5C; sha1_hash ((unsigned char *) buf, SHA1_BLOCKSIZE, &octx); sha1_hash ((unsigned char *) isha, SHA1_DIGESTSIZE, &octx); sha1_end ((unsigned char *) osha, &octx); /* truncate and print the results */ t = t > SHA1_DIGESTSIZE ? SHA1_DIGESTSIZE : t; hmac_truncate (osha, out, t); /* Prevent leaks */ burn (&ictx, sizeof(ictx)); burn (&octx, sizeof(octx)); burn (isha, sizeof(isha)); burn (osha, sizeof(osha)); burn (buf, sizeof(buf)); burn (key, sizeof(key)); } /* Deprecated/legacy */ __device__ void derive_u_sha1 (char *pwd, int pwd_len, char *salt, int salt_len, int iterations, char *u, int b) { char j[SHA1_DIGESTSIZE], k[SHA1_DIGESTSIZE]; char init[128]; char counter[4]; int c, i; /* iteration 1 */ memset (counter, 0, 4); counter[3] = (char) b; memcpy (init, salt, salt_len); /* salt */ memcpy (&init[salt_len], counter, 4); /* big-endian block number */ /* remaining iterations */ for (c = 0; c < iterations; c++) { hmac_sha1 (pwd, pwd_len, !c ? init : j, !c ? salt_len + 4 : SHA1_DIGESTSIZE, !c ? j : k, SHA1_DIGESTSIZE); if (!c) memcpy (u, j, SHA1_DIGESTSIZE); else for (i = 0; i < SHA1_DIGESTSIZE; i++) { u[i] ^= k[i]; j[i] = k[i]; } } /* Prevent possible leaks. */ burn (j, sizeof(j)); burn (k, sizeof(k)); } /* Deprecated/legacy */ __device__ void derive_key_sha1 (char *pwd, int pwd_len, char *salt, int salt_len, int iterations, char *dk, int dklen) { char u[SHA1_DIGESTSIZE]; int b, l, r; if (dklen % SHA1_DIGESTSIZE) { l = 1 + dklen / SHA1_DIGESTSIZE; } else { l = dklen / SHA1_DIGESTSIZE; } r = dklen - (l - 1) * SHA1_DIGESTSIZE; /* first l - 1 blocks */ for (b = 1; b <= l; b++) { derive_u_sha1 (pwd, pwd_len, salt, salt_len, iterations, u, b); if (b < l) { memcpy (dk, u, SHA1_DIGESTSIZE); dk += SHA1_DIGESTSIZE; } } /* last block */ memcpy (dk, u, r); /* Prevent possible leaks. */ burn (u, sizeof(u)); } __device__ void WHIRLPOOL_init(struct NESSIEstruct * const structpointer) { int i; memset(structpointer->bitLength, 0, 32); structpointer->bufferBits = structpointer->bufferPos = 0; structpointer->buffer[0] = 0; /* it's only necessary to cleanup buffer[bufferPos] */ for (i = 0; i < 8; i++) { structpointer->hash[i] = 0L; /* initial value */ } } /** * The core Whirlpool transform. 
*/ __device__ void processBuffer(struct NESSIEstruct * const structpointer, PTHREAD_DATA td) { int i, r; #define K td->K #define block td->block #define state td->state #define L td->L #define buffer td->buffer /* * map the buffer to a block: */ for (i = 0; i < 8; i++, buffer += 8) { block[i] = (((u64)buffer[0] ) << 56) ^ (((u64)buffer[1] & 0xffL) << 48) ^ (((u64)buffer[2] & 0xffL) << 40) ^ (((u64)buffer[3] & 0xffL) << 32) ^ (((u64)buffer[4] & 0xffL) << 24) ^ (((u64)buffer[5] & 0xffL) << 16) ^ (((u64)buffer[6] & 0xffL) << 8) ^ (((u64)buffer[7] & 0xffL) ); } /* * compute and apply K^0 to the cipher state: */ state[0] = block[0] ^ (K[0] = structpointer->hash[0]); state[1] = block[1] ^ (K[1] = structpointer->hash[1]); state[2] = block[2] ^ (K[2] = structpointer->hash[2]); state[3] = block[3] ^ (K[3] = structpointer->hash[3]); state[4] = block[4] ^ (K[4] = structpointer->hash[4]); state[5] = block[5] ^ (K[5] = structpointer->hash[5]); state[6] = block[6] ^ (K[6] = structpointer->hash[6]); state[7] = block[7] ^ (K[7] = structpointer->hash[7]); /* * iterate over all rounds: */ for (r = 1; r <= R; r++) { /* * compute K^r from K^{r-1}: */ L[0] = C0[(int)(K[0] >> 56) ] ^ C1[(int)(K[7] >> 48) & 0xff] ^ C2[(int)(K[6] >> 40) & 0xff] ^ C3[(int)(K[5] >> 32) & 0xff] ^ C4[(int)(K[4] >> 24) & 0xff] ^ C5[(int)(K[3] >> 16) & 0xff] ^ C6[(int)(K[2] >> 8) & 0xff] ^ C7[(int)(K[1] ) & 0xff] ^ rc[r]; L[1] = C0[(int)(K[1] >> 56) ] ^ C1[(int)(K[0] >> 48) & 0xff] ^ C2[(int)(K[7] >> 40) & 0xff] ^ C3[(int)(K[6] >> 32) & 0xff] ^ C4[(int)(K[5] >> 24) & 0xff] ^ C5[(int)(K[4] >> 16) & 0xff] ^ C6[(int)(K[3] >> 8) & 0xff] ^ C7[(int)(K[2] ) & 0xff]; L[2] = C0[(int)(K[2] >> 56) ] ^ C1[(int)(K[1] >> 48) & 0xff] ^ C2[(int)(K[0] >> 40) & 0xff] ^ C3[(int)(K[7] >> 32) & 0xff] ^ C4[(int)(K[6] >> 24) & 0xff] ^ C5[(int)(K[5] >> 16) & 0xff] ^ C6[(int)(K[4] >> 8) & 0xff] ^ C7[(int)(K[3] ) & 0xff]; L[3] = C0[(int)(K[3] >> 56) ] ^ C1[(int)(K[2] >> 48) & 0xff] ^ C2[(int)(K[1] >> 40) & 0xff] ^ C3[(int)(K[0] >> 32) & 0xff] ^ C4[(int)(K[7] >> 24) & 0xff] ^ C5[(int)(K[6] >> 16) & 0xff] ^ C6[(int)(K[5] >> 8) & 0xff] ^ C7[(int)(K[4] ) & 0xff]; L[4] = C0[(int)(K[4] >> 56) ] ^ C1[(int)(K[3] >> 48) & 0xff] ^ C2[(int)(K[2] >> 40) & 0xff] ^ C3[(int)(K[1] >> 32) & 0xff] ^ C4[(int)(K[0] >> 24) & 0xff] ^ C5[(int)(K[7] >> 16) & 0xff] ^ C6[(int)(K[6] >> 8) & 0xff] ^ C7[(int)(K[5] ) & 0xff]; L[5] = C0[(int)(K[5] >> 56) ] ^ C1[(int)(K[4] >> 48) & 0xff] ^ C2[(int)(K[3] >> 40) & 0xff] ^ C3[(int)(K[2] >> 32) & 0xff] ^ C4[(int)(K[1] >> 24) & 0xff] ^ C5[(int)(K[0] >> 16) & 0xff] ^ C6[(int)(K[7] >> 8) & 0xff] ^ C7[(int)(K[6] ) & 0xff]; L[6] = C0[(int)(K[6] >> 56) ] ^ C1[(int)(K[5] >> 48) & 0xff] ^ C2[(int)(K[4] >> 40) & 0xff] ^ C3[(int)(K[3] >> 32) & 0xff] ^ C4[(int)(K[2] >> 24) & 0xff] ^ C5[(int)(K[1] >> 16) & 0xff] ^ C6[(int)(K[0] >> 8) & 0xff] ^ C7[(int)(K[7] ) & 0xff]; L[7] = C0[(int)(K[7] >> 56) ] ^ C1[(int)(K[6] >> 48) & 0xff] ^ C2[(int)(K[5] >> 40) & 0xff] ^ C3[(int)(K[4] >> 32) & 0xff] ^ C4[(int)(K[3] >> 24) & 0xff] ^ C5[(int)(K[2] >> 16) & 0xff] ^ C6[(int)(K[1] >> 8) & 0xff] ^ C7[(int)(K[0] ) & 0xff]; K[0] = L[0]; K[1] = L[1]; K[2] = L[2]; K[3] = L[3]; K[4] = L[4]; K[5] = L[5]; K[6] = L[6]; K[7] = L[7]; /* * apply the r-th round transformation: */ L[0] = C0[(int)(state[0] >> 56) ] ^ C1[(int)(state[7] >> 48) & 0xff] ^ C2[(int)(state[6] >> 40) & 0xff] ^ C3[(int)(state[5] >> 32) & 0xff] ^ C4[(int)(state[4] >> 24) & 0xff] ^ C5[(int)(state[3] >> 16) & 0xff] ^ C6[(int)(state[2] >> 8) & 0xff] ^ C7[(int)(state[1] ) & 0xff] ^ K[0]; L[1] = C0[(int)(state[1] >> 56) 
] ^ C1[(int)(state[0] >> 48) & 0xff] ^ C2[(int)(state[7] >> 40) & 0xff] ^ C3[(int)(state[6] >> 32) & 0xff] ^ C4[(int)(state[5] >> 24) & 0xff] ^ C5[(int)(state[4] >> 16) & 0xff] ^ C6[(int)(state[3] >> 8) & 0xff] ^ C7[(int)(state[2] ) & 0xff] ^ K[1]; L[2] = C0[(int)(state[2] >> 56) ] ^ C1[(int)(state[1] >> 48) & 0xff] ^ C2[(int)(state[0] >> 40) & 0xff] ^ C3[(int)(state[7] >> 32) & 0xff] ^ C4[(int)(state[6] >> 24) & 0xff] ^ C5[(int)(state[5] >> 16) & 0xff] ^ C6[(int)(state[4] >> 8) & 0xff] ^ C7[(int)(state[3] ) & 0xff] ^ K[2]; L[3] = C0[(int)(state[3] >> 56) ] ^ C1[(int)(state[2] >> 48) & 0xff] ^ C2[(int)(state[1] >> 40) & 0xff] ^ C3[(int)(state[0] >> 32) & 0xff] ^ C4[(int)(state[7] >> 24) & 0xff] ^ C5[(int)(state[6] >> 16) & 0xff] ^ C6[(int)(state[5] >> 8) & 0xff] ^ C7[(int)(state[4] ) & 0xff] ^ K[3]; L[4] = C0[(int)(state[4] >> 56) ] ^ C1[(int)(state[3] >> 48) & 0xff] ^ C2[(int)(state[2] >> 40) & 0xff] ^ C3[(int)(state[1] >> 32) & 0xff] ^ C4[(int)(state[0] >> 24) & 0xff] ^ C5[(int)(state[7] >> 16) & 0xff] ^ C6[(int)(state[6] >> 8) & 0xff] ^ C7[(int)(state[5] ) & 0xff] ^ K[4]; L[5] = C0[(int)(state[5] >> 56) ] ^ C1[(int)(state[4] >> 48) & 0xff] ^ C2[(int)(state[3] >> 40) & 0xff] ^ C3[(int)(state[2] >> 32) & 0xff] ^ C4[(int)(state[1] >> 24) & 0xff] ^ C5[(int)(state[0] >> 16) & 0xff] ^ C6[(int)(state[7] >> 8) & 0xff] ^ C7[(int)(state[6] ) & 0xff] ^ K[5]; L[6] = C0[(int)(state[6] >> 56) ] ^ C1[(int)(state[5] >> 48) & 0xff] ^ C2[(int)(state[4] >> 40) & 0xff] ^ C3[(int)(state[3] >> 32) & 0xff] ^ C4[(int)(state[2] >> 24) & 0xff] ^ C5[(int)(state[1] >> 16) & 0xff] ^ C6[(int)(state[0] >> 8) & 0xff] ^ C7[(int)(state[7] ) & 0xff] ^ K[6]; L[7] = C0[(int)(state[7] >> 56) ] ^ C1[(int)(state[6] >> 48) & 0xff] ^ C2[(int)(state[5] >> 40) & 0xff] ^ C3[(int)(state[4] >> 32) & 0xff] ^ C4[(int)(state[3] >> 24) & 0xff] ^ C5[(int)(state[2] >> 16) & 0xff] ^ C6[(int)(state[1] >> 8) & 0xff] ^ C7[(int)(state[0] ) & 0xff] ^ K[7]; state[0] = L[0]; state[1] = L[1]; state[2] = L[2]; state[3] = L[3]; state[4] = L[4]; state[5] = L[5]; state[6] = L[6]; state[7] = L[7]; } /* * apply the Miyaguchi-Preneel compression function: */ structpointer->hash[0] ^= state[0] ^ block[0]; structpointer->hash[1] ^= state[1] ^ block[1]; structpointer->hash[2] ^= state[2] ^ block[2]; structpointer->hash[3] ^= state[3] ^ block[3]; structpointer->hash[4] ^= state[4] ^ block[4]; structpointer->hash[5] ^= state[5] ^ block[5]; structpointer->hash[6] ^= state[6] ^ block[6]; structpointer->hash[7] ^= state[7] ^ block[7]; } #undef buffer #undef K #undef block #undef state #undef L /** * Delivers input data to the hashing algorithm. * * @param source plaintext data to hash. * @param sourceBits how many bits of plaintext to process. * * This method maintains the invariant: bufferBits < DIGESTBITS */ __device__ void WHIRLPOOL_add(const unsigned char * const source, u32 sourceBits, struct NESSIEstruct * const structpointer, PTHREAD_DATA td) { /* sourcePos | +-------+-------+------- ||||||||||||||||||||| source +-------+-------+------- +-------+-------+-------+-------+-------+------- |||||||||||||||||||||| buffer +-------+-------+-------+-------+-------+------- | bufferPos */ int sourcePos = 0; /* index of leftmost source u8 containing data (1 to 8 bits). */ int sourceGap = (8 - ((int)sourceBits & 7)) & 7; /* space on source[sourcePos]. */ int bufferRem = structpointer->bufferBits & 7; /* occupied bits on buffer[bufferPos]. 
*/ int i; u32 b, carry; u8 *buffer = structpointer->buffer; u8 *bitLength = structpointer->bitLength; int bufferBits = structpointer->bufferBits; int bufferPos = structpointer->bufferPos; /* * tally the length of the added data: */ u64 value = sourceBits; for (i = 31, carry = 0; i >= 0 && (carry != 0 || value != LL(0)); i--) { carry += bitLength[i] + ((u32)value & 0xff); bitLength[i] = (u8)carry; carry >>= 8; value >>= 8; } /* * process data in chunks of 8 bits (a more efficient approach would be to take whole-word chunks): */ while (sourceBits > 8) { /* N.B. at least source[sourcePos] and source[sourcePos+1] contain data. */ /* * take a byte from the source: */ b = ((source[sourcePos] << sourceGap) & 0xff) | ((source[sourcePos + 1] & 0xff) >> (8 - sourceGap)); /* * process this byte: */ buffer[bufferPos++] |= (u8)(b >> bufferRem); bufferBits += 8 - bufferRem; /* bufferBits = 8*bufferPos; */ if (bufferBits == DIGESTBITS) { /* * process data block: */ processBuffer(structpointer, td); /* * reset buffer: */ bufferBits = bufferPos = 0; } buffer[bufferPos] = (u8) (b << (8 - bufferRem)); bufferBits += bufferRem; /* * proceed to remaining data: */ sourceBits -= 8; sourcePos++; } /* now 0 <= sourceBits <= 8; * furthermore, all data (if any is left) is in source[sourcePos]. */ if (sourceBits > 0) { b = (source[sourcePos] << sourceGap) & 0xff; /* bits are left-justified on b. */ /* * process the remaining bits: */ buffer[bufferPos] |= b >> bufferRem; } else { b = 0; } if (bufferRem + sourceBits < 8) { /* * all remaining data fits on buffer[bufferPos], * and there still remains some space. */ bufferBits += sourceBits; } else { /* * buffer[bufferPos] is full: */ bufferPos++; bufferBits += 8 - bufferRem; /* bufferBits = 8*bufferPos; */ sourceBits -= 8 - bufferRem; /* now 0 <= sourceBits < 8; * furthermore, all data (if any is left) is in source[sourcePos]. */ if (bufferBits == DIGESTBITS) { /* * process data block: */ processBuffer(structpointer, td); /* * reset buffer: */ bufferBits = bufferPos = 0; } buffer[bufferPos] = (u8) (b << (8 - bufferRem)); bufferBits += (int)sourceBits; } structpointer->bufferBits = bufferBits; structpointer->bufferPos = bufferPos; } /** * Get the hash value from the hashing state. * * This method uses the invariant: bufferBits < DIGESTBITS */ __device__ void WHIRLPOOL_finalize(struct NESSIEstruct * const structpointer, unsigned char * const result, PTHREAD_DATA td) { int i; u8 *buffer = structpointer->buffer; u8 *bitLength = structpointer->bitLength; int bufferBits = structpointer->bufferBits; int bufferPos = structpointer->bufferPos; u8 *digest = result; /* * append a '1'-bit: */ buffer[bufferPos] |= 0x80U >> (bufferBits & 7); bufferPos++; /* all remaining bits on the current u8 are set to zero. 
*/ /* * pad with zero bits to complete (N*WBLOCKBITS - LENGTHBITS) bits: */ if (bufferPos > WBLOCKBYTES - LENGTHBYTES) { if (bufferPos < WBLOCKBYTES) { memset(&buffer[bufferPos], 0, WBLOCKBYTES - bufferPos); } /* * process data block: */ processBuffer(structpointer, td); /* * reset buffer: */ bufferPos = 0; } if (bufferPos < WBLOCKBYTES - LENGTHBYTES) { memset(&buffer[bufferPos], 0, (WBLOCKBYTES - LENGTHBYTES) - bufferPos); } bufferPos = WBLOCKBYTES - LENGTHBYTES; /* * append bit length of hashed data: */ memcpy(&buffer[WBLOCKBYTES - LENGTHBYTES], bitLength, LENGTHBYTES); /* * process data block: */ processBuffer(structpointer, td); /* * return the completed message digest: */ for (i = 0; i < DIGESTBYTES/8; i++) { digest[0] = (u8)(structpointer->hash[i] >> 56); digest[1] = (u8)(structpointer->hash[i] >> 48); digest[2] = (u8)(structpointer->hash[i] >> 40); digest[3] = (u8)(structpointer->hash[i] >> 32); digest[4] = (u8)(structpointer->hash[i] >> 24); digest[5] = (u8)(structpointer->hash[i] >> 16); digest[6] = (u8)(structpointer->hash[i] >> 8); digest[7] = (u8)(structpointer->hash[i] ); digest += 8; } structpointer->bufferBits = bufferBits; structpointer->bufferPos = bufferPos; } __device__ void hmac_whirlpool ( char *k, /* secret key */ int lk, /* length of the key in bytes */ char *d, /* data */ int ld, /* length of data in bytes */ char *out, /* output buffer, at least "t" bytes */ int t, PTHREAD_DATA td ) { #define ictx td->ictx #define octx td->octx #define iwhi td->iwhi #define owhi td->owhi #define key td->key #define buf td->buf #define tctx td->tctx int i; /* If the key is longer than the hash algorithm block size, let key = whirlpool(key), as per HMAC specifications. */ if (lk > WHIRLPOOL_BLOCKSIZE) { WHIRLPOOL_init (&tctx); WHIRLPOOL_add ((unsigned char *) k, lk * 8, &tctx, td); WHIRLPOOL_finalize (&tctx, (unsigned char *) key, td); k = key; lk = WHIRLPOOL_DIGESTSIZE; burn (&tctx, sizeof(tctx)); // Prevent leaks } /**** Inner Digest ****/ WHIRLPOOL_init (&ictx); /* Pad the key for inner digest */ for (i = 0; i < lk; ++i) buf[i] = (char) (k[i] ^ 0x36); for (i = lk; i < WHIRLPOOL_BLOCKSIZE; ++i) buf[i] = 0x36; WHIRLPOOL_add ((unsigned char *) buf, WHIRLPOOL_BLOCKSIZE * 8, &ictx, td); WHIRLPOOL_add ((unsigned char *) d, ld * 8, &ictx, td); WHIRLPOOL_finalize (&ictx, (unsigned char *) iwhi, td); /**** Outer Digest ****/ WHIRLPOOL_init (&octx); for (i = 0; i < lk; ++i) buf[i] = (char) (k[i] ^ 0x5C); for (i = lk; i < WHIRLPOOL_BLOCKSIZE; ++i) buf[i] = 0x5C; WHIRLPOOL_add ((unsigned char *) buf, WHIRLPOOL_BLOCKSIZE * 8, &octx, td); WHIRLPOOL_add ((unsigned char *) iwhi, WHIRLPOOL_DIGESTSIZE * 8, &octx, td); WHIRLPOOL_finalize (&octx, (unsigned char *) owhi, td); /* truncate and print the results */ t = t > WHIRLPOOL_DIGESTSIZE ? WHIRLPOOL_DIGESTSIZE : t; hmac_truncate (owhi, out, t); /* Prevent possible leaks. 
*/ burn (&ictx, sizeof(ictx)); burn (&octx, sizeof(octx)); burn (owhi, sizeof(owhi)); burn (iwhi, sizeof(iwhi)); burn (buf, sizeof(buf)); burn (key, sizeof(key)); } #undef ictx #undef octx #undef tctx #undef iwhi #undef owhi #undef key #undef buf __device__ void derive_u_whirlpool (char *pwd, int pwd_len, char *salt, int salt_len, int iterations, char *u, int b, PTHREAD_DATA td) { #define j td->j #define k td->k #define init td->init char counter[4]; int c, i; /* iteration 1 */ memset (counter, 0, 4); counter[3] = (char) b; memcpy (init, salt, salt_len); /* salt */ memcpy (&init[salt_len], counter, 4); /* big-endian block number */ /* remaining iterations */ for (c = 0; c < iterations; c++) { hmac_whirlpool (pwd, pwd_len, !c ? init : j, !c ? salt_len + 4 : WHIRLPOOL_DIGESTSIZE, !c ? j : k, WHIRLPOOL_DIGESTSIZE, td); if (!c) memcpy (u, j, WHIRLPOOL_DIGESTSIZE); else for (i = 0; i < WHIRLPOOL_DIGESTSIZE; i++) { u[i] ^= k[i]; j[i] = k[i]; } } /* Prevent possible leaks. */ burn (j, sizeof(j)); burn (k, sizeof(k)); } #undef j #undef k #undef init __device__ void derive_key_whirlpool (char *pwd, int pwd_len, char *salt, int salt_len, int iterations, char *dk, int dklen, PTHREAD_DATA td) { #define u td->u int b, l, r; if (dklen % WHIRLPOOL_DIGESTSIZE) { l = 1 + dklen / WHIRLPOOL_DIGESTSIZE; } else { l = dklen / WHIRLPOOL_DIGESTSIZE; } r = dklen - (l - 1) * WHIRLPOOL_DIGESTSIZE; /* first l - 1 blocks */ for (b = 1; b <= l; b++) { derive_u_whirlpool (pwd, pwd_len, salt, salt_len, iterations, u, b, td); if (b < l) { memcpy (dk, u, WHIRLPOOL_DIGESTSIZE); dk += WHIRLPOOL_DIGESTSIZE; } } /* last block */ memcpy (dk, u, r); /* Prevent possible leaks. */ burn (u, sizeof(u)); } #undef u __device__ int CipherGetBlockSize (int cipherId) { return CipherGet (cipherId) -> BlockSize; } __device__ int CipherGetKeyScheduleSize (int cipherId) { return CipherGet (cipherId) -> KeyScheduleSize; } // Returns sum of key schedule sizes of all ciphers of the EA __device__ int EAGetKeyScheduleSize (int ea) { int i = EAGetFirstCipher(ea); int size = CipherGetKeyScheduleSize (i); while (i = EAGetNextCipher(ea, i)) { size += CipherGetKeyScheduleSize (i); } return size; } #define vf1(x,r,c) (x) #define rf1(r,c) (r) #define rf2(r,c) ((8+r-c)&3) #define bval(x,n) ((uint_8t)((x) >> (8 * (n)))) #define four_tables(x,tab,vf,rf,c) \ ( tab[0][bval(vf(x,0,c),rf(0,c))] \ ^ tab[1][bval(vf(x,1,c),rf(1,c))] \ ^ tab[2][bval(vf(x,2,c),rf(2,c))] \ ^ tab[3][bval(vf(x,3,c),rf(3,c))]) #define t_use(m,n) t_##m##n #define ls_box(x,c) four_tables(x,t_use(f,l),vf1,rf2,c) #define kef8(k,i) \ { k[8*(i)+ 8] = ss[0] ^= ls_box(ss[7],3) ^ t_use(r,c)[i]; \ k[8*(i)+ 9] = ss[1] ^= ss[0]; \ k[8*(i)+10] = ss[2] ^= ss[1]; \ k[8*(i)+11] = ss[3] ^= ss[2]; \ } #define ke8(k,i) \ { kef8(k,i); \ k[8*(i)+12] = ss[4] ^= ls_box(ss[3],0); \ k[8*(i)+13] = ss[5] ^= ss[4]; \ k[8*(i)+14] = ss[6] ^= ss[5]; \ k[8*(i)+15] = ss[7] ^= ss[6]; \ } #define word_in(x,c) (*((uint_32t*)(x)+(c))) __device__ AES_RETURN aes_encrypt_key256(const unsigned char *key, aes_encrypt_ctx cx[1]) { uint_32t ss[8]; cx->ks[0] = ss[0] = word_in(key, 0); cx->ks[1] = ss[1] = word_in(key, 1); cx->ks[2] = ss[2] = word_in(key, 2); cx->ks[3] = ss[3] = word_in(key, 3); cx->ks[4] = ss[4] = word_in(key, 4); cx->ks[5] = ss[5] = word_in(key, 5); cx->ks[6] = ss[6] = word_in(key, 6); cx->ks[7] = ss[7] = word_in(key, 7); ke8(cx->ks, 0); ke8(cx->ks, 1); ke8(cx->ks, 2); ke8(cx->ks, 3); ke8(cx->ks, 4); ke8(cx->ks, 5); kef8(cx->ks, 6); cx->inf.l = 0; cx->inf.b[0] = 14 * 16; return EXIT_SUCCESS; } #define v(n,i) ((n) 
- (i) + 2 * ((i) & 3)) #define kdf8(k,i) \ { ss[0] ^= ls_box(ss[7],3) ^ t_use(r,c)[i]; k[v(56,(8*(i))+ 8)] = ff(ss[0]); \ ss[1] ^= ss[0]; k[v(56,(8*(i))+ 9)] = ff(ss[1]); \ ss[2] ^= ss[1]; k[v(56,(8*(i))+10)] = ff(ss[2]); \ ss[3] ^= ss[2]; k[v(56,(8*(i))+11)] = ff(ss[3]); \ ss[4] ^= ls_box(ss[3],0); k[v(56,(8*(i))+12)] = ff(ss[4]); \ ss[5] ^= ss[4]; k[v(56,(8*(i))+13)] = ff(ss[5]); \ ss[6] ^= ss[5]; k[v(56,(8*(i))+14)] = ff(ss[6]); \ ss[7] ^= ss[6]; k[v(56,(8*(i))+15)] = ff(ss[7]); \ } #define kd8(k,i) \ { ss[8] = ls_box(ss[7],3) ^ t_use(r,c)[i]; \ ss[0] ^= ss[8]; ss[8] = ff(ss[8]); k[v(56,(8*(i))+ 8)] = ss[8] ^= k[v(56,(8*(i)))]; \ ss[1] ^= ss[0]; k[v(56,(8*(i))+ 9)] = ss[8] ^= k[v(56,(8*(i))+ 1)]; \ ss[2] ^= ss[1]; k[v(56,(8*(i))+10)] = ss[8] ^= k[v(56,(8*(i))+ 2)]; \ ss[3] ^= ss[2]; k[v(56,(8*(i))+11)] = ss[8] ^= k[v(56,(8*(i))+ 3)]; \ ss[8] = ls_box(ss[3],0); \ ss[4] ^= ss[8]; ss[8] = ff(ss[8]); k[v(56,(8*(i))+12)] = ss[8] ^= k[v(56,(8*(i))+ 4)]; \ ss[5] ^= ss[4]; k[v(56,(8*(i))+13)] = ss[8] ^= k[v(56,(8*(i))+ 5)]; \ ss[6] ^= ss[5]; k[v(56,(8*(i))+14)] = ss[8] ^= k[v(56,(8*(i))+ 6)]; \ ss[7] ^= ss[6]; k[v(56,(8*(i))+15)] = ss[8] ^= k[v(56,(8*(i))+ 7)]; \ } #define kdl8(k,i) \ { ss[0] ^= ls_box(ss[7],3) ^ t_use(r,c)[i]; k[v(56,(8*(i))+ 8)] = ss[0]; \ ss[1] ^= ss[0]; k[v(56,(8*(i))+ 9)] = ss[1]; \ ss[2] ^= ss[1]; k[v(56,(8*(i))+10)] = ss[2]; \ ss[3] ^= ss[2]; k[v(56,(8*(i))+11)] = ss[3]; \ } #define inv_mcol(x) four_tables(x,t_use(i,m),vf1,rf1,0) #define ff(x) inv_mcol(x) __device__ AES_RETURN aes_decrypt_key256(const unsigned char *key, aes_decrypt_ctx cx[1]) { uint_32t ss[9]; cx->ks[v(56,(0))] = ss[0] = word_in(key, 0); cx->ks[v(56,(1))] = ss[1] = word_in(key, 1); cx->ks[v(56,(2))] = ss[2] = word_in(key, 2); cx->ks[v(56,(3))] = ss[3] = word_in(key, 3); cx->ks[v(56,(4))] = ff(ss[4] = word_in(key, 4)); cx->ks[v(56,(5))] = ff(ss[5] = word_in(key, 5)); cx->ks[v(56,(6))] = ff(ss[6] = word_in(key, 6)); cx->ks[v(56,(7))] = ff(ss[7] = word_in(key, 7)); kdf8(cx->ks, 0); kd8(cx->ks, 1); kd8(cx->ks, 2); kd8(cx->ks, 3); kd8(cx->ks, 4); kd8(cx->ks, 5); kdl8(cx->ks, 6); cx->inf.l = 0; cx->inf.b[0] = 14 * 16; return EXIT_SUCCESS; } __device__ void LKf (u32 *k, unsigned int r, u32 *a, u32 *b, u32 *c, u32 *d) { *a = k[r]; *b = k[r + 1]; *c = k[r + 2]; *d = k[r + 3]; } __device__ void SKf (u32 *k, unsigned int r, u32 *a, u32 *b, u32 *c, u32 *d) { k[r + 4] = *a; k[r + 5] = *b; k[r + 6] = *c; k[r + 7] = *d; } #define rotlFixed(x,n) (((x) << (n)) | ((x) >> (32 - (n)))) #define rotrFixed(x,n) (((x) >> (n)) | ((x) << (32 - (n)))) __device__ void S0f (u32 *r0, u32 *r1, u32 *r2, u32 *r3, u32 *r4) { *r3 ^= *r0; *r4 = *r1; *r1 &= *r3; *r4 ^= *r2; *r1 ^= *r0; *r0 |= *r3; *r0 ^= *r4; *r4 ^= *r3; *r3 ^= *r2; *r2 |= *r1; *r2 ^= *r4; *r4 = ~*r4; *r4 |= *r1; *r1 ^= *r3; *r1 ^= *r4; *r3 |= *r0; *r1 ^= *r3; *r4 ^= *r3; } __device__ void S1f (u32 *r0, u32 *r1, u32 *r2, u32 *r3, u32 *r4) { *r0 = ~*r0; *r2 = ~*r2; *r4 = *r0; *r0 &= *r1; *r2 ^= *r0; *r0 |= *r3; *r3 ^= *r2; *r1 ^= *r0; *r0 ^= *r4; *r4 |= *r1; *r1 ^= *r3; *r2 |= *r0; *r2 &= *r4; *r0 ^= *r1; *r1 &= *r2; *r1 ^= *r0; *r0 &= *r2; *r0 ^= *r4; } __device__ void S2f (u32 *r0, u32 *r1, u32 *r2, u32 *r3, u32 *r4) { *r4 = *r0; *r0 &= *r2; *r0 ^= *r3; *r2 ^= *r1; *r2 ^= *r0; *r3 |= *r4; *r3 ^= *r1; *r4 ^= *r2; *r1 = *r3; *r3 |= *r4; *r3 ^= *r0; *r0 &= *r1; *r4 ^= *r0; *r1 ^= *r3; *r1 ^= *r4; *r4 = ~*r4; } __device__ void S3f (u32 *r0, u32 *r1, u32 *r2, u32 *r3, u32 *r4) { *r4 = *r0; *r0 |= *r3; *r3 ^= *r1; *r1 &= *r4; *r4 ^= *r2; *r2 ^= *r3; *r3 &= *r0; *r4 |= *r1; *r3 
^= *r4; *r0 ^= *r1; *r4 &= *r0; *r1 ^= *r3; *r4 ^= *r2; *r1 |= *r0; *r1 ^= *r2; *r0 ^= *r3; *r2 = *r1; *r1 |= *r3; *r1 ^= *r0; } __device__ void S4f (u32 *r0, u32 *r1, u32 *r2, u32 *r3, u32 *r4) { *r1 ^= *r3; *r3 = ~*r3; *r2 ^= *r3; *r3 ^= *r0; *r4 = *r1; *r1 &= *r3; *r1 ^= *r2; *r4 ^= *r3; *r0 ^= *r4; *r2 &= *r4; *r2 ^= *r0; *r0 &= *r1; *r3 ^= *r0; *r4 |= *r1; *r4 ^= *r0; *r0 |= *r3; *r0 ^= *r2; *r2 &= *r3; *r0 = ~*r0; *r4 ^= *r2; } __device__ void S5f (u32 *r0, u32 *r1, u32 *r2, u32 *r3, u32 *r4) { *r0 ^= *r1; *r1 ^= *r3; *r3 = ~*r3; *r4 = *r1; *r1 &= *r0; *r2 ^= *r3; *r1 ^= *r2; *r2 |= *r4; *r4 ^= *r3; *r3 &= *r1; *r3 ^= *r0; *r4 ^= *r1; *r4 ^= *r2; *r2 ^= *r0; *r0 &= *r3; *r2 = ~*r2; *r0 ^= *r4; *r4 |= *r3; *r2 ^= *r4; } __device__ void S6f (u32 *r0, u32 *r1, u32 *r2, u32 *r3, u32 *r4) { *r2 = ~*r2; *r4 = *r3; *r3 &= *r0; *r0 ^= *r4; *r3 ^= *r2; *r2 |= *r4; *r1 ^= *r3; *r2 ^= *r0; *r0 |= *r1; *r2 ^= *r1; *r4 ^= *r0; *r0 |= *r3; *r0 ^= *r2; *r4 ^= *r3; *r4 ^= *r0; *r3 = ~*r3; *r2 &= *r4; *r2 ^= *r3; } __device__ void S7f (u32 *r0, u32 *r1, u32 *r2, u32 *r3, u32 *r4) { *r4 = *r2; *r2 &= *r1; *r2 ^= *r3; *r3 &= *r1; *r4 ^= *r2; *r2 ^= *r1; *r1 ^= *r0; *r0 |= *r4; *r0 ^= *r2; *r3 ^= *r1; *r2 ^= *r3; *r3 &= *r0; *r3 ^= *r4; *r4 ^= *r2; *r2 &= *r0; *r4 = ~*r4; *r2 ^= *r4; *r4 &= *r0; *r1 ^= *r3; *r4 ^= *r1; } __device__ void serpent_set_key(const u8 userKey[], int keylen, u8 *ks) { u32 a,b,c,d,e; u32 *k = (u32 *)ks; u32 t; int i; for (i = 0; i < keylen / (int)sizeof(int); i++) k[i] = ((u32*)userKey)[i]; if (keylen < 32) k[keylen/4] |= (u32)1 << ((keylen%4)*8); k += 8; t = k[-1]; for (i = 0; i < 132; ++i) k[i] = t = rotlFixed(k[i-8] ^ k[i-5] ^ k[i-3] ^ t ^ 0x9e3779b9 ^ i, 11); k -= 20; for (i=0; i<4; i++) { LKf (k, 20, &a, &e, &b, &d); S3f (&a, &e, &b, &d, &c); SKf (k, 16, &e, &b, &d, &c); LKf (k, 24, &c, &b, &a, &e); S2f (&c, &b, &a, &e, &d); SKf (k, 20, &a, &e, &b, &d); LKf (k, 28, &b, &e, &c, &a); S1f (&b, &e, &c, &a, &d); SKf (k, 24, &c, &b, &a, &e); LKf (k, 32, &a, &b, &c, &d); S0f (&a, &b, &c, &d, &e); SKf (k, 28, &b, &e, &c, &a); k += 8*4; LKf (k, 4, &a, &c, &d, &b); S7f (&a, &c, &d, &b, &e); SKf (k, 0, &d, &e, &b, &a); LKf (k, 8, &a, &c, &b, &e); S6f (&a, &c, &b, &e, &d); SKf (k, 4, &a, &c, &d, &b); LKf (k, 12, &b, &a, &e, &c); S5f (&b, &a, &e, &c, &d); SKf (k, 8, &a, &c, &b, &e); LKf (k, 16, &e, &b, &d, &c); S4f (&e, &b, &d, &c, &a); SKf (k, 12, &b, &a, &e, &c); } LKf (k, 20, &a, &e, &b, &d); S3f (&a, &e, &b, &d, &c); SKf (k, 16, &e, &b, &d, &c); } #define G_MOD 0x0000014d __device__ u4byte mds_rem(u4byte p0, u4byte p1) { u4byte i, t, u; for(i = 0; i < 8; ++i) { t = p1 >> 24; // get most significant coefficient p1 = (p1 << 8) | (p0 >> 24); p0 <<= 8; // shift others up // multiply t by a (the primitive element - i.e. 
left shift) u = (t << 1); if(t & 0x80) // subtract modular polynomial on overflow u ^= G_MOD; p1 ^= t ^ (u << 16); // remove t * (a * x^2 + 1) u ^= (t >> 1); // form u = a * t + t / a = t * (a + 1 / a); if(t & 0x01) // add the modular polynomial on underflow u ^= G_MOD >> 1; p1 ^= (u << 24) | (u << 8); // remove t * (a + 1/a) * (x^3 + x) } return p1; } #define extract_byte(x,n) ((u1byte)((x) >> (8 * n))) #undef q #define q(n,x) q_tab[n][x] #define mds(n,x) m_tab[n][x] #define rotr(x,n) (((x)>>(n))|((x)<<(32-(n)))) #define rotl(x,n) (((x)<<(n))|((x)>>(32-(n)))) #define q20(x) q(0,q(0,x) ^ extract_byte(key[1],0)) ^ extract_byte(key[0],0) #define q21(x) q(0,q(1,x) ^ extract_byte(key[1],1)) ^ extract_byte(key[0],1) #define q22(x) q(1,q(0,x) ^ extract_byte(key[1],2)) ^ extract_byte(key[0],2) #define q23(x) q(1,q(1,x) ^ extract_byte(key[1],3)) ^ extract_byte(key[0],3) #define q30(x) q(0,q(0,q(1, x) ^ extract_byte(key[2],0)) ^ extract_byte(key[1],0)) ^ extract_byte(key[0],0) #define q31(x) q(0,q(1,q(1, x) ^ extract_byte(key[2],1)) ^ extract_byte(key[1],1)) ^ extract_byte(key[0],1) #define q32(x) q(1,q(0,q(0, x) ^ extract_byte(key[2],2)) ^ extract_byte(key[1],2)) ^ extract_byte(key[0],2) #define q33(x) q(1,q(1,q(0, x) ^ extract_byte(key[2],3)) ^ extract_byte(key[1],3)) ^ extract_byte(key[0],3) #define q40(x) q(0,q(0,q(1, q(1, x) ^ extract_byte(key[3],0)) ^ extract_byte(key[2],0)) ^ extract_byte(key[1],0)) ^ extract_byte(key[0],0) #define q41(x) q(0,q(1,q(1, q(0, x) ^ extract_byte(key[3],1)) ^ extract_byte(key[2],1)) ^ extract_byte(key[1],1)) ^ extract_byte(key[0],1) #define q42(x) q(1,q(0,q(0, q(0, x) ^ extract_byte(key[3],2)) ^ extract_byte(key[2],2)) ^ extract_byte(key[1],2)) ^ extract_byte(key[0],2) #define q43(x) q(1,q(1,q(0, q(1, x) ^ extract_byte(key[3],3)) ^ extract_byte(key[2],3)) ^ extract_byte(key[1],3)) ^ extract_byte(key[0],3) __device__ void gen_mk_tab(TwofishInstance *instance, u4byte key[], u1byte** q_tab, u4byte** m_tab) { u4byte i; u1byte by; u4byte *mk_tab = instance->mk_tab; switch(instance->k_len) { case 2: for(i = 0; i < 256; ++i) { by = (u1byte)i; mk_tab[0 + 4*i] = mds(0, q20(by)); mk_tab[1 + 4*i] = mds(1, q21(by)); mk_tab[2 + 4*i] = mds(2, q22(by)); mk_tab[3 + 4*i] = mds(3, q23(by)); } break; case 3: for(i = 0; i < 256; ++i) { by = (u1byte)i; mk_tab[0 + 4*i] = mds(0, q30(by)); mk_tab[1 + 4*i] = mds(1, q31(by)); mk_tab[2 + 4*i] = mds(2, q32(by)); mk_tab[3 + 4*i] = mds(3, q33(by)); } break; case 4: for(i = 0; i < 256; ++i) { by = (u1byte)i; mk_tab[0 + 4*i] = mds(0, q40(by)); mk_tab[1 + 4*i] = mds(1, q41(by)); mk_tab[2 + 4*i] = mds(2, q42(by)); mk_tab[3 + 4*i] = mds(3, q43(by)); } } } __device__ u1byte qp(const u4byte n, const u1byte x) { u1byte a0, a1, a2, a3, a4, b0, b1, b2, b3, b4; a0 = x >> 4; b0 = x & 15; a1 = a0 ^ b0; b1 = ror4[b0] ^ ashx[a0]; a2 = qt0[n][a1]; b2 = qt1[n][b1]; a3 = a2 ^ b2; b3 = ror4[b2] ^ ashx[a2]; a4 = qt2[n][a3]; b4 = qt3[n][b3]; return (b4 << 4) | a4; } __device__ u4byte h_fun(TwofishInstance *instance, const u4byte x, const u4byte key[], u1byte** q_tab, u4byte** m_tab) { u4byte b0, b1, b2, b3; b0 = extract_byte(x, 0); b1 = extract_byte(x, 1); b2 = extract_byte(x, 2); b3 = extract_byte(x, 3); switch(instance->k_len) { case 4: b0 = q(1, (u1byte) b0) ^ extract_byte(key[3],0); b1 = q(0, (u1byte) b1) ^ extract_byte(key[3],1); b2 = q(0, (u1byte) b2) ^ extract_byte(key[3],2); b3 = q(1, (u1byte) b3) ^ extract_byte(key[3],3); case 3: b0 = q(1, (u1byte) b0) ^ extract_byte(key[2],0); b1 = q(1, (u1byte) b1) ^ extract_byte(key[2],1); b2 = q(0, (u1byte) b2) ^ 
extract_byte(key[2],2); b3 = q(0, (u1byte) b3) ^ extract_byte(key[2],3); case 2: b0 = q(0, (u1byte) (q(0, (u1byte) b0) ^ extract_byte(key[1],0))) ^ extract_byte(key[0],0); b1 = q(0, (u1byte) (q(1, (u1byte) b1) ^ extract_byte(key[1],1))) ^ extract_byte(key[0],1); b2 = q(1, (u1byte) (q(0, (u1byte) b2) ^ extract_byte(key[1],2))) ^ extract_byte(key[0],2); b3 = q(1, (u1byte) (q(1, (u1byte) b3) ^ extract_byte(key[1],3))) ^ extract_byte(key[0],3); } return mds(0, b0) ^ mds(1, b1) ^ mds(2, b2) ^ mds(3, b3); } __device__ void gen_qtab(u1byte** q_tab) { u4byte i; for(i = 0; i < 256; ++i) { q(0,i) = qp(0, (u1byte)i); q(1,i) = qp(1, (u1byte)i); } } #define ffm_5b(x) ((x) ^ ((x) >> 2) ^ tab_5b[(x) & 3]) #define ffm_ef(x) ((x) ^ ((x) >> 1) ^ ((x) >> 2) ^ tab_ef[(x) & 3]) __device__ void gen_mtab(u1byte** q_tab, u4byte** m_tab) { u4byte i, f01, f5b, fef; for(i = 0; i < 256; ++i) { f01 = q(1,i); f5b = ffm_5b(f01); fef = ffm_ef(f01); m_tab[0][i] = f01 + (f5b << 8) + (fef << 16) + (fef << 24); m_tab[2][i] = f5b + (fef << 8) + (f01 << 16) + (fef << 24); f01 = q(0,i); f5b = ffm_5b(f01); fef = ffm_ef(f01); m_tab[1][i] = fef + (fef << 8) + (f5b << 16) + (f01 << 24); m_tab[3][i] = f5b + (f01 << 8) + (fef << 16) + (f5b << 24); } } /* initialise the key schedule from the user supplied key */ __device__ u4byte *twofish_set_key(TwofishInstance *instance, const u4byte in_key[], const u4byte key_len, u1byte** q_tab, u4byte** m_tab, u4byte* qt_gen, u4byte* mt_gen) { u4byte i, a, b, me_key[4], mo_key[4]; u4byte *l_key, *s_key; l_key = instance->l_key; s_key = instance->s_key; if(!*qt_gen) { gen_qtab(q_tab); *qt_gen = 1; } if(!*mt_gen) { gen_mtab(q_tab, m_tab); *mt_gen = 1; } instance->k_len = key_len / 64; /* 2, 3 or 4 */ for(i = 0; i < instance->k_len; ++i) { a = in_key[i + i]; me_key[i] = a; b = in_key[i + i + 1]; mo_key[i] = b; s_key[instance->k_len - i - 1] = mds_rem(a, b); } for(i = 0; i < 40; i += 2) { a = 0x01010101 * i; b = a + 0x01010101; a = h_fun(instance, a, me_key, q_tab, m_tab); b = rotl(h_fun(instance, b, mo_key, q_tab, m_tab), 8); l_key[i] = a + b; l_key[i + 1] = rotl(a + 2 * b, 9); } gen_mk_tab(instance, s_key, q_tab, m_tab); return l_key; } #define ROUNDS 16 #define GETBYTE(x, y) (unsigned int)(byte)((x)>>(8*(y))) __device__ void crypt_block(BF_KEY *key, const word32 in[2], word32 out[2]) { word32 left = in[0]; word32 right = in[1]; const word32 *const s=key->sbox; const word32 *p=key->pbox; unsigned i; left ^= p[0]; for (i=0; i<ROUNDS/2; i++) { right ^= (((s[GETBYTE(left,3)] + s[256+GETBYTE(left,2)]) ^ s[2*256+GETBYTE(left,1)]) + s[3*256+GETBYTE(left,0)]) ^ p[2*i+1]; left ^= (((s[GETBYTE(right,3)] + s[256+GETBYTE(right,2)]) ^ s[2*256+GETBYTE(right,1)]) + s[3*256+GETBYTE(right,0)]) ^ p[2*i+2]; } right ^= p[ROUNDS+1]; out[0] = right; out[1] = left; } __device__ void BlowfishSetKey (BF_KEY *key, int keylength, unsigned char *key_string) { unsigned i, j=0, k; word32 data, dspace[2] = {0, 0}; word32 *sbox = key->sbox; word32 *pbox = key->pbox; memcpy(pbox, p_init, sizeof(p_init)); memcpy(sbox, s_init, sizeof(s_init)); // Xor key string into encryption key vector for (i=0 ; i<ROUNDS+2 ; ++i) { data = 0 ; for (k=0 ; k<4 ; ++k ) data = (data << 8) | key_string[j++ % keylength]; pbox[i] ^= data; } crypt_block(key, dspace, pbox); for (i=0; i<ROUNDS; i+=2) crypt_block(key, pbox+i, pbox+i+2); crypt_block(key, pbox+ROUNDS, sbox); for (i=0; i<4*256-2; i+=2) crypt_block(key, sbox+i, sbox+i+2); for (i=0; i < ROUNDS+2; i++) key->pbox_dec[ROUNDS+1-i] = pbox[i]; } __device__ u32 MirrorBytes32 (u32 x) { u32 n = (u8) x; n 
<<= 8; n |= (u8) (x >> 8); n <<= 8; n |= (u8) (x >> 16); return (n << 8) | (u8) (x >> 24); } __device__ uint64 MirrorBytes64 (uint64 x) { uint64 n = (u8) x; n <<= 8; n |= (u8) (x >> 8); n <<= 8; n |= (u8) (x >> 16); n <<= 8; n |= (u8) (x >> 24); n <<= 8; n |= (u8) (x >> 32); n <<= 8; n |= (u8) (x >> 40); n <<= 8; n |= (u8) (x >> 48); return (n << 8) | (u8) (x >> 56); } #define BE32(x) MirrorBytes32(x) #define BE64(x) MirrorBytes64(x) __device__ void Cast5SetKey (CAST_KEY *key, unsigned int keylength, const byte *userKey) { unsigned int i; word32 *K = key->K; word32 X[4], Z[4]; X[0] = BE32 (((word32 *)userKey)[0]); X[1] = BE32 (((word32 *)userKey)[1]); X[2] = BE32 (((word32 *)userKey)[2]); X[3] = BE32 (((word32 *)userKey)[3]); #define x(i) GETBYTE(X[i/4], 3-i%4) #define z(i) GETBYTE(Z[i/4], 3-i%4) for (i=0; i<=16; i+=16) { // this part is copied directly from RFC 2144 (with some search and replace) by Wei Dai Z[0] = X[0] ^ S[4][x(0xD)] ^ S[5][x(0xF)] ^ S[6][x(0xC)] ^ S[7][x(0xE)] ^ S[6][x(0x8)]; Z[1] = X[2] ^ S[4][z(0x0)] ^ S[5][z(0x2)] ^ S[6][z(0x1)] ^ S[7][z(0x3)] ^ S[7][x(0xA)]; Z[2] = X[3] ^ S[4][z(0x7)] ^ S[5][z(0x6)] ^ S[6][z(0x5)] ^ S[7][z(0x4)] ^ S[4][x(0x9)]; Z[3] = X[1] ^ S[4][z(0xA)] ^ S[5][z(0x9)] ^ S[6][z(0xB)] ^ S[7][z(0x8)] ^ S[5][x(0xB)]; K[i+0] = S[4][z(0x8)] ^ S[5][z(0x9)] ^ S[6][z(0x7)] ^ S[7][z(0x6)] ^ S[4][z(0x2)]; K[i+1] = S[4][z(0xA)] ^ S[5][z(0xB)] ^ S[6][z(0x5)] ^ S[7][z(0x4)] ^ S[5][z(0x6)]; K[i+2] = S[4][z(0xC)] ^ S[5][z(0xD)] ^ S[6][z(0x3)] ^ S[7][z(0x2)] ^ S[6][z(0x9)]; K[i+3] = S[4][z(0xE)] ^ S[5][z(0xF)] ^ S[6][z(0x1)] ^ S[7][z(0x0)] ^ S[7][z(0xC)]; X[0] = Z[2] ^ S[4][z(0x5)] ^ S[5][z(0x7)] ^ S[6][z(0x4)] ^ S[7][z(0x6)] ^ S[6][z(0x0)]; X[1] = Z[0] ^ S[4][x(0x0)] ^ S[5][x(0x2)] ^ S[6][x(0x1)] ^ S[7][x(0x3)] ^ S[7][z(0x2)]; X[2] = Z[1] ^ S[4][x(0x7)] ^ S[5][x(0x6)] ^ S[6][x(0x5)] ^ S[7][x(0x4)] ^ S[4][z(0x1)]; X[3] = Z[3] ^ S[4][x(0xA)] ^ S[5][x(0x9)] ^ S[6][x(0xB)] ^ S[7][x(0x8)] ^ S[5][z(0x3)]; K[i+4] = S[4][x(0x3)] ^ S[5][x(0x2)] ^ S[6][x(0xC)] ^ S[7][x(0xD)] ^ S[4][x(0x8)]; K[i+5] = S[4][x(0x1)] ^ S[5][x(0x0)] ^ S[6][x(0xE)] ^ S[7][x(0xF)] ^ S[5][x(0xD)]; K[i+6] = S[4][x(0x7)] ^ S[5][x(0x6)] ^ S[6][x(0x8)] ^ S[7][x(0x9)] ^ S[6][x(0x3)]; K[i+7] = S[4][x(0x5)] ^ S[5][x(0x4)] ^ S[6][x(0xA)] ^ S[7][x(0xB)] ^ S[7][x(0x7)]; Z[0] = X[0] ^ S[4][x(0xD)] ^ S[5][x(0xF)] ^ S[6][x(0xC)] ^ S[7][x(0xE)] ^ S[6][x(0x8)]; Z[1] = X[2] ^ S[4][z(0x0)] ^ S[5][z(0x2)] ^ S[6][z(0x1)] ^ S[7][z(0x3)] ^ S[7][x(0xA)]; Z[2] = X[3] ^ S[4][z(0x7)] ^ S[5][z(0x6)] ^ S[6][z(0x5)] ^ S[7][z(0x4)] ^ S[4][x(0x9)]; Z[3] = X[1] ^ S[4][z(0xA)] ^ S[5][z(0x9)] ^ S[6][z(0xB)] ^ S[7][z(0x8)] ^ S[5][x(0xB)]; K[i+8] = S[4][z(0x3)] ^ S[5][z(0x2)] ^ S[6][z(0xC)] ^ S[7][z(0xD)] ^ S[4][z(0x9)]; K[i+9] = S[4][z(0x1)] ^ S[5][z(0x0)] ^ S[6][z(0xE)] ^ S[7][z(0xF)] ^ S[5][z(0xC)]; K[i+10] = S[4][z(0x7)] ^ S[5][z(0x6)] ^ S[6][z(0x8)] ^ S[7][z(0x9)] ^ S[6][z(0x2)]; K[i+11] = S[4][z(0x5)] ^ S[5][z(0x4)] ^ S[6][z(0xA)] ^ S[7][z(0xB)] ^ S[7][z(0x6)]; X[0] = Z[2] ^ S[4][z(0x5)] ^ S[5][z(0x7)] ^ S[6][z(0x4)] ^ S[7][z(0x6)] ^ S[6][z(0x0)]; X[1] = Z[0] ^ S[4][x(0x0)] ^ S[5][x(0x2)] ^ S[6][x(0x1)] ^ S[7][x(0x3)] ^ S[7][z(0x2)]; X[2] = Z[1] ^ S[4][x(0x7)] ^ S[5][x(0x6)] ^ S[6][x(0x5)] ^ S[7][x(0x4)] ^ S[4][z(0x1)]; X[3] = Z[3] ^ S[4][x(0xA)] ^ S[5][x(0x9)] ^ S[6][x(0xB)] ^ S[7][x(0x8)] ^ S[5][z(0x3)]; K[i+12] = S[4][x(0x8)] ^ S[5][x(0x9)] ^ S[6][x(0x7)] ^ S[7][x(0x6)] ^ S[4][x(0x3)]; K[i+13] = S[4][x(0xA)] ^ S[5][x(0xB)] ^ S[6][x(0x5)] ^ S[7][x(0x4)] ^ S[5][x(0x7)]; K[i+14] = S[4][x(0xC)] ^ S[5][x(0xD)] ^ S[6][x(0x3)] ^ 
S[7][x(0x2)] ^ S[6][x(0x8)]; K[i+15] = S[4][x(0xE)] ^ S[5][x(0xF)] ^ S[6][x(0x1)] ^ S[7][x(0x0)] ^ S[7][x(0xD)]; } for (i=16; i<32; i++) K[i] &= 0x1f; } /* Set key (initialize key schedule array) */ __device__ void RawSetKey (int encryption, const byte *key, word32 *scheduledKey) { byte buffer[56+56+8]; byte *const pc1m=buffer; /* place to modify pc1 into */ byte *const pcr=pc1m+56; /* place to rotate pc1 into */ byte *const ks=pcr+56; int i,j,l; int m; for (j=0; j<56; j++) { /* convert pc1 to bits of key */ l=pc1[j]-1; /* integer bit location */ m = l & 07; /* find bit */ pc1m[j]=(key[l>>3] & /* find which key byte l is in */ bytebit[m]) /* and which bit of that byte */ ? 1 : 0; /* and store 1-bit result */ } for (i=0; i<16; i++) { /* key chunk for each iteration */ memset(ks,0,8); /* Clear key schedule */ for (j=0; j<56; j++) /* rotate pc1 the right amount */ pcr[j] = pc1m[(l=j+totrot[i])<(j<28? 28 : 56) ? l: l-28]; /* rotate left and right halves independently */ for (j=0; j<48; j++){ /* select bits individually */ /* check bit that goes to ks[j] */ if (pcr[pc2[j]-1]){ /* mask it in if it's there */ l= j % 6; ks[j/6] |= bytebit[l] >> 2; } } /* Now convert to odd/even interleaved form for use in F */ scheduledKey[2*i] = ((word32)ks[0] << 24) | ((word32)ks[2] << 16) | ((word32)ks[4] << 8) | ((word32)ks[6]); scheduledKey[2*i+1] = ((word32)ks[1] << 24) | ((word32)ks[3] << 16) | ((word32)ks[5] << 8) | ((word32)ks[7]); } if (!encryption) // reverse key schedule order for (i=0; i<16; i+=2) { word32 b = scheduledKey[i]; scheduledKey[i] = scheduledKey[32-2-i]; scheduledKey[32-2-i] = b; b = scheduledKey[i+1]; scheduledKey[i+1] = scheduledKey[32-1-i]; scheduledKey[32-1-i] = b; } burn (buffer, sizeof (buffer)); } __device__ void TripleDesSetKey (const byte *userKey, unsigned int length, TDES_KEY *ks) { TDES_KEY *as = ks; RawSetKey (1, userKey + 0, as->k1); RawSetKey (1, userKey + 8, ks->k2); RawSetKey (1, userKey + 16, ks->k3); RawSetKey (0, userKey + 16, ks->k1d); RawSetKey (0, userKey + 8, ks->k2d); RawSetKey (0, userKey + 0, ks->k3d); } /* Return values: 0 = success, ERR_CIPHER_INIT_FAILURE (fatal), ERR_CIPHER_INIT_WEAK_KEY (non-fatal) */ __device__ int CipherInit (int cipher, unsigned char *key, u8 *ks, u1byte** q_tab, u4byte** m_tab, u4byte* qt_gen, u4byte* mt_gen) { int retVal = ERR_SUCCESS; switch (cipher) { case AES: if (aes_encrypt_key256 (key, (aes_encrypt_ctx *) ks) != EXIT_SUCCESS) return ERR_CIPHER_INIT_FAILURE; if (aes_decrypt_key256 (key, (aes_decrypt_ctx *) (ks + sizeof(aes_encrypt_ctx))) != EXIT_SUCCESS) return ERR_CIPHER_INIT_FAILURE; break; case SERPENT: serpent_set_key (key, CipherGetKeySize(SERPENT) * 8, ks); break; case TWOFISH: twofish_set_key ((TwofishInstance *)ks, (const u4byte *)key, CipherGetKeySize(TWOFISH) * 8, q_tab, m_tab, qt_gen, mt_gen); // FIXME: crash here break; case BLOWFISH: /* Deprecated/legacy */ BlowfishSetKey ((BF_KEY *)ks, CipherGetKeySize(BLOWFISH), key); break; case CAST: /* Deprecated/legacy */ Cast5SetKey ((CAST_KEY *) ks, CipherGetKeySize(CAST), key); break; case TRIPLEDES: /* Deprecated/legacy */ TripleDesSetKey (key, CipherGetKeySize (TRIPLEDES), (TDES_KEY *) ks); // Verify whether all three DES keys are mutually different if (((*((int64 *) key) ^ *((int64 *) key+1)) & 0xFEFEFEFEFEFEFEFEULL) == 0 || ((*((int64 *) key+1) ^ *((int64 *) key+2)) & 0xFEFEFEFEFEFEFEFEULL) == 0 || ((*((int64 *) key) ^ *((int64 *) key+2)) & 0xFEFEFEFEFEFEFEFEULL) == 0) retVal = ERR_CIPHER_INIT_WEAK_KEY; // Non-fatal error break; default: // Unknown/wrong cipher ID return 
ERR_CIPHER_INIT_FAILURE; } return retVal; } // Return values: 0 = success, ERR_CIPHER_INIT_FAILURE (fatal), ERR_CIPHER_INIT_WEAK_KEY (non-fatal) __device__ int EAInit (int ea, unsigned char *key, u8 *ks, u1byte** q_tab, u4byte** m_tab, u4byte* qt_gen, u4byte* mt_gen) { int c, retVal = ERR_SUCCESS; if (ea == 0) return ERR_CIPHER_INIT_FAILURE; for (c = EAGetFirstCipher (ea); c != 0; c = EAGetNextCipher (ea, c)) { switch (CipherInit (c, key, ks, q_tab, m_tab, qt_gen, mt_gen)) { case ERR_CIPHER_INIT_FAILURE: return ERR_CIPHER_INIT_FAILURE; case ERR_CIPHER_INIT_WEAK_KEY: retVal = ERR_CIPHER_INIT_WEAK_KEY; // Non-fatal error break; } key += CipherGetKeySize (c); ks += CipherGetKeyScheduleSize (c); } return retVal; } __device__ int IsBitSet128 (unsigned int bit, u8 *a) { return a[(127 - bit) / 8] & (0x80 >> ((127 - bit) % 8)); } __device__ int IsBitSet64 (unsigned int bit, u8 *a) { return a[(63 - bit) / 8] & (0x80 >> ((63 - bit) % 8)); } __device__ void SetBit128 (unsigned int bit, u8 *a) { a[(127 - bit) / 8] |= 0x80 >> ((127 - bit) % 8); } __device__ void SetBit64 (unsigned int bit, u8 *a) { a[(63 - bit) / 8] |= 0x80 >> ((63 - bit) % 8); } __device__ void MirrorBits128 (u8 *a) { u8 t[128 / 8]; int i; memset (t,0,16); for (i = 0; i < 128; i++) { if (IsBitSet128(i, a)) SetBit128 (127 - i, t); } memcpy (a, t, sizeof (t)); burn (t,sizeof (t)); } __device__ void MirrorBits64 (u8 *a) { u8 t[64 / 8]; int i; memset (t,0,8); for (i = 0; i < 64; i++) { if (IsBitSet64(i, a)) SetBit64 (63 - i, t); } memcpy (a, t, sizeof (t)); burn (t,sizeof (t)); } /* Multiply of a GF128 field element by x. The field element */ /* is held in an array of bytes in which field bits 8n..8n + 7 */ /* are held in byte[n], with lower indexed bits placed in the */ /* more numerically significant bit positions in bytes. */ /* This function multiples a field element x, in the polynomial */ /* field representation. It uses 32-bit word operations to gain */ /* speed but compensates for machine endianess and hence works */ /* correctly on both styles of machine */ __device__ in_line void mul_x(mode(32t) x[4]) { mode(32t) t; bsw_32(x, 4); /* at this point the filed element bits 0..127 are set out */ /* as follows in 32-bit words (where the most significant */ /* (ms) numeric bits are to the left) */ /* */ /* x[0] x[1] x[2] x[3] */ /* ms ls ms ls ms ls ms ls */ /* field: 0 ... 31 32 .. 63 64 .. 95 96 .. 127 */ t = gf_poly[x[3] & 1]; /* bit 127 of the element */ x[3] = (x[3] >> 1) | (x[2] << 31); /* shift bits up by one */ x[2] = (x[2] >> 1) | (x[1] << 31); /* position */ x[1] = (x[1] >> 1) | (x[0] << 31); /* if bit 7 is 1 xor in */ x[0] = (x[0] >> 1) ^ t; /* the field polynomial */ bsw_32(x, 4); } __device__ in_line void mul_lex8(mode(32t) x[4]) /* mutiply with long words */ { mode(32t) t = (x[3] >> 24); /* in little endian format */ x[3] = (x[3] << 8) | (x[2] >> 24); x[2] = (x[2] << 8) | (x[1] >> 24); x[1] = (x[1] << 8) | (x[0] >> 24); x[0] = (x[0] << 8) ^ gft_le[t]; } __device__ in_line void mul_x64(mode(32t) x[2]) { mode(32t) t; bsw_32(x, 2); /* at this point the filed element bits 0..127 are set out */ /* as follows in 32-bit words (where the most significant */ /* (ms) numeric bits are to the left) */ /* */ /* x[0] x[1] x[2] x[3] */ /* ms ls ms ls ms ls ms ls */ /* field: 0 ... 31 32 .. 63 64 .. 95 96 .. 
127 */ t = gf_poly64[x[1] & 1]; /* bit 127 of the element */ /* shift bits up by one */ /* position */ x[1] = (x[1] >> 1) | (x[0] << 31); /* if bit 7 is 1 xor in */ x[0] = (x[0] >> 1) ^ t; /* the field polynomial */ bsw_32(x, 2); } __device__ in_line void mul_lex8_64(mode(32t) x[2]) /* mutiply with long words */ { mode(32t) t = (x[1] >> 24); /* in little endian format */ x[1] = (x[1] << 8) | (x[0] >> 24); x[0] = (x[0] << 8) ^ gft_le64[t]; } #define mul_x8 mul_lex8 #define mul_x8_64 mul_lex8_64 __device__ void compile_8k_table(u8 *a, GfCtx8k *ctx) { int i, j, k; memset(ctx->gf_t8k, 0, 32 * 16 * 16); for(i = 0; i < 2 * CBLK_LEN; ++i) { if(i == 0) { memcpy(ctx->gf_t8k[1][8], a, CBLK_LEN); for(j = 4; j > 0; j >>= 1) { memcpy(ctx->gf_t8k[1][j], ctx->gf_t8k[1][j + j], CBLK_LEN); mul_x(ctx->gf_t8k[1][j]); } memcpy(ctx->gf_t8k[0][8], ctx->gf_t8k[1][1], CBLK_LEN); mul_x(ctx->gf_t8k[0][8]); for(j = 4; j > 0; j >>= 1) { memcpy(ctx->gf_t8k[0][j], ctx->gf_t8k[0][j + j], CBLK_LEN); mul_x(ctx->gf_t8k[0][j]); } } else if(i > 1) for(j = 8; j > 0; j >>= 1) { memcpy(ctx->gf_t8k[i][j], ctx->gf_t8k[i - 2][j], CBLK_LEN); mul_x8(ctx->gf_t8k[i][j]); } for(j = 2; j < 16; j += j) { mode(32t) *pj = ctx->gf_t8k[i][j]; mode(32t) *pk = ctx->gf_t8k[i][1]; mode(32t) *pl = ctx->gf_t8k[i][j + 1]; for(k = 1; k < j; ++k) { *pl++ = pj[0] ^ *pk++; *pl++ = pj[1] ^ *pk++; *pl++ = pj[2] ^ *pk++; *pl++ = pj[3] ^ *pk++; } } } } __device__ void compile_4k_table64(u8 *a, GfCtx4k64 *ctx) { int i, j, k; memset(ctx->gf_t4k, 0, sizeof(ctx->gf_t4k)); for(i = 0; i < 2 * CBLK_LEN8; ++i) { if(i == 0) { memcpy(ctx->gf_t4k[1][8], a, CBLK_LEN8); for(j = 4; j > 0; j >>= 1) { memcpy(ctx->gf_t4k[1][j], ctx->gf_t4k[1][j + j], CBLK_LEN8); mul_x64(ctx->gf_t4k[1][j]); } memcpy(ctx->gf_t4k[0][8], ctx->gf_t4k[1][1], CBLK_LEN8); mul_x64(ctx->gf_t4k[0][8]); for(j = 4; j > 0; j >>= 1) { memcpy(ctx->gf_t4k[0][j], ctx->gf_t4k[0][j + j], CBLK_LEN8); mul_x64(ctx->gf_t4k[0][j]); } } else if(i > 1) for(j = 8; j > 0; j >>= 1) { memcpy(ctx->gf_t4k[i][j], ctx->gf_t4k[i - 2][j], CBLK_LEN8); mul_x8_64(ctx->gf_t4k[i][j]); } for(j = 2; j < 16; j += j) { mode(32t) *pj = ctx->gf_t4k[i][j]; mode(32t) *pk = ctx->gf_t4k[i][1]; mode(32t) *pl = ctx->gf_t4k[i][j + 1]; for(k = 1; k < j; ++k) { *pl++ = pj[0] ^ *pk++; *pl++ = pj[1] ^ *pk++; *pl++ = pj[2] ^ *pk++; *pl++ = pj[3] ^ *pk++; } } } } /* Allocate and initialize speed optimization table for multiplication by 64-bit operand in MSB-first mode */ __device__ int Gf128Tab64Init (u8 *a, GfCtx *ctx) { GfCtx8k ctx8k; u8 am[16]; int i, j; memcpy (am, a, 16); MirrorBits128 (am); compile_8k_table (am, &ctx8k); /* Convert 8k LSB-first table to 4k MSB-first */ for (i = 16; i < 32; i++) { for (j = 0; j < 16; j++) { int jm = 0; jm |= (j & 0x1) << 3; jm |= (j & 0x2) << 1; jm |= (j & 0x4) >> 1; jm |= (j & 0x8) >> 3; memcpy (&ctx->gf_t128[i-16][jm], (unsigned char *)&ctx8k.gf_t8k[31-i][j], 16); MirrorBits128 ((unsigned char *)&ctx->gf_t128[i-16][jm]); } } burn (am, sizeof (am)); return TRUE; } __device__ int Gf64TabInit (u8 *a, GfCtx *ctx) { /* Deprecated/legacy */ GfCtx4k64 ctx4k; u8 am[8]; int i, j; memcpy (am, a, 8); MirrorBits64 (am); compile_4k_table64 (am, &ctx4k); /* Convert LSB-first table to MSB-first */ for (i = 0; i < 16; i++) { for (j = 0; j < 16; j++) { int jm = 0; jm |= (j & 0x1) << 3; jm |= (j & 0x2) << 1; jm |= (j & 0x4) >> 1; jm |= (j & 0x8) >> 3; memcpy (&ctx->gf_t64[i][jm], (unsigned char *)&ctx4k.gf_t4k[15-i][j], 8); MirrorBits64 ((unsigned char *)&ctx->gf_t64[i][jm]); } } burn (am, sizeof (am)); return TRUE; } 
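/* Illustrative sketch (not from the original source): a minimal byte-wise form of the
   GF(2^128) multiply-by-x that mul_x() performs with 32-bit words and that
   compile_8k_table()/compile_4k_table64() apply when building their lookup tables.
   The element layout matches the comment on mul_x(): field bit k is stored in
   byte[k/8], with lower-indexed bits in the more significant bit positions.
   Assumes the usual LRW/GCM reduction polynomial x^128 + x^7 + x^2 + x + 1, which
   in this bit ordering folds back into byte 0 as the constant 0xE1. */
__device__ static void gf128_mul_x_bytewise_sketch (u8 a[16])
{
	u8 carry = 0;
	int n;
	for (n = 0; n < 16; n++)
	{
		/* Multiplying by x raises every bit index by one, which in this layout is a
		   one-bit right shift of the whole byte array. */
		u8 out = (u8) (a[n] & 1);
		a[n] = (u8) ((a[n] >> 1) | (carry << 7));
		carry = out;
	}
	if (carry)
		a[0] ^= 0xE1;	/* old bit 127 fell out: reduce modulo the field polynomial */
}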
__device__ BOOL EAInitMode (PCRYPTO_INFO ci, u1byte** q_tab, u4byte** m_tab, u4byte* qt_gen, u4byte* mt_gen) { switch (ci->mode) { case XTS: // Secondary key schedule if (EAInit (ci->ea, ci->k2, ci->ks2, q_tab, m_tab, qt_gen, mt_gen) != ERR_SUCCESS) return FALSE; /* Note: XTS mode could potentially be initialized with a weak key causing all blocks in one data unit on the volume to be tweaked with zero tweaks (i.e. 512 bytes of the volume would be encrypted in ECB mode). However, to create a TrueCrypt volume with such a weak key, each human being on Earth would have to create approximately 11,378,125,361,078,862 (about eleven quadrillion) TrueCrypt volumes (provided that the size of each of the volumes is 1024 terabytes). */ break; case LRW: switch (CipherGetBlockSize (EAGetFirstCipher (ci->ea))) { case 8: /* Deprecated/legacy */ return Gf64TabInit (ci->k2, &ci->gf_ctx); case 16: return Gf128Tab64Init (ci->k2, &ci->gf_ctx); default: TC_THROW_FATAL_EXCEPTION; } break; case CBC: case INNER_CBC: case OUTER_CBC: // The mode does not need to be initialized or is initialized elsewhere return TRUE; default: // Unknown/wrong ID TC_THROW_FATAL_EXCEPTION; } return TRUE; } __device__ uint32 GetHeaderField32 (byte *header, int offset) { return BE32 (*(uint32 *) (header + offset)); } #define etab_0(x) t_fn[0][x] #define etab_1(x) t_fn[1][x] #define etab_2(x) t_fn[2][x] #define etab_3(x) t_fn[3][x] #define eltab_0(x) t_fl[0][x] #define eltab_1(x) t_fl[1][x] #define eltab_2(x) t_fl[2][x] #define eltab_3(x) t_fl[3][x] #define eltab(n, x) eltab_##n(x) __device__ void enc_round_c (uint_32t* eax, uint_32t* ebx, uint_32t* ecx, uint_32t* edx, uint_32t* ebp) { uint_32t key1, key2, tmp, esi, edi; uint_8t* ax = (uint_8t*)eax, *bx = (uint_8t*)ebx, *cx = (uint_8t*)ecx, *dx = (uint_8t*)edx; // ebp - key pointer, eax, ebx, ecx, edx - each contain 4 bytes (int) of outBlock, esi - 3rd int in key, edi - 4th int in key key1 = ebp[0]; key2 = ebp[1]; esi = ebp[2]; edi = ebp[3]; /* begin rnd_fun */ *ebx = (*ebx << 16) | (*ebx >> 16); // al - 0, ah - 1, bl - 4, bh - 5, cl - 8, ch - 9, dl - 12, dh - 13 esi ^= etab_0(cx[0]); esi ^= etab_1(dx[1]); esi ^= etab_3(bx[1]); edi ^= etab_0(dx[0]); edi ^= etab_1(ax[1]); edi ^= etab_2(bx[0]); tmp = etab_0(ax[0]); // ebp (restored later) *ebx >>= 16; *eax &= 0xffff0000; *eax |= *ebx; *edx >>= 16; tmp ^= etab_1(ax[1]); tmp ^= etab_3(dx[1]); *ebx = etab_2(dx[0]); // ebx *ebx ^= etab_1(cx[1]); *ebx ^= etab_0(ax[0]); *eax >>= 16; *ecx >>= 16; tmp ^= etab_2(cx[0]); edi ^= etab_3(cx[1]); esi ^= etab_2(ax[0]); *ebx ^= etab_3(ax[1]); /* end rnd_fun */ *eax = tmp; *ecx = esi; *edx = edi; *eax ^= key1; *ebx ^= key2; } __device__ void enc_last_round_c (uint_32t* eax, uint_32t* ebx, uint_32t* ecx, uint_32t* edx, uint_32t* ebp) { uint_32t key1, key2, tmp, esi, edi; uint_8t* ax = (uint_8t*)eax, *bx = (uint_8t*)ebx, *cx = (uint_8t*)ecx, *dx = (uint_8t*)edx; // ebp - key pointer, eax, ebx, ecx, edx - each contain 4 bytes (int) of outBlock, esi - 3rd int in key, edi - 4th int in key key1 = ebp[0]; key2 = ebp[1]; esi = ebp[2]; edi = ebp[3]; /* begin rnd_fun */ *ebx = (*ebx << 16) | (*ebx >> 16); // al - 0, ah - 1, bl - 4, bh - 5, cl - 8, ch - 9, dl - 12, dh - 13 esi ^= eltab(0, cx[0]); esi ^= eltab(1, dx[1]); esi ^= eltab(3, bx[1]); edi ^= eltab(0, dx[0]); edi ^= eltab(1, ax[1]); edi ^= eltab(2, bx[0]); tmp = eltab(0, ax[0]); // ebp (restored later) *ebx >>= 16; *eax &= 0xffff0000; *eax |= *ebx; *edx >>= 16; tmp ^= eltab(1, ax[1]); tmp ^= eltab(3, dx[1]); *ebx = eltab(2, dx[0]); // ebx *ebx ^= eltab(1, 
cx[1]); *ebx ^= eltab(0, ax[0]); *eax >>= 16; *ecx >>= 16; tmp ^= eltab(2, cx[0]); edi ^= eltab(3, cx[1]); esi ^= eltab(2, ax[0]); *ebx ^= eltab(3, ax[1]); /* end rnd_fun */ *eax = tmp; *ecx = esi; *edx = edi; *eax ^= key1; *ebx ^= key2; } __device__ AES_RETURN aes_encrypt_c(const u8 *inBlock, u8 *outBlock, void *ks) { uint_32t* kp = (uint_32t *)ks; // key pointer uint_32t inf = *(kp + KS_LENGTH); uint_32t* o = (uint_32t*)outBlock; unsigned int i; // xor 4 bytes in inBlock with 4 bytes in ks, 4 times, store result in outBlock o[0] = ((uint_32t*)inBlock)[0] ^ kp[0]; o[1] = ((uint_32t*)inBlock)[1] ^ kp[1]; o[2] = ((uint_32t*)inBlock)[2] ^ kp[2]; o[3] = ((uint_32t*)inBlock)[3] ^ kp[3]; if (inf == 10 * 16 || inf == 12 * 16 || inf == 14 * 16) { for (i = 0; i < inf >> 4; i++) { kp += 4; if (i < (inf >> 4) - 1) enc_round_c(&o[0], &o[1], &o[2], &o[3], kp); else enc_last_round_c(&o[0], &o[1], &o[2], &o[3], kp); } } else { // error return EXIT_FAILURE; } return EXIT_SUCCESS; } #define g0_fun(x) ( mk_tab[0 + 4*extract_byte(x,0)] ^ mk_tab[1 + 4*extract_byte(x,1)] \ ^ mk_tab[2 + 4*extract_byte(x,2)] ^ mk_tab[3 + 4*extract_byte(x,3)] ) #define g1_fun(x) ( mk_tab[0 + 4*extract_byte(x,3)] ^ mk_tab[1 + 4*extract_byte(x,0)] \ ^ mk_tab[2 + 4*extract_byte(x,1)] ^ mk_tab[3 + 4*extract_byte(x,2)] ) #define f_rnd(i) \ t1 = g1_fun(blk[1]); t0 = g0_fun(blk[0]); \ blk[2] = rotr(blk[2] ^ (t0 + t1 + l_key[4 * (i) + 8]), 1); \ blk[3] = rotl(blk[3], 1) ^ (t0 + 2 * t1 + l_key[4 * (i) + 9]); \ t1 = g1_fun(blk[3]); t0 = g0_fun(blk[2]); \ blk[0] = rotr(blk[0] ^ (t0 + t1 + l_key[4 * (i) + 10]), 1); \ blk[1] = rotl(blk[1], 1) ^ (t0 + 2 * t1 + l_key[4 * (i) + 11]) __device__ void twofish_encrypt(TwofishInstance *instance, const u4byte in_blk[4], u4byte out_blk[]) { u4byte t0, t1, blk[4]; u4byte *l_key = instance->l_key; u4byte *mk_tab = instance->mk_tab; blk[0] = in_blk[0] ^ l_key[0]; blk[1] = in_blk[1] ^ l_key[1]; blk[2] = in_blk[2] ^ l_key[2]; blk[3] = in_blk[3] ^ l_key[3]; f_rnd(0); f_rnd(1); f_rnd(2); f_rnd(3); f_rnd(4); f_rnd(5); f_rnd(6); f_rnd(7); out_blk[0] = blk[2] ^ l_key[4]; out_blk[1] = blk[3] ^ l_key[5]; out_blk[2] = blk[0] ^ l_key[6]; out_blk[3] = blk[1] ^ l_key[7]; } __device__ void KXf (const u32 *k, unsigned int r, u32 *a, u32 *b, u32 *c, u32 *d) { *a ^= k[r]; *b ^= k[r + 1]; *c ^= k[r + 2]; *d ^= k[r + 3]; } __device__ void LTf (uint32 *a, uint32 *b, uint32 *c, uint32 *d) { *a = rotlFixed(*a, 13); *c = rotlFixed(*c, 3); *d = rotlFixed(*d ^ *c ^ (*a << 3), 7); *b = rotlFixed(*b ^ *a ^ *c, 1); *a = rotlFixed(*a ^ *b ^ *d, 5); *c = rotlFixed(*c ^ *d ^ (*b << 7), 22); } __device__ void serpent_encrypt(const u8 *inBlock, u8 *outBlock, u8 *ks) { u32 a, b, c, d, e; unsigned int i=1; const u32 *k = (u32 *)ks + 8; u32 *in = (u32 *) inBlock; u32 *out = (u32 *) outBlock; a = in[0]; b = in[1]; c = in[2]; d = in[3]; do { KXf (k, 0, &a, &b, &c, &d); S0f (&a, &b, &c, &d, &e); LTf (&b, &e, &c, &a); KXf (k, 4, &b, &e, &c, &a); S1f (&b, &e, &c, &a, &d); LTf (&c, &b, &a, &e); KXf (k, 8, &c, &b, &a, &e); S2f (&c, &b, &a, &e, &d); LTf (&a, &e, &b, &d); KXf (k, 12, &a, &e, &b, &d); S3f (&a, &e, &b, &d, &c); LTf (&e, &b, &d, &c); KXf (k, 16, &e, &b, &d, &c); S4f (&e, &b, &d, &c, &a); LTf (&b, &a, &e, &c); KXf (k, 20, &b, &a, &e, &c); S5f (&b, &a, &e, &c, &d); LTf (&a, &c, &b, &e); KXf (k, 24, &a, &c, &b, &e); S6f (&a, &c, &b, &e, &d); LTf (&a, &c, &d, &b); KXf (k, 28, &a, &c, &d, &b); S7f (&a, &c, &d, &b, &e); if (i == 4) break; ++i; c = b; b = e; e = d; d = a; a = e; k += 32; LTf (&a,&b,&c,&d); } while (1); KXf (k, 32, &d, &e, 
&b, &a); out[0] = d; out[1] = e; out[2] = b; out[3] = a; } __device__ void BlowfishEncryptLE (unsigned char *inBlock, unsigned char *outBlock, BF_KEY *key, int encrypt) { word32 left = ((word32 *) inBlock)[0]; word32 right = ((word32 *) inBlock)[1]; const word32 *const s = key->sbox; const word32 * p = encrypt ? key->pbox : key->pbox_dec; unsigned i; left ^= p[0]; for (i=0; i<ROUNDS/2; i++) { right ^= (((s[GETBYTE(left,3)] + s[256+GETBYTE(left,2)]) ^ s[2*256+GETBYTE(left,1)]) + s[3*256+GETBYTE(left,0)]) ^ p[2*i+1]; left ^= (((s[GETBYTE(right,3)] + s[256+GETBYTE(right,2)]) ^ s[2*256+GETBYTE(right,1)]) + s[3*256+GETBYTE(right,0)]) ^ p[2*i+2]; } right ^= p[ROUNDS+1]; ((word32 *) outBlock)[0] = right; ((word32 *) outBlock)[1] = left; } __device__ word32 rotlVariable (word32 x, unsigned int y) { return (word32)((x<<y) | (x>>(sizeof(word32)*8-y))); } /* Macros to access 8-bit bytes out of a 32-bit word */ #define U8a(x) GETBYTE(x,3) #define U8b(x) GETBYTE(x,2) #define U8c(x) GETBYTE(x,1) #define U8d(x) GETBYTE(x,0) /* CAST uses three different round functions */ #define f1(l, r, km, kr) \ t = rotlVariable(km + r, kr); \ l ^= ((S[0][U8a(t)] ^ S[1][U8b(t)]) - \ S[2][U8c(t)]) + S[3][U8d(t)]; #undef f2 #define f2(l, r, km, kr) \ t = rotlVariable(km ^ r, kr); \ l ^= ((S[0][U8a(t)] - S[1][U8b(t)]) + \ S[2][U8c(t)]) ^ S[3][U8d(t)]; #undef f3 #define f3(l, r, km, kr) \ t = rotlVariable(km - r, kr); \ l ^= ((S[0][U8a(t)] + S[1][U8b(t)]) ^ \ S[2][U8c(t)]) - S[3][U8d(t)]; #define F1(l, r, i, j) f1(l, r, K[i], K[i+j]) #define F2(l, r, i, j) f2(l, r, K[i], K[i+j]) #define F3(l, r, i, j) f3(l, r, K[i], K[i+j]) __device__ void Cast5Encrypt (const byte *inBlock, byte *outBlock, CAST_KEY *key) { word32 l = BE32 (((word32 *)inBlock)[0]); word32 r = BE32 (((word32 *)inBlock)[1]); word32 *K = key->K; word32 t; /* Do the work */ F1(l, r, 0, 16); F2(r, l, 1, 16); F3(l, r, 2, 16); F1(r, l, 3, 16); F2(l, r, 4, 16); F3(r, l, 5, 16); F1(l, r, 6, 16); F2(r, l, 7, 16); F3(l, r, 8, 16); F1(r, l, 9, 16); F2(l, r, 10, 16); F3(r, l, 11, 16); F1(l, r, 12, 16); F2(r, l, 13, 16); F3(l, r, 14, 16); F1(r, l, 15, 16); /* Put l,r into outblock */ ((word32 *)outBlock)[0] = BE32 (r); ((word32 *)outBlock)[1] = BE32 (l); } __device__ void RawProcessBlock(word32 *l_, word32 *r_, const word32 *k) { word32 l = *l_, r = *r_; const word32 *kptr=k; unsigned i; for (i=0; i<8; i++) { word32 work = rotrFixed(r, 4U) ^ kptr[4*i+0]; l ^= Spbox[6][(work) & 0x3f] ^ Spbox[4][(work >> 8) & 0x3f] ^ Spbox[2][(work >> 16) & 0x3f] ^ Spbox[0][(work >> 24) & 0x3f]; work = r ^ kptr[4*i+1]; l ^= Spbox[7][(work) & 0x3f] ^ Spbox[5][(work >> 8) & 0x3f] ^ Spbox[3][(work >> 16) & 0x3f] ^ Spbox[1][(work >> 24) & 0x3f]; work = rotrFixed(l, 4U) ^ kptr[4*i+2]; r ^= Spbox[6][(work) & 0x3f] ^ Spbox[4][(work >> 8) & 0x3f] ^ Spbox[2][(work >> 16) & 0x3f] ^ Spbox[0][(work >> 24) & 0x3f]; work = l ^ kptr[4*i+3]; r ^= Spbox[7][(work) & 0x3f] ^ Spbox[5][(work >> 8) & 0x3f] ^ Spbox[3][(work >> 16) & 0x3f] ^ Spbox[1][(work >> 24) & 0x3f]; } *l_ = l; *r_ = r; } __device__ void TripleDesEncrypt (byte *inBlock, byte *outBlock, TDES_KEY *key, int encrypt) { word32 left = BE32 (((word32 *)inBlock)[0]); word32 right = BE32 (((word32 *)inBlock)[1]); word32 work; right = rotlFixed(right, 4U); work = (left ^ right) & 0xf0f0f0f0; left ^= work; right = rotrFixed(right^work, 20U); work = (left ^ right) & 0xffff0000; left ^= work; right = rotrFixed(right^work, 18U); work = (left ^ right) & 0x33333333; left ^= work; right = rotrFixed(right^work, 6U); work = (left ^ right) & 0x00ff00ff; left ^= 
work; right = rotlFixed(right^work, 9U); work = (left ^ right) & 0xaaaaaaaa; left = rotlFixed(left^work, 1U); right ^= work; RawProcessBlock (&left, &right, encrypt ? key->k1 : key->k1d); RawProcessBlock (&right, &left, !encrypt ? key->k2 : key->k2d); RawProcessBlock (&left, &right, encrypt ? key->k3 : key->k3d); right = rotrFixed(right, 1U); work = (left ^ right) & 0xaaaaaaaa; right ^= work; left = rotrFixed(left^work, 9U); work = (left ^ right) & 0x00ff00ff; right ^= work; left = rotlFixed(left^work, 6U); work = (left ^ right) & 0x33333333; right ^= work; left = rotlFixed(left^work, 18U); work = (left ^ right) & 0xffff0000; right ^= work; left = rotlFixed(left^work, 20U); work = (left ^ right) & 0xf0f0f0f0; right ^= work; left = rotrFixed(left^work, 4U); ((word32 *)outBlock)[0] = BE32 (right); ((word32 *)outBlock)[1] = BE32 (left); } __device__ void EncipherBlock(int cipher, void *data, void *ks) { switch (cipher) { case AES: aes_encrypt_c ((u8*)data, (u8*)data, ks); break; case TWOFISH: twofish_encrypt ((TwofishInstance*)ks, (u4byte*)data, (u4byte*)data); break; case SERPENT: serpent_encrypt ((u8*)data, (u8*)data, (u8*)ks); break; case BLOWFISH: BlowfishEncryptLE ((unsigned char*)data, (unsigned char*)data, (BF_KEY*)ks, 1); break; // Deprecated/legacy case CAST: Cast5Encrypt ((byte*)data, (byte*)data, (CAST_KEY*)ks); break; // Deprecated/legacy case TRIPLEDES: TripleDesEncrypt ((byte*)data, (byte*)data, (TDES_KEY*)ks, 1); break; // Deprecated/legacy default: TC_THROW_FATAL_EXCEPTION; // Unknown/wrong ID } } __device__ void ILTf (uint32 *a, uint32 *b, uint32 *c, uint32 *d) { *c = rotrFixed(*c, 22); *a = rotrFixed(*a, 5); *c ^= *d ^ (*b << 7); *a ^= *b ^ *d; *b = rotrFixed(*b, 1); *d = rotrFixed(*d, 7) ^ *c ^ (*a << 3); *b ^= *a ^ *c; *c = rotrFixed(*c, 3); *a = rotrFixed(*a, 13); } // order of output from S-box functions #define beforeS0(f) f(0,a,b,c,d,e) #define afterS0(f) f(1,b,e,c,a,d) #define afterS1(f) f(2,c,b,a,e,d) #define afterS2(f) f(3,a,e,b,d,c) #define afterS3(f) f(4,e,b,d,c,a) #define afterS4(f) f(5,b,a,e,c,d) #define afterS5(f) f(6,a,c,b,e,d) #define afterS6(f) f(7,a,c,d,b,e) #define afterS7(f) f(8,d,e,b,a,c) // order of output from inverse S-box functions #define beforeI7(f) f(8,a,b,c,d,e) #define afterI7(f) f(7,d,a,b,e,c) #define afterI6(f) f(6,a,b,c,e,d) #define afterI5(f) f(5,b,d,e,c,a) #define afterI4(f) f(4,b,c,e,a,d) #define afterI3(f) f(3,a,b,e,c,d) #define afterI2(f) f(2,b,d,e,c,a) #define afterI1(f) f(1,a,b,c,e,d) #define afterI0(f) f(0,a,d,b,e,c) // inverse linear transformation #define ILT(i,a,b,c,d,e) {\ c = rotrFixed(c, 22); \ a = rotrFixed(a, 5); \ c ^= d ^ (b << 7); \ a ^= b ^ d; \ b = rotrFixed(b, 1); \ d = rotrFixed(d, 7) ^ c ^ (a << 3); \ b ^= a ^ c; \ c = rotrFixed(c, 3); \ a = rotrFixed(a, 13);} #define I0(i, r0, r1, r2, r3, r4) \ { \ r2 = ~r2; \ r4 = r1; \ r1 |= r0; \ r4 = ~r4; \ r1 ^= r2; \ r2 |= r4; \ r1 ^= r3; \ r0 ^= r4; \ r2 ^= r0; \ r0 &= r3; \ r4 ^= r0; \ r0 |= r1; \ r0 ^= r2; \ r3 ^= r4; \ r2 ^= r1; \ r3 ^= r0; \ r3 ^= r1; \ r2 &= r3; \ r4 ^= r2; \ } #define I1(i, r0, r1, r2, r3, r4) \ { \ r4 = r1; \ r1 ^= r3; \ r3 &= r1; \ r4 ^= r2; \ r3 ^= r0; \ r0 |= r1; \ r2 ^= r3; \ r0 ^= r4; \ r0 |= r2; \ r1 ^= r3; \ r0 ^= r1; \ r1 |= r3; \ r1 ^= r0; \ r4 = ~r4; \ r4 ^= r1; \ r1 |= r0; \ r1 ^= r0; \ r1 |= r4; \ r3 ^= r1; \ } #define I2(i, r0, r1, r2, r3, r4) \ { \ r2 ^= r3; \ r3 ^= r0; \ r4 = r3; \ r3 &= r2; \ r3 ^= r1; \ r1 |= r2; \ r1 ^= r4; \ r4 &= r3; \ r2 ^= r3; \ r4 &= r0; \ r4 ^= r2; \ r2 &= r1; \ r2 |= r0; \ r3 = ~r3; \ r2 ^= r3; \ r0 ^= r3; \ r0 
&= r1; \ r3 ^= r4; \ r3 ^= r0; \ } #define I3(i, r0, r1, r2, r3, r4) \ { \ r4 = r2; \ r2 ^= r1; \ r1 &= r2; \ r1 ^= r0; \ r0 &= r4; \ r4 ^= r3; \ r3 |= r1; \ r3 ^= r2; \ r0 ^= r4; \ r2 ^= r0; \ r0 |= r3; \ r0 ^= r1; \ r4 ^= r2; \ r2 &= r3; \ r1 |= r3; \ r1 ^= r2; \ r4 ^= r0; \ r2 ^= r4; \ } #define I4(i, r0, r1, r2, r3, r4) \ { \ r4 = r2; \ r2 &= r3; \ r2 ^= r1; \ r1 |= r3; \ r1 &= r0; \ r4 ^= r2; \ r4 ^= r1; \ r1 &= r2; \ r0 = ~r0; \ r3 ^= r4; \ r1 ^= r3; \ r3 &= r0; \ r3 ^= r2; \ r0 ^= r1; \ r2 &= r0; \ r3 ^= r0; \ r2 ^= r4; \ r2 |= r3; \ r3 ^= r0; \ r2 ^= r1; \ } #define I5(i, r0, r1, r2, r3, r4) \ { \ r1 = ~r1; \ r4 = r3; \ r2 ^= r1; \ r3 |= r0; \ r3 ^= r2; \ r2 |= r1; \ r2 &= r0; \ r4 ^= r3; \ r2 ^= r4; \ r4 |= r0; \ r4 ^= r1; \ r1 &= r2; \ r1 ^= r3; \ r4 ^= r2; \ r3 &= r4; \ r4 ^= r1; \ r3 ^= r0; \ r3 ^= r4; \ r4 = ~r4; \ } #define I6(i, r0, r1, r2, r3, r4) \ { \ r0 ^= r2; \ r4 = r2; \ r2 &= r0; \ r4 ^= r3; \ r2 = ~r2; \ r3 ^= r1; \ r2 ^= r3; \ r4 |= r0; \ r0 ^= r2; \ r3 ^= r4; \ r4 ^= r1; \ r1 &= r3; \ r1 ^= r0; \ r0 ^= r3; \ r0 |= r2; \ r3 ^= r1; \ r4 ^= r0; \ } #define I7(i, r0, r1, r2, r3, r4) \ { \ r4 = r2; \ r2 ^= r0; \ r0 &= r3; \ r2 = ~r2; \ r4 |= r3; \ r3 ^= r1; \ r1 |= r0; \ r0 ^= r2; \ r2 &= r4; \ r1 ^= r2; \ r2 ^= r0; \ r0 |= r2; \ r3 &= r4; \ r0 ^= r3; \ r4 ^= r1; \ r3 ^= r4; \ r4 |= r0; \ r3 ^= r2; \ r4 ^= r2; \ } __device__ void serpent_decrypt(const u8 *inBlock, u8 *outBlock, u8 *ks) { u32 a, b, c, d, e; const u32 *k = (u32 *)ks + 104; unsigned int i=4; u32 *in = (u32 *) inBlock; u32 *out = (u32 *) outBlock; a = in[0]; b = in[1]; c = in[2]; d = in[3]; KXf (k, 32, &a, &b, &c, &d); goto start; do { c = b; b = d; d = e; k -= 32; beforeI7(ILT); start: beforeI7(I7); KXf (k, 28, &d, &a, &b, &e); ILTf (&d, &a, &b, &e); afterI7(I6); KXf (k, 24, &a, &b, &c, &e); ILTf (&a, &b, &c, &e); afterI6(I5); KXf (k, 20, &b, &d, &e, &c); ILTf (&b, &d, &e, &c); afterI5(I4); KXf (k, 16, &b, &c, &e, &a); ILTf (&b, &c, &e, &a); afterI4(I3); KXf (k, 12, &a, &b, &e, &c); ILTf (&a, &b, &e, &c); afterI3(I2); KXf (k, 8, &b, &d, &e, &c); ILTf (&b, &d, &e, &c); afterI2(I1); KXf (k, 4, &a, &b, &c, &e); ILTf (&a, &b, &c, &e); afterI1(I0); KXf (k, 0, &a, &d, &b, &e); } while (--i != 0); out[0] = a; out[1] = d; out[2] = b; out[3] = e; } #define i_rnd(i) \ t1 = g1_fun(blk[1]); t0 = g0_fun(blk[0]); \ blk[2] = rotl(blk[2], 1) ^ (t0 + t1 + l_key[4 * (i) + 10]); \ blk[3] = rotr(blk[3] ^ (t0 + 2 * t1 + l_key[4 * (i) + 11]), 1); \ t1 = g1_fun(blk[3]); t0 = g0_fun(blk[2]); \ blk[0] = rotl(blk[0], 1) ^ (t0 + t1 + l_key[4 * (i) + 8]); \ blk[1] = rotr(blk[1] ^ (t0 + 2 * t1 + l_key[4 * (i) + 9]), 1) __device__ void twofish_decrypt(TwofishInstance *instance, const u4byte in_blk[4], u4byte out_blk[4]) { u4byte t0, t1, blk[4]; u4byte *l_key = instance->l_key; u4byte *mk_tab = instance->mk_tab; blk[0] = in_blk[0] ^ l_key[4]; blk[1] = in_blk[1] ^ l_key[5]; blk[2] = in_blk[2] ^ l_key[6]; blk[3] = in_blk[3] ^ l_key[7]; i_rnd(7); i_rnd(6); i_rnd(5); i_rnd(4); i_rnd(3); i_rnd(2); i_rnd(1); i_rnd(0); out_blk[0] = blk[2] ^ l_key[0]; out_blk[1] = blk[3] ^ l_key[1]; out_blk[2] = blk[0] ^ l_key[2]; out_blk[3] = blk[1] ^ l_key[3]; } #define AES_REV_DKS #define dtab_0(x) t_in[0][x] #define dtab_1(x) t_in[1][x] #define dtab_2(x) t_in[2][x] #define dtab_3(x) t_in[3][x] #define dltab_0(x) t_il[0][x] #define dltab_1(x) t_il[1][x] #define dltab_2(x) t_il[2][x] #define dltab_3(x) t_il[3][x] #define dltab(n, x) dltab_##n(x) __device__ void dec_round_c (uint_32t* eax, uint_32t* ebx, uint_32t* ecx, uint_32t* edx, uint_32t* ebp) { uint_32t 
key1, key2, tmp, esi, edi; uint_8t* ax = (uint_8t*)eax, *bx = (uint_8t*)ebx, *cx = (uint_8t*)ecx, *dx = (uint_8t*)edx; // ebp - key pointer, eax, ebx, ecx, edx - each contain 4 bytes (int) of outBlock, esi - 3rd int in key, edi - 4th int in key key1 = ebp[0]; key2 = ebp[1]; esi = ebp[2]; edi = ebp[3]; /* begin irn_fun */ *eax = (*eax << 16) | (*eax >> 16); // al - 0, ah - 1, bl - 4, bh - 5, cl - 8, ch - 9, dl - 12, dh - 13 esi ^= dtab_0(cx[0]); esi ^= dtab_1(bx[1]); esi ^= dtab_2(ax[0]); edi ^= dtab_0(dx[0]); edi ^= dtab_1(cx[1]); edi ^= dtab_3(ax[1]); tmp = dtab_0(bx[0]); // ebp (restored later) *eax >>= 16; *ebx &= 0xffff0000; *ebx |= *eax; *ecx >>= 16; tmp ^= dtab_1(bx[1]); tmp ^= dtab_3(cx[1]); *eax = dtab_2(cx[0]); // eax *eax ^= dtab_0(bx[0]); *eax ^= dtab_1(dx[1]); *ebx >>= 16; *edx >>= 16; esi ^= dtab_3(dx[1]); tmp ^= dtab_2(dx[0]); *eax ^= dtab_3(bx[1]); edi ^= dtab_2(bx[0]); /* end irn_fun */ *ebx = tmp; *ecx = esi; *edx = edi; *eax ^= key1; *ebx ^= key2; } __device__ void dec_last_round_c (uint_32t* eax, uint_32t* ebx, uint_32t* ecx, uint_32t* edx, uint_32t* ebp) { uint_32t key1, key2, tmp, esi, edi; uint_8t* ax = (uint_8t*)eax, *bx = (uint_8t*)ebx, *cx = (uint_8t*)ecx, *dx = (uint_8t*)edx; // ebp - key pointer, eax, ebx, ecx, edx - each contain 4 bytes (int) of outBlock, esi - 3rd int in key, edi - 4th int in key key1 = ebp[0]; key2 = ebp[1]; esi = ebp[2]; edi = ebp[3]; /* begin irn_fun */ *eax = (*eax << 16) | (*eax >> 16); // al - 0, ah - 1, bl - 4, bh - 5, cl - 8, ch - 9, dl - 12, dh - 13 esi ^= dltab(0, cx[0]); esi ^= dltab(1, bx[1]); esi ^= dltab(2, ax[0]); edi ^= dltab(0, dx[0]); edi ^= dltab(1, cx[1]); edi ^= dltab(3, ax[1]); tmp = dltab(0, bx[0]); // ebp (restored later) *eax >>= 16; *ebx &= 0xffff0000; *ebx |= *eax; *ecx >>= 16; tmp ^= dltab(1, bx[1]); tmp ^= dltab(3, cx[1]); *eax = dltab(2, cx[0]); // eax *eax ^= dltab(0, bx[0]); *eax ^= dltab(1, dx[1]); *ebx >>= 16; *edx >>= 16; esi ^= dltab(3, dx[1]); tmp ^= dltab(2, dx[0]); *eax ^= dltab(3, bx[1]); edi ^= dltab(2, bx[0]); /* end irn_fun */ *ebx = tmp; *ecx = esi; *edx = edi; *eax ^= key1; *ebx ^= key2; } __device__ AES_RETURN aes_decrypt_c(const u8 *inBlock, u8 *outBlock, void *ks) { uint_32t* kp = (uint_32t *)ks; // key pointer uint_32t inf = *(kp + KS_LENGTH); uint_32t* o = (uint_32t*)outBlock; unsigned int i; #ifndef AES_REV_DKS kp += inf >> 2; #endif // xor 4 bytes in inBlock with 4 bytes in ks, 4 times, store result in outBlock o[0] = ((uint_32t*)inBlock)[0] ^ kp[0]; o[1] = ((uint_32t*)inBlock)[1] ^ kp[1]; o[2] = ((uint_32t*)inBlock)[2] ^ kp[2]; o[3] = ((uint_32t*)inBlock)[3] ^ kp[3]; if (inf == 10 * 16 || inf == 12 * 16 || inf == 14 * 16) { for (i = 0; i < inf >> 4; i++) { #ifdef AES_REV_DKS kp += 4; #else kp -= 4; #endif if (i < (inf >> 4) - 1) dec_round_c(&o[0], &o[1], &o[2], &o[3], kp); else dec_last_round_c(&o[0], &o[1], &o[2], &o[3], kp); } } else { // error return EXIT_FAILURE; } return EXIT_SUCCESS; } __device__ void Cast5Decrypt (const byte *inBlock, byte *outBlock, CAST_KEY *key) { word32 r = BE32 (((word32 *)inBlock)[0]); word32 l = BE32 (((word32 *)inBlock)[1]); word32 *K = key->K; word32 t; /* Only do full 16 rounds if key length > 80 bits */ F1(r, l, 15, 16); F3(l, r, 14, 16); F2(r, l, 13, 16); F1(l, r, 12, 16); F3(r, l, 11, 16); F2(l, r, 10, 16); F1(r, l, 9, 16); F3(l, r, 8, 16); F2(r, l, 7, 16); F1(l, r, 6, 16); F3(r, l, 5, 16); F2(l, r, 4, 16); F1(r, l, 3, 16); F3(l, r, 2, 16); F2(r, l, 1, 16); F1(l, r, 0, 16); /* Put l,r into outblock */ ((word32 *)outBlock)[0] = BE32 (l); ((word32 
*)outBlock)[1] = BE32 (r); /* Wipe clean */ t = l = r = 0; } __device__ void DecipherBlock(int cipher, void *data, void *ks) { switch (cipher) { case SERPENT: serpent_decrypt ((u8*)data, (u8*)data, (u8*)ks); break; case TWOFISH: twofish_decrypt ((TwofishInstance*)ks, (u4byte*)data, (u4byte*)data); break; case AES: { aes_decrypt_c ((u8*)data, (u8*)data, (void *) ((char *) ks + sizeof(aes_encrypt_ctx))); break; } case BLOWFISH: BlowfishEncryptLE ((unsigned char*)data, (unsigned char*)data, (BF_KEY*)ks, 0); break; // Deprecated/legacy case CAST: Cast5Decrypt ((byte*)data, (byte*)data, (CAST_KEY*)ks); break; // Deprecated/legacy case TRIPLEDES: TripleDesEncrypt ((byte*)data, (byte*)data, (TDES_KEY*)ks, 0); break; // Deprecated/legacy default: TC_THROW_FATAL_EXCEPTION; // Unknown/wrong ID } } __device__ void DecryptBufferXTS (u8 *buffer, TC_LARGEST_COMPILER_UINT length, const UINT64_STRUCT *startDataUnitNo, unsigned int startCipherBlockNo, u8 *ks, u8 *ks2, int cipher) { u8 finalCarry; ALIGN(32) u8 whiteningValue [BYTES_PER_XTS_BLOCK]; ALIGN(32) u8 byteBufUnitNo [BYTES_PER_XTS_BLOCK]; u64 *whiteningValuePtr64 = (u64 *) whiteningValue; u64 *bufPtr = (u64 *) buffer; unsigned int startBlock = startCipherBlockNo, endBlock, block; TC_LARGEST_COMPILER_UINT blockCount, dataUnitNo; // Convert the 64-bit data unit number into a little-endian 16-byte array. // Note that as we are converting a 64-bit number into a 16-byte array we can always zero the last 8 bytes. dataUnitNo = startDataUnitNo->Value; *((u64 *) byteBufUnitNo) = dataUnitNo; *((u64 *) byteBufUnitNo + 1) = 0; if (length % BYTES_PER_XTS_BLOCK) TC_THROW_FATAL_EXCEPTION; blockCount = length / BYTES_PER_XTS_BLOCK; // Process all blocks in the buffer while (blockCount > 0) { if (blockCount < BLOCKS_PER_XTS_DATA_UNIT) endBlock = startBlock + (unsigned int) blockCount; else endBlock = BLOCKS_PER_XTS_DATA_UNIT; whiteningValuePtr64 = (u64 *) whiteningValue; // Encrypt the data unit number using the secondary key (in order to generate the first // whitening value for this data unit) *whiteningValuePtr64 = *((u64 *) byteBufUnitNo); *(whiteningValuePtr64 + 1) = 0; EncipherBlock (cipher, whiteningValue, ks2); // Generate (and apply) subsequent whitening values for blocks in this data unit and // decrypt all relevant blocks in this data unit for (block = 0; block < endBlock; block++) { if (block >= startBlock) { // Post-whitening *bufPtr++ ^= *whiteningValuePtr64++; *bufPtr-- ^= *whiteningValuePtr64--; // Actual decryption DecipherBlock (cipher, bufPtr, ks); // Pre-whitening *bufPtr++ ^= *whiteningValuePtr64++; *bufPtr++ ^= *whiteningValuePtr64; } else whiteningValuePtr64++; // Derive the next whitening value finalCarry = (*whiteningValuePtr64 & 0x8000000000000000) ? 
135 : 0; *whiteningValuePtr64-- <<= 1; if (*whiteningValuePtr64 & 0x8000000000000000) *(whiteningValuePtr64 + 1) |= 1; *whiteningValuePtr64 <<= 1; whiteningValue[0] ^= finalCarry; } blockCount -= endBlock - startBlock; startBlock = 0; dataUnitNo++; *((u64 *) byteBufUnitNo) = dataUnitNo; } FAST_ERASE64 (whiteningValue, sizeof (whiteningValue)); } __device__ void Xor128 (u64 *a, u64 *b) { *a++ ^= *b++; *a ^= *b; } __device__ void Xor64 (u64 *a, u64 *b) { *a ^= *b; } #define lp32(x) ((mode(32t)*)(x)) __device__ in_line void move_block_aligned( void *p, const void *q) { lp32(p)[0] = lp32(q)[0], lp32(p)[1] = lp32(q)[1], lp32(p)[2] = lp32(q)[2], lp32(p)[3] = lp32(q)[3]; } __device__ in_line void move_block_aligned64( void *p, const void *q) { lp32(p)[0] = lp32(q)[0], lp32(p)[1] = lp32(q)[1]; } __device__ in_line void xor_block_aligned( void *p, const void *q) { lp32(p)[0] ^= lp32(q)[0], lp32(p)[1] ^= lp32(q)[1], lp32(p)[2] ^= lp32(q)[2], lp32(p)[3] ^= lp32(q)[3]; } __device__ in_line void xor_block_aligned64( void *p, const void *q) { lp32(p)[0] ^= lp32(q)[0], lp32(p)[1] ^= lp32(q)[1]; } #define xor_8kt64(i) \ xor_block_aligned(r, ctx->gf_t128[i + i][a[i] & 15]); \ xor_block_aligned(r, ctx->gf_t128[i + i + 1][a[i] >> 4]) /* Multiply a 128-bit number by a 64-bit number in the finite field GF(2^128) */ __device__ void Gf128MulBy64Tab (u8 a[8], u8 p[16], GfCtx *ctx) { ALIGN(32) u32 r[CBLK_LEN >> 2]; move_block_aligned(r, ctx->gf_t128[7*2][a[7] & 15]); xor_block_aligned(r, ctx->gf_t128[7*2+1][a[7] >> 4]); if (*(u16 *)a) { xor_8kt64(0); xor_8kt64(1); } if (a[2]) { xor_8kt64(2); } xor_8kt64(3); xor_8kt64(4); xor_8kt64(5); xor_8kt64(6); move_block_aligned(p, r); } #define xor_8k64(i) \ xor_block_aligned64(r, ctx->gf_t64[i + i][a[i] & 15]); \ xor_block_aligned64(r, ctx->gf_t64[i + i + 1][a[i] >> 4]) /* Multiply two 64-bit numbers in the finite field GF(2^64) */ __device__ void Gf64MulTab (unsigned char a[8], unsigned char p[8], GfCtx *ctx) { /* Deprecated/legacy */ ALIGN(32) u32 r[CBLK_LEN8 >> 2]; move_block_aligned64(r, ctx->gf_t64[7*2][a[7] & 15]); xor_block_aligned64(r, ctx->gf_t64[7*2+1][a[7] >> 4]); if (*(u16 *)a) { xor_8k64(0); xor_8k64(1); } if (a[2]) { xor_8k64(2); } xor_8k64(3); xor_8k64(4); xor_8k64(5); xor_8k64(6); move_block_aligned64(p, r); } __device__ void DecryptBufferLRW128 (byte *buffer, uint64 length, uint64 blockIndex, PCRYPTO_INFO cryptoInfo) { /* Deprecated/legacy */ int cipher = EAGetFirstCipher (cryptoInfo->ea); int cipherCount = EAGetCipherCount (cryptoInfo->ea); u8 *p = buffer; u8 *ks = cryptoInfo->ks; ALIGN(32) u8 i[8]; ALIGN(32) u8 t[16]; u64 b; u8 j; *(u64 *)i = BE64(blockIndex); if (length % 16) TC_THROW_FATAL_EXCEPTION; // Note that the maximum supported volume size is 8589934592 GB (i.e., 2^63 bytes). 
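// Per 16-byte block: derive the LRW tweak t = i * K2 in GF(2^128) from the precomputed
// tables, XOR it into the block, run the cipher (or the cascade in reverse order),
// XOR the same tweak in again, then advance the big-endian 64-bit block index i.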
for (b = 0; b < length >> 4; b++) { Gf128MulBy64Tab (i, t, &cryptoInfo->gf_ctx); Xor128 ((u64 *)p, (u64 *)t); if (cipherCount > 1) { // Cipher cascade ks = cryptoInfo->ks + EAGetKeyScheduleSize (cryptoInfo->ea); for (cipher = EAGetLastCipher (cryptoInfo->ea); cipher != 0; cipher = EAGetPreviousCipher (cryptoInfo->ea, cipher)) { ks -= CipherGetKeyScheduleSize (cipher); DecipherBlock (cipher, p, ks); } } else { DecipherBlock (cipher, p, ks); } Xor128 ((u64 *)p, (u64 *)t); p += 16; if (i[7] != 0xff) { j = i[7]; *(u64 *)i &= ~((j & 0xffffffffffffffff) << 56); j++; *(u64 *)i |= (j & 0xffffffffffffffff) << 56; } else *(u64 *)i = BE64 ( BE64(*(u64 *)i) + 1 ); } FAST_ERASE64 (t, sizeof(t)); } __device__ void DecryptBufferLRW64 (byte *buffer, uint64 length, uint64 blockIndex, PCRYPTO_INFO cryptoInfo) { /* Deprecated/legacy */ int cipher = EAGetFirstCipher (cryptoInfo->ea); u8 *p = buffer; u8 *ks = cryptoInfo->ks; ALIGN(32) u8 i[8]; ALIGN(32) u8 t[8]; u64 b; u8 j; *(u64 *)i = BE64(blockIndex); if (length % 8) TC_THROW_FATAL_EXCEPTION; for (b = 0; b < length >> 3; b++) { Gf64MulTab (i, t, &cryptoInfo->gf_ctx); Xor64 ((u64 *)p, (u64 *)t); DecipherBlock (cipher, p, ks); Xor64 ((u64 *)p, (u64 *)t); p += 8; if (i[7] != 0xff) { j = i[7]; *(u64 *)i &= ~((j & 0xffffffffffffffff) << 56); j++; *(u64 *)i |= (j & 0xffffffffffffffff) << 56; } else *(u64 *)i = BE64 ( BE64(*(u64 *)i) + 1 ); } FAST_ERASE64 (t, sizeof(t)); } __device__ void DecryptBufferCBC (u32 *data, unsigned int len, u8 *ks, u32 *iv, u32 *whitening, int ea, int cipher) { /* IMPORTANT: This function has been deprecated (legacy) */ u32 bufIV[4]; u64 i; u32 ct[4]; int blockSize = CipherGetBlockSize (ea != 0 ? EAGetFirstCipher (ea) : cipher); if (len % blockSize) TC_THROW_FATAL_EXCEPTION; // IV bufIV[0] = iv[0]; bufIV[1] = iv[1]; if (blockSize == 16) { bufIV[2] = iv[2]; bufIV[3] = iv[3]; } // Decrypt each block for (i = 0; i < len/blockSize; i++) { // Dewhitening data[0] ^= whitening[0]; data[1] ^= whitening[1]; if (blockSize == 16) { data[2] ^= whitening[0]; data[3] ^= whitening[1]; } // CBC ct[0] = data[0]; ct[1] = data[1]; if (blockSize == 16) { ct[2] = data[2]; ct[3] = data[3]; } if (ea != 0) { // Outer-CBC ks += EAGetKeyScheduleSize (ea); for (cipher = EAGetLastCipher (ea); cipher != 0; cipher = EAGetPreviousCipher (ea, cipher)) { ks -= CipherGetKeyScheduleSize (cipher); DecipherBlock (cipher, data, ks); } } else { // CBC/inner-CBC DecipherBlock (cipher, data, ks); } // CBC data[0] ^= bufIV[0]; data[1] ^= bufIV[1]; bufIV[0] = ct[0]; bufIV[1] = ct[1]; if (blockSize == 16) { data[2] ^= bufIV[2]; data[3] ^= bufIV[3]; bufIV[2] = ct[2]; bufIV[3] = ct[3]; } data += blockSize / sizeof(*data); } } // DecryptBuffer // // buf: data to be decrypted; the start of the buffer is assumed to be aligned with the start of a data unit. // len: number of bytes to decrypt; must be divisible by the block size (for cascaded ciphers, divisible // by the largest block size used within the cascade) __device__ void DecryptBuffer (u8 *buf, TC_LARGEST_COMPILER_UINT len, PCRYPTO_INFO cryptoInfo) { switch (cryptoInfo->mode) { case XTS: { u8 *ks = cryptoInfo->ks + EAGetKeyScheduleSize (cryptoInfo->ea); u8 *ks2 = cryptoInfo->ks2 + EAGetKeyScheduleSize (cryptoInfo->ea); UINT64_STRUCT dataUnitNo; int cipher; // When encrypting/decrypting a buffer (typically a volume header) the sequential number // of the first XTS data unit in the buffer is always 0 and the start of the buffer is // always assumed to be aligned with the start of the data unit 0. 
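// For cascades, the ciphers are undone outermost-first: ks and ks2 start just past the
// end of the concatenated key schedules and are stepped back by each cipher's schedule
// size before that cipher's XTS pass over the whole buffer.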
dataUnitNo.LowPart = 0; dataUnitNo.HighPart = 0; for (cipher = EAGetLastCipher (cryptoInfo->ea); cipher != 0; cipher = EAGetPreviousCipher (cryptoInfo->ea, cipher)) { ks -= CipherGetKeyScheduleSize (cipher); ks2 -= CipherGetKeyScheduleSize (cipher); DecryptBufferXTS (buf, len, &dataUnitNo, 0, ks, ks2, cipher); } } break; case LRW: /* Deprecated/legacy */ switch (CipherGetBlockSize (EAGetFirstCipher (cryptoInfo->ea))) { case 8: DecryptBufferLRW64 (buf, (u64) len, 1, cryptoInfo); break; case 16: DecryptBufferLRW128 (buf, (u64) len, 1, cryptoInfo); break; default: TC_THROW_FATAL_EXCEPTION; } break; case CBC: case INNER_CBC: { /* Deprecated/legacy */ u8 *ks = cryptoInfo->ks + EAGetKeyScheduleSize (cryptoInfo->ea); int cipher; for (cipher = EAGetLastCipher (cryptoInfo->ea); cipher != 0; cipher = EAGetPreviousCipher (cryptoInfo->ea, cipher)) { ks -= CipherGetKeyScheduleSize (cipher); DecryptBufferCBC ((u32 *) buf, (unsigned int) len, ks, (u32 *) cryptoInfo->k2, (u32 *) &cryptoInfo->k2[8], 0, cipher); } } break; case OUTER_CBC: /* Deprecated/legacy */ DecryptBufferCBC ((u32 *) buf, (unsigned int) len, cryptoInfo->ks, (u32 *) cryptoInfo->k2, (u32 *) &cryptoInfo->k2[8], cryptoInfo->ea, 0); break; default: // Unknown/wrong ID TC_THROW_FATAL_EXCEPTION; } } #define keyInfo td->keyInfo #define cryptoInfo (&td->cryptoInfo) // --opencc-options -OPT:Olimit=180335 for optimization // dk is the expected input for the next phase // the kernel was split into 5 smaller kernels, because otherwise ptxas gives a memory allocation error // x passwords are processed at a time, going through each of the five kernels, and then the process is repeated // for the next x passwords until all passwords have been processed #define dk td->dk[RIPEMD160 - 1] // 9.4 seconds __global__ static void reduceKernel_ripemd160 (char* d_EncryptedHeader, Password* d_Input, PTHREAD_DATA d_Output) { const int tid = blockIdx.x * blockDim.x + threadIdx.x; PTHREAD_DATA td = &d_Output[tid]; crypto_loadkey (&keyInfo, (char*)d_Input[tid].Text, (int) d_Input[tid].Length); // PKCS5 is used to derive the primary header key(s) and secondary header key(s) (XTS mode) from the password memcpy (keyInfo.salt, d_EncryptedHeader + HEADER_SALT_OFFSET, PKCS5_SALT_SIZE); keyInfo.noIterations = get_pkcs5_iteration_count (RIPEMD160, 0); derive_key_ripemd160 (keyInfo.userKey, keyInfo.keyLength, keyInfo.salt, PKCS5_SALT_SIZE, keyInfo.noIterations, dk, GetMaxPkcs5OutSize(), td); } #undef dk #define dk td->dk[SHA512 - 1] // ~8 seconds __global__ static void reduceKernel_sha512 (char* d_EncryptedHeader, Password* d_Input, PTHREAD_DATA d_Output) { const int tid = blockIdx.x * blockDim.x + threadIdx.x; PTHREAD_DATA td = &d_Output[tid]; crypto_loadkey (&keyInfo, (char*)d_Input[tid].Text, (int) d_Input[tid].Length); // PKCS5 is used to derive the primary header key(s) and secondary header key(s) (XTS mode) from the password memcpy (keyInfo.salt, d_EncryptedHeader + HEADER_SALT_OFFSET, PKCS5_SALT_SIZE); keyInfo.noIterations = get_pkcs5_iteration_count (SHA512, 0); derive_key_sha512 (keyInfo.userKey, keyInfo.keyLength, keyInfo.salt, PKCS5_SALT_SIZE, keyInfo.noIterations, dk, GetMaxPkcs5OutSize()); #undef dk } #define dk td->dk[SHA1 - 1] // ~10 seconds (27 seconds so far) __global__ static void reduceKernel_sha1 (char* d_EncryptedHeader, Password* d_Input, PTHREAD_DATA d_Output) { const int tid = blockIdx.x * blockDim.x + threadIdx.x; PTHREAD_DATA td = &d_Output[tid]; crypto_loadkey (&keyInfo, (char*)d_Input[tid].Text, (int) d_Input[tid].Length); // PKCS5 is used to 
derive the primary header key(s) and secondary header key(s) (XTS mode) from the password memcpy (keyInfo.salt, d_EncryptedHeader + HEADER_SALT_OFFSET, PKCS5_SALT_SIZE); keyInfo.noIterations = get_pkcs5_iteration_count (SHA1, 0); derive_key_sha1 (keyInfo.userKey, keyInfo.keyLength, keyInfo.salt, PKCS5_SALT_SIZE, keyInfo.noIterations, dk, GetMaxPkcs5OutSize()); #undef dk } #define dk td->dk[WHIRLPOOL - 1] // slowest by far: ~31 seconds, total: ~58 seconds __global__ static void reduceKernel_whirlpool (char* d_EncryptedHeader, Password* d_Input, PTHREAD_DATA d_Output) { const int tid = blockIdx.x * blockDim.x + threadIdx.x; PTHREAD_DATA td = &d_Output[tid]; crypto_loadkey (&keyInfo, (char*)d_Input[tid].Text, (int) d_Input[tid].Length); // PKCS5 is used to derive the primary header key(s) and secondary header key(s) (XTS mode) from the password memcpy (keyInfo.salt, d_EncryptedHeader + HEADER_SALT_OFFSET, PKCS5_SALT_SIZE); keyInfo.noIterations = get_pkcs5_iteration_count (WHIRLPOOL, 0); derive_key_whirlpool (keyInfo.userKey, keyInfo.keyLength, keyInfo.salt, PKCS5_SALT_SIZE, keyInfo.noIterations, dk, GetMaxPkcs5OutSize(), td); #undef dk } #define dk td->dk[enqPkcs5Prf - 1] __global__ static void reduceKernel_final (char* d_EncryptedHeader, PTHREAD_DATA d_Input, PTHREAD_RESULT d_Output) { const int tid = blockIdx.x * blockDim.x + threadIdx.x; PTHREAD_DATA td = &d_Input[tid]; ALIGN(32) char header[TC_VOLUME_HEADER_EFFECTIVE_SIZE]; int enqPkcs5Prf; int primaryKeyOffset; ALIGN(32) u1byte q_tab[2][256]; ALIGN(32) u4byte m_tab[4][256]; u4byte qt_gen = 0, mt_gen = 0; crypto_open(cryptoInfo); // Test all available PKCS5 PRFs for (enqPkcs5Prf = FIRST_PRF_ID; enqPkcs5Prf <= LAST_PRF_ID; ++enqPkcs5Prf) { BOOL lrw64InitDone = FALSE; // Deprecated/legacy BOOL lrw128InitDone = FALSE; // Deprecated/legacy if (enqPkcs5Prf == SHA512/* || enqPkcs5Prf == WHIRLPOOL*/) continue; // Test all available modes of operation for (cryptoInfo->mode = FIRST_MODE_OF_OPERATION_ID; cryptoInfo->mode <= LAST_MODE_OF_OPERATION; cryptoInfo->mode++) { switch (cryptoInfo->mode) { case LRW: case CBC: case INNER_CBC: case OUTER_CBC: // For LRW (deprecated/legacy), copy the tweak key // For CBC (deprecated/legacy), copy the IV/whitening seed memcpy (cryptoInfo->k2, dk, LEGACY_VOL_IV_SIZE); primaryKeyOffset = LEGACY_VOL_IV_SIZE; break; default: primaryKeyOffset = 0; } // Test all available encryption algorithms for (cryptoInfo->ea = EAGetFirst (); cryptoInfo->ea != 0; cryptoInfo->ea = EAGetNext (cryptoInfo->ea)) { int blockSize; if (!EAIsModeSupported (cryptoInfo->ea, cryptoInfo->mode)) continue; // This encryption algorithm has never been available with this mode of operation blockSize = CipherGetBlockSize (EAGetFirstCipher (cryptoInfo->ea)); if (EAInit (cryptoInfo->ea, (unsigned char *)(dk + primaryKeyOffset), cryptoInfo->ks, (u1byte**)q_tab, (u4byte**)m_tab, &qt_gen, &mt_gen) == ERR_CIPHER_INIT_FAILURE) goto ret; // Init objects related to the mode of operation if (cryptoInfo->mode == XTS) { // Copy the secondary key (if cascade, multiple concatenated) memcpy (cryptoInfo->k2, dk + EAGetKeySize (cryptoInfo->ea), EAGetKeySize (cryptoInfo->ea)); // Secondary key schedule if (!EAInitMode (cryptoInfo, (u1byte**)q_tab, (u4byte**)m_tab, &qt_gen, &mt_gen)) { goto ret; } } else if (cryptoInfo->mode == LRW && (blockSize == 8 && !lrw64InitDone || blockSize == 16 && !lrw128InitDone)) { // Deprecated/legacy if (!EAInitMode (cryptoInfo,(u1byte**)q_tab, (u4byte**)m_tab, &qt_gen, &mt_gen)) { goto ret; } if (blockSize == 8) lrw64InitDone = TRUE; 
else if (blockSize == 16) lrw128InitDone = TRUE; } // Copy the header for decryption memcpy (header, d_EncryptedHeader, sizeof (header)); // Try to decrypt header DecryptBuffer ((unsigned char*)(header + HEADER_ENCRYPTED_DATA_OFFSET), HEADER_ENCRYPTED_DATA_SIZE, cryptoInfo); // fixme: crash here due to twofish // Magic 'TRUE' if (GetHeaderField32 ((byte*)header, TC_HEADER_OFFSET_MAGIC) == 0x54525545){ d_Output->tid = tid; d_Output->ea = cryptoInfo->ea; d_Output->mode = cryptoInfo->mode; d_Output->prf = enqPkcs5Prf; goto ret; } } } } ret: return; } extern "C" void launch_reduceKernel_ripemd160(char* d_EncryptedHeader, Password* d_Input, PTHREAD_DATA d_Output, int BLOCK_N, int THREAD_N) { reduceKernel_ripemd160<<<BLOCK_N, THREAD_N>>>(d_EncryptedHeader, d_Input, d_Output); } extern "C" void launch_reduceKernel_sha512(char* d_EncryptedHeader, Password* d_Input, PTHREAD_DATA d_Output, int BLOCK_N, int THREAD_N) { reduceKernel_sha512<<<BLOCK_N, THREAD_N>>>(d_EncryptedHeader, d_Input, d_Output); } extern "C" void launch_reduceKernel_sha1(char* d_EncryptedHeader, Password* d_Input, PTHREAD_DATA d_Output, int BLOCK_N, int THREAD_N) { reduceKernel_sha1<<<BLOCK_N, THREAD_N>>>(d_EncryptedHeader, d_Input, d_Output); } extern "C" void launch_reduceKernel_whirlpool(char* d_EncryptedHeader, Password* d_Input, PTHREAD_DATA d_Output, int BLOCK_N, int THREAD_N) { reduceKernel_whirlpool<<<BLOCK_N, THREAD_N>>>(d_EncryptedHeader, d_Input, d_Output); } extern "C" void launch_reduceKernel_final(char *d_EncryptedHeader, PTHREAD_DATA d_Input, PTHREAD_RESULT d_Output, int BLOCK_N, int THREAD_N) { reduceKernel_final<<<BLOCK_N, THREAD_N>>>(d_EncryptedHeader, d_Input, d_Output); }
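/* Illustrative sketch (not from the original source): how the extern "C" launch wrappers
   above are intended to be driven from the host. One batch of passwords is pushed through
   each PKCS5 PRF kernel to fill the per-thread derived keys, and reduceKernel_final then
   tries every cipher/mode combination against the encrypted header. The device buffers
   (d_header, d_passwords, d_threadData, d_result) are assumed to be allocated and filled
   by the caller; the names here are hypothetical. Because the kernels index their buffers
   by global thread id with no bounds check, passwordCount is assumed to be a multiple of
   threadsPerBlock. */
extern "C" void launch_crack_batch_sketch (char *d_header, Password *d_passwords,
	PTHREAD_DATA d_threadData, PTHREAD_RESULT d_result, int passwordCount, int threadsPerBlock)
{
	int blocks = passwordCount / threadsPerBlock;	// one thread per password
	launch_reduceKernel_ripemd160 (d_header, d_passwords, d_threadData, blocks, threadsPerBlock);
	launch_reduceKernel_sha512    (d_header, d_passwords, d_threadData, blocks, threadsPerBlock);
	launch_reduceKernel_sha1      (d_header, d_passwords, d_threadData, blocks, threadsPerBlock);
	launch_reduceKernel_whirlpool (d_header, d_passwords, d_threadData, blocks, threadsPerBlock);
	launch_reduceKernel_final     (d_header, d_threadData, d_result, blocks, threadsPerBlock);
}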
565b906c254a9d57a0f586238a6a748833db10c2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" ///nvcc -o fil main.cu -O3 -m=64 -arch=compute_61 -code=sm_61 -Xptxas -allow-expensive-optimizations=true -Xptxas -v #include <iostream> #include <chrono> #include <fstream> #include <algorithm> #include <inttypes.h> #include <bitset> #include <iostream> #include <vector> #include <map> #include <iomanip> #include <fstream> #include <chrono> #include <mutex> #include <time.h> #include "lcg.h" #ifdef BOINC #include "boinc_api.h" #if defined _WIN32 || defined _WIN64 #include "boinc_win.h" #endif #endif uint64_t millis() {return (std::chrono::duration_cast< std::chrono::milliseconds >(std::chrono::system_clock::now().time_since_epoch())).count();} #define GPU_ASSERT(code) gpuAssert((code), __FILE__, __LINE__) inline void gpuAssert(hipError_t code, const char *file, int line) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s (code %d) %s %d\n", hipGetErrorString(code), code, file, line); exit(code); } } // ===== LCG IMPLEMENTATION ===== // namespace java_lcg { //region Java LCG #define Random uint64_t #define RANDOM_MULTIPLIER 0x5DEECE66DULL #define RANDOM_ADDEND 0xBULL #define RANDOM_MASK ((1ULL << 48u) - 1) #define get_random(seed) ((Random)((seed ^ RANDOM_MULTIPLIER) & RANDOM_MASK)) __host__ __device__ __forceinline__ static int32_t random_next(Random *random, int bits) { *random = (*random * RANDOM_MULTIPLIER + RANDOM_ADDEND) & RANDOM_MASK; return (int32_t) (*random >> (48u - bits)); } __device__ __forceinline__ static int32_t random_next_int(Random *random, const uint16_t bound) { int32_t r = random_next(random, 31); const uint16_t m = bound - 1u; if ((bound & m) == 0) { r = (int32_t) ((bound * (uint64_t) r) >> 31u); } else { for (int32_t u = r; u - (r = u % bound) + m < 0; u = random_next(random, 31)); } return r; } __device__ __host__ __forceinline__ static int32_t random_next_int_nonpow(Random *random, const uint16_t bound) { int32_t r = random_next(random, 31); const uint16_t m = bound - 1u; for (int32_t u = r; u - (r = u % bound) + m < 0; u = random_next(random, 31)); return r; } __host__ __device__ __forceinline__ static double next_double(Random *random) { return (double) ((((uint64_t) ((uint32_t) random_next(random, 26)) << 27u)) + random_next(random, 27)) / (double)(1ULL << 53); } __host__ __device__ __forceinline__ static uint64_t random_next_long (Random *random) { return (((uint64_t)random_next(random, 32)) << 32u) + (int32_t)random_next(random, 32); } __host__ __device__ __forceinline__ static void advance2(Random *random) { *random = (*random * 0xBB20B4600A69LLU + 0x40942DE6BALLU) & RANDOM_MASK; } __host__ __device__ __forceinline__ static void advance3759(Random *random) { *random = (*random * 0x6FE85C031F25LLU + 0x8F50ECFF899LLU) & RANDOM_MASK; } } using namespace java_lcg; namespace device_intrinsics { //region DEVICE INTRINSICS #define DEVICE_STATIC_INTRINSIC_QUALIFIERS static __device__ __forceinline__ #if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) #define PXL_GLOBAL_PTR "l" #else #define PXL_GLOBAL_PTR "r" #endif DEVICE_STATIC_INTRINSIC_QUALIFIERS void __prefetch_local_l1(const void* const ptr) { asm("prefetch.local.L1 [%0];" : : PXL_GLOBAL_PTR(ptr)); } DEVICE_STATIC_INTRINSIC_QUALIFIERS void __prefetch_global_uniform(const void* const ptr) { asm("prefetchu.L1 [%0];" : : PXL_GLOBAL_PTR(ptr)); } DEVICE_STATIC_INTRINSIC_QUALIFIERS void __prefetch_local_l2(const void* const ptr) { asm("prefetch.local.L2 [%0];" : : PXL_GLOBAL_PTR(ptr)); } #if __CUDA__ < 10 
#define __ldg(ptr) (*(ptr)) #endif } using namespace device_intrinsics; #define BLOCK_SIZE (128) //#define BLOCK_SIZE (128) #define WORK_SIZE_BITS 16 #define SEEDS_PER_CALL ((1ULL << (WORK_SIZE_BITS)) * (BLOCK_SIZE)) //#define SEEDS_PER_CALL 8000000 //Specifying where the (1 = dirt/grass, 0 = sand) is // This will match the seed 76261196830436 (not pack.png ofc) // Double match: 76261206560653 (almost 100% confirmed, sans very last bit of sand in first match) // Triple match: 76273693341674 (100% match) #define CHUNK_X 6 #define CHUNK_Z -1 #define INNER_X_START 4 #define INNER_Z_START 0 #define INNER_X_END 13 #define INNER_Z_END 2 __constant__ uint8_t DIRT_HEIGHT_2D[INNER_Z_END - INNER_Z_START + 1][INNER_X_END - INNER_X_START + 1] = {{1,15,15,15,1,15,0,15,15,15}, {15,1,15,15,15,1,15,1,15,15}, {15,15,1,1,15,15,1,1,1,0}}; __constant__ double LocalNoise2D[INNER_Z_END - INNER_Z_START + 1][INNER_X_END - INNER_X_START + 1]; #define EARLY_RETURN (INNER_Z_END * 16 + INNER_X_END) #define CHUNK_X_2 6 #define CHUNK_Z_2 -2 #define INNER_X_START_2 0 #define INNER_Z_START_2 6 #define INNER_X_END_2 9 #define INNER_Z_END_2 15 __constant__ uint8_t DIRT_HEIGHT_2D_2[INNER_Z_END_2 - INNER_Z_START_2 + 1][INNER_X_END_2 - INNER_X_START_2 + 1] = {{0,15,15,15,15,15,15,15,15,15}, {15,0,0,15,15,15,15,15,15,15}, {0,15,15,0,15,15,15,15,15,15}, {15,1,15,15,0,15,15,15,15,15}, {15,15,0,15,15,0,15,15,15,15}, {15,15,15,0,15,0,15,15,15,15}, {15,15,15,15,0,15,0,15,15,15}, {0,15,15,15,15,0,0,15,15,15}, {0,0,15,15,15,15,0,0,0,15}, {15,15,0,0,15,15,15,0,15,0}}; __constant__ double LocalNoise2D_2[INNER_Z_END_2 - INNER_Z_START_2 + 1][INNER_X_END_2 - INNER_X_START_2 + 1]; #define CHUNK_X_3 5 #define CHUNK_Z_3 -1 #define INNER_X_START_3 4 #define INNER_Z_START_3 0 #define INNER_X_END_3 15 #define INNER_Z_END_3 10 __constant__ uint8_t DIRT_HEIGHT_2D_3[INNER_Z_END_3 - INNER_Z_START_3 + 1][INNER_X_END_3 - INNER_X_START_3 + 1] = {{1,1,15,15,15,15,15,15,15,15,0,15}, {15,15,15,15,15,15,15,15,15,15,0,15}, {15,15,15,15,15,15,15,15,15,15,15,0}, {15,15,15,0,15,15,15,15,15,15,15,0}, {15,15,15,1,15,15,15,15,15,15,15,15}, {15,15,15,0,15,15,15,15,15,15,15,0}, {15,15,15,15,15,15,15,15,15,15,15,15}, {15,15,0,15,15,15,15,15,15,15,15,15}, {15,15,1,15,15,15,15,15,15,15,15,15}, {15,15,15,1,15,15,15,15,15,15,15,15}, {15,15,15,0,15,15,15,15,15,15,15,15}}; __constant__ double LocalNoise2D_3[INNER_Z_END_3 - INNER_Z_START_3 + 1][INNER_X_END_3 - INNER_X_START_3 + 1]; /* //Old test: matches 104703450999364 #define CHUNK_X 2 #define CHUNK_Z 11 #define INNER_X_START 2 #define INNER_Z_START 0 #define INNER_X_END 11 #define INNER_Z_END 0 __constant__ uint8_t DIRT_HEIGHT_2D[INNER_Z_END - INNER_Z_START + 1][INNER_X_END - INNER_X_START + 1] = {{0,15,0,1,0,15,15,15,15,1}}; __constant__ double LocalNoise2D[INNER_Z_END - INNER_Z_START + 1][INNER_X_END - INNER_X_START + 1]; */ //The generation of the simplex layers and noise namespace noise { //region Simplex layer gen /* End of constant for simplex noise*/ struct Octave { double xo; double yo; double zo; uint8_t permutations[256]; }; __shared__ uint8_t permutations[256][BLOCK_SIZE]; #define getValue(array, index) array[index][threadIdx.x] #define setValue(array, index, value) array[index][threadIdx.x] = value __device__ static inline void setupNoise(const uint8_t nbOctaves, Random *random, Octave resultArray[]) { for (int j = 0; j < nbOctaves; ++j) { __prefetch_local_l2(&resultArray[j]); resultArray[j].xo = next_double(random) * 256.0; resultArray[j].yo = next_double(random) * 256.0; resultArray[j].zo = 
next_double(random) * 256.0; #pragma unroll for(int w = 0; w<256; w++) { setValue(permutations, w, w); } for(int index = 0; index<256; index++) { uint32_t randomIndex = random_next_int(random, 256ull - index) + index; //if (randomIndex != index) { // swap uint8_t v1 = getValue(permutations,index); //uint8_t v2 = getValue(permutations,randomIndex); setValue(permutations,index, getValue(permutations,randomIndex)); setValue(permutations, randomIndex, v1); //} } #pragma unroll for(int c = 0; c<256;c++) { __prefetch_local_l1(&(resultArray[j].permutations[c+1])); resultArray[j].permutations[c] = getValue(permutations,c); } //resultArray[j].xo = xo; //resultArray[j].yo = yo; //resultArray[j].zo = zo; } } __device__ static inline void SkipNoiseGen(const uint8_t nbOctaves, Random* random) { for (int j = 0; j < nbOctaves; ++j) { lcg::advance<2*3>(*random); for(int index = 0; index<256; index++) { random_next_int(random, 256ull - index); } } } __device__ static inline double lerp(double x, double a, double b) { return a + x * (b - a); } __device__ static inline double grad(uint8_t hash, double x, double y, double z) { switch (hash & 0xFu) { case 0x0: return x + y; case 0x1: return -x + y; case 0x2: return x - y; case 0x3: return -x - y; case 0x4: return x + z; case 0x5: return -x + z; case 0x6: return x - z; case 0x7: return -x - z; case 0x8: return y + z; case 0x9: return -y + z; case 0xA: return y - z; case 0xB: return -y - z; case 0xC: return y + x; case 0xD: return -y + z; case 0xE: return y - x; case 0xF: return -y - z; default: return 0; // never happens } } __device__ static inline void generateNormalPermutations(double *buffer, double x, double y, double z, int sizeX, int sizeY, int sizeZ, double noiseFactorX, double noiseFactorY, double noiseFactorZ, double octaveSize, Random* random) { double xo = lcg::next_double(*random) * 256.0; double yo = lcg::next_double(*random) * 256.0; double zo = lcg::next_double(*random) * 256.0; //Setup the permutation fresh xD #pragma unroll for(int w = 0; w<256; w++) { setValue(permutations, w, w); } for(int index = 0; index<256; index++) { uint32_t randomIndex = lcg::dynamic_next_int(*random, 256ull - index) + index; //if (randomIndex != index) { // swap uint8_t v1 = getValue(permutations,index); uint8_t v2 = getValue(permutations,randomIndex); setValue(permutations,index, v2); setValue(permutations, randomIndex, v1); //} } double octaveWidth = 1.0 / octaveSize; int32_t i2 = -1; double x1 = 0.0; double x2 = 0.0; double xx1 = 0.0; double xx2 = 0.0; double t; double w; int columnIndex = 0; for (int X = 0; X < sizeX; X++) { double xCoord = (x + (double) X) * noiseFactorX + xo; auto clampedXcoord = (int32_t) xCoord; if (xCoord < (double) clampedXcoord) { clampedXcoord--; } auto xBottoms = (uint8_t) ((uint32_t) clampedXcoord & 0xffu); xCoord -= clampedXcoord; t = xCoord * 6 - 15; w = (xCoord * t + 10); double fadeX = xCoord * xCoord * xCoord * w; for (int Z = 0; Z < sizeZ; Z++) { double zCoord = zo; auto clampedZCoord = (int32_t) zCoord; if (zCoord < (double) clampedZCoord) { clampedZCoord--; } auto zBottoms = (uint8_t) ((uint32_t) clampedZCoord & 0xffu); zCoord -= clampedZCoord; t = zCoord * 6 - 15; w = (zCoord * t + 10); double fadeZ = zCoord * zCoord * zCoord * w; for (int Y = 0; Y < sizeY; Y++) { double yCoords = (y + (double) Y) * noiseFactorY + yo; auto clampedYCoords = (int32_t) yCoords; if (yCoords < (double) clampedYCoords) { clampedYCoords--; } auto yBottoms = (uint8_t) ((uint32_t) clampedYCoords & 0xffu); yCoords -= clampedYCoords; t = yCoords * 6 - 15; 
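// Perlin "improved noise" fade curve 6t^5 - 15t^4 + 10t^3, evaluated here at yCoords
// (the X and Z weights above are computed the same way).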
w = yCoords * t + 10; double fadeY = yCoords * yCoords * yCoords * w; // ZCoord if (Y == 0 || yBottoms != i2) { // this is wrong on so many levels, same ybottoms doesnt mean x and z were the same... i2 = yBottoms; uint16_t k2 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)(xBottoms& 0xffu)) + yBottoms)& 0xffu)) + zBottoms; uint16_t l2 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)(xBottoms& 0xffu)) + yBottoms + 1u )& 0xffu)) + zBottoms; uint16_t k3 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)((xBottoms + 1u)& 0xffu)) + yBottoms )& 0xffu)) + zBottoms; uint16_t l3 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)((xBottoms + 1u)& 0xffu)) + yBottoms + 1u) & 0xffu)) + zBottoms; x1 = lerp(fadeX, grad(getValue(permutations,(uint8_t)(k2& 0xffu)), xCoord, yCoords, zCoord), grad(getValue(permutations,(uint8_t)(k3& 0xffu)), xCoord - 1.0, yCoords, zCoord)); x2 = lerp(fadeX, grad(getValue(permutations,(uint8_t)(l2& 0xffu)), xCoord, yCoords - 1.0, zCoord), grad(getValue(permutations,(uint8_t)(l3& 0xffu)), xCoord - 1.0, yCoords - 1.0, zCoord)); xx1 = lerp(fadeX, grad(getValue(permutations,(uint8_t)((k2+1u)& 0xffu)), xCoord, yCoords, zCoord - 1.0), grad(getValue(permutations,(uint8_t)((k3+1u)& 0xffu)), xCoord - 1.0, yCoords, zCoord - 1.0)); xx2 = lerp(fadeX, grad(getValue(permutations,(uint8_t)((l2+1u)& 0xffu)), xCoord, yCoords - 1.0, zCoord - 1.0), grad(getValue(permutations,(uint8_t)((l3+1u)& 0xffu)), xCoord - 1.0, yCoords - 1.0, zCoord - 1.0)); } if (columnIndex%16 >= INNER_X_START && columnIndex%16 <= INNER_X_END && DIRT_HEIGHT_2D[columnIndex/16 - INNER_Z_START][columnIndex%16 - INNER_X_START] != 15){ double y1 = lerp(fadeY, x1, x2); double y2 = lerp(fadeY, xx1, xx2); (buffer)[columnIndex] = (buffer)[columnIndex] + lerp(fadeZ, y1, y2) * octaveWidth; } if (columnIndex == EARLY_RETURN) return; columnIndex++; } } } } __device__ static inline void generateNormalPermutations_2(double *buffer, double x, double y, double z, int sizeX, int sizeY, int sizeZ, double noiseFactorX, double noiseFactorY, double noiseFactorZ, double octaveSize, Random* random) { double xo = lcg::next_double(*random) * 256.0; double yo = lcg::next_double(*random) * 256.0; double zo = lcg::next_double(*random) * 256.0; //Setup the permutation fresh xD #pragma unroll for(int w = 0; w<256; w++) { setValue(permutations, w, w); } for(int index = 0; index<256; index++) { uint32_t randomIndex = lcg::dynamic_next_int(*random, 256ull - index) + index; //if (randomIndex != index) { // swap uint8_t v1 = getValue(permutations,index); uint8_t v2 = getValue(permutations,randomIndex); setValue(permutations,index, v2); setValue(permutations, randomIndex, v1); //} } double octaveWidth = 1.0 / octaveSize; int32_t i2 = -1; double x1 = 0.0; double x2 = 0.0; double xx1 = 0.0; double xx2 = 0.0; double t; double w; int columnIndex = 0; for (int X = 0; X < sizeX; X++) { double xCoord = (x + (double) X) * noiseFactorX + xo; auto clampedXcoord = (int32_t) xCoord; if (xCoord < (double) clampedXcoord) { clampedXcoord--; } auto xBottoms = (uint8_t) ((uint32_t) clampedXcoord & 0xffu); xCoord -= clampedXcoord; t = xCoord * 6 - 15; w = (xCoord * t + 10); double fadeX = xCoord * xCoord * xCoord * w; for (int Z = 0; Z < sizeZ; Z++) { double zCoord = zo; auto clampedZCoord = (int32_t) zCoord; if (zCoord < (double) clampedZCoord) { clampedZCoord--; } auto zBottoms = (uint8_t) ((uint32_t) clampedZCoord & 0xffu); zCoord -= clampedZCoord; t = zCoord * 6 - 
15; w = (zCoord * t + 10); double fadeZ = zCoord * zCoord * zCoord * w; for (int Y = 0; Y < sizeY; Y++) { double yCoords = (y + (double) Y) * noiseFactorY + yo; auto clampedYCoords = (int32_t) yCoords; if (yCoords < (double) clampedYCoords) { clampedYCoords--; } auto yBottoms = (uint8_t) ((uint32_t) clampedYCoords & 0xffu); yCoords -= clampedYCoords; t = yCoords * 6 - 15; w = yCoords * t + 10; double fadeY = yCoords * yCoords * yCoords * w; // ZCoord if (Y == 0 || yBottoms != i2) { // this is wrong on so many levels, same ybottoms doesnt mean x and z were the same... i2 = yBottoms; uint16_t k2 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)(xBottoms& 0xffu)) + yBottoms)& 0xffu)) + zBottoms; uint16_t l2 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)(xBottoms& 0xffu)) + yBottoms + 1u )& 0xffu)) + zBottoms; uint16_t k3 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)((xBottoms + 1u)& 0xffu)) + yBottoms )& 0xffu)) + zBottoms; uint16_t l3 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)((xBottoms + 1u)& 0xffu)) + yBottoms + 1u) & 0xffu)) + zBottoms; x1 = lerp(fadeX, grad(getValue(permutations,(uint8_t)(k2& 0xffu)), xCoord, yCoords, zCoord), grad(getValue(permutations,(uint8_t)(k3& 0xffu)), xCoord - 1.0, yCoords, zCoord)); x2 = lerp(fadeX, grad(getValue(permutations,(uint8_t)(l2& 0xffu)), xCoord, yCoords - 1.0, zCoord), grad(getValue(permutations,(uint8_t)(l3& 0xffu)), xCoord - 1.0, yCoords - 1.0, zCoord)); xx1 = lerp(fadeX, grad(getValue(permutations,(uint8_t)((k2+1u)& 0xffu)), xCoord, yCoords, zCoord - 1.0), grad(getValue(permutations,(uint8_t)((k3+1u)& 0xffu)), xCoord - 1.0, yCoords, zCoord - 1.0)); xx2 = lerp(fadeX, grad(getValue(permutations,(uint8_t)((l2+1u)& 0xffu)), xCoord, yCoords - 1.0, zCoord - 1.0), grad(getValue(permutations,(uint8_t)((l3+1u)& 0xffu)), xCoord - 1.0, yCoords - 1.0, zCoord - 1.0)); } if (columnIndex%16 >= INNER_X_START_2 && columnIndex%16 <= INNER_X_END_2 && DIRT_HEIGHT_2D_2[columnIndex/16 - INNER_Z_START_2][columnIndex%16 - INNER_X_START_2] != 15){ double y1 = lerp(fadeY, x1, x2); double y2 = lerp(fadeY, xx1, xx2); (buffer)[columnIndex] = (buffer)[columnIndex] + lerp(fadeZ, y1, y2) * octaveWidth; } columnIndex++; } } } } __device__ static inline void generateNormalPermutations_3(double *buffer, double x, double y, double z, int sizeX, int sizeY, int sizeZ, double noiseFactorX, double noiseFactorY, double noiseFactorZ, double octaveSize, Random* random) { double xo = lcg::next_double(*random) * 256.0; double yo = lcg::next_double(*random) * 256.0; double zo = lcg::next_double(*random) * 256.0; //Setup the permutation fresh xD #pragma unroll for(int w = 0; w<256; w++) { setValue(permutations, w, w); } for(int index = 0; index<256; index++) { uint32_t randomIndex = lcg::dynamic_next_int(*random, 256ull - index) + index; //if (randomIndex != index) { // swap uint8_t v1 = getValue(permutations,index); uint8_t v2 = getValue(permutations,randomIndex); setValue(permutations,index, v2); setValue(permutations, randomIndex, v1); //} } double octaveWidth = 1.0 / octaveSize; int32_t i2 = -1; double x1 = 0.0; double x2 = 0.0; double xx1 = 0.0; double xx2 = 0.0; double t; double w; int columnIndex = 0; for (int X = 0; X < sizeX; X++) { double xCoord = (x + (double) X) * noiseFactorX + xo; auto clampedXcoord = (int32_t) xCoord; if (xCoord < (double) clampedXcoord) { clampedXcoord--; } auto xBottoms = (uint8_t) ((uint32_t) clampedXcoord & 0xffu); xCoord -= clampedXcoord; 
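// The factored expressions just below (t = c*6 - 15, w = c*t + 10, fade = c*c*c*w) evaluate the quintic smoothstep 6c^5 - 15c^4 + 10c^3 from improved Perlin noise.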
t = xCoord * 6 - 15; w = (xCoord * t + 10); double fadeX = xCoord * xCoord * xCoord * w; for (int Z = 0; Z < sizeZ; Z++) { double zCoord = zo; auto clampedZCoord = (int32_t) zCoord; if (zCoord < (double) clampedZCoord) { clampedZCoord--; } auto zBottoms = (uint8_t) ((uint32_t) clampedZCoord & 0xffu); zCoord -= clampedZCoord; t = zCoord * 6 - 15; w = (zCoord * t + 10); double fadeZ = zCoord * zCoord * zCoord * w; for (int Y = 0; Y < sizeY; Y++) { double yCoords = (y + (double) Y) * noiseFactorY + yo; auto clampedYCoords = (int32_t) yCoords; if (yCoords < (double) clampedYCoords) { clampedYCoords--; } auto yBottoms = (uint8_t) ((uint32_t) clampedYCoords & 0xffu); yCoords -= clampedYCoords; t = yCoords * 6 - 15; w = yCoords * t + 10; double fadeY = yCoords * yCoords * yCoords * w; // ZCoord if (Y == 0 || yBottoms != i2) { // this is wrong on so many levels, same ybottoms doesnt mean x and z were the same... i2 = yBottoms; uint16_t k2 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)(xBottoms& 0xffu)) + yBottoms)& 0xffu)) + zBottoms; uint16_t l2 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)(xBottoms& 0xffu)) + yBottoms + 1u )& 0xffu)) + zBottoms; uint16_t k3 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)((xBottoms + 1u)& 0xffu)) + yBottoms )& 0xffu)) + zBottoms; uint16_t l3 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)((xBottoms + 1u)& 0xffu)) + yBottoms + 1u) & 0xffu)) + zBottoms; x1 = lerp(fadeX, grad(getValue(permutations,(uint8_t)(k2& 0xffu)), xCoord, yCoords, zCoord), grad(getValue(permutations,(uint8_t)(k3& 0xffu)), xCoord - 1.0, yCoords, zCoord)); x2 = lerp(fadeX, grad(getValue(permutations,(uint8_t)(l2& 0xffu)), xCoord, yCoords - 1.0, zCoord), grad(getValue(permutations,(uint8_t)(l3& 0xffu)), xCoord - 1.0, yCoords - 1.0, zCoord)); xx1 = lerp(fadeX, grad(getValue(permutations,(uint8_t)((k2+1u)& 0xffu)), xCoord, yCoords, zCoord - 1.0), grad(getValue(permutations,(uint8_t)((k3+1u)& 0xffu)), xCoord - 1.0, yCoords, zCoord - 1.0)); xx2 = lerp(fadeX, grad(getValue(permutations,(uint8_t)((l2+1u)& 0xffu)), xCoord, yCoords - 1.0, zCoord - 1.0), grad(getValue(permutations,(uint8_t)((l3+1u)& 0xffu)), xCoord - 1.0, yCoords - 1.0, zCoord - 1.0)); } if (columnIndex%16 >= INNER_X_START_3 && columnIndex%16 <= INNER_X_END_3 && DIRT_HEIGHT_2D_3[columnIndex/16 - INNER_Z_START_3][columnIndex%16 - INNER_X_START_3] != 15){ double y1 = lerp(fadeY, x1, x2); double y2 = lerp(fadeY, xx1, xx2); (buffer)[columnIndex] = (buffer)[columnIndex] + lerp(fadeZ, y1, y2) * octaveWidth; } columnIndex++; } } } } __device__ static inline void generateNoise(double *buffer, double chunkX, double chunkY, double chunkZ, int sizeX, int sizeY, int sizeZ, double offsetX, double offsetY, double offsetZ, Random random, int nbOctaves) { //memset(buffer, 0, sizeof(double) * sizeX * sizeZ * sizeY); double octavesFactor = 1.0; for (int octave = 0; octave < nbOctaves; octave++) { generateNormalPermutations(buffer, chunkX, chunkY, chunkZ, sizeX, sizeY, sizeZ, offsetX * octavesFactor, offsetY * octavesFactor, offsetZ * octavesFactor, octavesFactor, &random); octavesFactor /= 2.0; } } __device__ static inline void generateNoise_2(double *buffer, double chunkX, double chunkY, double chunkZ, int sizeX, int sizeY, int sizeZ, double offsetX, double offsetY, double offsetZ, Random random, int nbOctaves) { //memset(buffer, 0, sizeof(double) * sizeX * sizeZ * sizeY); double octavesFactor = 1.0; for (int octave = 0; octave < 
nbOctaves; octave++) { generateNormalPermutations_2(buffer, chunkX, chunkY, chunkZ, sizeX, sizeY, sizeZ, offsetX * octavesFactor, offsetY * octavesFactor, offsetZ * octavesFactor, octavesFactor, &random); octavesFactor /= 2.0; } } __device__ static inline void generateNoise_3(double *buffer, double chunkX, double chunkY, double chunkZ, int sizeX, int sizeY, int sizeZ, double offsetX, double offsetY, double offsetZ, Random random, int nbOctaves) { //memset(buffer, 0, sizeof(double) * sizeX * sizeZ * sizeY); double octavesFactor = 1.0; for (int octave = 0; octave < nbOctaves; octave++) { generateNormalPermutations_3(buffer, chunkX, chunkY, chunkZ, sizeX, sizeY, sizeZ, offsetX * octavesFactor, offsetY * octavesFactor, offsetZ * octavesFactor, octavesFactor, &random); octavesFactor /= 2.0; } } } using namespace noise; __device__ static inline bool match(uint64_t seed) { seed = get_random(seed); //SkipNoiseGen(16+16+8, &seed); lcg::advance<10480>(seed);//VERY VERY DODGY double heightField[EARLY_RETURN+1]; #pragma unroll for(uint16_t i = 0; i<EARLY_RETURN+1;i++) heightField[i] = 0; const double noiseFactor = 0.03125; generateNoise(heightField, (double) (CHUNK_X <<4), (double) (CHUNK_Z<<4), 0.0, 16, 16, 1, noiseFactor, noiseFactor, 1.0, seed, 4); for(uint8_t z = 0; z < INNER_Z_END - INNER_Z_START + 1; z++) { for(uint8_t x = 0; x < INNER_X_END - INNER_X_START + 1; x++) { if (DIRT_HEIGHT_2D[z][x] != 15) { uint8_t dirty = heightField[INNER_X_START + x + (INNER_Z_START + z) * 16] + LocalNoise2D[z][x] * 0.2 > 0.0 ? 0 : 1; if (dirty!=(int8_t)DIRT_HEIGHT_2D[z][x]) return false; } } } return true; } __device__ static inline bool match2(uint64_t seed) { seed = get_random(seed); //SkipNoiseGen(16+16+8, &seed); lcg::advance<10480>(seed);//VERY VERY DODGY double heightField[256]; #pragma unroll for(uint16_t i = 0; i<256;i++) heightField[i] = 0; const double noiseFactor = 0.03125; generateNoise_2(heightField, (double) (CHUNK_X_2 <<4), (double) (CHUNK_Z_2<<4), 0.0, 16, 16, 1, noiseFactor, noiseFactor, 1.0, seed, 4); for(uint8_t z = 0; z < INNER_Z_END_2 - INNER_Z_START_2 + 1; z++) { for(uint8_t x = 0; x < INNER_X_END_2 - INNER_X_START_2 + 1; x++) { if (DIRT_HEIGHT_2D_2[z][x] != 15) { uint8_t dirty = heightField[INNER_X_START_2 + x + (INNER_Z_START_2 + z) * 16] + LocalNoise2D_2[z][x] * 0.2 > 0.0 ? 0 : 1; if (dirty!=(int8_t)DIRT_HEIGHT_2D_2[z][x]) return false; } } } return true; } __device__ static inline bool match3(uint64_t seed) { seed = get_random(seed); //SkipNoiseGen(16+16+8, &seed); lcg::advance<10480>(seed);//VERY VERY DODGY double heightField[256]; #pragma unroll for(uint16_t i = 0; i<256;i++) heightField[i] = 0; const double noiseFactor = 0.03125; generateNoise_3(heightField, (double) (CHUNK_X_3 <<4), (double) (CHUNK_Z_3<<4), 0.0, 16, 16, 1, noiseFactor, noiseFactor, 1.0, seed, 4); for(uint8_t z = 0; z < INNER_Z_END_3 - INNER_Z_START_3 + 1; z++) { for(uint8_t x = 0; x < INNER_X_END_3 - INNER_X_START_3 + 1; x++) { if (DIRT_HEIGHT_2D_3[z][x] != 15) { uint8_t dirty = heightField[INNER_X_START_3 + x + (INNER_Z_START_3 + z) * 16] + LocalNoise2D_3[z][x] * 0.2 > 0.0 ? 
0 : 1; if (dirty!=(int8_t)DIRT_HEIGHT_2D_3[z][x]) return false; } } } return true; } __global__ __launch_bounds__(BLOCK_SIZE,2) static void tempCheck(uint64_t offset, uint64_t* buffer, uint32_t* counter) { uint64_t seed = blockIdx.x * blockDim.x + threadIdx.x + offset; if (match(seed)) { buffer[atomicAdd(counter,1)] = seed; } } __global__ __launch_bounds__(BLOCK_SIZE,2) static void tempCheck2(uint32_t count, uint64_t* buffer) { uint64_t seedIndex = blockIdx.x * blockDim.x + threadIdx.x; if (seedIndex>=count) return; if (!match2(buffer[seedIndex])) { buffer[seedIndex] = 0; } } __global__ __launch_bounds__(BLOCK_SIZE,2) static void tempCheck3(uint32_t count, uint64_t* buffer) { uint64_t seedIndex = blockIdx.x * blockDim.x + threadIdx.x; if (seedIndex>=count) return; uint64_t seed = buffer[seedIndex]; if (seed==0) return; if (!match3(seed)) { buffer[seedIndex] = 0; } } std::ifstream inSeeds; std::ofstream outSeeds; uint64_t* buffer; uint32_t* counter; double getNextDoubleForLocNoise(int x, int z); void setup(int gpu_device) { hipSetDevice(gpu_device); GPU_ASSERT(hipPeekAtLastError()); GPU_ASSERT(hipDeviceSynchronize()); double locNoise2D[INNER_Z_END - INNER_Z_START + 1][INNER_X_END - INNER_X_START + 1]; for(uint8_t z = 0; z < INNER_Z_END - INNER_Z_START + 1; z++) { for (uint8_t x = 0; x < INNER_X_END - INNER_X_START + 1; x++) { locNoise2D[z][x] = getNextDoubleForLocNoise((CHUNK_X<<4) + INNER_X_START + x, (CHUNK_Z<<4) + INNER_Z_START + z); } } GPU_ASSERT(hipMemcpyToSymbol(LocalNoise2D, &locNoise2D, sizeof(locNoise2D))); GPU_ASSERT(hipPeekAtLastError()); double locNoise2D_2[INNER_Z_END_2 - INNER_Z_START_2 + 1][INNER_X_END_2 - INNER_X_START_2 + 1]; for(uint8_t z = 0; z < INNER_Z_END_2 - INNER_Z_START_2 + 1; z++) { for (uint8_t x = 0; x < INNER_X_END_2 - INNER_X_START_2 + 1; x++) { locNoise2D_2[z][x] = getNextDoubleForLocNoise((CHUNK_X_2<<4) + INNER_X_START_2 + x, (CHUNK_Z_2<<4) + INNER_Z_START_2 + z); } } GPU_ASSERT(hipMemcpyToSymbol(LocalNoise2D_2, &locNoise2D_2, sizeof(locNoise2D_2))); GPU_ASSERT(hipPeekAtLastError()); double locNoise2D_3[INNER_Z_END_3 - INNER_Z_START_3 + 1][INNER_X_END_3 - INNER_X_START_3 + 1]; for(uint8_t z = 0; z < INNER_Z_END_3 - INNER_Z_START_3 + 1; z++) { for (uint8_t x = 0; x < INNER_X_END_3 - INNER_X_START_3 + 1; x++) { locNoise2D_3[z][x] = getNextDoubleForLocNoise((CHUNK_X_3<<4) + INNER_X_START_3 + x, (CHUNK_Z_3<<4) + INNER_Z_START_3 + z); } } GPU_ASSERT(hipMemcpyToSymbol(LocalNoise2D_3, &locNoise2D_3, sizeof(locNoise2D_3))); GPU_ASSERT(hipPeekAtLastError()); } time_t elapsed_chkpoint = 0; struct checkpoint_vars { unsigned long long offset; time_t elapsed_chkpoint; }; int main(int argc, char *argv[]) { int gpu_device = 0; uint64_t START; uint64_t offsetStart = 0; uint64_t COUNT; #ifdef BOINC BOINC_OPTIONS options; boinc_options_defaults(options); options.normal_thread_priority = true; boinc_init_options(&options); #endif for (int i = 1; i < argc; i += 2) { const char *param = argv[i]; if (strcmp(param, "-d") == 0 || strcmp(param, "--device") == 0) { gpu_device = atoi(argv[i + 1]); } else if (strcmp(param, "-s") == 0 || strcmp(param, "--start") == 0) { sscanf(argv[i + 1], "%llu", &START); } else if (strcmp(param, "-e") == 0 || strcmp(param, "--count") == 0) { sscanf(argv[i + 1], "%llu", &COUNT); } else { fprintf(stderr,"Unknown parameter: %s\n", param); } } FILE *checkpoint_data = boinc_fopen("packpoint.txt", "rb"); if(!checkpoint_data){ fprintf(stderr, "No checkpoint to load\n"); } else{ #ifdef BOINC boinc_begin_critical_section(); #endif struct checkpoint_vars 
data_store; fread(&data_store, sizeof(data_store), 1, checkpoint_data); offsetStart = data_store.offset; elapsed_chkpoint = data_store.elapsed_chkpoint; fprintf(stderr, "Checkpoint loaded, task time %d s, seed pos: %llu\n", elapsed_chkpoint, START); fclose(checkpoint_data); #ifdef BOINC boinc_end_critical_section(); #endif } #ifdef BOINC APP_INIT_DATA aid; boinc_get_init_data(aid); if (aid.gpu_device_num >= 0) { gpu_device = aid.gpu_device_num; fprintf(stderr,"boinc gpu %i gpuindex: %i \n", aid.gpu_device_num, gpu_device); } else { fprintf(stderr,"stndalone gpuindex %i \n", gpu_device); } #endif setup(gpu_device); uint64_t seedCount = COUNT; std::cout << "Processing " << seedCount << " seeds" << std::endl; outSeeds.open("seedsout"); GPU_ASSERT(hipMallocManaged(&buffer, sizeof(*buffer) * SEEDS_PER_CALL)); GPU_ASSERT(hipPeekAtLastError()); GPU_ASSERT(hipMallocManaged(&counter, sizeof(*counter))); GPU_ASSERT(hipPeekAtLastError()); time_t start_time = time(NULL); int outCount = 0; int checkpointTemp = 0; for(uint64_t offset =offsetStart;offset<seedCount;offset+=SEEDS_PER_CALL) { // Normal filtering time_t elapsed = time(NULL) - start_time; double frac = (double) offset / (double)(seedCount); #ifdef BOINC boinc_fraction_done(frac); #endif *counter = 0; hipLaunchKernelGGL(( tempCheck), dim3(1ULL<<WORK_SIZE_BITS),dim3(BLOCK_SIZE), 0, 0, 0, 0, START + offset, buffer,counter); GPU_ASSERT(hipPeekAtLastError()); GPU_ASSERT(hipDeviceSynchronize()); hipLaunchKernelGGL(( tempCheck2), dim3(((*counter)/BLOCK_SIZE)+1),dim3(BLOCK_SIZE), 0, 0, *counter, buffer); GPU_ASSERT(hipPeekAtLastError()); GPU_ASSERT(hipDeviceSynchronize()); hipLaunchKernelGGL(( tempCheck3), dim3(((*counter)/BLOCK_SIZE)+1),dim3(BLOCK_SIZE), 0, 0, *counter, buffer); GPU_ASSERT(hipPeekAtLastError()); GPU_ASSERT(hipDeviceSynchronize()); for(int i=0;i<*counter;i++) { if (buffer[i]!=0) { uint64_t seed = buffer[i]; std::cout << "3rd level seed found: " << seed << std::endl; outSeeds << seed << std::endl; outCount++; } } if(checkpointTemp >= 180000000 || boinc_time_to_checkpoint()){ #ifdef BOINC boinc_begin_critical_section(); // Boinc should not interrupt this #endif // Checkpointing section below boinc_delete_file("packpoint.txt"); // Don't touch, same func as normal fdel FILE *checkpoint_data = boinc_fopen("packpoint.txt", "wb"); struct checkpoint_vars data_store; data_store.offset = offset; data_store.elapsed_chkpoint = elapsed_chkpoint + elapsed; fwrite(&data_store, sizeof(data_store), 1, checkpoint_data); fclose(checkpoint_data); checkpointTemp = 0; #ifdef BOINC boinc_end_critical_section(); boinc_checkpoint_completed(); // Checkpointing completed #endif } checkpointTemp += SEEDS_PER_CALL; std::cout << "Seeds left:" << (((int64_t)seedCount-offset)-SEEDS_PER_CALL) << std::endl; } std::cout << "Done processing" << std::endl; #ifdef BOINC boinc_begin_critical_section(); #endif time_t elapsed = time(NULL) - start_time; double done = (double)COUNT / 1000000.0; double speed = done / (double) elapsed; fprintf(stderr, "\nSpeed: %.2lfm/s\n", speed ); fprintf(stderr, "Done\n"); fprintf(stderr, "Processed: %llu seeds in %.2lfs seconds\n", COUNT, (double) elapsed_chkpoint + (double) elapsed ); fprintf(stderr, "Have %llu output seeds.\n", outCount); fflush(stderr); outSeeds.close(); boinc_delete_file("packpoint.txt"); #ifdef BOINC boinc_end_critical_section(); #endif boinc_finish(0); } double getNextDoubleForLocNoise(int x, int z) { Random rand = get_random((((int64_t)x) >> 4) * 341873128712LL + (((int64_t)z) >> 4) * 132897987541LL); for (int dx = 
0; dx < 16; dx++) {
        for (int dz = 0; dz < 16; dz++) {
            if (dx == (x & 15) && dz == (z & 15)) {
                //advance2(&rand);
                //advance2(&rand);
                return next_double(&rand);
            }
            advance2(&rand);
            advance2(&rand);
            advance2(&rand);
            for(int k1 = 127; k1 >= 0; k1--) {
                random_next_int_nonpow(&rand,5);
            }
            //for (int i = 0; i < 67; i++) {
            //    advance2(&rand);
            //}
        }
    }
    exit(-99);
}
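The match functions in this file skip the generator forward with lcg::advance<10480>() (flagged "VERY VERY DODGY" in the source) instead of replaying SkipNoiseGen draw by draw. lcg.h is not reproduced here, so the sketch below only illustrates the usual O(log N) affine-composition skip for a Java-style LCG, a plausible way such an advance can be implemented; the multiplier, addend and mask are the java_lcg constants defined above, while the function names and the test in main are purely illustrative.

// lcg_skip_sketch.cu -- illustrative only, not part of the original filter or of lcg.h.
// Advancing an LCG n steps is the n-fold composition of the affine map s -> a*s + c,
// which can be folded in O(log n) squarings instead of n sequential steps.
#include <cstdio>
#include <cstdint>

static const uint64_t MULT = 0x5DEECE66DULL;          // java.util.Random multiplier
static const uint64_t ADD  = 0xBULL;                  // java.util.Random addend
static const uint64_t MASK = (1ULL << 48) - 1;        // 48-bit state

__host__ __device__ static void skip(uint64_t &seed, uint64_t n) {
    uint64_t a = MULT, c = ADD;    // coefficients of a single step
    uint64_t aAcc = 1, cAcc = 0;   // accumulated map starts as the identity
    while (n) {
        if (n & 1) {               // fold the current power-of-two step into the result
            aAcc = (aAcc * a) & MASK;
            cAcc = (cAcc * a + c) & MASK;
        }
        c = ((a + 1) * c) & MASK;  // double the step: f(f(s)) = a*a*s + (a+1)*c
        a = (a * a) & MASK;
        n >>= 1;
    }
    seed = (seed * aAcc + cAcc) & MASK;
}

__host__ __device__ static void step(uint64_t &seed) {
    seed = (seed * MULT + ADD) & MASK;
}

int main() {
    uint64_t bruteForce = 12345, closedForm = 12345;
    for (int i = 0; i < 10480; i++) step(bruteForce);  // replay every draw
    skip(closedForm, 10480);                           // jump in ~14 iterations
    printf("%llu vs %llu -> %s\n", (unsigned long long)bruteForce,
           (unsigned long long)closedForm, bruteForce == closedForm ? "match" : "MISMATCH");
    return 0;
}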
565b906c254a9d57a0f586238a6a748833db10c2.cu
///nvcc -o fil main.cu -O3 -m=64 -arch=compute_61 -code=sm_61 -Xptxas -allow-expensive-optimizations=true -Xptxas -v #include <iostream> #include <chrono> #include <fstream> #include <algorithm> #include <inttypes.h> #include <bitset> #include <iostream> #include <vector> #include <map> #include <iomanip> #include <fstream> #include <chrono> #include <mutex> #include <time.h> #include "lcg.h" #ifdef BOINC #include "boinc_api.h" #if defined _WIN32 || defined _WIN64 #include "boinc_win.h" #endif #endif uint64_t millis() {return (std::chrono::duration_cast< std::chrono::milliseconds >(std::chrono::system_clock::now().time_since_epoch())).count();} #define GPU_ASSERT(code) gpuAssert((code), __FILE__, __LINE__) inline void gpuAssert(cudaError_t code, const char *file, int line) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s (code %d) %s %d\n", cudaGetErrorString(code), code, file, line); exit(code); } } // ===== LCG IMPLEMENTATION ===== // namespace java_lcg { //region Java LCG #define Random uint64_t #define RANDOM_MULTIPLIER 0x5DEECE66DULL #define RANDOM_ADDEND 0xBULL #define RANDOM_MASK ((1ULL << 48u) - 1) #define get_random(seed) ((Random)((seed ^ RANDOM_MULTIPLIER) & RANDOM_MASK)) __host__ __device__ __forceinline__ static int32_t random_next(Random *random, int bits) { *random = (*random * RANDOM_MULTIPLIER + RANDOM_ADDEND) & RANDOM_MASK; return (int32_t) (*random >> (48u - bits)); } __device__ __forceinline__ static int32_t random_next_int(Random *random, const uint16_t bound) { int32_t r = random_next(random, 31); const uint16_t m = bound - 1u; if ((bound & m) == 0) { r = (int32_t) ((bound * (uint64_t) r) >> 31u); } else { for (int32_t u = r; u - (r = u % bound) + m < 0; u = random_next(random, 31)); } return r; } __device__ __host__ __forceinline__ static int32_t random_next_int_nonpow(Random *random, const uint16_t bound) { int32_t r = random_next(random, 31); const uint16_t m = bound - 1u; for (int32_t u = r; u - (r = u % bound) + m < 0; u = random_next(random, 31)); return r; } __host__ __device__ __forceinline__ static double next_double(Random *random) { return (double) ((((uint64_t) ((uint32_t) random_next(random, 26)) << 27u)) + random_next(random, 27)) / (double)(1ULL << 53); } __host__ __device__ __forceinline__ static uint64_t random_next_long (Random *random) { return (((uint64_t)random_next(random, 32)) << 32u) + (int32_t)random_next(random, 32); } __host__ __device__ __forceinline__ static void advance2(Random *random) { *random = (*random * 0xBB20B4600A69LLU + 0x40942DE6BALLU) & RANDOM_MASK; } __host__ __device__ __forceinline__ static void advance3759(Random *random) { *random = (*random * 0x6FE85C031F25LLU + 0x8F50ECFF899LLU) & RANDOM_MASK; } } using namespace java_lcg; namespace device_intrinsics { //region DEVICE INTRINSICS #define DEVICE_STATIC_INTRINSIC_QUALIFIERS static __device__ __forceinline__ #if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) #define PXL_GLOBAL_PTR "l" #else #define PXL_GLOBAL_PTR "r" #endif DEVICE_STATIC_INTRINSIC_QUALIFIERS void __prefetch_local_l1(const void* const ptr) { asm("prefetch.local.L1 [%0];" : : PXL_GLOBAL_PTR(ptr)); } DEVICE_STATIC_INTRINSIC_QUALIFIERS void __prefetch_global_uniform(const void* const ptr) { asm("prefetchu.L1 [%0];" : : PXL_GLOBAL_PTR(ptr)); } DEVICE_STATIC_INTRINSIC_QUALIFIERS void __prefetch_local_l2(const void* const ptr) { asm("prefetch.local.L2 [%0];" : : PXL_GLOBAL_PTR(ptr)); } #if __CUDA__ < 10 #define __ldg(ptr) (*(ptr)) #endif } using namespace device_intrinsics; #define 
BLOCK_SIZE (128) //#define BLOCK_SIZE (128) #define WORK_SIZE_BITS 16 #define SEEDS_PER_CALL ((1ULL << (WORK_SIZE_BITS)) * (BLOCK_SIZE)) //#define SEEDS_PER_CALL 8000000 //Specifying where the (1 = dirt/grass, 0 = sand) is // This will match the seed 76261196830436 (not pack.png ofc) // Double match: 76261206560653 (almost 100% confirmed, sans very last bit of sand in first match) // Triple match: 76273693341674 (100% match) #define CHUNK_X 6 #define CHUNK_Z -1 #define INNER_X_START 4 #define INNER_Z_START 0 #define INNER_X_END 13 #define INNER_Z_END 2 __constant__ uint8_t DIRT_HEIGHT_2D[INNER_Z_END - INNER_Z_START + 1][INNER_X_END - INNER_X_START + 1] = {{1,15,15,15,1,15,0,15,15,15}, {15,1,15,15,15,1,15,1,15,15}, {15,15,1,1,15,15,1,1,1,0}}; __constant__ double LocalNoise2D[INNER_Z_END - INNER_Z_START + 1][INNER_X_END - INNER_X_START + 1]; #define EARLY_RETURN (INNER_Z_END * 16 + INNER_X_END) #define CHUNK_X_2 6 #define CHUNK_Z_2 -2 #define INNER_X_START_2 0 #define INNER_Z_START_2 6 #define INNER_X_END_2 9 #define INNER_Z_END_2 15 __constant__ uint8_t DIRT_HEIGHT_2D_2[INNER_Z_END_2 - INNER_Z_START_2 + 1][INNER_X_END_2 - INNER_X_START_2 + 1] = {{0,15,15,15,15,15,15,15,15,15}, {15,0,0,15,15,15,15,15,15,15}, {0,15,15,0,15,15,15,15,15,15}, {15,1,15,15,0,15,15,15,15,15}, {15,15,0,15,15,0,15,15,15,15}, {15,15,15,0,15,0,15,15,15,15}, {15,15,15,15,0,15,0,15,15,15}, {0,15,15,15,15,0,0,15,15,15}, {0,0,15,15,15,15,0,0,0,15}, {15,15,0,0,15,15,15,0,15,0}}; __constant__ double LocalNoise2D_2[INNER_Z_END_2 - INNER_Z_START_2 + 1][INNER_X_END_2 - INNER_X_START_2 + 1]; #define CHUNK_X_3 5 #define CHUNK_Z_3 -1 #define INNER_X_START_3 4 #define INNER_Z_START_3 0 #define INNER_X_END_3 15 #define INNER_Z_END_3 10 __constant__ uint8_t DIRT_HEIGHT_2D_3[INNER_Z_END_3 - INNER_Z_START_3 + 1][INNER_X_END_3 - INNER_X_START_3 + 1] = {{1,1,15,15,15,15,15,15,15,15,0,15}, {15,15,15,15,15,15,15,15,15,15,0,15}, {15,15,15,15,15,15,15,15,15,15,15,0}, {15,15,15,0,15,15,15,15,15,15,15,0}, {15,15,15,1,15,15,15,15,15,15,15,15}, {15,15,15,0,15,15,15,15,15,15,15,0}, {15,15,15,15,15,15,15,15,15,15,15,15}, {15,15,0,15,15,15,15,15,15,15,15,15}, {15,15,1,15,15,15,15,15,15,15,15,15}, {15,15,15,1,15,15,15,15,15,15,15,15}, {15,15,15,0,15,15,15,15,15,15,15,15}}; __constant__ double LocalNoise2D_3[INNER_Z_END_3 - INNER_Z_START_3 + 1][INNER_X_END_3 - INNER_X_START_3 + 1]; /* //Old test: matches 104703450999364 #define CHUNK_X 2 #define CHUNK_Z 11 #define INNER_X_START 2 #define INNER_Z_START 0 #define INNER_X_END 11 #define INNER_Z_END 0 __constant__ uint8_t DIRT_HEIGHT_2D[INNER_Z_END - INNER_Z_START + 1][INNER_X_END - INNER_X_START + 1] = {{0,15,0,1,0,15,15,15,15,1}}; __constant__ double LocalNoise2D[INNER_Z_END - INNER_Z_START + 1][INNER_X_END - INNER_X_START + 1]; */ //The generation of the simplex layers and noise namespace noise { //region Simplex layer gen /* End of constant for simplex noise*/ struct Octave { double xo; double yo; double zo; uint8_t permutations[256]; }; __shared__ uint8_t permutations[256][BLOCK_SIZE]; #define getValue(array, index) array[index][threadIdx.x] #define setValue(array, index, value) array[index][threadIdx.x] = value __device__ static inline void setupNoise(const uint8_t nbOctaves, Random *random, Octave resultArray[]) { for (int j = 0; j < nbOctaves; ++j) { __prefetch_local_l2(&resultArray[j]); resultArray[j].xo = next_double(random) * 256.0; resultArray[j].yo = next_double(random) * 256.0; resultArray[j].zo = next_double(random) * 256.0; #pragma unroll for(int w = 0; w<256; w++) { 
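// Fill the table with the identity permutation; the index loop that follows applies a Fisher-Yates shuffle driven by random_next_int.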
setValue(permutations, w, w); } for(int index = 0; index<256; index++) { uint32_t randomIndex = random_next_int(random, 256ull - index) + index; //if (randomIndex != index) { // swap uint8_t v1 = getValue(permutations,index); //uint8_t v2 = getValue(permutations,randomIndex); setValue(permutations,index, getValue(permutations,randomIndex)); setValue(permutations, randomIndex, v1); //} } #pragma unroll for(int c = 0; c<256;c++) { __prefetch_local_l1(&(resultArray[j].permutations[c+1])); resultArray[j].permutations[c] = getValue(permutations,c); } //resultArray[j].xo = xo; //resultArray[j].yo = yo; //resultArray[j].zo = zo; } } __device__ static inline void SkipNoiseGen(const uint8_t nbOctaves, Random* random) { for (int j = 0; j < nbOctaves; ++j) { lcg::advance<2*3>(*random); for(int index = 0; index<256; index++) { random_next_int(random, 256ull - index); } } } __device__ static inline double lerp(double x, double a, double b) { return a + x * (b - a); } __device__ static inline double grad(uint8_t hash, double x, double y, double z) { switch (hash & 0xFu) { case 0x0: return x + y; case 0x1: return -x + y; case 0x2: return x - y; case 0x3: return -x - y; case 0x4: return x + z; case 0x5: return -x + z; case 0x6: return x - z; case 0x7: return -x - z; case 0x8: return y + z; case 0x9: return -y + z; case 0xA: return y - z; case 0xB: return -y - z; case 0xC: return y + x; case 0xD: return -y + z; case 0xE: return y - x; case 0xF: return -y - z; default: return 0; // never happens } } __device__ static inline void generateNormalPermutations(double *buffer, double x, double y, double z, int sizeX, int sizeY, int sizeZ, double noiseFactorX, double noiseFactorY, double noiseFactorZ, double octaveSize, Random* random) { double xo = lcg::next_double(*random) * 256.0; double yo = lcg::next_double(*random) * 256.0; double zo = lcg::next_double(*random) * 256.0; //Setup the permutation fresh xD #pragma unroll for(int w = 0; w<256; w++) { setValue(permutations, w, w); } for(int index = 0; index<256; index++) { uint32_t randomIndex = lcg::dynamic_next_int(*random, 256ull - index) + index; //if (randomIndex != index) { // swap uint8_t v1 = getValue(permutations,index); uint8_t v2 = getValue(permutations,randomIndex); setValue(permutations,index, v2); setValue(permutations, randomIndex, v1); //} } double octaveWidth = 1.0 / octaveSize; int32_t i2 = -1; double x1 = 0.0; double x2 = 0.0; double xx1 = 0.0; double xx2 = 0.0; double t; double w; int columnIndex = 0; for (int X = 0; X < sizeX; X++) { double xCoord = (x + (double) X) * noiseFactorX + xo; auto clampedXcoord = (int32_t) xCoord; if (xCoord < (double) clampedXcoord) { clampedXcoord--; } auto xBottoms = (uint8_t) ((uint32_t) clampedXcoord & 0xffu); xCoord -= clampedXcoord; t = xCoord * 6 - 15; w = (xCoord * t + 10); double fadeX = xCoord * xCoord * xCoord * w; for (int Z = 0; Z < sizeZ; Z++) { double zCoord = zo; auto clampedZCoord = (int32_t) zCoord; if (zCoord < (double) clampedZCoord) { clampedZCoord--; } auto zBottoms = (uint8_t) ((uint32_t) clampedZCoord & 0xffu); zCoord -= clampedZCoord; t = zCoord * 6 - 15; w = (zCoord * t + 10); double fadeZ = zCoord * zCoord * zCoord * w; for (int Y = 0; Y < sizeY; Y++) { double yCoords = (y + (double) Y) * noiseFactorY + yo; auto clampedYCoords = (int32_t) yCoords; if (yCoords < (double) clampedYCoords) { clampedYCoords--; } auto yBottoms = (uint8_t) ((uint32_t) clampedYCoords & 0xffu); yCoords -= clampedYCoords; t = yCoords * 6 - 15; w = yCoords * t + 10; double fadeY = yCoords * yCoords * yCoords * w; // 
ZCoord if (Y == 0 || yBottoms != i2) { // this is wrong on so many levels, same ybottoms doesnt mean x and z were the same... i2 = yBottoms; uint16_t k2 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)(xBottoms& 0xffu)) + yBottoms)& 0xffu)) + zBottoms; uint16_t l2 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)(xBottoms& 0xffu)) + yBottoms + 1u )& 0xffu)) + zBottoms; uint16_t k3 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)((xBottoms + 1u)& 0xffu)) + yBottoms )& 0xffu)) + zBottoms; uint16_t l3 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)((xBottoms + 1u)& 0xffu)) + yBottoms + 1u) & 0xffu)) + zBottoms; x1 = lerp(fadeX, grad(getValue(permutations,(uint8_t)(k2& 0xffu)), xCoord, yCoords, zCoord), grad(getValue(permutations,(uint8_t)(k3& 0xffu)), xCoord - 1.0, yCoords, zCoord)); x2 = lerp(fadeX, grad(getValue(permutations,(uint8_t)(l2& 0xffu)), xCoord, yCoords - 1.0, zCoord), grad(getValue(permutations,(uint8_t)(l3& 0xffu)), xCoord - 1.0, yCoords - 1.0, zCoord)); xx1 = lerp(fadeX, grad(getValue(permutations,(uint8_t)((k2+1u)& 0xffu)), xCoord, yCoords, zCoord - 1.0), grad(getValue(permutations,(uint8_t)((k3+1u)& 0xffu)), xCoord - 1.0, yCoords, zCoord - 1.0)); xx2 = lerp(fadeX, grad(getValue(permutations,(uint8_t)((l2+1u)& 0xffu)), xCoord, yCoords - 1.0, zCoord - 1.0), grad(getValue(permutations,(uint8_t)((l3+1u)& 0xffu)), xCoord - 1.0, yCoords - 1.0, zCoord - 1.0)); } if (columnIndex%16 >= INNER_X_START && columnIndex%16 <= INNER_X_END && DIRT_HEIGHT_2D[columnIndex/16 - INNER_Z_START][columnIndex%16 - INNER_X_START] != 15){ double y1 = lerp(fadeY, x1, x2); double y2 = lerp(fadeY, xx1, xx2); (buffer)[columnIndex] = (buffer)[columnIndex] + lerp(fadeZ, y1, y2) * octaveWidth; } if (columnIndex == EARLY_RETURN) return; columnIndex++; } } } } __device__ static inline void generateNormalPermutations_2(double *buffer, double x, double y, double z, int sizeX, int sizeY, int sizeZ, double noiseFactorX, double noiseFactorY, double noiseFactorZ, double octaveSize, Random* random) { double xo = lcg::next_double(*random) * 256.0; double yo = lcg::next_double(*random) * 256.0; double zo = lcg::next_double(*random) * 256.0; //Setup the permutation fresh xD #pragma unroll for(int w = 0; w<256; w++) { setValue(permutations, w, w); } for(int index = 0; index<256; index++) { uint32_t randomIndex = lcg::dynamic_next_int(*random, 256ull - index) + index; //if (randomIndex != index) { // swap uint8_t v1 = getValue(permutations,index); uint8_t v2 = getValue(permutations,randomIndex); setValue(permutations,index, v2); setValue(permutations, randomIndex, v1); //} } double octaveWidth = 1.0 / octaveSize; int32_t i2 = -1; double x1 = 0.0; double x2 = 0.0; double xx1 = 0.0; double xx2 = 0.0; double t; double w; int columnIndex = 0; for (int X = 0; X < sizeX; X++) { double xCoord = (x + (double) X) * noiseFactorX + xo; auto clampedXcoord = (int32_t) xCoord; if (xCoord < (double) clampedXcoord) { clampedXcoord--; } auto xBottoms = (uint8_t) ((uint32_t) clampedXcoord & 0xffu); xCoord -= clampedXcoord; t = xCoord * 6 - 15; w = (xCoord * t + 10); double fadeX = xCoord * xCoord * xCoord * w; for (int Z = 0; Z < sizeZ; Z++) { double zCoord = zo; auto clampedZCoord = (int32_t) zCoord; if (zCoord < (double) clampedZCoord) { clampedZCoord--; } auto zBottoms = (uint8_t) ((uint32_t) clampedZCoord & 0xffu); zCoord -= clampedZCoord; t = zCoord * 6 - 15; w = (zCoord * t + 10); double fadeZ = zCoord * zCoord * zCoord * w; 
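// The Y loop below reuses x1/x2/xx1/xx2 for consecutive samples that land in the same lattice cell (the yBottoms != i2 check) and only redoes the cheap fadeY/fadeZ blend per sample.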
for (int Y = 0; Y < sizeY; Y++) { double yCoords = (y + (double) Y) * noiseFactorY + yo; auto clampedYCoords = (int32_t) yCoords; if (yCoords < (double) clampedYCoords) { clampedYCoords--; } auto yBottoms = (uint8_t) ((uint32_t) clampedYCoords & 0xffu); yCoords -= clampedYCoords; t = yCoords * 6 - 15; w = yCoords * t + 10; double fadeY = yCoords * yCoords * yCoords * w; // ZCoord if (Y == 0 || yBottoms != i2) { // this is wrong on so many levels, same ybottoms doesnt mean x and z were the same... i2 = yBottoms; uint16_t k2 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)(xBottoms& 0xffu)) + yBottoms)& 0xffu)) + zBottoms; uint16_t l2 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)(xBottoms& 0xffu)) + yBottoms + 1u )& 0xffu)) + zBottoms; uint16_t k3 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)((xBottoms + 1u)& 0xffu)) + yBottoms )& 0xffu)) + zBottoms; uint16_t l3 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)((xBottoms + 1u)& 0xffu)) + yBottoms + 1u) & 0xffu)) + zBottoms; x1 = lerp(fadeX, grad(getValue(permutations,(uint8_t)(k2& 0xffu)), xCoord, yCoords, zCoord), grad(getValue(permutations,(uint8_t)(k3& 0xffu)), xCoord - 1.0, yCoords, zCoord)); x2 = lerp(fadeX, grad(getValue(permutations,(uint8_t)(l2& 0xffu)), xCoord, yCoords - 1.0, zCoord), grad(getValue(permutations,(uint8_t)(l3& 0xffu)), xCoord - 1.0, yCoords - 1.0, zCoord)); xx1 = lerp(fadeX, grad(getValue(permutations,(uint8_t)((k2+1u)& 0xffu)), xCoord, yCoords, zCoord - 1.0), grad(getValue(permutations,(uint8_t)((k3+1u)& 0xffu)), xCoord - 1.0, yCoords, zCoord - 1.0)); xx2 = lerp(fadeX, grad(getValue(permutations,(uint8_t)((l2+1u)& 0xffu)), xCoord, yCoords - 1.0, zCoord - 1.0), grad(getValue(permutations,(uint8_t)((l3+1u)& 0xffu)), xCoord - 1.0, yCoords - 1.0, zCoord - 1.0)); } if (columnIndex%16 >= INNER_X_START_2 && columnIndex%16 <= INNER_X_END_2 && DIRT_HEIGHT_2D_2[columnIndex/16 - INNER_Z_START_2][columnIndex%16 - INNER_X_START_2] != 15){ double y1 = lerp(fadeY, x1, x2); double y2 = lerp(fadeY, xx1, xx2); (buffer)[columnIndex] = (buffer)[columnIndex] + lerp(fadeZ, y1, y2) * octaveWidth; } columnIndex++; } } } } __device__ static inline void generateNormalPermutations_3(double *buffer, double x, double y, double z, int sizeX, int sizeY, int sizeZ, double noiseFactorX, double noiseFactorY, double noiseFactorZ, double octaveSize, Random* random) { double xo = lcg::next_double(*random) * 256.0; double yo = lcg::next_double(*random) * 256.0; double zo = lcg::next_double(*random) * 256.0; //Setup the permutation fresh xD #pragma unroll for(int w = 0; w<256; w++) { setValue(permutations, w, w); } for(int index = 0; index<256; index++) { uint32_t randomIndex = lcg::dynamic_next_int(*random, 256ull - index) + index; //if (randomIndex != index) { // swap uint8_t v1 = getValue(permutations,index); uint8_t v2 = getValue(permutations,randomIndex); setValue(permutations,index, v2); setValue(permutations, randomIndex, v1); //} } double octaveWidth = 1.0 / octaveSize; int32_t i2 = -1; double x1 = 0.0; double x2 = 0.0; double xx1 = 0.0; double xx2 = 0.0; double t; double w; int columnIndex = 0; for (int X = 0; X < sizeX; X++) { double xCoord = (x + (double) X) * noiseFactorX + xo; auto clampedXcoord = (int32_t) xCoord; if (xCoord < (double) clampedXcoord) { clampedXcoord--; } auto xBottoms = (uint8_t) ((uint32_t) clampedXcoord & 0xffu); xCoord -= clampedXcoord; t = xCoord * 6 - 15; w = (xCoord * t + 10); double fadeX = xCoord * 
xCoord * xCoord * w; for (int Z = 0; Z < sizeZ; Z++) { double zCoord = zo; auto clampedZCoord = (int32_t) zCoord; if (zCoord < (double) clampedZCoord) { clampedZCoord--; } auto zBottoms = (uint8_t) ((uint32_t) clampedZCoord & 0xffu); zCoord -= clampedZCoord; t = zCoord * 6 - 15; w = (zCoord * t + 10); double fadeZ = zCoord * zCoord * zCoord * w; for (int Y = 0; Y < sizeY; Y++) { double yCoords = (y + (double) Y) * noiseFactorY + yo; auto clampedYCoords = (int32_t) yCoords; if (yCoords < (double) clampedYCoords) { clampedYCoords--; } auto yBottoms = (uint8_t) ((uint32_t) clampedYCoords & 0xffu); yCoords -= clampedYCoords; t = yCoords * 6 - 15; w = yCoords * t + 10; double fadeY = yCoords * yCoords * yCoords * w; // ZCoord if (Y == 0 || yBottoms != i2) { // this is wrong on so many levels, same ybottoms doesnt mean x and z were the same... i2 = yBottoms; uint16_t k2 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)(xBottoms& 0xffu)) + yBottoms)& 0xffu)) + zBottoms; uint16_t l2 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)(xBottoms& 0xffu)) + yBottoms + 1u )& 0xffu)) + zBottoms; uint16_t k3 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)((xBottoms + 1u)& 0xffu)) + yBottoms )& 0xffu)) + zBottoms; uint16_t l3 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)((xBottoms + 1u)& 0xffu)) + yBottoms + 1u) & 0xffu)) + zBottoms; x1 = lerp(fadeX, grad(getValue(permutations,(uint8_t)(k2& 0xffu)), xCoord, yCoords, zCoord), grad(getValue(permutations,(uint8_t)(k3& 0xffu)), xCoord - 1.0, yCoords, zCoord)); x2 = lerp(fadeX, grad(getValue(permutations,(uint8_t)(l2& 0xffu)), xCoord, yCoords - 1.0, zCoord), grad(getValue(permutations,(uint8_t)(l3& 0xffu)), xCoord - 1.0, yCoords - 1.0, zCoord)); xx1 = lerp(fadeX, grad(getValue(permutations,(uint8_t)((k2+1u)& 0xffu)), xCoord, yCoords, zCoord - 1.0), grad(getValue(permutations,(uint8_t)((k3+1u)& 0xffu)), xCoord - 1.0, yCoords, zCoord - 1.0)); xx2 = lerp(fadeX, grad(getValue(permutations,(uint8_t)((l2+1u)& 0xffu)), xCoord, yCoords - 1.0, zCoord - 1.0), grad(getValue(permutations,(uint8_t)((l3+1u)& 0xffu)), xCoord - 1.0, yCoords - 1.0, zCoord - 1.0)); } if (columnIndex%16 >= INNER_X_START_3 && columnIndex%16 <= INNER_X_END_3 && DIRT_HEIGHT_2D_3[columnIndex/16 - INNER_Z_START_3][columnIndex%16 - INNER_X_START_3] != 15){ double y1 = lerp(fadeY, x1, x2); double y2 = lerp(fadeY, xx1, xx2); (buffer)[columnIndex] = (buffer)[columnIndex] + lerp(fadeZ, y1, y2) * octaveWidth; } columnIndex++; } } } } __device__ static inline void generateNoise(double *buffer, double chunkX, double chunkY, double chunkZ, int sizeX, int sizeY, int sizeZ, double offsetX, double offsetY, double offsetZ, Random random, int nbOctaves) { //memset(buffer, 0, sizeof(double) * sizeX * sizeZ * sizeY); double octavesFactor = 1.0; for (int octave = 0; octave < nbOctaves; octave++) { generateNormalPermutations(buffer, chunkX, chunkY, chunkZ, sizeX, sizeY, sizeZ, offsetX * octavesFactor, offsetY * octavesFactor, offsetZ * octavesFactor, octavesFactor, &random); octavesFactor /= 2.0; } } __device__ static inline void generateNoise_2(double *buffer, double chunkX, double chunkY, double chunkZ, int sizeX, int sizeY, int sizeZ, double offsetX, double offsetY, double offsetZ, Random random, int nbOctaves) { //memset(buffer, 0, sizeof(double) * sizeX * sizeZ * sizeY); double octavesFactor = 1.0; for (int octave = 0; octave < nbOctaves; octave++) { generateNormalPermutations_2(buffer, chunkX, chunkY, 
chunkZ, sizeX, sizeY, sizeZ, offsetX * octavesFactor, offsetY * octavesFactor, offsetZ * octavesFactor, octavesFactor, &random); octavesFactor /= 2.0; } } __device__ static inline void generateNoise_3(double *buffer, double chunkX, double chunkY, double chunkZ, int sizeX, int sizeY, int sizeZ, double offsetX, double offsetY, double offsetZ, Random random, int nbOctaves) { //memset(buffer, 0, sizeof(double) * sizeX * sizeZ * sizeY); double octavesFactor = 1.0; for (int octave = 0; octave < nbOctaves; octave++) { generateNormalPermutations_3(buffer, chunkX, chunkY, chunkZ, sizeX, sizeY, sizeZ, offsetX * octavesFactor, offsetY * octavesFactor, offsetZ * octavesFactor, octavesFactor, &random); octavesFactor /= 2.0; } } } using namespace noise; __device__ static inline bool match(uint64_t seed) { seed = get_random(seed); //SkipNoiseGen(16+16+8, &seed); lcg::advance<10480>(seed);//VERY VERY DODGY double heightField[EARLY_RETURN+1]; #pragma unroll for(uint16_t i = 0; i<EARLY_RETURN+1;i++) heightField[i] = 0; const double noiseFactor = 0.03125; generateNoise(heightField, (double) (CHUNK_X <<4), (double) (CHUNK_Z<<4), 0.0, 16, 16, 1, noiseFactor, noiseFactor, 1.0, seed, 4); for(uint8_t z = 0; z < INNER_Z_END - INNER_Z_START + 1; z++) { for(uint8_t x = 0; x < INNER_X_END - INNER_X_START + 1; x++) { if (DIRT_HEIGHT_2D[z][x] != 15) { uint8_t dirty = heightField[INNER_X_START + x + (INNER_Z_START + z) * 16] + LocalNoise2D[z][x] * 0.2 > 0.0 ? 0 : 1; if (dirty!=(int8_t)DIRT_HEIGHT_2D[z][x]) return false; } } } return true; } __device__ static inline bool match2(uint64_t seed) { seed = get_random(seed); //SkipNoiseGen(16+16+8, &seed); lcg::advance<10480>(seed);//VERY VERY DODGY double heightField[256]; #pragma unroll for(uint16_t i = 0; i<256;i++) heightField[i] = 0; const double noiseFactor = 0.03125; generateNoise_2(heightField, (double) (CHUNK_X_2 <<4), (double) (CHUNK_Z_2<<4), 0.0, 16, 16, 1, noiseFactor, noiseFactor, 1.0, seed, 4); for(uint8_t z = 0; z < INNER_Z_END_2 - INNER_Z_START_2 + 1; z++) { for(uint8_t x = 0; x < INNER_X_END_2 - INNER_X_START_2 + 1; x++) { if (DIRT_HEIGHT_2D_2[z][x] != 15) { uint8_t dirty = heightField[INNER_X_START_2 + x + (INNER_Z_START_2 + z) * 16] + LocalNoise2D_2[z][x] * 0.2 > 0.0 ? 0 : 1; if (dirty!=(int8_t)DIRT_HEIGHT_2D_2[z][x]) return false; } } } return true; } __device__ static inline bool match3(uint64_t seed) { seed = get_random(seed); //SkipNoiseGen(16+16+8, &seed); lcg::advance<10480>(seed);//VERY VERY DODGY double heightField[256]; #pragma unroll for(uint16_t i = 0; i<256;i++) heightField[i] = 0; const double noiseFactor = 0.03125; generateNoise_3(heightField, (double) (CHUNK_X_3 <<4), (double) (CHUNK_Z_3<<4), 0.0, 16, 16, 1, noiseFactor, noiseFactor, 1.0, seed, 4); for(uint8_t z = 0; z < INNER_Z_END_3 - INNER_Z_START_3 + 1; z++) { for(uint8_t x = 0; x < INNER_X_END_3 - INNER_X_START_3 + 1; x++) { if (DIRT_HEIGHT_2D_3[z][x] != 15) { uint8_t dirty = heightField[INNER_X_START_3 + x + (INNER_Z_START_3 + z) * 16] + LocalNoise2D_3[z][x] * 0.2 > 0.0 ? 
0 : 1; if (dirty!=(int8_t)DIRT_HEIGHT_2D_3[z][x]) return false; } } } return true; } __global__ __launch_bounds__(BLOCK_SIZE,2) static void tempCheck(uint64_t offset, uint64_t* buffer, uint32_t* counter) { uint64_t seed = blockIdx.x * blockDim.x + threadIdx.x + offset; if (match(seed)) { buffer[atomicAdd(counter,1)] = seed; } } __global__ __launch_bounds__(BLOCK_SIZE,2) static void tempCheck2(uint32_t count, uint64_t* buffer) { uint64_t seedIndex = blockIdx.x * blockDim.x + threadIdx.x; if (seedIndex>=count) return; if (!match2(buffer[seedIndex])) { buffer[seedIndex] = 0; } } __global__ __launch_bounds__(BLOCK_SIZE,2) static void tempCheck3(uint32_t count, uint64_t* buffer) { uint64_t seedIndex = blockIdx.x * blockDim.x + threadIdx.x; if (seedIndex>=count) return; uint64_t seed = buffer[seedIndex]; if (seed==0) return; if (!match3(seed)) { buffer[seedIndex] = 0; } } std::ifstream inSeeds; std::ofstream outSeeds; uint64_t* buffer; uint32_t* counter; double getNextDoubleForLocNoise(int x, int z); void setup(int gpu_device) { cudaSetDevice(gpu_device); GPU_ASSERT(cudaPeekAtLastError()); GPU_ASSERT(cudaDeviceSynchronize()); double locNoise2D[INNER_Z_END - INNER_Z_START + 1][INNER_X_END - INNER_X_START + 1]; for(uint8_t z = 0; z < INNER_Z_END - INNER_Z_START + 1; z++) { for (uint8_t x = 0; x < INNER_X_END - INNER_X_START + 1; x++) { locNoise2D[z][x] = getNextDoubleForLocNoise((CHUNK_X<<4) + INNER_X_START + x, (CHUNK_Z<<4) + INNER_Z_START + z); } } GPU_ASSERT(cudaMemcpyToSymbol(LocalNoise2D, &locNoise2D, sizeof(locNoise2D))); GPU_ASSERT(cudaPeekAtLastError()); double locNoise2D_2[INNER_Z_END_2 - INNER_Z_START_2 + 1][INNER_X_END_2 - INNER_X_START_2 + 1]; for(uint8_t z = 0; z < INNER_Z_END_2 - INNER_Z_START_2 + 1; z++) { for (uint8_t x = 0; x < INNER_X_END_2 - INNER_X_START_2 + 1; x++) { locNoise2D_2[z][x] = getNextDoubleForLocNoise((CHUNK_X_2<<4) + INNER_X_START_2 + x, (CHUNK_Z_2<<4) + INNER_Z_START_2 + z); } } GPU_ASSERT(cudaMemcpyToSymbol(LocalNoise2D_2, &locNoise2D_2, sizeof(locNoise2D_2))); GPU_ASSERT(cudaPeekAtLastError()); double locNoise2D_3[INNER_Z_END_3 - INNER_Z_START_3 + 1][INNER_X_END_3 - INNER_X_START_3 + 1]; for(uint8_t z = 0; z < INNER_Z_END_3 - INNER_Z_START_3 + 1; z++) { for (uint8_t x = 0; x < INNER_X_END_3 - INNER_X_START_3 + 1; x++) { locNoise2D_3[z][x] = getNextDoubleForLocNoise((CHUNK_X_3<<4) + INNER_X_START_3 + x, (CHUNK_Z_3<<4) + INNER_Z_START_3 + z); } } GPU_ASSERT(cudaMemcpyToSymbol(LocalNoise2D_3, &locNoise2D_3, sizeof(locNoise2D_3))); GPU_ASSERT(cudaPeekAtLastError()); } time_t elapsed_chkpoint = 0; struct checkpoint_vars { unsigned long long offset; time_t elapsed_chkpoint; }; int main(int argc, char *argv[]) { int gpu_device = 0; uint64_t START; uint64_t offsetStart = 0; uint64_t COUNT; #ifdef BOINC BOINC_OPTIONS options; boinc_options_defaults(options); options.normal_thread_priority = true; boinc_init_options(&options); #endif for (int i = 1; i < argc; i += 2) { const char *param = argv[i]; if (strcmp(param, "-d") == 0 || strcmp(param, "--device") == 0) { gpu_device = atoi(argv[i + 1]); } else if (strcmp(param, "-s") == 0 || strcmp(param, "--start") == 0) { sscanf(argv[i + 1], "%llu", &START); } else if (strcmp(param, "-e") == 0 || strcmp(param, "--count") == 0) { sscanf(argv[i + 1], "%llu", &COUNT); } else { fprintf(stderr,"Unknown parameter: %s\n", param); } } FILE *checkpoint_data = boinc_fopen("packpoint.txt", "rb"); if(!checkpoint_data){ fprintf(stderr, "No checkpoint to load\n"); } else{ #ifdef BOINC boinc_begin_critical_section(); #endif struct 
checkpoint_vars data_store; fread(&data_store, sizeof(data_store), 1, checkpoint_data); offsetStart = data_store.offset; elapsed_chkpoint = data_store.elapsed_chkpoint; fprintf(stderr, "Checkpoint loaded, task time %d s, seed pos: %llu\n", elapsed_chkpoint, START); fclose(checkpoint_data); #ifdef BOINC boinc_end_critical_section(); #endif } #ifdef BOINC APP_INIT_DATA aid; boinc_get_init_data(aid); if (aid.gpu_device_num >= 0) { gpu_device = aid.gpu_device_num; fprintf(stderr,"boinc gpu %i gpuindex: %i \n", aid.gpu_device_num, gpu_device); } else { fprintf(stderr,"stndalone gpuindex %i \n", gpu_device); } #endif setup(gpu_device); uint64_t seedCount = COUNT; std::cout << "Processing " << seedCount << " seeds" << std::endl; outSeeds.open("seedsout"); GPU_ASSERT(cudaMallocManaged(&buffer, sizeof(*buffer) * SEEDS_PER_CALL)); GPU_ASSERT(cudaPeekAtLastError()); GPU_ASSERT(cudaMallocManaged(&counter, sizeof(*counter))); GPU_ASSERT(cudaPeekAtLastError()); time_t start_time = time(NULL); int outCount = 0; int checkpointTemp = 0; for(uint64_t offset =offsetStart;offset<seedCount;offset+=SEEDS_PER_CALL) { // Normal filtering time_t elapsed = time(NULL) - start_time; double frac = (double) offset / (double)(seedCount); #ifdef BOINC boinc_fraction_done(frac); #endif *counter = 0; tempCheck<<<1ULL<<WORK_SIZE_BITS,BLOCK_SIZE>>>(START + offset, buffer,counter); GPU_ASSERT(cudaPeekAtLastError()); GPU_ASSERT(cudaDeviceSynchronize()); tempCheck2<<<((*counter)/BLOCK_SIZE)+1,BLOCK_SIZE>>>(*counter, buffer); GPU_ASSERT(cudaPeekAtLastError()); GPU_ASSERT(cudaDeviceSynchronize()); tempCheck3<<<((*counter)/BLOCK_SIZE)+1,BLOCK_SIZE>>>(*counter, buffer); GPU_ASSERT(cudaPeekAtLastError()); GPU_ASSERT(cudaDeviceSynchronize()); for(int i=0;i<*counter;i++) { if (buffer[i]!=0) { uint64_t seed = buffer[i]; std::cout << "3rd level seed found: " << seed << std::endl; outSeeds << seed << std::endl; outCount++; } } if(checkpointTemp >= 180000000 || boinc_time_to_checkpoint()){ #ifdef BOINC boinc_begin_critical_section(); // Boinc should not interrupt this #endif // Checkpointing section below boinc_delete_file("packpoint.txt"); // Don't touch, same func as normal fdel FILE *checkpoint_data = boinc_fopen("packpoint.txt", "wb"); struct checkpoint_vars data_store; data_store.offset = offset; data_store.elapsed_chkpoint = elapsed_chkpoint + elapsed; fwrite(&data_store, sizeof(data_store), 1, checkpoint_data); fclose(checkpoint_data); checkpointTemp = 0; #ifdef BOINC boinc_end_critical_section(); boinc_checkpoint_completed(); // Checkpointing completed #endif } checkpointTemp += SEEDS_PER_CALL; std::cout << "Seeds left:" << (((int64_t)seedCount-offset)-SEEDS_PER_CALL) << std::endl; } std::cout << "Done processing" << std::endl; #ifdef BOINC boinc_begin_critical_section(); #endif time_t elapsed = time(NULL) - start_time; double done = (double)COUNT / 1000000.0; double speed = done / (double) elapsed; fprintf(stderr, "\nSpeed: %.2lfm/s\n", speed ); fprintf(stderr, "Done\n"); fprintf(stderr, "Processed: %llu seeds in %.2lfs seconds\n", COUNT, (double) elapsed_chkpoint + (double) elapsed ); fprintf(stderr, "Have %llu output seeds.\n", outCount); fflush(stderr); outSeeds.close(); boinc_delete_file("packpoint.txt"); #ifdef BOINC boinc_end_critical_section(); #endif boinc_finish(0); } double getNextDoubleForLocNoise(int x, int z) { Random rand = get_random((((int64_t)x) >> 4) * 341873128712LL + (((int64_t)z) >> 4) * 132897987541LL); for (int dx = 0; dx < 16; dx++) { for (int dz = 0; dz < 16; dz++) { if (dx == (x & 15) && dz == (z & 15)) 
{
                //advance2(&rand);
                //advance2(&rand);
                return next_double(&rand);
            }
            advance2(&rand);
            advance2(&rand);
            advance2(&rand);
            for(int k1 = 127; k1 >= 0; k1--) {
                random_next_int_nonpow(&rand,5);
            }
            //for (int i = 0; i < 67; i++) {
            //    advance2(&rand);
            //}
        }
    }
    exit(-99);
}
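For reference, the tempCheck / tempCheck2 / tempCheck3 chain above is a three-stage funnel: the first stage appends every surviving seed into a managed buffer through atomicAdd on a shared counter, and the later stages merely zero out entries that fail, leaving the buffer layout untouched so no re-compaction is needed. Below is a stripped-down sketch of that pattern; the predicates, sizes and names are placeholders, not the real match functions.

// funnel_sketch.cu -- illustrative only; stage1Pass/stage2Pass stand in for match()/match2().
#include <cstdio>
#include <cstdint>
#include <cuda_runtime.h>

__device__ static bool stage1Pass(uint64_t seed) { return (seed & 0xFFu) == 0; }    // placeholder test
__device__ static bool stage2Pass(uint64_t seed) { return (seed & 0xFFFu) == 0; }   // placeholder test

__global__ void stage1(uint64_t offset, uint64_t *out, uint32_t *counter) {
    uint64_t seed = blockIdx.x * (uint64_t)blockDim.x + threadIdx.x + offset;
    if (stage1Pass(seed)) out[atomicAdd(counter, 1u)] = seed;      // compact survivors
}

__global__ void stage2(uint32_t count, uint64_t *buf) {
    uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= count) return;
    if (!stage2Pass(buf[i])) buf[i] = 0;                           // mark failures in place
}

int main() {
    const uint32_t threads = 256, blocks = 4096;                   // 2^20 candidate seeds
    uint64_t *buf; uint32_t *counter;
    cudaMallocManaged(&buf, sizeof(uint64_t) * threads * blocks);  // worst case: everything survives
    cudaMallocManaged(&counter, sizeof(uint32_t));
    *counter = 0;
    stage1<<<blocks, threads>>>(0, buf, counter);
    cudaDeviceSynchronize();
    if (*counter > 0) {
        stage2<<<(*counter + threads - 1) / threads, threads>>>(*counter, buf);
        cudaDeviceSynchronize();
    }
    uint32_t kept = 0;
    for (uint32_t i = 0; i < *counter; i++) if (buf[i] != 0) kept++;
    printf("stage1 survivors: %u, after stage2: %u\n", *counter, kept);
    cudaFree(buf); cudaFree(counter);
    return 0;
}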
2f20109ed38bea9fb86757d407ccaa33f012346d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "gpu_common.cuh" #include "gpu_vcd_common.cuh" #include "gpu_vcd_sf_mp_k0.cuh" __device__ float k0_get_input_g(float* input_g, size_t input_g_pitch, int x, int y) { return *((float*)((char*)&input_g[0] + input_g_pitch * y) + x); } // set an element to green __device__ void set_tempGreen(float* tempGreen_g, size_t tempGreen_g_pitch, int x, int y, float value) { *((float*)((char*)&tempGreen_g[0] + tempGreen_g_pitch * y) + x) = value; } __device__ void set_e(int* eValues_g, size_t eValues_g_pitch, int x, int y, int value) { *((int*)((char*)&eValues_g[0] + eValues_g_pitch * y) + x) = value; } // set an element to shared memory __device__ void k0_set_input_s(float* input_s, size_t tempGreen_s_pitch, int x, int y, float val) { input_s[y * (tempGreen_s_pitch / sizeof(float)) + x] = val; } __device__ void k0_set_input_s_i(int* input_s, size_t tempGreen_s_pitch, int x, int y, int val) { input_s[y * (tempGreen_s_pitch / sizeof(float)) + x] = val; } // get an element from shared memory __device__ float k0_get_input_s(float* input_s, size_t tempGreen_s_pitch, int x, int y) { return input_s[y * (tempGreen_s_pitch / sizeof(float)) + x]; } __device__ int k0_get_input_s_i(int* input_s, size_t tempGreen_s_pitch, int x, int y) { return input_s[y * (tempGreen_s_pitch / sizeof(float)) + x]; } __global__ void calculate_temp_green_k(float* input_g, size_t input_g_pitch, float* tempGreenV_g, float* tempGreenH_g, float* tempGreenD_g, size_t tempGreen_g_pitch, size_t tempGreen_s_pitch, size_t staging_pitch, vcd_params params, int filter) { extern __shared__ float input_s[]; int loopx, loopy = 0; int maxtr = max(params.window_radius + params.temp_green_radius, params.e_radius); // apron size, max transfer radius int tlsx, tlsy, tltx, tlty = 0; float valV, valH, valD = 0.0f; // load input from global memory to shared memory tlsx = 2 * blockDim.x * blockIdx.x + params.window_radius; tlsy = blockDim.y * blockIdx.y + params.window_radius; int bankblk = tlsx % 16; loopy = (int)ceil((float)(blockDim.y + 2 * params.temp_green_radius)/(blockDim.y)); if (bankblk == 0) loopx = (int)ceil((float)(2 * blockDim.x + 2 * params.temp_green_radius)/(blockDim.x)); else loopx = 1 + (int)ceil((float)(2 * blockDim.x + 2 * params.temp_green_radius - (16 - bankblk))/(blockDim.x)); for (int y = 0; y < loopy; y++) { for (int x = 0; x < loopx; x++) { int ntlsx = tlsx + x * blockDim.x + threadIdx.x - bankblk; // thread load source address, x int ntlsy = tlsy + y * blockDim.y + threadIdx.y; // thread load source address, y int endx = tlsx + 2 * blockDim.x + 2 * params.temp_green_radius; int endy = tlsy + blockDim.y + 2 * params.temp_green_radius; if (ntlsx < endx && ntlsy < endy && ntlsx >= tlsx && // load condition ntlsx < (params.width + 2 * maxtr) && ntlsy < (params.height + 2 * maxtr)) // border condition { tltx = x * blockDim.x + threadIdx.x - bankblk; tlty = y * blockDim.y + threadIdx.y; float elmt = k0_get_input_g(input_g, input_g_pitch, ntlsx, ntlsy); // get element from load source k0_set_input_s(input_s, tempGreen_s_pitch, tltx, tlty, elmt); } } } // make sure all elements are loaded __syncthreads(); tlsx = 2 * (threadIdx.x % (blockDim.x / 2)) + (threadIdx.y / (blockDim.y / 2)) * blockDim.x + params.temp_green_radius; // x address for loading from input tlsy = threadIdx.x / (blockDim.x / 2) + 2 * (threadIdx.y % (blockDim.y / 2)) + params.temp_green_radius; // y address for loading from input tltx = params.x + 2 * blockDim.x * blockIdx.x + 
tlsx; tlty = params.y + blockDim.y * blockIdx.y + tlsy; int col = fc(filter, tltx, tlty); if (col == COLOR_GREEN_RED) { tlsx += params.grtor; tltx += params.grtor; } else if (col == COLOR_GREEN_BLUE) { tlsx += params.gbtob; tltx += params.gbtob; } // SHARED MEMORY VERSION // calculate vertical green if (tltx < (params.x + params.width + params.window_radius) && tlty < (params.y + params.height + params.window_radius)) { valV = (k0_get_input_s(input_s, tempGreen_s_pitch, tlsx, tlsy-1) + k0_get_input_s(input_s, tempGreen_s_pitch, tlsx, tlsy+1)) / 2.0f + (2.0f * k0_get_input_s(input_s, tempGreen_s_pitch, tlsx, tlsy) - k0_get_input_s(input_s, tempGreen_s_pitch, tlsx, tlsy-2) - k0_get_input_s(input_s, tempGreen_s_pitch, tlsx, tlsy+2)) / 4.0f; // calculate horizontal green valH = (k0_get_input_s(input_s, tempGreen_s_pitch, tlsx-1, tlsy) + k0_get_input_s(input_s, tempGreen_s_pitch, tlsx+1, tlsy)) / 2.0f + (2.0f * k0_get_input_s(input_s, tempGreen_s_pitch, tlsx, tlsy) - k0_get_input_s(input_s, tempGreen_s_pitch, tlsx-2, tlsy) - k0_get_input_s(input_s, tempGreen_s_pitch, tlsx+2, tlsy)) / 4.0f; // calculate diagonal green valD = (k0_get_input_s(input_s, tempGreen_s_pitch, tlsx-1, tlsy) + k0_get_input_s(input_s, tempGreen_s_pitch, tlsx+1, tlsy) + k0_get_input_s(input_s, tempGreen_s_pitch, tlsx, tlsy-1) + k0_get_input_s(input_s, tempGreen_s_pitch, tlsx, tlsy+1)) / 4.0f + (4.0f * k0_get_input_s(input_s, tempGreen_s_pitch, tlsx, tlsy) - k0_get_input_s(input_s, tempGreen_s_pitch, tlsx-2, tlsy) - k0_get_input_s(input_s, tempGreen_s_pitch, tlsx+2, tlsy) - k0_get_input_s(input_s, tempGreen_s_pitch, tlsx, tlsy-2) - k0_get_input_s(input_s, tempGreen_s_pitch, tlsx, tlsy+2)) / 8.0f; } __syncthreads(); // once computation result is saved in register, we must put them back in shared memory, to ensure coalesced store // we will try to achieve 64-bit coalesced store for CC 1.0 and 1.1, so the trade-off will be 2-way bank conflict // address to put and then read the computation result into shared memory //tlsx = (threadIdx.x) % 8 + (threadIdx.y / 8) * 8; //tlsy = threadIdx.x / 8 + 2 * (threadIdx.y % 8); tlsx = (threadIdx.x) % (blockDim.x / 2) + (threadIdx.y / (blockDim.y / 2)) * (blockDim.x / 2); tlsy = threadIdx.x / (blockDim.x / 2) + 2 * (threadIdx.y % (blockDim.y / 2)); // address to write into global memory tltx = blockDim.x * blockIdx.x + threadIdx.x; tlty = blockDim.y * blockIdx.y + threadIdx.y; if (tltx < (params.width + params.window_radius) / 2 && tlty < (params.height + params.window_radius)) { // put valV into shared memory k0_set_input_s(input_s, staging_pitch, tlsx, tlsy, valV); __syncthreads(); set_tempGreen(tempGreenV_g, tempGreen_g_pitch, tltx, tlty, k0_get_input_s(input_s, staging_pitch, threadIdx.x, threadIdx.y)); // put valH itu shared memory k0_set_input_s(input_s, staging_pitch, tlsx, tlsy, valH); __syncthreads(); set_tempGreen(tempGreenH_g, tempGreen_g_pitch, tltx, tlty, k0_get_input_s(input_s, staging_pitch, threadIdx.x, threadIdx.y)); // put valD into shared memory k0_set_input_s(input_s, staging_pitch, tlsx, tlsy, valD); __syncthreads(); set_tempGreen(tempGreenD_g, tempGreen_g_pitch, tltx, tlty, k0_get_input_s(input_s, staging_pitch, threadIdx.x, threadIdx.y)); } } void calculate_temp_green(float* input_g, size_t input_g_pitch, float* tempGreenV_g, float* tempGreenH_g, float* tempGreenD_g, size_t tempGreen_g_pitch, vcd_params params, int filter, hipDeviceProp_t prop, vcd_sf_mp_kernel_times* times) { dim3 dimBlock, dimGrid; size_t smemPitch, stagingPitch, smemSize; int mod = 0; int banks = 0; 
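// Kernel time below is measured with a hipEvent_t pair: record start/stop around the launch, synchronize on stop, then hipEventElapsedTime reports milliseconds.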
float kerntime = 0.0f; hipEvent_t start; hipEvent_t stop; hipEventCreate(&start); hipEventCreate(&stop); if (prop.major == 2) { dimBlock.x = 32; banks = 32; } else // prop.major = 1.x { dimBlock.x = 16; banks = 16; } dimBlock.y = 16; dimGrid.x = (int) ceil((float)((params.width + params.window_radius) / 2) / (float)dimBlock.x); dimGrid.y = (int) ceil((float)((params.height + params.window_radius) / (float)dimBlock.y)); smemPitch = ((2 * dimBlock.x + 2 * params.temp_green_radius) * sizeof(float)); // assuming the size of element is float stagingPitch = (dimBlock.x + banks / 2) * sizeof(float); smemSize = smemPitch * (dimBlock.y + 2 * params.temp_green_radius); hipEventRecord(start,0); hipLaunchKernelGGL(( calculate_temp_green_k), dim3(dimGrid),dim3(dimBlock),smemSize, 0, input_g, input_g_pitch, tempGreenV_g, tempGreenH_g, tempGreenD_g, tempGreen_g_pitch, smemPitch, stagingPitch, params, filter); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&kerntime, start, stop); times->calc_temp_green = kerntime; hipEventDestroy(start); hipEventDestroy(stop); } __global__ void calculate_e_k(int* e, size_t eptc, float* input, size_t inputptc, size_t smemptc, size_t stgptc, vcd_params params, int filter) { // load from input to shared memory. However, the position is not aligned for coalescing. // so, we must calculate the offset to achieve coalescing int sx, sy, tx, ty, cx, cy, color, val = 0; float ev = 0.0f; int maxrad = max(params.window_radius + params.temp_green_radius, params.e_radius); int offsety = maxrad > params.e_radius? maxrad - params.e_radius : 0; extern __shared__ float smem[]; // source address sx = 2 * blockDim.x * blockIdx.x; sy = blockDim.y * blockIdx.y + offsety; int loopx = (int)ceil((float)(2 * blockDim.x + params.e_radius + maxrad) / (float)(blockDim.x)); int loopy = (int)ceil((float)(blockDim.y + 2 * params.e_radius) / (float)(blockDim.y)); for (int y = 0; y < loopy; y++) { for (int x = 0; x < loopx; x++) { int nsx = sx + x * blockDim.x + threadIdx.x; int nsy = sy + y * blockDim.y + threadIdx.y; int endx = sx + 2 * blockDim.x + params.e_radius + maxrad; int endy = sy + blockDim.y + 2 * params.e_radius; if (nsx < endx && nsy < endy && nsx < (params.width + 2 * maxrad) && nsy < (params.height + 2 * maxrad)) { tx = x * blockDim.x + threadIdx.x; ty = y * blockDim.y + threadIdx.y; smem[ty * (smemptc / sizeof(float)) + tx] = input[nsy * (inputptc / sizeof(float)) + nsx]; } } } //sx = 2 * (threadIdx.x % 8) + (threadIdx.y / 8) * 16 + maxrad; // x address for loading from input //sy = threadIdx.x / 8 + 2 * (threadIdx.y % 8) + params.e_radius; // y address for loading from input sx = 2 * (threadIdx.x % (blockDim.x / 2)) + (threadIdx.y / (blockDim.y / 2)) * blockDim.x + maxrad; // x address for loading from input sy = threadIdx.x / (blockDim.x / 2) + 2 * (threadIdx.y % (blockDim.y / 2)) + params.e_radius; // y address for loading from input cx = params.x + 2 * blockDim.x * blockIdx.x + sx; cy = params.y + blockDim.y * blockIdx.y + sy; color = fc(filter, cx, cy); if (color == COLOR_GREEN_RED) { sx += params.grtor; cx += params.grtor; } else if (color == COLOR_GREEN_BLUE) { sx += params.gbtob; cx += params.gbtob; } float Lh, Lv = 0.0f; if (cx < (params.x + params.width) && cy < (params.y + params.height)) { for (int dx = -2; dx < 3; dx++) { for (int dy = -2; dy < 3; dy++) { /*if (dx != 0 || dy != 0) { Lh += abs(k0_get_input_s(smem, smemptc, sx+dx,sy+dy)-k0_get_input_s(smem, smemptc, sx,sy+dy)); Lv += abs(k0_get_input_s(smem, smemptc, sx+dx,sy+dy)-k0_get_input_s(smem, 
smemptc, sx+dx,sy)); }*/ if (dx != 0) { Lv += abs(k0_get_input_s(smem, smemptc, sx+dx,sy+dy)-k0_get_input_s(smem, smemptc, sx+dx,sy)); } if (dy != 0) { Lh += abs(k0_get_input_s(smem, smemptc, sx+dx,sy+dy)-k0_get_input_s(smem, smemptc, sx,sy+dy)); } } } ev = max((float)Lv/(float)Lh,(float)Lh/(float)Lv); val = 0; if (ev > params.e_threshold) { // sharp block, insert previous g computation result if (Lh < Lv) { val = HORZ_EDGE_PRE_CALC; } else { val = VERT_EDGE_PRE_CALC; } } else { val = TEX_PRE_CALC; } } __syncthreads(); // make sure all threads done the calculation, because after this the shared memory content will be wiped out // put the result into shared memory. 2-way bank conflict //sx = (threadIdx.x) % 8 + (threadIdx.y / 8) * 8; //sy = 2 * (threadIdx.y % 8) + (threadIdx.x / 8); sx = (threadIdx.x) % (blockDim.x / 2) + (threadIdx.y / (blockDim.y / 2)) * (blockDim.x / 2); sy = threadIdx.x / (blockDim.x / 2) + 2 * (threadIdx.y % (blockDim.y / 2)); ((int*)smem)[sy * (stgptc / sizeof(int)) + sx] = val; __syncthreads(); // put the result in shared memory into global memory tx = blockDim.x * blockIdx.x + threadIdx.x; ty = blockDim.y * blockIdx.y + threadIdx.y; if (tx < (params.width / 2) && ty < params.height) { e[ty * (eptc / sizeof(int)) + tx] = ((int*)smem)[threadIdx.y * (stgptc / sizeof(int)) + threadIdx.x]; } } __global__ void test_e_k(float* outr, size_t outrptc, int* e, size_t eptc, vcd_params params, int filter) { int sx, sy, tx, ty, cx, cy, color = 0; // source address sx = blockDim.x * blockIdx.x + threadIdx.x; sy = blockDim.y * blockIdx.y + threadIdx.y; // target address tx = 2 * sx; ty = sy; // color address cx = params.x + tx; cy = params.y + ty; // adjust source and target position if it falls into red/blue pixel color = fc(filter, cx, cy); if (color == COLOR_GREEN_RED) { tx += correct_x(params, COLOR_RED, color); //tx += params.grtor; } else if (color == COLOR_GREEN_BLUE) { tx += correct_x(params, COLOR_BLUE, color); //tx += params.gbtob; } if (sx < (params.width / 2) && sy < params.height) { // copy from input to output int val = e[sy * (eptc / sizeof(int)) + sx]; if (val == HORZ_EDGE_PRE_CALC || val == VERT_EDGE_PRE_CALC) outr[ty * (outrptc / sizeof(unsigned int)) + tx] = 8191.0f; } } void calculate_e(int* e, size_t eptc, float* input, size_t inputptc, vcd_params params, int filter, hipDeviceProp_t prop, vcd_sf_mp_kernel_times* times) { int maxrad = max(params.window_radius + params.temp_green_radius, params.e_radius); float kerntime = 0.0f; hipEvent_t start; hipEvent_t stop; hipEventCreate(&start); hipEventCreate(&stop); dim3 dimBlock; dim3 dimGrid; int banks = 0; if (prop.major == 2) { dimBlock.x = 32; banks = 32; } else { dimBlock.x = 16; banks = 16; } dimBlock.y = 16; dimGrid.x = (int)ceil((float)(params.width / 2) / (float)dimBlock.x); dimGrid.y = (int)ceil((float)params.height / (float)dimBlock.y); size_t smemptc, stgptc = 0; size_t smemsz= 0; int mod = 0; smemptc = get_smem_pitch(((params.e_radius + maxrad + 2 * dimBlock.x) * sizeof(float))); stgptc = (dimBlock.x + banks / 2) * sizeof(float); smemsz = smemptc * (2 * params.e_radius + dimBlock.y); hipEventRecord(start, 0); hipLaunchKernelGGL(( calculate_e_k), dim3(dimGrid), dim3(dimBlock), smemsz, 0, e, eptc, input, inputptc, smemptc, stgptc, params, filter); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&kerntime, start, stop); times->calc_e = kerntime; hipEventDestroy(start); hipEventDestroy(stop); } __global__ void test_temp_green_k(float* outg, size_t outg_ptc, const float* tempg, size_t 
tempg_ptc, const float* input, size_t input_ptc, vcd_params params, int filter) { // first, copy from input to output int sx, sy, tx, ty, cx, cy, color = 0; int maxrad = max(params.window_radius + params.temp_green_radius, params.e_radius); // source address sx = 2 * (blockDim.x * blockIdx.x + threadIdx.x) + maxrad; sy = blockDim.y * blockIdx.y + threadIdx.y + maxrad; // target address tx = sx - maxrad; ty = sy - maxrad; // color address cx = params.x + tx; cy = params.y + ty; // adjust source and target position if it falls into red/blue pixel color = fc(filter, cx, cy); if (color == COLOR_RED) { sx -= params.grtor; tx -= params.grtor; } else if (color == COLOR_BLUE) { sx += params.gbtob; tx += params.gbtob; } if (tx < params.width && ty < params.height) { // copy from input to output outg[ty * (outg_ptc / sizeof(unsigned int)) + tx] = (unsigned int)input[sy * (input_ptc / sizeof(float)) + sx]; } // second, copy from temp green to output // source address. Dont forget to offset it by the size of window radius sx = blockDim.x * blockIdx.x + threadIdx.x; sy = blockDim.y * blockIdx.y + threadIdx.y; // target address tx = 2 * (blockDim.x * blockIdx.x + threadIdx.x); ty = blockDim.y * blockIdx.y + threadIdx.y; cx = params.x + tx; cy = params.y + ty; color = fc(filter, cx, cy); if (color == COLOR_GREEN_RED) { tx += params.grtor; } else if (color == COLOR_GREEN_BLUE) { tx += params.gbtob; } if (tx < params.width && ty < params.height) { // copy from temp green to output outg[ty * (outg_ptc / sizeof(unsigned int)) + tx] = (unsigned int)tempg[sy * (tempg_ptc / sizeof(float)) + sx]; } } void test_temp_green(float* outg, size_t outg_ptc, const float* tempg, size_t tempg_ptc, const float* input, size_t input_ptc, vcd_params params, int filter, hipDeviceProp_t prop) { dim3 dimBlock; dim3 dimGrid; if (prop.major == 2) dimBlock.x = 32; else // prop.major = 1.x dimBlock.x = 16; dimBlock.y = 16; dimGrid.x = (int)ceil((float)params.width / (float)dimBlock.x); dimGrid.y = (int)ceil((float)params.height / (float)dimBlock.y); hipLaunchKernelGGL(( test_temp_green_k), dim3(dimGrid), dim3(dimBlock), 0, 0, outg, outg_ptc, tempg, tempg_ptc, input, input_ptc, params, filter); } void test_e(float* outr, size_t outrptc, int* e, size_t eptc, vcd_params params, int filter, hipDeviceProp_t prop) { dim3 dimBlock; dim3 dimGrid; if (prop.major == 2) dimBlock.x = 32; else // prop.major = 1.x dimBlock.x = 16; dimBlock.y = 16; dimGrid.x = (int)ceil((float)(params.width / 2) / (float)dimBlock.x); dimGrid.y = (int)ceil((float)params.height / (float)dimBlock.y); hipLaunchKernelGGL(( test_e_k), dim3(dimGrid), dim3(dimBlock), 0, 0, outr, outrptc, e, eptc, params, filter); }
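The kernel comments in the file above describe staging per-thread results through padded shared memory so that the final global store is coalesced, accepting a bank-conflict trade-off. Below is a minimal sketch of that staging pattern only; it is an assumption-level illustration, not the kernel's exact indexing, and the names BX, BY, PAD and staged_store_sketch are invented here.

#define BX 16   // tile width, matching the 16x16 blocks configured by the wrappers above
#define BY 16   // tile height
#define PAD 8   // row padding, mirroring the stagingPitch = (blockDim.x + banks/2) choice above

__global__ void staged_store_sketch(float* out, size_t pitch_elems, int width, int height)
{
    __shared__ float stage[BY][BX + PAD];

    // Each thread produces the result for a permuted column of its tile;
    // a simple rotation stands in for the Bayer-driven layout of the real kernel.
    int px      = (threadIdx.x + threadIdx.y) % BX;   // permuted x inside the tile
    int gx_comp = blockIdx.x * BX + px;               // column this thread computes
    int gy      = blockIdx.y * BY + threadIdx.y;

    float val = (float)(gx_comp + gy);                // placeholder computation

    // Park the result at its natural tile slot; scattered writes are cheap in shared memory.
    stage[threadIdx.y][px] = val;
    __syncthreads();

    // Read back in tile order so consecutive threadIdx.x hit consecutive
    // global addresses: the store below is coalesced.
    int gx = blockIdx.x * BX + threadIdx.x;
    if (gx < width && gy < height)
        out[gy * pitch_elems + gx] = stage[threadIdx.y][threadIdx.x];
}

Launched with a 16x16 block, this roughly mirrors the store shape the host wrappers above configure, without the Bayer-specific offsets.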
2f20109ed38bea9fb86757d407ccaa33f012346d.cu
#include "gpu_common.cuh" #include "gpu_vcd_common.cuh" #include "gpu_vcd_sf_mp_k0.cuh" __device__ float k0_get_input_g(float* input_g, size_t input_g_pitch, int x, int y) { return *((float*)((char*)&input_g[0] + input_g_pitch * y) + x); } // set an element to green __device__ void set_tempGreen(float* tempGreen_g, size_t tempGreen_g_pitch, int x, int y, float value) { *((float*)((char*)&tempGreen_g[0] + tempGreen_g_pitch * y) + x) = value; } __device__ void set_e(int* eValues_g, size_t eValues_g_pitch, int x, int y, int value) { *((int*)((char*)&eValues_g[0] + eValues_g_pitch * y) + x) = value; } // set an element to shared memory __device__ void k0_set_input_s(float* input_s, size_t tempGreen_s_pitch, int x, int y, float val) { input_s[y * (tempGreen_s_pitch / sizeof(float)) + x] = val; } __device__ void k0_set_input_s_i(int* input_s, size_t tempGreen_s_pitch, int x, int y, int val) { input_s[y * (tempGreen_s_pitch / sizeof(float)) + x] = val; } // get an element from shared memory __device__ float k0_get_input_s(float* input_s, size_t tempGreen_s_pitch, int x, int y) { return input_s[y * (tempGreen_s_pitch / sizeof(float)) + x]; } __device__ int k0_get_input_s_i(int* input_s, size_t tempGreen_s_pitch, int x, int y) { return input_s[y * (tempGreen_s_pitch / sizeof(float)) + x]; } __global__ void calculate_temp_green_k(float* input_g, size_t input_g_pitch, float* tempGreenV_g, float* tempGreenH_g, float* tempGreenD_g, size_t tempGreen_g_pitch, size_t tempGreen_s_pitch, size_t staging_pitch, vcd_params params, int filter) { extern __shared__ float input_s[]; int loopx, loopy = 0; int maxtr = max(params.window_radius + params.temp_green_radius, params.e_radius); // apron size, max transfer radius int tlsx, tlsy, tltx, tlty = 0; float valV, valH, valD = 0.0f; // load input from global memory to shared memory tlsx = 2 * blockDim.x * blockIdx.x + params.window_radius; tlsy = blockDim.y * blockIdx.y + params.window_radius; int bankblk = tlsx % 16; loopy = (int)ceil((float)(blockDim.y + 2 * params.temp_green_radius)/(blockDim.y)); if (bankblk == 0) loopx = (int)ceil((float)(2 * blockDim.x + 2 * params.temp_green_radius)/(blockDim.x)); else loopx = 1 + (int)ceil((float)(2 * blockDim.x + 2 * params.temp_green_radius - (16 - bankblk))/(blockDim.x)); for (int y = 0; y < loopy; y++) { for (int x = 0; x < loopx; x++) { int ntlsx = tlsx + x * blockDim.x + threadIdx.x - bankblk; // thread load source address, x int ntlsy = tlsy + y * blockDim.y + threadIdx.y; // thread load source address, y int endx = tlsx + 2 * blockDim.x + 2 * params.temp_green_radius; int endy = tlsy + blockDim.y + 2 * params.temp_green_radius; if (ntlsx < endx && ntlsy < endy && ntlsx >= tlsx && // load condition ntlsx < (params.width + 2 * maxtr) && ntlsy < (params.height + 2 * maxtr)) // border condition { tltx = x * blockDim.x + threadIdx.x - bankblk; tlty = y * blockDim.y + threadIdx.y; float elmt = k0_get_input_g(input_g, input_g_pitch, ntlsx, ntlsy); // get element from load source k0_set_input_s(input_s, tempGreen_s_pitch, tltx, tlty, elmt); } } } // make sure all elements are loaded __syncthreads(); tlsx = 2 * (threadIdx.x % (blockDim.x / 2)) + (threadIdx.y / (blockDim.y / 2)) * blockDim.x + params.temp_green_radius; // x address for loading from input tlsy = threadIdx.x / (blockDim.x / 2) + 2 * (threadIdx.y % (blockDim.y / 2)) + params.temp_green_radius; // y address for loading from input tltx = params.x + 2 * blockDim.x * blockIdx.x + tlsx; tlty = params.y + blockDim.y * blockIdx.y + tlsy; int col = fc(filter, tltx, 
tlty); if (col == COLOR_GREEN_RED) { tlsx += params.grtor; tltx += params.grtor; } else if (col == COLOR_GREEN_BLUE) { tlsx += params.gbtob; tltx += params.gbtob; } // SHARED MEMORY VERSION // calculate vertical green if (tltx < (params.x + params.width + params.window_radius) && tlty < (params.y + params.height + params.window_radius)) { valV = (k0_get_input_s(input_s, tempGreen_s_pitch, tlsx, tlsy-1) + k0_get_input_s(input_s, tempGreen_s_pitch, tlsx, tlsy+1)) / 2.0f + (2.0f * k0_get_input_s(input_s, tempGreen_s_pitch, tlsx, tlsy) - k0_get_input_s(input_s, tempGreen_s_pitch, tlsx, tlsy-2) - k0_get_input_s(input_s, tempGreen_s_pitch, tlsx, tlsy+2)) / 4.0f; // calculate horizontal green valH = (k0_get_input_s(input_s, tempGreen_s_pitch, tlsx-1, tlsy) + k0_get_input_s(input_s, tempGreen_s_pitch, tlsx+1, tlsy)) / 2.0f + (2.0f * k0_get_input_s(input_s, tempGreen_s_pitch, tlsx, tlsy) - k0_get_input_s(input_s, tempGreen_s_pitch, tlsx-2, tlsy) - k0_get_input_s(input_s, tempGreen_s_pitch, tlsx+2, tlsy)) / 4.0f; // calculate diagonal green valD = (k0_get_input_s(input_s, tempGreen_s_pitch, tlsx-1, tlsy) + k0_get_input_s(input_s, tempGreen_s_pitch, tlsx+1, tlsy) + k0_get_input_s(input_s, tempGreen_s_pitch, tlsx, tlsy-1) + k0_get_input_s(input_s, tempGreen_s_pitch, tlsx, tlsy+1)) / 4.0f + (4.0f * k0_get_input_s(input_s, tempGreen_s_pitch, tlsx, tlsy) - k0_get_input_s(input_s, tempGreen_s_pitch, tlsx-2, tlsy) - k0_get_input_s(input_s, tempGreen_s_pitch, tlsx+2, tlsy) - k0_get_input_s(input_s, tempGreen_s_pitch, tlsx, tlsy-2) - k0_get_input_s(input_s, tempGreen_s_pitch, tlsx, tlsy+2)) / 8.0f; } __syncthreads(); // once computation result is saved in register, we must put them back in shared memory, to ensure coalesced store // we will try to achieve 64-bit coalesced store for CC 1.0 and 1.1, so the trade-off will be 2-way bank conflict // address to put and then read the computation result into shared memory //tlsx = (threadIdx.x) % 8 + (threadIdx.y / 8) * 8; //tlsy = threadIdx.x / 8 + 2 * (threadIdx.y % 8); tlsx = (threadIdx.x) % (blockDim.x / 2) + (threadIdx.y / (blockDim.y / 2)) * (blockDim.x / 2); tlsy = threadIdx.x / (blockDim.x / 2) + 2 * (threadIdx.y % (blockDim.y / 2)); // address to write into global memory tltx = blockDim.x * blockIdx.x + threadIdx.x; tlty = blockDim.y * blockIdx.y + threadIdx.y; if (tltx < (params.width + params.window_radius) / 2 && tlty < (params.height + params.window_radius)) { // put valV into shared memory k0_set_input_s(input_s, staging_pitch, tlsx, tlsy, valV); __syncthreads(); set_tempGreen(tempGreenV_g, tempGreen_g_pitch, tltx, tlty, k0_get_input_s(input_s, staging_pitch, threadIdx.x, threadIdx.y)); // put valH itu shared memory k0_set_input_s(input_s, staging_pitch, tlsx, tlsy, valH); __syncthreads(); set_tempGreen(tempGreenH_g, tempGreen_g_pitch, tltx, tlty, k0_get_input_s(input_s, staging_pitch, threadIdx.x, threadIdx.y)); // put valD into shared memory k0_set_input_s(input_s, staging_pitch, tlsx, tlsy, valD); __syncthreads(); set_tempGreen(tempGreenD_g, tempGreen_g_pitch, tltx, tlty, k0_get_input_s(input_s, staging_pitch, threadIdx.x, threadIdx.y)); } } void calculate_temp_green(float* input_g, size_t input_g_pitch, float* tempGreenV_g, float* tempGreenH_g, float* tempGreenD_g, size_t tempGreen_g_pitch, vcd_params params, int filter, cudaDeviceProp prop, vcd_sf_mp_kernel_times* times) { dim3 dimBlock, dimGrid; size_t smemPitch, stagingPitch, smemSize; int mod = 0; int banks = 0; float kerntime = 0.0f; cudaEvent_t start; cudaEvent_t stop; 
cudaEventCreate(&start); cudaEventCreate(&stop); if (prop.major == 2) { dimBlock.x = 32; banks = 32; } else // prop.major = 1.x { dimBlock.x = 16; banks = 16; } dimBlock.y = 16; dimGrid.x = (int) ceil((float)((params.width + params.window_radius) / 2) / (float)dimBlock.x); dimGrid.y = (int) ceil((float)((params.height + params.window_radius) / (float)dimBlock.y)); smemPitch = ((2 * dimBlock.x + 2 * params.temp_green_radius) * sizeof(float)); // assuming the size of element is float stagingPitch = (dimBlock.x + banks / 2) * sizeof(float); smemSize = smemPitch * (dimBlock.y + 2 * params.temp_green_radius); cudaEventRecord(start,0); calculate_temp_green_k<<<dimGrid,dimBlock,smemSize>>>(input_g, input_g_pitch, tempGreenV_g, tempGreenH_g, tempGreenD_g, tempGreen_g_pitch, smemPitch, stagingPitch, params, filter); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&kerntime, start, stop); times->calc_temp_green = kerntime; cudaEventDestroy(start); cudaEventDestroy(stop); } __global__ void calculate_e_k(int* e, size_t eptc, float* input, size_t inputptc, size_t smemptc, size_t stgptc, vcd_params params, int filter) { // load from input to shared memory. However, the position is not aligned for coalescing. // so, we must calculate the offset to achieve coalescing int sx, sy, tx, ty, cx, cy, color, val = 0; float ev = 0.0f; int maxrad = max(params.window_radius + params.temp_green_radius, params.e_radius); int offsety = maxrad > params.e_radius? maxrad - params.e_radius : 0; extern __shared__ float smem[]; // source address sx = 2 * blockDim.x * blockIdx.x; sy = blockDim.y * blockIdx.y + offsety; int loopx = (int)ceil((float)(2 * blockDim.x + params.e_radius + maxrad) / (float)(blockDim.x)); int loopy = (int)ceil((float)(blockDim.y + 2 * params.e_radius) / (float)(blockDim.y)); for (int y = 0; y < loopy; y++) { for (int x = 0; x < loopx; x++) { int nsx = sx + x * blockDim.x + threadIdx.x; int nsy = sy + y * blockDim.y + threadIdx.y; int endx = sx + 2 * blockDim.x + params.e_radius + maxrad; int endy = sy + blockDim.y + 2 * params.e_radius; if (nsx < endx && nsy < endy && nsx < (params.width + 2 * maxrad) && nsy < (params.height + 2 * maxrad)) { tx = x * blockDim.x + threadIdx.x; ty = y * blockDim.y + threadIdx.y; smem[ty * (smemptc / sizeof(float)) + tx] = input[nsy * (inputptc / sizeof(float)) + nsx]; } } } //sx = 2 * (threadIdx.x % 8) + (threadIdx.y / 8) * 16 + maxrad; // x address for loading from input //sy = threadIdx.x / 8 + 2 * (threadIdx.y % 8) + params.e_radius; // y address for loading from input sx = 2 * (threadIdx.x % (blockDim.x / 2)) + (threadIdx.y / (blockDim.y / 2)) * blockDim.x + maxrad; // x address for loading from input sy = threadIdx.x / (blockDim.x / 2) + 2 * (threadIdx.y % (blockDim.y / 2)) + params.e_radius; // y address for loading from input cx = params.x + 2 * blockDim.x * blockIdx.x + sx; cy = params.y + blockDim.y * blockIdx.y + sy; color = fc(filter, cx, cy); if (color == COLOR_GREEN_RED) { sx += params.grtor; cx += params.grtor; } else if (color == COLOR_GREEN_BLUE) { sx += params.gbtob; cx += params.gbtob; } float Lh, Lv = 0.0f; if (cx < (params.x + params.width) && cy < (params.y + params.height)) { for (int dx = -2; dx < 3; dx++) { for (int dy = -2; dy < 3; dy++) { /*if (dx != 0 || dy != 0) { Lh += abs(k0_get_input_s(smem, smemptc, sx+dx,sy+dy)-k0_get_input_s(smem, smemptc, sx,sy+dy)); Lv += abs(k0_get_input_s(smem, smemptc, sx+dx,sy+dy)-k0_get_input_s(smem, smemptc, sx+dx,sy)); }*/ if (dx != 0) { Lv += abs(k0_get_input_s(smem, smemptc, 
sx+dx,sy+dy)-k0_get_input_s(smem, smemptc, sx+dx,sy)); } if (dy != 0) { Lh += abs(k0_get_input_s(smem, smemptc, sx+dx,sy+dy)-k0_get_input_s(smem, smemptc, sx,sy+dy)); } } } ev = max((float)Lv/(float)Lh,(float)Lh/(float)Lv); val = 0; if (ev > params.e_threshold) { // sharp block, insert previous g computation result if (Lh < Lv) { val = HORZ_EDGE_PRE_CALC; } else { val = VERT_EDGE_PRE_CALC; } } else { val = TEX_PRE_CALC; } } __syncthreads(); // make sure all threads done the calculation, because after this the shared memory content will be wiped out // put the result into shared memory. 2-way bank conflict //sx = (threadIdx.x) % 8 + (threadIdx.y / 8) * 8; //sy = 2 * (threadIdx.y % 8) + (threadIdx.x / 8); sx = (threadIdx.x) % (blockDim.x / 2) + (threadIdx.y / (blockDim.y / 2)) * (blockDim.x / 2); sy = threadIdx.x / (blockDim.x / 2) + 2 * (threadIdx.y % (blockDim.y / 2)); ((int*)smem)[sy * (stgptc / sizeof(int)) + sx] = val; __syncthreads(); // put the result in shared memory into global memory tx = blockDim.x * blockIdx.x + threadIdx.x; ty = blockDim.y * blockIdx.y + threadIdx.y; if (tx < (params.width / 2) && ty < params.height) { e[ty * (eptc / sizeof(int)) + tx] = ((int*)smem)[threadIdx.y * (stgptc / sizeof(int)) + threadIdx.x]; } } __global__ void test_e_k(float* outr, size_t outrptc, int* e, size_t eptc, vcd_params params, int filter) { int sx, sy, tx, ty, cx, cy, color = 0; // source address sx = blockDim.x * blockIdx.x + threadIdx.x; sy = blockDim.y * blockIdx.y + threadIdx.y; // target address tx = 2 * sx; ty = sy; // color address cx = params.x + tx; cy = params.y + ty; // adjust source and target position if it falls into red/blue pixel color = fc(filter, cx, cy); if (color == COLOR_GREEN_RED) { tx += correct_x(params, COLOR_RED, color); //tx += params.grtor; } else if (color == COLOR_GREEN_BLUE) { tx += correct_x(params, COLOR_BLUE, color); //tx += params.gbtob; } if (sx < (params.width / 2) && sy < params.height) { // copy from input to output int val = e[sy * (eptc / sizeof(int)) + sx]; if (val == HORZ_EDGE_PRE_CALC || val == VERT_EDGE_PRE_CALC) outr[ty * (outrptc / sizeof(unsigned int)) + tx] = 8191.0f; } } void calculate_e(int* e, size_t eptc, float* input, size_t inputptc, vcd_params params, int filter, cudaDeviceProp prop, vcd_sf_mp_kernel_times* times) { int maxrad = max(params.window_radius + params.temp_green_radius, params.e_radius); float kerntime = 0.0f; cudaEvent_t start; cudaEvent_t stop; cudaEventCreate(&start); cudaEventCreate(&stop); dim3 dimBlock; dim3 dimGrid; int banks = 0; if (prop.major == 2) { dimBlock.x = 32; banks = 32; } else { dimBlock.x = 16; banks = 16; } dimBlock.y = 16; dimGrid.x = (int)ceil((float)(params.width / 2) / (float)dimBlock.x); dimGrid.y = (int)ceil((float)params.height / (float)dimBlock.y); size_t smemptc, stgptc = 0; size_t smemsz= 0; int mod = 0; smemptc = get_smem_pitch(((params.e_radius + maxrad + 2 * dimBlock.x) * sizeof(float))); stgptc = (dimBlock.x + banks / 2) * sizeof(float); smemsz = smemptc * (2 * params.e_radius + dimBlock.y); cudaEventRecord(start, 0); calculate_e_k<<<dimGrid, dimBlock, smemsz>>>(e, eptc, input, inputptc, smemptc, stgptc, params, filter); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&kerntime, start, stop); times->calc_e = kerntime; cudaEventDestroy(start); cudaEventDestroy(stop); } __global__ void test_temp_green_k(float* outg, size_t outg_ptc, const float* tempg, size_t tempg_ptc, const float* input, size_t input_ptc, vcd_params params, int filter) { // first, copy from input 
to output int sx, sy, tx, ty, cx, cy, color = 0; int maxrad = max(params.window_radius + params.temp_green_radius, params.e_radius); // source address sx = 2 * (blockDim.x * blockIdx.x + threadIdx.x) + maxrad; sy = blockDim.y * blockIdx.y + threadIdx.y + maxrad; // target address tx = sx - maxrad; ty = sy - maxrad; // color address cx = params.x + tx; cy = params.y + ty; // adjust source and target position if it falls into red/blue pixel color = fc(filter, cx, cy); if (color == COLOR_RED) { sx -= params.grtor; tx -= params.grtor; } else if (color == COLOR_BLUE) { sx += params.gbtob; tx += params.gbtob; } if (tx < params.width && ty < params.height) { // copy from input to output outg[ty * (outg_ptc / sizeof(unsigned int)) + tx] = (unsigned int)input[sy * (input_ptc / sizeof(float)) + sx]; } // second, copy from temp green to output // source address. Dont forget to offset it by the size of window radius sx = blockDim.x * blockIdx.x + threadIdx.x; sy = blockDim.y * blockIdx.y + threadIdx.y; // target address tx = 2 * (blockDim.x * blockIdx.x + threadIdx.x); ty = blockDim.y * blockIdx.y + threadIdx.y; cx = params.x + tx; cy = params.y + ty; color = fc(filter, cx, cy); if (color == COLOR_GREEN_RED) { tx += params.grtor; } else if (color == COLOR_GREEN_BLUE) { tx += params.gbtob; } if (tx < params.width && ty < params.height) { // copy from temp green to output outg[ty * (outg_ptc / sizeof(unsigned int)) + tx] = (unsigned int)tempg[sy * (tempg_ptc / sizeof(float)) + sx]; } } void test_temp_green(float* outg, size_t outg_ptc, const float* tempg, size_t tempg_ptc, const float* input, size_t input_ptc, vcd_params params, int filter, cudaDeviceProp prop) { dim3 dimBlock; dim3 dimGrid; if (prop.major == 2) dimBlock.x = 32; else // prop.major = 1.x dimBlock.x = 16; dimBlock.y = 16; dimGrid.x = (int)ceil((float)params.width / (float)dimBlock.x); dimGrid.y = (int)ceil((float)params.height / (float)dimBlock.y); test_temp_green_k<<<dimGrid, dimBlock>>>(outg, outg_ptc, tempg, tempg_ptc, input, input_ptc, params, filter); } void test_e(float* outr, size_t outrptc, int* e, size_t eptc, vcd_params params, int filter, cudaDeviceProp prop) { dim3 dimBlock; dim3 dimGrid; if (prop.major == 2) dimBlock.x = 32; else // prop.major = 1.x dimBlock.x = 16; dimBlock.y = 16; dimGrid.x = (int)ceil((float)(params.width / 2) / (float)dimBlock.x); dimGrid.y = (int)ceil((float)params.height / (float)dimBlock.y); test_e_k<<<dimGrid, dimBlock>>>(outr, outrptc, e, eptc, params, filter); }
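Apart from whitespace, the .hip file above and this .cu original differ only where the runtime API appears: hipify renames cudaEvent_t, cudaEventCreate, cudaEventRecord, cudaEventSynchronize, cudaEventElapsedTime and cudaEventDestroy to their hip* counterparts, turns cudaDeviceProp into hipDeviceProp_t, and rewrites every <<<grid, block, smem>>> launch as hipLaunchKernelGGL(kernel, grid, block, smem, 0, ...). A minimal sketch of the event-timing wrapper shared by calculate_temp_green and calculate_e, in its CUDA form, is shown below; dummy_kernel and time_kernel_ms are illustrative names, not taken from the files.

#include <cuda_runtime.h>

__global__ void dummy_kernel(float* data) { data[threadIdx.x] += 1.0f; }  // placeholder work

float time_kernel_ms(float* d_data, dim3 grid, dim3 block, size_t smem)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);
    dummy_kernel<<<grid, block, smem>>>(d_data);   // becomes hipLaunchKernelGGL(...) in the .hip file
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);

    float elapsed_ms = 0.0f;
    cudaEventElapsedTime(&elapsed_ms, start, stop);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return elapsed_ms;
}

The explicit stream argument 0 that hipLaunchKernelGGL requires is the same default stream the chevron launch uses implicitly.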
8cb5fcfa18421da1f28709f032e33a91f159118e.hip
// !!! This is a file automatically generated by hipify!!! #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/functional.h> #include <thrust/gather.h> #include <thrust/scan.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <chrono> #include <iostream> #include <iomanip> #include <string> #include <vector> #include "../pml/measurement.h" #include "../pml/csvwriter.h" // This example computes a summed area table using segmented scan // http://en.wikipedia.org/wiki/Summed_area_table // convert a linear index to a linear index in the transpose struct transpose_index : public thrust::unary_function<size_t,size_t> { size_t m, n; __host__ __device__ transpose_index(size_t _m, size_t _n) : m(_m), n(_n) {} __host__ __device__ size_t operator()(size_t linear_index) { size_t i = linear_index / n; size_t j = linear_index % n; return m * j + i; } }; // convert a linear index to a row index struct row_index : public thrust::unary_function<size_t,size_t> { size_t n; __host__ __device__ row_index(size_t _n) : n(_n) {} __host__ __device__ size_t operator()(size_t i) { return i / n; } }; // transpose an M-by-N array template <typename T> void transpose(size_t m, size_t n, thrust::device_vector<T>& src, thrust::device_vector<T>& dst) { thrust::counting_iterator<size_t> indices(0); thrust::gather (thrust::make_transform_iterator(indices, transpose_index(n, m)), thrust::make_transform_iterator(indices, transpose_index(n, m)) + dst.size(), src.begin(), dst.begin()); } // scan the rows of an M-by-N array template <typename T> void scan_horizontally(size_t n, thrust::device_vector<T>& d_data) { thrust::counting_iterator<size_t> indices(0); thrust::inclusive_scan_by_key (thrust::make_transform_iterator(indices, row_index(n)), thrust::make_transform_iterator(indices, row_index(n)) + d_data.size(), d_data.begin(), d_data.begin()); } void scan_old(size_t m, size_t n, thrust::device_vector<int>& data) { // [step 1] scan horizontally scan_horizontally(n, data); // [step 2] transpose array thrust::device_vector<int> temp(m * n); transpose(m, n, data, temp); // [step 3] scan transpose horizontally scan_horizontally(m, temp); // [step 4] transpose the transpose transpose(n, m, temp, data); } // scan the rows of an M-by-N array template <typename T> void scan_vertically(size_t m, size_t n, thrust::device_vector<T>& d_data) { thrust::counting_iterator<size_t> indices(0); auto mapBegin = thrust::make_transform_iterator(indices, transpose_index(n,m)); thrust::inclusive_scan_by_key (thrust::make_transform_iterator(indices, row_index(m)), thrust::make_transform_iterator(indices, row_index(m)) + d_data.size(), thrust::make_permutation_iterator(d_data.begin(), mapBegin), thrust::make_permutation_iterator(d_data.begin(), mapBegin)); } void scan_new(size_t m, size_t n, thrust::device_vector<int>& data) { // [step 1] scan horizontally scan_horizontally(n, data); // [step 2] scan vertically scan_vertically(m, n, data); } // print an M-by-N array template <typename T> void print(size_t m, size_t n, thrust::device_vector<T>& d_data) { thrust::host_vector<T> h_data = d_data; for(size_t i = 0; i < m; i++) { for(size_t j = 0; j < n; j++) std::cout << std::setw(8) << h_data[i * n + j] << " "; std::cout << "\n"; } } int main(void) { int iterations = 10; std::vector<size_t> mVec; std::vector<size_t> nVec; std::vector<MeasurementSeries<std::chrono::microseconds>> oldTimes; std::vector<MeasurementSeries<std::chrono::microseconds>> newTimes; // first run 
size_t m = 1000; // number of rows mVec.push_back(m); size_t n = 1000; // number of columns nVec.push_back(n); MeasurementSeries<std::chrono::microseconds> oldMeasurement; for (int i = 0; i < iterations; ++i) { thrust::device_vector<int> data(m * n, 1); hipDeviceSynchronize(); oldMeasurement.start(); scan_old(m, n, data); hipDeviceSynchronize(); oldMeasurement.stop(); } oldTimes.push_back(oldMeasurement); MeasurementSeries<std::chrono::microseconds> newMeasurement; for (int i = 0; i < iterations; ++i) { thrust::device_vector<int> data(m * n, 1); hipDeviceSynchronize(); newMeasurement.start(); scan_new(m, n, data); hipDeviceSynchronize(); newMeasurement.stop(); } newTimes.push_back(newMeasurement); // sanity check thrust::device_vector<int> data_old(m * n, 1); thrust::device_vector<int> data_new(m * n, 1); scan_old(m, n, data_old); scan_new(m, n, data_new); if (data_old != data_new) { std::cout << "wrong result" << std::endl; } // second run m = 100000; // number of rows mVec.push_back(m); n = 10; // number of columns nVec.push_back(n); MeasurementSeries<std::chrono::microseconds> oldMeasurement2; for (int i = 0; i < iterations; ++i) { thrust::device_vector<int> data(m * n, 1); hipDeviceSynchronize(); oldMeasurement2.start(); scan_old(m, n, data); hipDeviceSynchronize(); oldMeasurement2.stop(); } oldTimes.push_back(oldMeasurement2); MeasurementSeries<std::chrono::microseconds> newMeasurement2; for (int i = 0; i < iterations; ++i) { thrust::device_vector<int> data(m * n, 1); hipDeviceSynchronize(); newMeasurement2.start(); scan_new(m, n, data); hipDeviceSynchronize(); newMeasurement2.stop(); } newTimes.push_back(newMeasurement2); // sanity check thrust::device_vector<int> data_old2(m * n, 1); thrust::device_vector<int> data_new2(m * n, 1); scan_old(m, n, data_old2); scan_new(m, n, data_new2); if (data_old2 != data_new2) { std::cout << "wrong result" << std::endl; } CSVWriter csvwriter("summed_area_table.csv"); csvwriter.setHeaderNames( {"m", "n", "old times", "new times"} ); csvwriter.write(mVec, nVec, oldTimes, newTimes); return 0; }
8cb5fcfa18421da1f28709f032e33a91f159118e.cu
#include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/functional.h> #include <thrust/gather.h> #include <thrust/scan.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <chrono> #include <iostream> #include <iomanip> #include <string> #include <vector> #include "../pml/measurement.h" #include "../pml/csvwriter.h" // This example computes a summed area table using segmented scan // http://en.wikipedia.org/wiki/Summed_area_table // convert a linear index to a linear index in the transpose struct transpose_index : public thrust::unary_function<size_t,size_t> { size_t m, n; __host__ __device__ transpose_index(size_t _m, size_t _n) : m(_m), n(_n) {} __host__ __device__ size_t operator()(size_t linear_index) { size_t i = linear_index / n; size_t j = linear_index % n; return m * j + i; } }; // convert a linear index to a row index struct row_index : public thrust::unary_function<size_t,size_t> { size_t n; __host__ __device__ row_index(size_t _n) : n(_n) {} __host__ __device__ size_t operator()(size_t i) { return i / n; } }; // transpose an M-by-N array template <typename T> void transpose(size_t m, size_t n, thrust::device_vector<T>& src, thrust::device_vector<T>& dst) { thrust::counting_iterator<size_t> indices(0); thrust::gather (thrust::make_transform_iterator(indices, transpose_index(n, m)), thrust::make_transform_iterator(indices, transpose_index(n, m)) + dst.size(), src.begin(), dst.begin()); } // scan the rows of an M-by-N array template <typename T> void scan_horizontally(size_t n, thrust::device_vector<T>& d_data) { thrust::counting_iterator<size_t> indices(0); thrust::inclusive_scan_by_key (thrust::make_transform_iterator(indices, row_index(n)), thrust::make_transform_iterator(indices, row_index(n)) + d_data.size(), d_data.begin(), d_data.begin()); } void scan_old(size_t m, size_t n, thrust::device_vector<int>& data) { // [step 1] scan horizontally scan_horizontally(n, data); // [step 2] transpose array thrust::device_vector<int> temp(m * n); transpose(m, n, data, temp); // [step 3] scan transpose horizontally scan_horizontally(m, temp); // [step 4] transpose the transpose transpose(n, m, temp, data); } // scan the rows of an M-by-N array template <typename T> void scan_vertically(size_t m, size_t n, thrust::device_vector<T>& d_data) { thrust::counting_iterator<size_t> indices(0); auto mapBegin = thrust::make_transform_iterator(indices, transpose_index(n,m)); thrust::inclusive_scan_by_key (thrust::make_transform_iterator(indices, row_index(m)), thrust::make_transform_iterator(indices, row_index(m)) + d_data.size(), thrust::make_permutation_iterator(d_data.begin(), mapBegin), thrust::make_permutation_iterator(d_data.begin(), mapBegin)); } void scan_new(size_t m, size_t n, thrust::device_vector<int>& data) { // [step 1] scan horizontally scan_horizontally(n, data); // [step 2] scan vertically scan_vertically(m, n, data); } // print an M-by-N array template <typename T> void print(size_t m, size_t n, thrust::device_vector<T>& d_data) { thrust::host_vector<T> h_data = d_data; for(size_t i = 0; i < m; i++) { for(size_t j = 0; j < n; j++) std::cout << std::setw(8) << h_data[i * n + j] << " "; std::cout << "\n"; } } int main(void) { int iterations = 10; std::vector<size_t> mVec; std::vector<size_t> nVec; std::vector<MeasurementSeries<std::chrono::microseconds>> oldTimes; std::vector<MeasurementSeries<std::chrono::microseconds>> newTimes; // first run size_t m = 1000; // number of rows mVec.push_back(m); size_t 
n = 1000; // number of columns nVec.push_back(n); MeasurementSeries<std::chrono::microseconds> oldMeasurement; for (int i = 0; i < iterations; ++i) { thrust::device_vector<int> data(m * n, 1); cudaDeviceSynchronize(); oldMeasurement.start(); scan_old(m, n, data); cudaDeviceSynchronize(); oldMeasurement.stop(); } oldTimes.push_back(oldMeasurement); MeasurementSeries<std::chrono::microseconds> newMeasurement; for (int i = 0; i < iterations; ++i) { thrust::device_vector<int> data(m * n, 1); cudaDeviceSynchronize(); newMeasurement.start(); scan_new(m, n, data); cudaDeviceSynchronize(); newMeasurement.stop(); } newTimes.push_back(newMeasurement); // sanity check thrust::device_vector<int> data_old(m * n, 1); thrust::device_vector<int> data_new(m * n, 1); scan_old(m, n, data_old); scan_new(m, n, data_new); if (data_old != data_new) { std::cout << "wrong result" << std::endl; } // second run m = 100000; // number of rows mVec.push_back(m); n = 10; // number of columns nVec.push_back(n); MeasurementSeries<std::chrono::microseconds> oldMeasurement2; for (int i = 0; i < iterations; ++i) { thrust::device_vector<int> data(m * n, 1); cudaDeviceSynchronize(); oldMeasurement2.start(); scan_old(m, n, data); cudaDeviceSynchronize(); oldMeasurement2.stop(); } oldTimes.push_back(oldMeasurement2); MeasurementSeries<std::chrono::microseconds> newMeasurement2; for (int i = 0; i < iterations; ++i) { thrust::device_vector<int> data(m * n, 1); cudaDeviceSynchronize(); newMeasurement2.start(); scan_new(m, n, data); cudaDeviceSynchronize(); newMeasurement2.stop(); } newTimes.push_back(newMeasurement2); // sanity check thrust::device_vector<int> data_old2(m * n, 1); thrust::device_vector<int> data_new2(m * n, 1); scan_old(m, n, data_old2); scan_new(m, n, data_new2); if (data_old2 != data_new2) { std::cout << "wrong result" << std::endl; } CSVWriter csvwriter("summed_area_table.csv"); csvwriter.setHeaderNames( {"m", "n", "old times", "new times"} ); csvwriter.write(mVec, nVec, oldTimes, newTimes); return 0; }
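Both versions above produce an inclusive summed area table: after the row scan and the column scan, element (i, j) holds the sum of all inputs with row <= i and column <= j. Any axis-aligned rectangle sum then costs four lookups by inclusion-exclusion. The helper below is a hedged host-side sketch (rect_sum is a name chosen here, not part of the example), operating on a thrust::host_vector copied back from the device after scan_new.

#include <thrust/host_vector.h>

// S is the m-by-n inclusive summed area table, row-major; corners are inclusive, r0<=r1, c0<=c1.
int rect_sum(const thrust::host_vector<int>& S, size_t n,
             size_t r0, size_t c0, size_t r1, size_t c1)
{
    int total = S[r1 * n + c1];
    if (r0 > 0)           total -= S[(r0 - 1) * n + c1];
    if (c0 > 0)           total -= S[r1 * n + (c0 - 1)];
    if (r0 > 0 && c0 > 0) total += S[(r0 - 1) * n + (c0 - 1)];
    return total;
}

// For the all-ones input used in main(), rect_sum(S, n, r0, c0, r1, c1) returns
// (r1 - r0 + 1) * (c1 - c0 + 1), which makes a quick sanity check.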
14303790c3da89ef326d99df83b7b75f6e850134.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // REQUIRES: nvptx-registered-target // Make sure we don't allow dynamic initialization for device // variables, but accept empty constructors allowed by CUDA. // RUN: %clang_cc1 -verify %s -triple nvptx64-nvidia-cuda -fcuda-is-device -std=c++11 %s #ifdef __clang__ #include "Inputs/cuda.h" #endif // Use the types we share with CodeGen tests. #include "Inputs/cuda-initializers.h" __shared__ int s_v_i = 1; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __device__ int d_v_f = f(); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ int s_v_f = f(); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ int c_v_f = f(); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ T s_t_i = {2}; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __device__ EC d_ec_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ EC s_ec_i(3); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ EC c_ec_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ EC d_ec_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ EC s_ec_i2 = {3}; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ EC c_ec_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ ETC d_etc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ ETC s_etc_i(3); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ ETC c_etc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ ETC d_etc_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ ETC s_etc_i2 = {3}; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ ETC c_etc_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ UC d_uc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ UC s_uc; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ UC c_uc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ UD d_ud; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ UD s_ud; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ UD c_ud; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ ECI d_eci; // 
expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ ECI s_eci; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ ECI c_eci; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ NEC d_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ NEC s_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NEC c_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ NED d_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ NED s_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NED c_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ NCV d_ncv; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ NCV s_ncv; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NCV c_ncv; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ VD d_vd; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ VD s_vd; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ VD c_vd; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ NCF d_ncf; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ NCF s_ncf; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NCF c_ncf; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ NCFS s_ncfs; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __device__ UTC d_utc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ UTC s_utc; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ UTC c_utc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ UTC d_utc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ UTC s_utc_i(3); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ UTC c_utc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ NETC d_netc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ NETC s_netc; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NETC c_netc; // expected-error@-1 {{dynamic 
initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ NETC d_netc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ NETC s_netc_i(3); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NETC c_netc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ EC_I_EC1 d_ec_i_ec1; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ EC_I_EC1 s_ec_i_ec1; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ EC_I_EC1 c_ec_i_ec1; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ T_V_T d_t_v_t; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ T_V_T s_t_v_t; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_V_T c_t_v_t; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ T_B_NEC d_t_b_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ T_B_NEC s_t_b_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_B_NEC c_t_b_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ T_F_NEC d_t_f_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ T_F_NEC s_t_f_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_F_NEC c_t_f_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ T_FA_NEC d_t_fa_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ T_FA_NEC s_t_fa_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_FA_NEC c_t_fa_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ T_B_NED d_t_b_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ T_B_NED s_t_b_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_B_NED c_t_b_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ T_F_NED d_t_f_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ T_F_NED s_t_f_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_F_NED c_t_f_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ T_FA_NED d_t_fa_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and 
__shared__ variables.}} __shared__ T_FA_NED s_t_fa_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_FA_NED c_t_fa_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} // Verify that only __shared__ local variables may be static on device // side and that they are not allowed to be initialized. __device__ void df_sema() { static __shared__ NCFS s_ncfs; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __shared__ UC s_uc; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __shared__ NED s_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __device__ int ds; // expected-error@-1 {{within a __device__ function, only __shared__ variables may be marked 'static'}} static __constant__ int dc; // expected-error@-1 {{within a __device__ function, only __shared__ variables may be marked 'static'}} static int v; // expected-error@-1 {{within a __device__ function, only __shared__ variables may be marked 'static'}} } __host__ __device__ void hd_sema() { static int x = 42; #ifdef __CUDA_ARCH__ // expected-error@-2 {{within a __host__ __device__ function, only __shared__ variables may be marked 'static'}} #endif } inline __host__ __device__ void hd_emitted_host_only() { static int x = 42; // no error on device because this is never codegen'ed there. } void call_hd_emitted_host_only() { hd_emitted_host_only(); }
14303790c3da89ef326d99df83b7b75f6e850134.cu
// REQUIRES: nvptx-registered-target // Make sure we don't allow dynamic initialization for device // variables, but accept empty constructors allowed by CUDA. // RUN: %clang_cc1 -verify %s -triple nvptx64-nvidia-cuda -fcuda-is-device -std=c++11 %s #ifdef __clang__ #include "Inputs/cuda.h" #endif // Use the types we share with CodeGen tests. #include "Inputs/cuda-initializers.h" __shared__ int s_v_i = 1; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __device__ int d_v_f = f(); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ int s_v_f = f(); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ int c_v_f = f(); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ T s_t_i = {2}; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __device__ EC d_ec_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ EC s_ec_i(3); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ EC c_ec_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ EC d_ec_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ EC s_ec_i2 = {3}; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ EC c_ec_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ ETC d_etc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ ETC s_etc_i(3); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ ETC c_etc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ ETC d_etc_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ ETC s_etc_i2 = {3}; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ ETC c_etc_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ UC d_uc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ UC s_uc; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ UC c_uc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ UD d_ud; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ UD s_ud; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ UD c_ud; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ ECI d_eci; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and 
__shared__ variables.}} __shared__ ECI s_eci; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ ECI c_eci; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ NEC d_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ NEC s_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NEC c_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ NED d_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ NED s_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NED c_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ NCV d_ncv; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ NCV s_ncv; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NCV c_ncv; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ VD d_vd; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ VD s_vd; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ VD c_vd; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ NCF d_ncf; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ NCF s_ncf; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NCF c_ncf; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ NCFS s_ncfs; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __device__ UTC d_utc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ UTC s_utc; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ UTC c_utc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ UTC d_utc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ UTC s_utc_i(3); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ UTC c_utc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ NETC d_netc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ NETC s_netc; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NETC c_netc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ 
NETC d_netc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ NETC s_netc_i(3); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NETC c_netc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ EC_I_EC1 d_ec_i_ec1; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ EC_I_EC1 s_ec_i_ec1; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ EC_I_EC1 c_ec_i_ec1; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ T_V_T d_t_v_t; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ T_V_T s_t_v_t; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_V_T c_t_v_t; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ T_B_NEC d_t_b_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ T_B_NEC s_t_b_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_B_NEC c_t_b_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ T_F_NEC d_t_f_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ T_F_NEC s_t_f_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_F_NEC c_t_f_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ T_FA_NEC d_t_fa_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ T_FA_NEC s_t_fa_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_FA_NEC c_t_fa_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ T_B_NED d_t_b_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ T_B_NED s_t_b_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_B_NED c_t_b_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ T_F_NED d_t_f_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ T_F_NED s_t_f_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_F_NED c_t_f_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ T_FA_NED d_t_fa_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ T_FA_NED s_t_fa_ned; // expected-error@-1 {{initialization is not 
supported for __shared__ variables.}} __constant__ T_FA_NED c_t_fa_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} // Verify that only __shared__ local variables may be static on device // side and that they are not allowed to be initialized. __device__ void df_sema() { static __shared__ NCFS s_ncfs; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __shared__ UC s_uc; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __shared__ NED s_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __device__ int ds; // expected-error@-1 {{within a __device__ function, only __shared__ variables may be marked 'static'}} static __constant__ int dc; // expected-error@-1 {{within a __device__ function, only __shared__ variables may be marked 'static'}} static int v; // expected-error@-1 {{within a __device__ function, only __shared__ variables may be marked 'static'}} } __host__ __device__ void hd_sema() { static int x = 42; #ifdef __CUDA_ARCH__ // expected-error@-2 {{within a __host__ __device__ function, only __shared__ variables may be marked 'static'}} #endif } inline __host__ __device__ void hd_emitted_host_only() { static int x = 42; // no error on device because this is never codegen'ed there. } void call_hd_emitted_host_only() { hd_emitted_host_only(); }
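// Editorial note: the diagnostics above enumerate what is rejected. As a hedged
// counter-example (a minimal sketch, not part of the original test file), constant
// initialization is accepted for __device__ and __constant__ variables, __shared__
// variables are fine as long as they carry no initializer, and a static local inside
// a __device__ function is accepted only when it is __shared__. "EmptyCtor" and the
// function name below are hypothetical, introduced only for illustration.
struct EmptyCtor { constexpr EmptyCtor() {} };
__device__   int d_ok = 42;        // constant initializer: allowed
__constant__ int c_ok = 7;         // constant initializer: allowed
__device__   EmptyCtor d_ec_ok;    // constexpr default constructor: allowed
__shared__   int s_ok_uninit;      // no initializer: allowed
__device__ void df_ok_sketch() {
  static __shared__ int s_ok;      // static __shared__, uninitialized: allowed
  s_ok = threadIdx.x;              // runtime assignment is fine
  (void)s_ok;
}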
cfc3cef8b01cdcf55719cf0c695dd8d9221c1f3c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #include <cstdlib> #include <staggered_oprod.h> #include <tune_quda.h> #include <quda_internal.h> #include <gauge_field_order.h> #include <quda_matrix.h> #include <dslash_quda.h> namespace quda { #ifdef GPU_STAGGERED_DIRAC namespace { // anonymous #include <texture.h> } enum KernelType {OPROD_INTERIOR_KERNEL, OPROD_EXTERIOR_KERNEL}; template<typename Float, typename Output, typename InputA, typename InputB> struct StaggeredOprodArg { unsigned int length; int X[4]; unsigned int parity; unsigned int dir; unsigned int ghostOffset[4]; unsigned int displacement; KernelType kernelType; int nFace; bool partitioned[4]; InputA inA; InputB inB; Output outA; Output outB; Float coeff[2]; StaggeredOprodArg(const unsigned int parity, const unsigned int dir, const unsigned int *ghostOffset, const unsigned int displacement, const KernelType& kernelType, const int nFace, const double coeff[2], InputA& inA, InputB& inB, Output& outA, Output& outB, GaugeField& meta) : length(meta.VolumeCB()), parity(parity), dir(dir), displacement(displacement), kernelType(kernelType), nFace(nFace), inA(inA), inB(inB), outA(outA), outB(outB) { this->coeff[0] = coeff[0]; this->coeff[1] = coeff[1]; for(int i=0; i<4; ++i) this->X[i] = meta.X()[i]; for(int i=0; i<4; ++i) this->ghostOffset[i] = ghostOffset[i]; for(int i=0; i<4; ++i) this->partitioned[i] = commDimPartitioned(i) ? true : false; } }; enum IndexType { EVEN_X = 0, EVEN_Y = 1, EVEN_Z = 2, EVEN_T = 3 }; template <IndexType idxType> static __device__ __forceinline__ void coordsFromIndex(int& idx, int c[4], const unsigned int cb_idx, const unsigned int parity, const int X[4]) { const int &LX = X[0]; const int &LY = X[1]; const int &LZ = X[2]; const int XYZ = X[2]*X[1]*X[0]; const int XY = X[1]*X[0]; idx = 2*cb_idx; int x, y, z, t; if (idxType == EVEN_X /*!(LX & 1)*/) { // X even // t = idx / XYZ; // z = (idx / XY) % Z; // y = (idx / X) % Y; // idx += (parity + t + z + y) & 1; // x = idx % X; // equivalent to the above, but with fewer divisions/mods: int aux1 = idx / LX; x = idx - aux1 * LX; int aux2 = aux1 / LY; y = aux1 - aux2 * LY; t = aux2 / LZ; z = aux2 - t * LZ; aux1 = (parity + t + z + y) & 1; x += aux1; idx += aux1; } else if (idxType == EVEN_Y /*!(LY & 1)*/) { // Y even t = idx / XYZ; z = (idx / XY) % LZ; idx += (parity + t + z) & 1; y = (idx / LX) % LY; x = idx % LX; } else if (idxType == EVEN_Z /*!(LZ & 1)*/) { // Z even t = idx / XYZ; idx += (parity + t) & 1; z = (idx / XY) % LZ; y = (idx / LX) % LY; x = idx % LX; } else { idx += parity; t = idx / XYZ; z = (idx / XY) % LZ; y = (idx / LX) % LY; x = idx % LX; } c[0] = x; c[1] = y; c[2] = z; c[3] = t; } // Get the coordinates for the exterior kernels __device__ static void coordsFromIndex(int x[4], const unsigned int cb_idx, const int X[4], const unsigned int dir, const int displacement, const unsigned int parity) { int Xh[2] = {X[0]/2, X[1]/2}; switch(dir){ case 0: x[2] = cb_idx/Xh[1] % X[2]; x[3] = cb_idx/(Xh[1]*X[2]) % X[3]; x[0] = cb_idx/(Xh[1]*X[2]*X[3]); x[0] += (X[0] - displacement); x[1] = 2*(cb_idx % Xh[1]) + ((x[0]+x[2]+x[3]+parity)&1); break; case 1: x[2] = cb_idx/Xh[0] % X[2]; x[3] = cb_idx/(Xh[0]*X[2]) % X[3]; x[1] = cb_idx/(Xh[0]*X[2]*X[3]); x[1] += (X[1] - displacement); x[0] = 2*(cb_idx % Xh[0]) + ((x[1]+x[2]+x[3]+parity)&1); break; case 2: x[1] = cb_idx/Xh[0] % X[1]; x[3] = cb_idx/(Xh[0]*X[1]) % X[3]; x[2] = cb_idx/(Xh[0]*X[1]*X[3]); x[2] += (X[2] - displacement); x[0] = 2*(cb_idx % Xh[0]) + 
((x[1]+x[2]+x[3]+parity)&1); break; case 3: x[1] = cb_idx/Xh[0] % X[1]; x[2] = cb_idx/(Xh[0]*X[1]) % X[2]; x[3] = cb_idx/(Xh[0]*X[1]*X[2]); x[3] += (X[3] - displacement); x[0] = 2*(cb_idx % Xh[0]) + ((x[1]+x[2]+x[3]+parity)&1); break; } return; } __device__ __forceinline__ int neighborIndex(const unsigned int cb_idx, const int shift[4], const bool partitioned[4], const unsigned int parity, const int X[4]){ int full_idx; int x[4]; coordsFromIndex<EVEN_X>(full_idx, x, cb_idx, parity, X); for(int dim = 0; dim<4; ++dim){ if( partitioned[dim] ) if( (x[dim]+shift[dim])<0 || (x[dim]+shift[dim])>=X[dim]) return -1; } for(int dim=0; dim<4; ++dim){ x[dim] = shift[dim] ? (x[dim]+shift[dim] + X[dim]) % X[dim] : x[dim]; } return (((x[3]*X[2] + x[2])*X[1] + x[1])*X[0] + x[0]) >> 1; } template<typename real, typename Output, typename InputA, typename InputB> __global__ void interiorOprodKernel(StaggeredOprodArg<real, Output, InputA, InputB> arg) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int gridSize = gridDim.x*blockDim.x; typedef complex<real> Complex; Complex x[3]; Complex y[3]; Complex z[3]; Matrix<Complex,3> result; Matrix<Complex,3> tempA, tempB; // input while(idx<arg.length){ arg.inA.load(x, idx); #pragma unroll for(int dim=0; dim<4; ++dim){ int shift[4] = {0,0,0,0}; shift[dim] = 1; const int first_nbr_idx = neighborIndex(idx, shift, arg.partitioned, arg.parity, arg.X); if(first_nbr_idx >= 0){ arg.inB.load(y, first_nbr_idx); outerProd(y,x,&result); arg.outA.load(reinterpret_cast<real*>(tempA.data), idx, dim, arg.parity); result = tempA + result*arg.coeff[0]; arg.outA.save(reinterpret_cast<real*>(result.data), idx, dim, arg.parity); if (arg.nFace == 3) { shift[dim] = 3; const int third_nbr_idx = neighborIndex(idx, shift, arg.partitioned, arg.parity, arg.X); if(third_nbr_idx >= 0){ arg.inB.load(z, third_nbr_idx); outerProd(z, x, &result); arg.outB.load(reinterpret_cast<real*>(tempB.data), idx, dim, arg.parity); result = tempB + result*arg.coeff[1]; arg.outB.save(reinterpret_cast<real*>(result.data), idx, dim, arg.parity); } } } } // dim idx += gridSize; } return; } // interiorOprodKernel template<int dim, typename real, typename Output, typename InputA, typename InputB> __global__ void exteriorOprodKernel(StaggeredOprodArg<real, Output, InputA, InputB> arg) { typedef complex<real> Complex; unsigned int cb_idx = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int gridSize = gridDim.x*blockDim.x; Complex a[3]; Complex b[3]; Matrix<Complex,3> result; Matrix<Complex,3> inmatrix; // input Output& out = (arg.displacement == 1) ? arg.outA : arg.outB; real coeff = (arg.displacement == 1) ? 
arg.coeff[0] : arg.coeff[1]; int x[4]; while(cb_idx<arg.length){ coordsFromIndex(x, cb_idx, arg.X, arg.dir, arg.displacement, arg.parity); const unsigned int bulk_cb_idx = ((((x[3]*arg.X[2] + x[2])*arg.X[1] + x[1])*arg.X[0] + x[0]) >> 1); out.load(reinterpret_cast<real*>(inmatrix.data), bulk_cb_idx, arg.dir, arg.parity); arg.inA.load(a, bulk_cb_idx); const unsigned int ghost_idx = arg.ghostOffset[dim] + cb_idx; arg.inB.loadGhost(b, ghost_idx, arg.dir); outerProd(b,a,&result); result = inmatrix + result*coeff; out.save(reinterpret_cast<real*>(result.data), bulk_cb_idx, arg.dir, arg.parity); cb_idx += gridSize; } return; } template<typename Float, typename Output, typename InputA, typename InputB> class StaggeredOprodField : public Tunable { private: StaggeredOprodArg<Float,Output,InputA,InputB> &arg; const GaugeField &meta; unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &) const { return 0; } unsigned int minThreads() const { return arg.outA.volumeCB; } bool tunedGridDim() const { return false; } public: StaggeredOprodField(StaggeredOprodArg<Float,Output,InputA,InputB> &arg, const GaugeField &meta) : arg(arg), meta(meta) { writeAuxString("threads=%d,prec=%lu,stride=%d",arg.length,sizeof(Complex)/2,arg.inA.Stride()); // this sets the communications pattern for the packing kernel int comms[QUDA_MAX_DIM] = { commDimPartitioned(0), commDimPartitioned(1), commDimPartitioned(2), commDimPartitioned(3) }; setPackComms(comms); } virtual ~StaggeredOprodField() {} void apply(const hipStream_t &stream){ if (meta.Location() == QUDA_CUDA_FIELD_LOCATION) { // Disable tuning for the time being TuneParam tp = tuneLaunch(*this, QUDA_TUNE_NO, getVerbosity()); if (arg.kernelType == OPROD_INTERIOR_KERNEL) { hipLaunchKernelGGL(( interiorOprodKernel), dim3(tp.grid),dim3(tp.block),tp.shared_bytes, stream, arg); } else if (arg.kernelType == OPROD_EXTERIOR_KERNEL) { if (arg.dir == 0)hipLaunchKernelGGL(( exteriorOprodKernel<0>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes, stream, arg); else if (arg.dir == 1)hipLaunchKernelGGL(( exteriorOprodKernel<1>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes, stream, arg); else if (arg.dir == 2)hipLaunchKernelGGL(( exteriorOprodKernel<2>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes, stream, arg); else if (arg.dir == 3)hipLaunchKernelGGL(( exteriorOprodKernel<3>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes, stream, arg); } else { errorQuda("Kernel type not supported\n"); } } else { // run the CPU code errorQuda("No CPU support for staggered outer-product calculation\n"); } } // apply void preTune(){ this->arg.outA.save(); this->arg.outB.save(); } void postTune(){ this->arg.outA.load(); this->arg.outB.load(); } long long flops() const { return 0; } // FIXME long long bytes() const { return 0; } // FIXME TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux);} }; // StaggeredOprodField void exchangeGhost(int nFace, cudaColorSpinorField &a, int parity, int dag) { // need to enable packing in temporal direction to get spin-projector correct pushKernelPackT(true); // first transfer src1 qudaDeviceSynchronize(); MemoryLocation location[2*QUDA_MAX_DIM] = {Device, Device, Device, Device, Device, Device, Device, Device}; a.pack(nFace, 1-parity, dag, Nstream-1, location, Device); qudaDeviceSynchronize(); for(int i=3; i>=0; i--){ if(commDimPartitioned(i)){ // Initialize the host transfer from the source spinor a.gather(nFace, dag, 2*i); } // commDim(i) } // i=3,..,0 qudaDeviceSynchronize(); 
comm_barrier(); for (int i=3; i>=0; i--) { if(commDimPartitioned(i)) { a.commsStart(nFace, 2*i, dag); } } for (int i=3; i>=0; i--) { if(commDimPartitioned(i)) { a.commsWait(nFace, 2*i, dag); a.scatter(nFace, dag, 2*i); } } qudaDeviceSynchronize(); popKernelPackT(); // restore packing state a.bufferIndex = (1 - a.bufferIndex); comm_barrier(); } template<typename Float, typename Output, typename InputA, typename InputB> void computeStaggeredOprodCuda(Output outA, Output outB, GaugeField& outFieldA, GaugeField& outFieldB, InputA& inA, InputB& inB, cudaColorSpinorField& src, const unsigned int parity, const int faceVolumeCB[4], const double coeff[2], int nFace) { unsigned int ghostOffset[4] = {0,0,0,0}; for(int dir=0; dir<4; ++dir) ghostOffset[dir] = src.GhostOffset(dir,1)/src.FieldOrder(); // offset we want is the forwards one // Create the arguments for the interior kernel StaggeredOprodArg<Float,Output,InputA,InputB> arg(parity, 0, ghostOffset, 1, OPROD_INTERIOR_KERNEL, nFace, coeff, inA, inB, outA, outB, outFieldA); StaggeredOprodField<Float,Output,InputA,InputB> oprod(arg, outFieldA); arg.kernelType = OPROD_INTERIOR_KERNEL; arg.length = src.VolumeCB(); oprod.apply(streams[Nstream-1]); for(int i=3; i>=0; i--){ if (commDimPartitioned(i)) { // update parameters for this exterior kernel arg.kernelType = OPROD_EXTERIOR_KERNEL; arg.dir = i; // First, do the one hop term { arg.displacement = 1; arg.length = faceVolumeCB[i]; oprod.apply(streams[Nstream-1]); } // Now do the 3 hop term if (nFace == 3) { arg.displacement = 3; arg.length = arg.displacement*faceVolumeCB[i]; oprod.apply(streams[Nstream-1]); } } } // i=3,..,0 checkCudaError(); } // computeStaggeredOprodCuda #endif // GPU_STAGGERED_DIRAC void computeStaggeredOprod(GaugeField& outA, GaugeField& outB, ColorSpinorField& inEven, ColorSpinorField& inOdd, const unsigned int parity, const double coeff[2], int nFace) { #ifdef GPU_STAGGERED_DIRAC if(outA.Order() != QUDA_FLOAT2_GAUGE_ORDER) errorQuda("Unsupported output ordering: %d\n", outA.Order()); if(outB.Order() != QUDA_FLOAT2_GAUGE_ORDER) errorQuda("Unsupported output ordering: %d\n", outB.Order()); if(inEven.Precision() != outA.Precision()) errorQuda("Mixed precision not supported: %d %d\n", inEven.Precision(), outA.Precision()); cudaColorSpinorField &inA = (parity&1) ? static_cast<cudaColorSpinorField&>(inOdd) : static_cast<cudaColorSpinorField&>(inEven); cudaColorSpinorField &inB = (parity&1) ? 
static_cast<cudaColorSpinorField&>(inEven) : static_cast<cudaColorSpinorField&>(inOdd); inA.allocateGhostBuffer(nFace); inB.allocateGhostBuffer(nFace); if (inEven.Precision() == QUDA_DOUBLE_PRECISION) { Spinor<double2, double2, 3, 0, 0> spinorA(inA, nFace); Spinor<double2, double2, 3, 0, 1> spinorB(inB, nFace); exchangeGhost(nFace,static_cast<cudaColorSpinorField&>(inB), parity, 0); computeStaggeredOprodCuda<double>(gauge::FloatNOrder<double, 18, 2, 18>(outA), gauge::FloatNOrder<double, 18, 2, 18>(outB), outA, outB, spinorA, spinorB, inB, parity, inB.GhostFace(), coeff, nFace); } else if (inEven.Precision() == QUDA_SINGLE_PRECISION) { Spinor<float2, float2, 3, 0, 0> spinorA(inA, nFace); Spinor<float2, float2, 3, 0, 1> spinorB(inB, nFace); exchangeGhost(nFace,static_cast<cudaColorSpinorField&>(inB), parity, 0); computeStaggeredOprodCuda<float>(gauge::FloatNOrder<float, 18, 2, 18>(outA), gauge::FloatNOrder<float, 18, 2, 18>(outB), outA, outB, spinorA, spinorB, inB, parity, inB.GhostFace(), coeff, nFace); } else { errorQuda("Unsupported precision: %d\n", inEven.Precision()); } #else // GPU_STAGGERED_DIRAC not defined errorQuda("Staggered Outer Product has not been built!"); #endif return; } // computeStaggeredOprod void computeStaggeredOprod(GaugeField *out[], ColorSpinorField& in, const double coeff[], int nFace) { if (nFace == 1) { computeStaggeredOprod(*out[0], *out[0], in.Even(), in.Odd(), 0, coeff, nFace); double coeff_[2] = {-coeff[0],0.0}; // need to multiply by -1 on odd sites computeStaggeredOprod(*out[0], *out[0], in.Even(), in.Odd(), 1, coeff_, nFace); } else if (nFace == 3) { computeStaggeredOprod(*out[0], *out[1], in.Even(), in.Odd(), 0, coeff, nFace); computeStaggeredOprod(*out[0], *out[1], in.Even(), in.Odd(), 1, coeff, nFace); } else { errorQuda("Invalid nFace=%d", nFace); } } } // namespace quda
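// Editorial note: a minimal host-side sketch (std::complex, hypothetical helper name;
// the conjugation convention of QUDA's outerProd is an assumption, not taken from the
// file above) of the per-link accumulation that interiorOprodKernel performs:
//   out(x, mu) += coeff * psi(x + mu) psi(x)^dagger, a 3x3 complex color-matrix update.
#include <complex>
using cplx = std::complex<double>;
inline void accumulate_oprod(const cplx a[3],   // psi(x + mu), loaded from inB
                             const cplx b[3],   // psi(x),      loaded from inA
                             double coeff,      // arg.coeff[0] (one hop) or arg.coeff[1] (three hop)
                             cplx out[3][3]) {  // accumulator for the link (x, mu)
  for (int i = 0; i < 3; ++i)
    for (int j = 0; j < 3; ++j)
      out[i][j] += coeff * a[i] * std::conj(b[j]);  // assumed: outerProd(a,b)(i,j) = a_i * conj(b_j)
}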
cfc3cef8b01cdcf55719cf0c695dd8d9221c1f3c.cu
#include <cstdio> #include <cstdlib> #include <staggered_oprod.h> #include <tune_quda.h> #include <quda_internal.h> #include <gauge_field_order.h> #include <quda_matrix.h> #include <dslash_quda.h> namespace quda { #ifdef GPU_STAGGERED_DIRAC namespace { // anonymous #include <texture.h> } enum KernelType {OPROD_INTERIOR_KERNEL, OPROD_EXTERIOR_KERNEL}; template<typename Float, typename Output, typename InputA, typename InputB> struct StaggeredOprodArg { unsigned int length; int X[4]; unsigned int parity; unsigned int dir; unsigned int ghostOffset[4]; unsigned int displacement; KernelType kernelType; int nFace; bool partitioned[4]; InputA inA; InputB inB; Output outA; Output outB; Float coeff[2]; StaggeredOprodArg(const unsigned int parity, const unsigned int dir, const unsigned int *ghostOffset, const unsigned int displacement, const KernelType& kernelType, const int nFace, const double coeff[2], InputA& inA, InputB& inB, Output& outA, Output& outB, GaugeField& meta) : length(meta.VolumeCB()), parity(parity), dir(dir), displacement(displacement), kernelType(kernelType), nFace(nFace), inA(inA), inB(inB), outA(outA), outB(outB) { this->coeff[0] = coeff[0]; this->coeff[1] = coeff[1]; for(int i=0; i<4; ++i) this->X[i] = meta.X()[i]; for(int i=0; i<4; ++i) this->ghostOffset[i] = ghostOffset[i]; for(int i=0; i<4; ++i) this->partitioned[i] = commDimPartitioned(i) ? true : false; } }; enum IndexType { EVEN_X = 0, EVEN_Y = 1, EVEN_Z = 2, EVEN_T = 3 }; template <IndexType idxType> static __device__ __forceinline__ void coordsFromIndex(int& idx, int c[4], const unsigned int cb_idx, const unsigned int parity, const int X[4]) { const int &LX = X[0]; const int &LY = X[1]; const int &LZ = X[2]; const int XYZ = X[2]*X[1]*X[0]; const int XY = X[1]*X[0]; idx = 2*cb_idx; int x, y, z, t; if (idxType == EVEN_X /*!(LX & 1)*/) { // X even // t = idx / XYZ; // z = (idx / XY) % Z; // y = (idx / X) % Y; // idx += (parity + t + z + y) & 1; // x = idx % X; // equivalent to the above, but with fewer divisions/mods: int aux1 = idx / LX; x = idx - aux1 * LX; int aux2 = aux1 / LY; y = aux1 - aux2 * LY; t = aux2 / LZ; z = aux2 - t * LZ; aux1 = (parity + t + z + y) & 1; x += aux1; idx += aux1; } else if (idxType == EVEN_Y /*!(LY & 1)*/) { // Y even t = idx / XYZ; z = (idx / XY) % LZ; idx += (parity + t + z) & 1; y = (idx / LX) % LY; x = idx % LX; } else if (idxType == EVEN_Z /*!(LZ & 1)*/) { // Z even t = idx / XYZ; idx += (parity + t) & 1; z = (idx / XY) % LZ; y = (idx / LX) % LY; x = idx % LX; } else { idx += parity; t = idx / XYZ; z = (idx / XY) % LZ; y = (idx / LX) % LY; x = idx % LX; } c[0] = x; c[1] = y; c[2] = z; c[3] = t; } // Get the coordinates for the exterior kernels __device__ static void coordsFromIndex(int x[4], const unsigned int cb_idx, const int X[4], const unsigned int dir, const int displacement, const unsigned int parity) { int Xh[2] = {X[0]/2, X[1]/2}; switch(dir){ case 0: x[2] = cb_idx/Xh[1] % X[2]; x[3] = cb_idx/(Xh[1]*X[2]) % X[3]; x[0] = cb_idx/(Xh[1]*X[2]*X[3]); x[0] += (X[0] - displacement); x[1] = 2*(cb_idx % Xh[1]) + ((x[0]+x[2]+x[3]+parity)&1); break; case 1: x[2] = cb_idx/Xh[0] % X[2]; x[3] = cb_idx/(Xh[0]*X[2]) % X[3]; x[1] = cb_idx/(Xh[0]*X[2]*X[3]); x[1] += (X[1] - displacement); x[0] = 2*(cb_idx % Xh[0]) + ((x[1]+x[2]+x[3]+parity)&1); break; case 2: x[1] = cb_idx/Xh[0] % X[1]; x[3] = cb_idx/(Xh[0]*X[1]) % X[3]; x[2] = cb_idx/(Xh[0]*X[1]*X[3]); x[2] += (X[2] - displacement); x[0] = 2*(cb_idx % Xh[0]) + ((x[1]+x[2]+x[3]+parity)&1); break; case 3: x[1] = cb_idx/Xh[0] % X[1]; x[2] = 
cb_idx/(Xh[0]*X[1]) % X[2]; x[3] = cb_idx/(Xh[0]*X[1]*X[2]); x[3] += (X[3] - displacement); x[0] = 2*(cb_idx % Xh[0]) + ((x[1]+x[2]+x[3]+parity)&1); break; } return; } __device__ __forceinline__ int neighborIndex(const unsigned int cb_idx, const int shift[4], const bool partitioned[4], const unsigned int parity, const int X[4]){ int full_idx; int x[4]; coordsFromIndex<EVEN_X>(full_idx, x, cb_idx, parity, X); for(int dim = 0; dim<4; ++dim){ if( partitioned[dim] ) if( (x[dim]+shift[dim])<0 || (x[dim]+shift[dim])>=X[dim]) return -1; } for(int dim=0; dim<4; ++dim){ x[dim] = shift[dim] ? (x[dim]+shift[dim] + X[dim]) % X[dim] : x[dim]; } return (((x[3]*X[2] + x[2])*X[1] + x[1])*X[0] + x[0]) >> 1; } template<typename real, typename Output, typename InputA, typename InputB> __global__ void interiorOprodKernel(StaggeredOprodArg<real, Output, InputA, InputB> arg) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int gridSize = gridDim.x*blockDim.x; typedef complex<real> Complex; Complex x[3]; Complex y[3]; Complex z[3]; Matrix<Complex,3> result; Matrix<Complex,3> tempA, tempB; // input while(idx<arg.length){ arg.inA.load(x, idx); #pragma unroll for(int dim=0; dim<4; ++dim){ int shift[4] = {0,0,0,0}; shift[dim] = 1; const int first_nbr_idx = neighborIndex(idx, shift, arg.partitioned, arg.parity, arg.X); if(first_nbr_idx >= 0){ arg.inB.load(y, first_nbr_idx); outerProd(y,x,&result); arg.outA.load(reinterpret_cast<real*>(tempA.data), idx, dim, arg.parity); result = tempA + result*arg.coeff[0]; arg.outA.save(reinterpret_cast<real*>(result.data), idx, dim, arg.parity); if (arg.nFace == 3) { shift[dim] = 3; const int third_nbr_idx = neighborIndex(idx, shift, arg.partitioned, arg.parity, arg.X); if(third_nbr_idx >= 0){ arg.inB.load(z, third_nbr_idx); outerProd(z, x, &result); arg.outB.load(reinterpret_cast<real*>(tempB.data), idx, dim, arg.parity); result = tempB + result*arg.coeff[1]; arg.outB.save(reinterpret_cast<real*>(result.data), idx, dim, arg.parity); } } } } // dim idx += gridSize; } return; } // interiorOprodKernel template<int dim, typename real, typename Output, typename InputA, typename InputB> __global__ void exteriorOprodKernel(StaggeredOprodArg<real, Output, InputA, InputB> arg) { typedef complex<real> Complex; unsigned int cb_idx = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int gridSize = gridDim.x*blockDim.x; Complex a[3]; Complex b[3]; Matrix<Complex,3> result; Matrix<Complex,3> inmatrix; // input Output& out = (arg.displacement == 1) ? arg.outA : arg.outB; real coeff = (arg.displacement == 1) ? 
arg.coeff[0] : arg.coeff[1]; int x[4]; while(cb_idx<arg.length){ coordsFromIndex(x, cb_idx, arg.X, arg.dir, arg.displacement, arg.parity); const unsigned int bulk_cb_idx = ((((x[3]*arg.X[2] + x[2])*arg.X[1] + x[1])*arg.X[0] + x[0]) >> 1); out.load(reinterpret_cast<real*>(inmatrix.data), bulk_cb_idx, arg.dir, arg.parity); arg.inA.load(a, bulk_cb_idx); const unsigned int ghost_idx = arg.ghostOffset[dim] + cb_idx; arg.inB.loadGhost(b, ghost_idx, arg.dir); outerProd(b,a,&result); result = inmatrix + result*coeff; out.save(reinterpret_cast<real*>(result.data), bulk_cb_idx, arg.dir, arg.parity); cb_idx += gridSize; } return; } template<typename Float, typename Output, typename InputA, typename InputB> class StaggeredOprodField : public Tunable { private: StaggeredOprodArg<Float,Output,InputA,InputB> &arg; const GaugeField &meta; unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &) const { return 0; } unsigned int minThreads() const { return arg.outA.volumeCB; } bool tunedGridDim() const { return false; } public: StaggeredOprodField(StaggeredOprodArg<Float,Output,InputA,InputB> &arg, const GaugeField &meta) : arg(arg), meta(meta) { writeAuxString("threads=%d,prec=%lu,stride=%d",arg.length,sizeof(Complex)/2,arg.inA.Stride()); // this sets the communications pattern for the packing kernel int comms[QUDA_MAX_DIM] = { commDimPartitioned(0), commDimPartitioned(1), commDimPartitioned(2), commDimPartitioned(3) }; setPackComms(comms); } virtual ~StaggeredOprodField() {} void apply(const cudaStream_t &stream){ if (meta.Location() == QUDA_CUDA_FIELD_LOCATION) { // Disable tuning for the time being TuneParam tp = tuneLaunch(*this, QUDA_TUNE_NO, getVerbosity()); if (arg.kernelType == OPROD_INTERIOR_KERNEL) { interiorOprodKernel<<<tp.grid,tp.block,tp.shared_bytes, stream>>>(arg); } else if (arg.kernelType == OPROD_EXTERIOR_KERNEL) { if (arg.dir == 0) exteriorOprodKernel<0><<<tp.grid,tp.block,tp.shared_bytes, stream>>>(arg); else if (arg.dir == 1) exteriorOprodKernel<1><<<tp.grid,tp.block,tp.shared_bytes, stream>>>(arg); else if (arg.dir == 2) exteriorOprodKernel<2><<<tp.grid,tp.block,tp.shared_bytes, stream>>>(arg); else if (arg.dir == 3) exteriorOprodKernel<3><<<tp.grid,tp.block,tp.shared_bytes, stream>>>(arg); } else { errorQuda("Kernel type not supported\n"); } } else { // run the CPU code errorQuda("No CPU support for staggered outer-product calculation\n"); } } // apply void preTune(){ this->arg.outA.save(); this->arg.outB.save(); } void postTune(){ this->arg.outA.load(); this->arg.outB.load(); } long long flops() const { return 0; } // FIXME long long bytes() const { return 0; } // FIXME TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux);} }; // StaggeredOprodField void exchangeGhost(int nFace, cudaColorSpinorField &a, int parity, int dag) { // need to enable packing in temporal direction to get spin-projector correct pushKernelPackT(true); // first transfer src1 qudaDeviceSynchronize(); MemoryLocation location[2*QUDA_MAX_DIM] = {Device, Device, Device, Device, Device, Device, Device, Device}; a.pack(nFace, 1-parity, dag, Nstream-1, location, Device); qudaDeviceSynchronize(); for(int i=3; i>=0; i--){ if(commDimPartitioned(i)){ // Initialize the host transfer from the source spinor a.gather(nFace, dag, 2*i); } // commDim(i) } // i=3,..,0 qudaDeviceSynchronize(); comm_barrier(); for (int i=3; i>=0; i--) { if(commDimPartitioned(i)) { a.commsStart(nFace, 2*i, dag); } } for (int i=3; i>=0; i--) { 
if(commDimPartitioned(i)) { a.commsWait(nFace, 2*i, dag); a.scatter(nFace, dag, 2*i); } } qudaDeviceSynchronize(); popKernelPackT(); // restore packing state a.bufferIndex = (1 - a.bufferIndex); comm_barrier(); } template<typename Float, typename Output, typename InputA, typename InputB> void computeStaggeredOprodCuda(Output outA, Output outB, GaugeField& outFieldA, GaugeField& outFieldB, InputA& inA, InputB& inB, cudaColorSpinorField& src, const unsigned int parity, const int faceVolumeCB[4], const double coeff[2], int nFace) { unsigned int ghostOffset[4] = {0,0,0,0}; for(int dir=0; dir<4; ++dir) ghostOffset[dir] = src.GhostOffset(dir,1)/src.FieldOrder(); // offset we want is the forwards one // Create the arguments for the interior kernel StaggeredOprodArg<Float,Output,InputA,InputB> arg(parity, 0, ghostOffset, 1, OPROD_INTERIOR_KERNEL, nFace, coeff, inA, inB, outA, outB, outFieldA); StaggeredOprodField<Float,Output,InputA,InputB> oprod(arg, outFieldA); arg.kernelType = OPROD_INTERIOR_KERNEL; arg.length = src.VolumeCB(); oprod.apply(streams[Nstream-1]); for(int i=3; i>=0; i--){ if (commDimPartitioned(i)) { // update parameters for this exterior kernel arg.kernelType = OPROD_EXTERIOR_KERNEL; arg.dir = i; // First, do the one hop term { arg.displacement = 1; arg.length = faceVolumeCB[i]; oprod.apply(streams[Nstream-1]); } // Now do the 3 hop term if (nFace == 3) { arg.displacement = 3; arg.length = arg.displacement*faceVolumeCB[i]; oprod.apply(streams[Nstream-1]); } } } // i=3,..,0 checkCudaError(); } // computeStaggeredOprodCuda #endif // GPU_STAGGERED_DIRAC void computeStaggeredOprod(GaugeField& outA, GaugeField& outB, ColorSpinorField& inEven, ColorSpinorField& inOdd, const unsigned int parity, const double coeff[2], int nFace) { #ifdef GPU_STAGGERED_DIRAC if(outA.Order() != QUDA_FLOAT2_GAUGE_ORDER) errorQuda("Unsupported output ordering: %d\n", outA.Order()); if(outB.Order() != QUDA_FLOAT2_GAUGE_ORDER) errorQuda("Unsupported output ordering: %d\n", outB.Order()); if(inEven.Precision() != outA.Precision()) errorQuda("Mixed precision not supported: %d %d\n", inEven.Precision(), outA.Precision()); cudaColorSpinorField &inA = (parity&1) ? static_cast<cudaColorSpinorField&>(inOdd) : static_cast<cudaColorSpinorField&>(inEven); cudaColorSpinorField &inB = (parity&1) ? 
static_cast<cudaColorSpinorField&>(inEven) : static_cast<cudaColorSpinorField&>(inOdd); inA.allocateGhostBuffer(nFace); inB.allocateGhostBuffer(nFace); if (inEven.Precision() == QUDA_DOUBLE_PRECISION) { Spinor<double2, double2, 3, 0, 0> spinorA(inA, nFace); Spinor<double2, double2, 3, 0, 1> spinorB(inB, nFace); exchangeGhost(nFace,static_cast<cudaColorSpinorField&>(inB), parity, 0); computeStaggeredOprodCuda<double>(gauge::FloatNOrder<double, 18, 2, 18>(outA), gauge::FloatNOrder<double, 18, 2, 18>(outB), outA, outB, spinorA, spinorB, inB, parity, inB.GhostFace(), coeff, nFace); } else if (inEven.Precision() == QUDA_SINGLE_PRECISION) { Spinor<float2, float2, 3, 0, 0> spinorA(inA, nFace); Spinor<float2, float2, 3, 0, 1> spinorB(inB, nFace); exchangeGhost(nFace,static_cast<cudaColorSpinorField&>(inB), parity, 0); computeStaggeredOprodCuda<float>(gauge::FloatNOrder<float, 18, 2, 18>(outA), gauge::FloatNOrder<float, 18, 2, 18>(outB), outA, outB, spinorA, spinorB, inB, parity, inB.GhostFace(), coeff, nFace); } else { errorQuda("Unsupported precision: %d\n", inEven.Precision()); } #else // GPU_STAGGERED_DIRAC not defined errorQuda("Staggered Outer Product has not been built!"); #endif return; } // computeStaggeredOprod void computeStaggeredOprod(GaugeField *out[], ColorSpinorField& in, const double coeff[], int nFace) { if (nFace == 1) { computeStaggeredOprod(*out[0], *out[0], in.Even(), in.Odd(), 0, coeff, nFace); double coeff_[2] = {-coeff[0],0.0}; // need to multiply by -1 on odd sites computeStaggeredOprod(*out[0], *out[0], in.Even(), in.Odd(), 1, coeff_, nFace); } else if (nFace == 3) { computeStaggeredOprod(*out[0], *out[1], in.Even(), in.Odd(), 0, coeff, nFace); computeStaggeredOprod(*out[0], *out[1], in.Even(), in.Odd(), 1, coeff, nFace); } else { errorQuda("Invalid nFace=%d", nFace); } } } // namespace quda
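// Editorial note: a self-contained sketch (hypothetical kernel and arguments, not taken
// from the pair above) of the one systematic difference between the .cu and .hip
// versions in this pair: the triple-chevron kernel launch rewritten by hipify into
// hipLaunchKernelGGL, with the grid/block/shared-memory/stream arguments kept in order.
#include <cuda_runtime.h>
__global__ void scaleKernel(float *x, float a, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= a;
}
void launchScale(float *d_x, float a, int n, cudaStream_t stream) {
  dim3 block(256);
  dim3 grid((n + block.x - 1) / block.x);
  scaleKernel<<<grid, block, 0, stream>>>(d_x, a, n);
  // hipify rewrites the launch above to roughly:
  //   hipLaunchKernelGGL(scaleKernel, dim3(grid), dim3(block), 0, stream, d_x, a, n);
  // and adds '#include "hip/hip_runtime.h"' plus cudaStream_t -> hipStream_t at file scope.
}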
1f1238ebfabae31b74a94fe933f36a0d51c9b0b2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @precisions normal z -> s d c */ #include "common_magma.h" #include "commonblas_z.h" #include "magma_templates.h" #define PRECISION_z #define BLOCK_SIZE 512 /* --------------------------------------------------------------------------- */ #define BLOCK_SIZE1 192 __global__ void magma_zswap_gemv_kernel( int m, int rk, int n, const magmaDoubleComplex * __restrict__ V, int ldv, const magmaDoubleComplex * __restrict__ x, int ldx, magmaDoubleComplex *c, magmaDoubleComplex *b) { const int i = threadIdx.x; const int j = i + BLOCK_SIZE1 * blockIdx.x; magmaDoubleComplex lsum, tmp; V += j; lsum = MAGMA_Z_ZERO; if (j < m) { tmp = b[j]; b[j] = c[j]; if (j >= rk) { for (int k=0; k < n; k++) { lsum += MAGMA_Z_MUL( V[k*ldv], MAGMA_Z_CNJG(x[k*ldx])); } } c[j] = tmp - lsum; } } __global__ void magma_zgemv_kernel( int m, int n, const magmaDoubleComplex * __restrict__ V, int ldv, const magmaDoubleComplex * __restrict__ x, magmaDoubleComplex *b, magmaDoubleComplex *c) { const int i = threadIdx.x; const int j = i + BLOCK_SIZE1 * blockIdx.x; magmaDoubleComplex lsum; V += j; lsum = MAGMA_Z_ZERO; if (j < m) { for (int k=0; k < n; k++) { lsum += MAGMA_Z_MUL( V[k*ldv], x[k]); } c[j] = b[j] - lsum; } } __global__ void magma_zscale_kernel( int n, magmaDoubleComplex* dx0, magmaDoubleComplex *dtau, double *dxnorm, magmaDoubleComplex* dAkk) { const int i = threadIdx.x; magmaDoubleComplex tmp; __shared__ magmaDoubleComplex scale; /* === Compute the norm of dx0 === */ magmaDoubleComplex *dx = dx0; __shared__ double sum[ BLOCK_SIZE ]; double re, lsum; lsum = 0; for (int k = i; k < n; k += BLOCK_SIZE) { #if (defined(PRECISION_s) || defined(PRECISION_d)) re = dx[k]; lsum += re*re; #else re = MAGMA_Z_REAL( dx[k] ); double im = MAGMA_Z_IMAG( dx[k] ); lsum += re*re + im*im; #endif } sum[i] = lsum; magma_sum_reduce< BLOCK_SIZE >( i, sum ); /* === Compute the scaling factor === */ if (i == 0) { double beta = sqrt(sum[0]); if ( beta == 0 ) { *dtau = MAGMA_Z_ZERO; } else { tmp = dx0[0]; #if (defined(PRECISION_s) || defined(PRECISION_d)) beta = -copysign( beta, tmp ); // todo: deal with badly scaled vectors (see lapack's larfg) *dtau = (beta - tmp) / beta; *dAkk = beta; scale = 1. 
/ (tmp - beta); #else double alphar = MAGMA_Z_REAL(tmp), alphai = MAGMA_Z_IMAG(tmp); beta = -copysign( beta, alphar ); // todo: deal with badly scaled vectors (see lapack's larfg) *dtau = MAGMA_Z_MAKE((beta - alphar)/beta, -alphai/beta); *dAkk = MAGMA_Z_MAKE(beta, 0.); tmp = MAGMA_Z_MAKE( alphar - beta, alphai); scale = MAGMA_Z_DIV( MAGMA_Z_ONE, tmp); #endif } } __syncthreads(); /* === Scale the vector === */ for (int j=i; j < n; j += BLOCK_SIZE) dx0[j] = MAGMA_Z_MUL(dx0[j], scale); /* === Make temporary the first element to 1; value is stored in dAkk === */ if (i == 0) dx0[0] = MAGMA_Z_ONE; } #define BLOCK_SIZE2 192 #if (defined(PRECISION_z) || defined(PRECISION_d)) #define TOL 1.e-8 #else #define TOL 1.e-4 #endif __global__ void magma_zgemv_kernel_adjust( int n, int k, magmaDoubleComplex * A, int lda, magmaDoubleComplex *B, int ldb, magmaDoubleComplex *C, double *xnorm, double *xnorm2, magmaDoubleComplex *Akk, int *lsticc, int *lsticcs) { const int i = threadIdx.x; const int j = i + BLOCK_SIZE2 * blockIdx.x; magmaDoubleComplex sum; double temp, oldnorm; if (j < n) { B += j; sum = MAGMA_Z_CNJG( B[(k-1)*ldb] ); // sum = MAGMA_Z_ZERO; for (int m=0; m < k-1; m++) { sum += MAGMA_Z_MUL( MAGMA_Z_CNJG( B[m*ldb] ), A[m*lda] ); } C[j*lda] -= sum; oldnorm = xnorm[j]; temp = MAGMA_Z_ABS( C[j*lda] ) / oldnorm; temp = (1.0 + temp) * (1.0 - temp); temp = oldnorm * sqrt(temp); xnorm[j] = temp; // Below 'j' was 'i'; was that a bug? double temp2 = xnorm[j] / xnorm2[j]; temp2 = temp*(temp2 * temp2); if (temp2 <= TOL) { *lsticc = 1; lsticcs[j] = 1; } } if (j == 0) A[(k-1)*lda] = *Akk; /* __syncthreads(); // Check if the norm has to be recomputed if (blockIdx.x == 0) { //if (2.*temp < oldnorm) { //printf("recompute norm\n"); magmaDoubleComplex *dx = C+blockIdx.x*lda+1; __shared__ double sum[ BLOCK_SIZE2 ]; double re, lsum; // get norm of dx lsum = 0; for (int k = i; k < n1; k += BLOCK_SIZE2) { #if (defined(PRECISION_s) || defined(PRECISION_d)) re = dx[k]; lsum += re*re; #else re = MAGMA_Z_REAL( dx[k] ); double im = MAGMA_Z_IMAG( dx[k] ); lsum += re*re + im*im; #endif } sum[i] = lsum; magma_sum_reduce< BLOCK_SIZE2 >( i, sum ); if (i == 0) { printf("adjusted = %f recomputed = %f\n", xnorm[blockIdx.x], sqrt(sum[0])); xnorm[blockIdx.x] = sqrt(sum[0]); } //} } */ } __global__ void magmablas_dznrm2_check_kernel( int m, magmaDoubleComplex *da, int ldda, double *dxnorm, double *dxnorm2, int *dlsticc, int *dlsticcs) { const int i = threadIdx.x; magmaDoubleComplex *dx = da + blockIdx.x * ldda; __shared__ double sum[ BLOCK_SIZE ]; double re, lsum; if (blockIdx.x == 0 && i == 0) *dlsticc = 0; // get norm of dx only if lsticc[blockIdx] != 0 if ( dlsticcs[blockIdx.x] == 0 ) return; else dlsticcs[blockIdx.x] = 0; lsum = 0; for (int j = i; j < m; j += BLOCK_SIZE) { #if (defined(PRECISION_s) || defined(PRECISION_d)) re = dx[j]; lsum += re*re; #else re = MAGMA_Z_REAL( dx[j] ); double im = MAGMA_Z_IMAG( dx[j] ); lsum += re*re + im*im; #endif } sum[i] = lsum; magma_sum_reduce< BLOCK_SIZE >( i, sum ); if (i == 0) { dxnorm[blockIdx.x] = sqrt(sum[0]); dxnorm2[blockIdx.x] = sqrt(sum[0]); } } /* --------------------------------------------------------------------------- */ /** Purpose ------- ZLAQPS computes a step of QR factorization with column pivoting of a complex M-by-N matrix A by using Blas-3. It tries to factorize NB columns from A starting from the row OFFSET+1, and updates all of the matrix with Blas-3 xGEMM. In some cases, due to catastrophic cancellations, it cannot factorize NB columns. 
Hence, the actual number of factorized columns is returned in KB. Block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized. Arguments --------- @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0 @param[in] offset INTEGER The number of rows of A that have been factorized in previous steps. @param[in] nb INTEGER The number of columns to factorize. @param[out] kb INTEGER The number of columns actually factorized. @param[in,out] dA COMPLEX_16 array, dimension (LDDA,N) on the GPU On entry, the M-by-N matrix A. On exit, block A(OFFSET+1:M,1:KB) is the triangular factor obtained and block A(1:OFFSET,1:N) has been accordingly pivoted, but no factorized. The rest of the matrix, block A(OFFSET+1:M,KB+1:N) has been updated. @param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,M). @param[in,out] jpvt INTEGER array, dimension (N) JPVT(I) = K <==> Column K of the full matrix A has been permuted into position I in AP. @param[out] dtau COMPLEX_16 array, dimension (KB) The scalar factors of the elementary reflectors. @param[in,out] dvn1 DOUBLE PRECISION array, dimension (N) The vector with the partial column norms. @param[in,out] dvn2 DOUBLE PRECISION array, dimension (N) The vector with the exact column norms. @param[in,out] dauxv COMPLEX_16 array, dimension (NB) Auxiliar vector. @param[in,out] dF COMPLEX_16 array, dimension (LDDF,NB) Matrix F**H = L * Y**H * A. @param[in] lddf INTEGER The leading dimension of the array F. LDDF >= max(1,N). @ingroup magma_zgeqp3_aux ********************************************************************/ extern "C" magma_int_t magma_zlaqps3_gpu( magma_int_t m, magma_int_t n, magma_int_t offset, magma_int_t nb, magma_int_t *kb, magmaDoubleComplex_ptr dA, magma_int_t ldda, magma_int_t *jpvt, magmaDoubleComplex_ptr dtau, magmaDouble_ptr dvn1, magmaDouble_ptr dvn2, magmaDoubleComplex_ptr dauxv, magmaDoubleComplex_ptr dF, magma_int_t lddf) { #define dA(i_, j_) (dA + (i_) + (j_)*(ldda)) #define dF(i_, j_) (dF + (i_) + (j_)*(lddf)) magmaDoubleComplex c_zero = MAGMA_Z_MAKE( 0.,0.); magmaDoubleComplex c_one = MAGMA_Z_MAKE( 1.,0.); magmaDoubleComplex c_neg_one = MAGMA_Z_MAKE(-1.,0.); magma_int_t ione = 1; magma_int_t i__1, i__2; magma_int_t k, rk; magmaDoubleComplex tauk; magma_int_t pvt, itemp; magmaDoubleComplex_ptr dAkk = dauxv; dauxv += 1; int lsticc, *dlsticc, *dlsticcs; magma_malloc( (void**) &dlsticcs, (n+1)*sizeof(int) ); hipMemset( dlsticcs, 0, (n+1)*sizeof(int) ); dlsticc = dlsticcs + n; // double tol3z = magma_dsqrt( lapackf77_dlamch("Epsilon")); lsticc = 0; k = 0; while( k < nb && lsticc == 0 ) { rk = offset + k; /* Determine ith pivot column and swap if necessary */ pvt = k - 1 + magma_idamax( n-k, &dvn1[k], ione ); if (pvt != k) { magmablas_zswap( k, dF(pvt,0), lddf, dF(k,0), lddf); itemp = jpvt[pvt]; jpvt[pvt] = jpvt[k]; jpvt[k] = itemp; magma_dswap( 2, &dvn1[pvt], n+offset, &dvn1[k], n+offset ); } /* Apply previous Householder reflectors to column K: A(RK:M,K) := A(RK:M,K) - A(RK:M,1:K-1)*F(K,1:K-1)' */ hipLaunchKernelGGL(( magma_zswap_gemv_kernel), dim3(magma_ceildiv( m, BLOCK_SIZE1 )), dim3(BLOCK_SIZE1), 0, magma_stream , m, rk, k, dA(0, 0), ldda, dF(k, 0), lddf, dA(0, k), dA(0,pvt)); /* Generate elementary reflector H(k). 
*/ hipLaunchKernelGGL(( magma_zscale_kernel), dim3(1), dim3(BLOCK_SIZE), 0, magma_stream , m-rk, dA(rk, k), &dtau[k], &dvn1[k], dAkk); // printf("m-rk = %d\n", m-rk); /* Compute Kth column of F: Compute F(K+1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) on the GPU */ if (k < n-1) { magma_zgetvector( 1, &dtau[k], 1, &tauk, 1 ); magma_zgemv( MagmaConjTrans, m-rk, n, tauk, dA( rk, 0 ), ldda, dA( rk, k ), 1, c_zero, dauxv, 1 ); if (k == 0) magmablas_zlacpy(MagmaUpperLower, n-k-1, 1, dauxv+k+1, n-k-1, dF( k+1, k ), n-k-1); } /* Incremental updating of F: F(1:N,K) := F(1:N,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K). F(1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K) := tau(K)(A(RK:M,K+1:N)' - F(1:N,1:K-1)*A(RK:M,1:K-1)') A(RK:M,K) so, F is (updated A)*V */ if (k > 0) { /* I think we only need stricly lower-triangular part */ hipLaunchKernelGGL(( magma_zgemv_kernel), dim3(magma_ceildiv( n-k-1, BLOCK_SIZE1 )), dim3(BLOCK_SIZE1), 0, magma_stream , n-k-1, k, dF(k+1,0), lddf, dauxv, dauxv+k+1, dF(k+1,k)); } /* Update the current row of A: A(RK,K+1:N) := A(RK,K+1:N) - A(RK,1:K)*F(K+1:N,1:K)'. */ if (k < n-1) { i__1 = n - k - 1; i__2 = k + 1; /* left-looking update of rows, * * since F=A**H v with original A, so no right-looking */ hipLaunchKernelGGL(( magma_zgemv_kernel_adjust), dim3(magma_ceildiv( n-k-1, BLOCK_SIZE2 )), dim3(BLOCK_SIZE2), 0, magma_stream , n-k-1, k+1, dA(rk, 0 ), ldda, dF(k+1,0 ), lddf, dA(rk, k+1), &dvn1[k+1], &dvn2[k+1], dAkk, dlsticc, dlsticcs ); magma_getmatrix(1,1, sizeof(int), dlsticc, 1, &lsticc, 1); // TTT: force not to recompute; has to be finally commented if ( nb < 3 ) lsticc = 0; // printf("k=%d n-k = %d\n", k, n-k); // forcing recompute works! - forcing it requires changing dlsticcs as well, e.g., // can be done in the kernel directly (magmablas_dznrm2_check_kernel) // if (k == 16) lsticc = 1; } /* Update partial column norms. */ /* if (rk < min(m, n+offset)-1) { magmablas_dznrm2_row_check_adjust( n-k-1, tol3z, &dvn1[k+1], &dvn2[k+1], dA(rk,k+1), ldda, lsticcs); } magma_dgetvector( 1, &lsticcs[0], 1, &lsticc, 1 ); */ if (k >= n-1) magmablas_zlacpy(MagmaUpperLower, 1, 1, dAkk, 1, dA(rk, k), 1); ++k; } // leave k as the last column done --k; *kb = k + 1; rk = offset + *kb - 1; //printf("actually factored = %d",*kb); /* Apply the block reflector to the rest of the matrix: A(OFFSET+KB+1:M,KB+1:N) := A(OFFSET+KB+1:M,KB+1:N) - A(OFFSET+KB+1:M,1:KB)*F(KB+1:N,1:KB)' */ if (*kb < min(n, m - offset)-1) { i__1 = m - rk - 1; i__2 = n - *kb; magma_zgemm( MagmaNoTrans, MagmaConjTrans, i__1, i__2, *kb, c_neg_one, dA(rk+1, 0 ), ldda, dF(*kb, 0 ), lddf, c_one, dA(rk+1, *kb), ldda ); } /* Recomputation of difficult columns. */ if ( lsticc > 0 ) { // printf( " -- recompute dnorms --\n" ); //magmablas_dznrm2_check(m-rk-1, n-*kb, A(rk+1,rk+1), lda, // &dvn1[rk+1], &dvn2[rk+1], dlsticcs); // There is a bug when we get to recompute hipLaunchKernelGGL(( magmablas_dznrm2_check_kernel), dim3(n-*kb), dim3(BLOCK_SIZE) , 0, 0, m-rk-1, dA(rk+1,rk+1), ldda, &dvn1[rk+1], &dvn2[rk+1], dlsticc, dlsticcs); } magma_free(dlsticcs); return MAGMA_SUCCESS; } /* magma_zlaqps */
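// Editorial note: a host-side, real-valued sketch (double precision, hypothetical helper;
// the complex branch of the kernel follows the same pattern with MAGMA_Z_* arithmetic) of
// the Householder generation done by magma_zscale_kernel above, in the spirit of LAPACK's
// larfg: from alpha = x[0] and the full vector norm it derives beta, tau, and the scale
// applied to the trailing entries before x[0] is overwritten with 1.
#include <cmath>
void larfg_sketch(double alpha, double norm_x /* ||x||, including alpha */,
                  double &tau, double &beta, double &scale) {
  if (norm_x == 0.0) { tau = 0.0; beta = alpha; scale = 0.0; return; }
  beta  = -std::copysign(norm_x, alpha);  // same sign choice as the kernel
  tau   = (beta - alpha) / beta;          // *dtau in the kernel
  scale = 1.0 / (alpha - beta);           // trailing entries x[j] are multiplied by this
  // beta is stashed in dAkk and written back into A(rk,k) once the panel step is done.
}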
1f1238ebfabae31b74a94fe933f36a0d51c9b0b2.cu
/* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @precisions normal z -> s d c */ #include "common_magma.h" #include "commonblas_z.h" #include "magma_templates.h" #define PRECISION_z #define BLOCK_SIZE 512 /* --------------------------------------------------------------------------- */ #define BLOCK_SIZE1 192 __global__ void magma_zswap_gemv_kernel( int m, int rk, int n, const magmaDoubleComplex * __restrict__ V, int ldv, const magmaDoubleComplex * __restrict__ x, int ldx, magmaDoubleComplex *c, magmaDoubleComplex *b) { const int i = threadIdx.x; const int j = i + BLOCK_SIZE1 * blockIdx.x; magmaDoubleComplex lsum, tmp; V += j; lsum = MAGMA_Z_ZERO; if (j < m) { tmp = b[j]; b[j] = c[j]; if (j >= rk) { for (int k=0; k < n; k++) { lsum += MAGMA_Z_MUL( V[k*ldv], MAGMA_Z_CNJG(x[k*ldx])); } } c[j] = tmp - lsum; } } __global__ void magma_zgemv_kernel( int m, int n, const magmaDoubleComplex * __restrict__ V, int ldv, const magmaDoubleComplex * __restrict__ x, magmaDoubleComplex *b, magmaDoubleComplex *c) { const int i = threadIdx.x; const int j = i + BLOCK_SIZE1 * blockIdx.x; magmaDoubleComplex lsum; V += j; lsum = MAGMA_Z_ZERO; if (j < m) { for (int k=0; k < n; k++) { lsum += MAGMA_Z_MUL( V[k*ldv], x[k]); } c[j] = b[j] - lsum; } } __global__ void magma_zscale_kernel( int n, magmaDoubleComplex* dx0, magmaDoubleComplex *dtau, double *dxnorm, magmaDoubleComplex* dAkk) { const int i = threadIdx.x; magmaDoubleComplex tmp; __shared__ magmaDoubleComplex scale; /* === Compute the norm of dx0 === */ magmaDoubleComplex *dx = dx0; __shared__ double sum[ BLOCK_SIZE ]; double re, lsum; lsum = 0; for (int k = i; k < n; k += BLOCK_SIZE) { #if (defined(PRECISION_s) || defined(PRECISION_d)) re = dx[k]; lsum += re*re; #else re = MAGMA_Z_REAL( dx[k] ); double im = MAGMA_Z_IMAG( dx[k] ); lsum += re*re + im*im; #endif } sum[i] = lsum; magma_sum_reduce< BLOCK_SIZE >( i, sum ); /* === Compute the scaling factor === */ if (i == 0) { double beta = sqrt(sum[0]); if ( beta == 0 ) { *dtau = MAGMA_Z_ZERO; } else { tmp = dx0[0]; #if (defined(PRECISION_s) || defined(PRECISION_d)) beta = -copysign( beta, tmp ); // todo: deal with badly scaled vectors (see lapack's larfg) *dtau = (beta - tmp) / beta; *dAkk = beta; scale = 1. 
/ (tmp - beta); #else double alphar = MAGMA_Z_REAL(tmp), alphai = MAGMA_Z_IMAG(tmp); beta = -copysign( beta, alphar ); // todo: deal with badly scaled vectors (see lapack's larfg) *dtau = MAGMA_Z_MAKE((beta - alphar)/beta, -alphai/beta); *dAkk = MAGMA_Z_MAKE(beta, 0.); tmp = MAGMA_Z_MAKE( alphar - beta, alphai); scale = MAGMA_Z_DIV( MAGMA_Z_ONE, tmp); #endif } } __syncthreads(); /* === Scale the vector === */ for (int j=i; j < n; j += BLOCK_SIZE) dx0[j] = MAGMA_Z_MUL(dx0[j], scale); /* === Make temporary the first element to 1; value is stored in dAkk === */ if (i == 0) dx0[0] = MAGMA_Z_ONE; } #define BLOCK_SIZE2 192 #if (defined(PRECISION_z) || defined(PRECISION_d)) #define TOL 1.e-8 #else #define TOL 1.e-4 #endif __global__ void magma_zgemv_kernel_adjust( int n, int k, magmaDoubleComplex * A, int lda, magmaDoubleComplex *B, int ldb, magmaDoubleComplex *C, double *xnorm, double *xnorm2, magmaDoubleComplex *Akk, int *lsticc, int *lsticcs) { const int i = threadIdx.x; const int j = i + BLOCK_SIZE2 * blockIdx.x; magmaDoubleComplex sum; double temp, oldnorm; if (j < n) { B += j; sum = MAGMA_Z_CNJG( B[(k-1)*ldb] ); // sum = MAGMA_Z_ZERO; for (int m=0; m < k-1; m++) { sum += MAGMA_Z_MUL( MAGMA_Z_CNJG( B[m*ldb] ), A[m*lda] ); } C[j*lda] -= sum; oldnorm = xnorm[j]; temp = MAGMA_Z_ABS( C[j*lda] ) / oldnorm; temp = (1.0 + temp) * (1.0 - temp); temp = oldnorm * sqrt(temp); xnorm[j] = temp; // Below 'j' was 'i'; was that a bug? double temp2 = xnorm[j] / xnorm2[j]; temp2 = temp*(temp2 * temp2); if (temp2 <= TOL) { *lsticc = 1; lsticcs[j] = 1; } } if (j == 0) A[(k-1)*lda] = *Akk; /* __syncthreads(); // Check if the norm has to be recomputed if (blockIdx.x == 0) { //if (2.*temp < oldnorm) { //printf("recompute norm\n"); magmaDoubleComplex *dx = C+blockIdx.x*lda+1; __shared__ double sum[ BLOCK_SIZE2 ]; double re, lsum; // get norm of dx lsum = 0; for (int k = i; k < n1; k += BLOCK_SIZE2) { #if (defined(PRECISION_s) || defined(PRECISION_d)) re = dx[k]; lsum += re*re; #else re = MAGMA_Z_REAL( dx[k] ); double im = MAGMA_Z_IMAG( dx[k] ); lsum += re*re + im*im; #endif } sum[i] = lsum; magma_sum_reduce< BLOCK_SIZE2 >( i, sum ); if (i == 0) { printf("adjusted = %f recomputed = %f\n", xnorm[blockIdx.x], sqrt(sum[0])); xnorm[blockIdx.x] = sqrt(sum[0]); } //} } */ } __global__ void magmablas_dznrm2_check_kernel( int m, magmaDoubleComplex *da, int ldda, double *dxnorm, double *dxnorm2, int *dlsticc, int *dlsticcs) { const int i = threadIdx.x; magmaDoubleComplex *dx = da + blockIdx.x * ldda; __shared__ double sum[ BLOCK_SIZE ]; double re, lsum; if (blockIdx.x == 0 && i == 0) *dlsticc = 0; // get norm of dx only if lsticc[blockIdx] != 0 if ( dlsticcs[blockIdx.x] == 0 ) return; else dlsticcs[blockIdx.x] = 0; lsum = 0; for (int j = i; j < m; j += BLOCK_SIZE) { #if (defined(PRECISION_s) || defined(PRECISION_d)) re = dx[j]; lsum += re*re; #else re = MAGMA_Z_REAL( dx[j] ); double im = MAGMA_Z_IMAG( dx[j] ); lsum += re*re + im*im; #endif } sum[i] = lsum; magma_sum_reduce< BLOCK_SIZE >( i, sum ); if (i == 0) { dxnorm[blockIdx.x] = sqrt(sum[0]); dxnorm2[blockIdx.x] = sqrt(sum[0]); } } /* --------------------------------------------------------------------------- */ /** Purpose ------- ZLAQPS computes a step of QR factorization with column pivoting of a complex M-by-N matrix A by using Blas-3. It tries to factorize NB columns from A starting from the row OFFSET+1, and updates all of the matrix with Blas-3 xGEMM. In some cases, due to catastrophic cancellations, it cannot factorize NB columns. 
Hence, the actual number of factorized columns is returned in KB. Block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized. Arguments --------- @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0 @param[in] offset INTEGER The number of rows of A that have been factorized in previous steps. @param[in] nb INTEGER The number of columns to factorize. @param[out] kb INTEGER The number of columns actually factorized. @param[in,out] dA COMPLEX_16 array, dimension (LDDA,N) on the GPU On entry, the M-by-N matrix A. On exit, block A(OFFSET+1:M,1:KB) is the triangular factor obtained and block A(1:OFFSET,1:N) has been accordingly pivoted, but no factorized. The rest of the matrix, block A(OFFSET+1:M,KB+1:N) has been updated. @param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,M). @param[in,out] jpvt INTEGER array, dimension (N) JPVT(I) = K <==> Column K of the full matrix A has been permuted into position I in AP. @param[out] dtau COMPLEX_16 array, dimension (KB) The scalar factors of the elementary reflectors. @param[in,out] dvn1 DOUBLE PRECISION array, dimension (N) The vector with the partial column norms. @param[in,out] dvn2 DOUBLE PRECISION array, dimension (N) The vector with the exact column norms. @param[in,out] dauxv COMPLEX_16 array, dimension (NB) Auxiliar vector. @param[in,out] dF COMPLEX_16 array, dimension (LDDF,NB) Matrix F**H = L * Y**H * A. @param[in] lddf INTEGER The leading dimension of the array F. LDDF >= max(1,N). @ingroup magma_zgeqp3_aux ********************************************************************/ extern "C" magma_int_t magma_zlaqps3_gpu( magma_int_t m, magma_int_t n, magma_int_t offset, magma_int_t nb, magma_int_t *kb, magmaDoubleComplex_ptr dA, magma_int_t ldda, magma_int_t *jpvt, magmaDoubleComplex_ptr dtau, magmaDouble_ptr dvn1, magmaDouble_ptr dvn2, magmaDoubleComplex_ptr dauxv, magmaDoubleComplex_ptr dF, magma_int_t lddf) { #define dA(i_, j_) (dA + (i_) + (j_)*(ldda)) #define dF(i_, j_) (dF + (i_) + (j_)*(lddf)) magmaDoubleComplex c_zero = MAGMA_Z_MAKE( 0.,0.); magmaDoubleComplex c_one = MAGMA_Z_MAKE( 1.,0.); magmaDoubleComplex c_neg_one = MAGMA_Z_MAKE(-1.,0.); magma_int_t ione = 1; magma_int_t i__1, i__2; magma_int_t k, rk; magmaDoubleComplex tauk; magma_int_t pvt, itemp; magmaDoubleComplex_ptr dAkk = dauxv; dauxv += 1; int lsticc, *dlsticc, *dlsticcs; magma_malloc( (void**) &dlsticcs, (n+1)*sizeof(int) ); cudaMemset( dlsticcs, 0, (n+1)*sizeof(int) ); dlsticc = dlsticcs + n; // double tol3z = magma_dsqrt( lapackf77_dlamch("Epsilon")); lsticc = 0; k = 0; while( k < nb && lsticc == 0 ) { rk = offset + k; /* Determine ith pivot column and swap if necessary */ pvt = k - 1 + magma_idamax( n-k, &dvn1[k], ione ); if (pvt != k) { magmablas_zswap( k, dF(pvt,0), lddf, dF(k,0), lddf); itemp = jpvt[pvt]; jpvt[pvt] = jpvt[k]; jpvt[k] = itemp; magma_dswap( 2, &dvn1[pvt], n+offset, &dvn1[k], n+offset ); } /* Apply previous Householder reflectors to column K: A(RK:M,K) := A(RK:M,K) - A(RK:M,1:K-1)*F(K,1:K-1)' */ magma_zswap_gemv_kernel<<< magma_ceildiv( m, BLOCK_SIZE1 ), BLOCK_SIZE1, 0, magma_stream >>> ( m, rk, k, dA(0, 0), ldda, dF(k, 0), lddf, dA(0, k), dA(0,pvt)); /* Generate elementary reflector H(k). 
*/ magma_zscale_kernel<<< 1, BLOCK_SIZE, 0, magma_stream >>> (m-rk, dA(rk, k), &dtau[k], &dvn1[k], dAkk); // printf("m-rk = %d\n", m-rk); /* Compute Kth column of F: Compute F(K+1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) on the GPU */ if (k < n-1) { magma_zgetvector( 1, &dtau[k], 1, &tauk, 1 ); magma_zgemv( MagmaConjTrans, m-rk, n, tauk, dA( rk, 0 ), ldda, dA( rk, k ), 1, c_zero, dauxv, 1 ); if (k == 0) magmablas_zlacpy(MagmaUpperLower, n-k-1, 1, dauxv+k+1, n-k-1, dF( k+1, k ), n-k-1); } /* Incremental updating of F: F(1:N,K) := F(1:N,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K). F(1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K) := tau(K)(A(RK:M,K+1:N)' - F(1:N,1:K-1)*A(RK:M,1:K-1)') A(RK:M,K) so, F is (updated A)*V */ if (k > 0) { /* I think we only need stricly lower-triangular part */ magma_zgemv_kernel<<< magma_ceildiv( n-k-1, BLOCK_SIZE1 ), BLOCK_SIZE1, 0, magma_stream >>> (n-k-1, k, dF(k+1,0), lddf, dauxv, dauxv+k+1, dF(k+1,k)); } /* Update the current row of A: A(RK,K+1:N) := A(RK,K+1:N) - A(RK,1:K)*F(K+1:N,1:K)'. */ if (k < n-1) { i__1 = n - k - 1; i__2 = k + 1; /* left-looking update of rows, * * since F=A**H v with original A, so no right-looking */ magma_zgemv_kernel_adjust<<< magma_ceildiv( n-k-1, BLOCK_SIZE2 ), BLOCK_SIZE2, 0, magma_stream >>> ( n-k-1, k+1, dA(rk, 0 ), ldda, dF(k+1,0 ), lddf, dA(rk, k+1), &dvn1[k+1], &dvn2[k+1], dAkk, dlsticc, dlsticcs ); magma_getmatrix(1,1, sizeof(int), dlsticc, 1, &lsticc, 1); // TTT: force not to recompute; has to be finally commented if ( nb < 3 ) lsticc = 0; // printf("k=%d n-k = %d\n", k, n-k); // forcing recompute works! - forcing it requires changing dlsticcs as well, e.g., // can be done in the kernel directly (magmablas_dznrm2_check_kernel) // if (k == 16) lsticc = 1; } /* Update partial column norms. */ /* if (rk < min(m, n+offset)-1) { magmablas_dznrm2_row_check_adjust( n-k-1, tol3z, &dvn1[k+1], &dvn2[k+1], dA(rk,k+1), ldda, lsticcs); } magma_dgetvector( 1, &lsticcs[0], 1, &lsticc, 1 ); */ if (k >= n-1) magmablas_zlacpy(MagmaUpperLower, 1, 1, dAkk, 1, dA(rk, k), 1); ++k; } // leave k as the last column done --k; *kb = k + 1; rk = offset + *kb - 1; //printf("actually factored = %d",*kb); /* Apply the block reflector to the rest of the matrix: A(OFFSET+KB+1:M,KB+1:N) := A(OFFSET+KB+1:M,KB+1:N) - A(OFFSET+KB+1:M,1:KB)*F(KB+1:N,1:KB)' */ if (*kb < min(n, m - offset)-1) { i__1 = m - rk - 1; i__2 = n - *kb; magma_zgemm( MagmaNoTrans, MagmaConjTrans, i__1, i__2, *kb, c_neg_one, dA(rk+1, 0 ), ldda, dF(*kb, 0 ), lddf, c_one, dA(rk+1, *kb), ldda ); } /* Recomputation of difficult columns. */ if ( lsticc > 0 ) { // printf( " -- recompute dnorms --\n" ); //magmablas_dznrm2_check(m-rk-1, n-*kb, A(rk+1,rk+1), lda, // &dvn1[rk+1], &dvn2[rk+1], dlsticcs); // There is a bug when we get to recompute magmablas_dznrm2_check_kernel<<< n-*kb, BLOCK_SIZE >>> ( m-rk-1, dA(rk+1,rk+1), ldda, &dvn1[rk+1], &dvn2[rk+1], dlsticc, dlsticcs); } magma_free(dlsticcs); return MAGMA_SUCCESS; } /* magma_zlaqps */
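// Editorial note: a host-side sketch (double precision, hypothetical helper) of the
// partial-column-norm downdate performed by magma_zgemv_kernel_adjust above. After the
// pivot contribution is removed from column j, its stored norm vn1 is shrunk in place,
// and the column is flagged for exact recomputation (lsticc) when the downdated value,
// measured against the last exactly computed norm vn2, falls below TOL (1e-8 in double).
#include <cmath>
bool downdate_column_norm(double pivot_entry_abs,  // |C[j*lda]| after the row update
                          double &vn1,             // partial (downdated) column norm
                          double vn2,              // norm at the last exact recomputation
                          double tol) {
  double t = pivot_entry_abs / vn1;
  t = (1.0 + t) * (1.0 - t);                       // 1 - t^2, written to limit cancellation
  if (t < 0.0) t = 0.0;                            // clamp added in this sketch only
  vn1 = vn1 * std::sqrt(t);                        // new partial norm, as in the kernel
  double ratio = vn1 / vn2;
  return vn1 * ratio * ratio <= tol;               // same criterion the kernel tests against TOL
}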
f8b8e5b6a576533cf0545cedbe145f7eaa869197.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/optimizers/lars_momentum_op.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/amp/fp16_type_traits.h" #include "paddle/phi/kernels/funcs/aligned_vector.h" #include "paddle/phi/kernels/funcs/math_cuda_utils.h" #if TORCH_HIP_VERSION >= 11000 #include <hip/hip_cooperative_groups.h> #endif #ifdef __HIPCC__ #define LARS_BLOCK_SIZE 256 #else #define LARS_BLOCK_SIZE 512 #endif #define LARS_MAX_MERGED_OPS 60 namespace paddle { namespace operators { template <typename T> using MultiPrecisionType = typename details::MPTypeTrait<T>::Type; __device__ __forceinline__ float Sqrt(float x) { return sqrtf(x); } __device__ __forceinline__ double Sqrt(double x) { return sqrt(x); } __device__ __forceinline__ float Fma(float x, float y, float z) { return fmaf(x, y, z); } __device__ __forceinline__ double Fma(double x, double y, double z) { return fma(x, y, z); } template <typename T> class LarsThreadConfig { public: int grid_for_norm; int grid_for_lars; #if TORCH_HIP_VERSION >= 11000 private: int grid_stride; public: explicit LarsThreadConfig(int64_t numel, int sm_num, int num_blocks_per_sm) { int grid = (numel + LARS_BLOCK_SIZE - 1) / LARS_BLOCK_SIZE; grid_for_lars = ::min(::min(sm_num * num_blocks_per_sm, grid), LARS_BLOCK_SIZE); grid_stride = LARS_BLOCK_SIZE * grid_for_lars; } int GetRepeatTimes(int64_t numel) { return (numel + grid_stride - 1) / grid_stride - 1; } #else int repeat_times; explicit LarsThreadConfig(const int64_t numel) { int grid = (numel + LARS_BLOCK_SIZE - 1) / LARS_BLOCK_SIZE; grid_for_norm = ::min(grid, LARS_BLOCK_SIZE); const int grid_stride = grid_for_norm * LARS_BLOCK_SIZE; repeat_times = (numel + grid_stride - 1) / grid_stride - 1; // Determine to read 4 fp16 or float data once, but 2 double data once. grid_for_lars = std::is_same<double, T>::value ? 
(numel + (LARS_BLOCK_SIZE << 1) - 1) / (LARS_BLOCK_SIZE << 1) : (numel + (LARS_BLOCK_SIZE << 2) - 1) / (LARS_BLOCK_SIZE << 2); } #endif }; template <typename T, typename MT, int VecSize, bool IsAmp = false> __device__ inline void VectorizeLarsUpdate(const T* __restrict__ grad, const MT* param, const MT* velocity, T* param_out, MT* velocity_out, const MT mu, MT local_lr, const MT lars_weight_decay, const MT rescale_grad, const int tid, const int grid_stride, const int numel, MT* master_param_out = nullptr) { using VecType = phi::AlignedVector<T, VecSize>; using VecMType = phi::AlignedVector<MT, VecSize>; int main = numel >> (VecSize >> 1); int tail_offset = main * VecSize; const VecType* grad_vec = reinterpret_cast<const VecType*>(grad); const VecMType* param_vec = reinterpret_cast<const VecMType*>(param); const VecMType* velocity_vec = reinterpret_cast<const VecMType*>(velocity); VecType* param_out_vec = reinterpret_cast<VecType*>(param_out); VecMType* velocity_out_vec = reinterpret_cast<VecMType*>(velocity_out); VecMType* master_param_out_vec; if (IsAmp) { master_param_out_vec = reinterpret_cast<VecMType*>(master_param_out); } for (int i = tid; i < main; i += grid_stride) { VecType param_out_tmp; VecMType velocity_tmp, param_tmp; VecType grad_data = grad_vec[i]; VecMType param_data = param_vec[i]; VecMType velocity_data = velocity_vec[i]; #pragma unroll for (int j = 0; j < VecSize; ++j) { MT grad_val = static_cast<MT>(grad_data[j]) * rescale_grad; velocity_tmp[j] = Fma(velocity_data[j], mu, local_lr * Fma(lars_weight_decay, param_data[j], grad_val)); param_tmp[j] = param_data[j] - velocity_tmp[j]; param_out_tmp[j] = static_cast<T>(param_tmp[j]); } param_out_vec[i] = param_out_tmp; velocity_out_vec[i] = velocity_tmp; if (IsAmp) { master_param_out_vec[i] = param_tmp; } } for (int i = tid + tail_offset; i < numel; i += grid_stride) { MT grad_val = static_cast<MT>(grad[i]) * rescale_grad; MT param_val = param[i]; MT velocity_tmp = Fma(velocity[i], mu, local_lr * Fma(lars_weight_decay, param_val, grad_val)); MT param_tmp = param_val - velocity_tmp; param_out[i] = static_cast<T>(param_tmp); velocity_out[i] = velocity_tmp; if (IsAmp) { master_param_out[i] = param_tmp; } } } #if TORCH_HIP_VERSION >= 11000 /* Once TORCH_HIP_VERSION is beyond 11, cooperative_groups can be involved in without --rdc=true compile flag, then L2_norm kernel can be set with __device__ and cooperative_groups::grid_group also can be involved. Otherwise, adding this flag may affect much, L2_norm kernel shall be set with __global__.*/ // TODO(limingshu): declaration of cooperative_groups wapper is invalid in host. 
template <typename T, typename MT> __forceinline__ __device__ void L2NormKernel( const cooperative_groups::grid_group* cg, #else template <typename T, typename MT> __global__ void L2NormKernel( #endif const T* p_data, const T* __restrict__ g_data, MT* __restrict__ p_buffer, MT* __restrict__ g_buffer, const int64_t numel, const int repeat_times, const MT rescale_grad, const int thresh = 0, MT* __restrict__ p_n = nullptr, MT* __restrict__ g_n = nullptr) { __shared__ MT s_buffer[2]; int tid = threadIdx.x + blockDim.x * blockIdx.x; int grid_stride = LARS_BLOCK_SIZE * gridDim.x; MT p_tmp = static_cast<MT>(0); MT g_tmp = static_cast<MT>(0); while (tid < numel) { MT tmp0 = static_cast<MT>(p_data[tid]); MT tmp1 = static_cast<MT>(g_data[tid]); p_tmp += (tmp0 * tmp0); g_tmp += (tmp1 * tmp1); tid += grid_stride; } p_tmp = phi::funcs::BlockReduceSum<MT>(p_tmp, FINAL_MASK); g_tmp = phi::funcs::BlockReduceSum<MT>(g_tmp, FINAL_MASK); if (threadIdx.x == 0) { p_buffer[blockIdx.x] = p_tmp; g_buffer[blockIdx.x] = g_tmp; } #if TORCH_HIP_VERSION >= 11000 cg->sync(); // Grid sync for writring partial result to gloabl memory MT p_part_sum = threadIdx.x < gridDim.x ? p_buffer[threadIdx.x] : 0; MT g_part_sum = threadIdx.x < gridDim.x ? g_buffer[threadIdx.x] : 0; MT tmp0 = phi::funcs::BlockReduceSum<MT>(p_part_sum, FINAL_MASK); MT tmp1 = phi::funcs::BlockReduceSum<MT>(g_part_sum, FINAL_MASK); if (threadIdx.x == 0) { s_buffer[0] = tmp0; s_buffer[1] = tmp1; } __syncthreads(); *p_n = Sqrt(s_buffer[0]); *g_n = rescale_grad * Sqrt(s_buffer[1]); #endif } template <typename T, typename MT> __forceinline__ __device__ void MomentumUpdate( const T* param, const T* __restrict__ grad, const MT* velocity, T* param_out, MT* velocity_out, const MT* master_param, MT* master_param_out, const MT* __restrict__ learning_rate, const MT mu, const MT lars_weight_decay, const MT lars_coeff, const MT epsilon, const MT rescale_grad, const MT param_norm, const MT grad_norm, const int tid, const int grid_stride, const int64_t numel, const bool is_amp) { const MT lr = learning_rate[0]; MT local_lr = lr; if (param_norm > static_cast<MT>(0) && grad_norm > static_cast<MT>(0)) { local_lr = lr * lars_coeff * param_norm / (fma(lars_weight_decay, param_norm, grad_norm) + epsilon); } if (is_amp) { VectorizeLarsUpdate<T, MT, /*VecSize=*/4, /*IsAmp=*/true>(grad, master_param, velocity, param_out, velocity_out, mu, local_lr, lars_weight_decay, rescale_grad, tid, grid_stride, numel, master_param_out); } else { if (std::is_same<T, float>::value || std::is_same<T, paddle::platform::float16>::value) { /* TODO(limingshu): pointer cast may damage memory accessing for fp16 */ VectorizeLarsUpdate<T, MT, /*VecSize=*/4, /*IsAmp=*/false>( grad, reinterpret_cast<const MT*>(param), velocity, param_out, velocity_out, mu, local_lr, lars_weight_decay, rescale_grad, tid, grid_stride, numel); } else { VectorizeLarsUpdate<T, MT, /*VecSize=*/2, /*IsAmp=*/false>( grad, reinterpret_cast<const MT*>(param), velocity, param_out, velocity_out, mu, local_lr, lars_weight_decay, rescale_grad, tid, grid_stride, numel); } } } #if TORCH_HIP_VERSION >= 11000 template <typename T, typename MT> struct LarsParamWarpper { int64_t numel_arr[LARS_MAX_MERGED_OPS]; int repeat_arr[LARS_MAX_MERGED_OPS]; const T* __restrict__ g_arr[LARS_MAX_MERGED_OPS]; const MT* __restrict__ lr_arr[LARS_MAX_MERGED_OPS]; T* __restrict__ p_out_arr[LARS_MAX_MERGED_OPS]; MT* __restrict__ v_out_arr[LARS_MAX_MERGED_OPS]; MT* __restrict__ master_p_out_arr[LARS_MAX_MERGED_OPS]; MT weight_decay_arr[LARS_MAX_MERGED_OPS]; 
}; template <typename T, typename MT> __global__ void MergedMomentumLarsKernel(LarsParamWarpper<T, MT> lars_warpper, MT* __restrict__ p_buffer, MT* __restrict__ g_buffer, const int op_num, const MT mu, const MT lars_coeff, const MT epsilon, const MT rescale_grad, const bool is_amp) { int grid_stride = gridDim.x * LARS_BLOCK_SIZE; int tid = threadIdx.x + blockIdx.x * blockDim.x; const cooperative_groups::grid_group cg = cooperative_groups::this_grid(); for (int i = 0; i < op_num; ++i) { int numel = lars_warpper.numel_arr[i]; MT param_norm = static_cast<MT>(0); MT grad_norm = static_cast<MT>(0); L2NormKernel<T, MT>(&cg, lars_warpper.p_out_arr[i], lars_warpper.g_arr[i], p_buffer, g_buffer, numel, lars_warpper.repeat_arr[i], rescale_grad, 0, &param_norm, &grad_norm); MomentumUpdate<T, MT>(lars_warpper.p_out_arr[i], lars_warpper.g_arr[i], lars_warpper.v_out_arr[i], lars_warpper.p_out_arr[i], lars_warpper.v_out_arr[i], lars_warpper.master_p_out_arr[i], lars_warpper.master_p_out_arr[i], lars_warpper.lr_arr[i], mu, lars_warpper.weight_decay_arr[i], lars_coeff, epsilon, rescale_grad, param_norm, grad_norm, tid, grid_stride, numel, is_amp); } } #endif template <typename T, typename MT> __global__ void MomentumLarsKernel(const T* param, const T* __restrict__ grad, const MT* velocity, T* param_out, MT* velocity_out, const MT* master_param, MT* master_param_out, const MT* __restrict__ learning_rate, MT* __restrict__ p_buffer, MT* __restrict__ g_buffer, const MT mu, const MT lars_coeff, const MT lars_weight_decay, const MT epsilon, const MT rescale_grad, const int repeat_times, const int thresh, const int64_t numel, const bool is_amp) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int grid_stride = gridDim.x * LARS_BLOCK_SIZE; #if TORCH_HIP_VERSION >= 11000 const cooperative_groups::grid_group cg = cooperative_groups::this_grid(); MT param_norm = static_cast<MT>(0); MT grad_norm = static_cast<MT>(0); L2NormKernel<T, MT>(&cg, param, grad, p_buffer, g_buffer, numel, repeat_times, rescale_grad, gridDim.x, &param_norm, &grad_norm); #else const MT rescale_grad_pow = rescale_grad * rescale_grad; MT param_part_norm = threadIdx.x < thresh ? p_buffer[threadIdx.x] : 0; MT grad_part_norm = threadIdx.x < thresh ? 
g_buffer[threadIdx.x] : 0; __syncthreads(); MT param_norm = Sqrt(phi::funcs::BlockReduceSum<MT>(param_part_norm, FINAL_MASK)); MT grad_norm = Sqrt(rescale_grad_pow * phi::funcs::BlockReduceSum<MT>( grad_part_norm, FINAL_MASK)); #endif MomentumUpdate<T, MT>(param, grad, velocity, param_out, velocity_out, master_param, master_param_out, learning_rate, mu, lars_weight_decay, lars_coeff, epsilon, rescale_grad, param_norm, grad_norm, tid, grid_stride, numel, is_amp); } template <typename T, typename MT> inline void SeparatedLarsMomentumOpCUDAKernel(const phi::GPUContext& cuda_ctx, const T* param_data, T* param_out_data, const MT* velocity_data, MT* velocity_out_data, const T* grad_data, const MT* lr, MT* p_buffer, MT* g_buffer, const MT mu, const MT lars_coeff, const MT weight_decay, const MT epsilon, const MT rescale_grad, const int64_t numel, const MT* master_param_data, MT* master_out_data, const bool is_amp) { LarsThreadConfig<T> lars_thread_config(numel); hipLaunchKernelGGL(( L2NormKernel<T, MT>), dim3(lars_thread_config.grid_for_norm), dim3(LARS_BLOCK_SIZE), 0, cuda_ctx.stream(), param_data, grad_data, p_buffer, g_buffer, numel, lars_thread_config.repeat_times, rescale_grad); hipLaunchKernelGGL(( MomentumLarsKernel<T, MT>) , dim3(lars_thread_config.grid_for_lars), dim3(LARS_BLOCK_SIZE), 0, cuda_ctx.stream(), param_data, grad_data, velocity_data, param_out_data, velocity_out_data, master_param_data, master_out_data, lr, p_buffer, g_buffer, mu, lars_coeff, weight_decay, epsilon, rescale_grad, 0, lars_thread_config.grid_for_norm, numel, is_amp); } template <typename T, typename DeviceContext> class LarsMomentumOpCUDAKernel : public framework::OpKernel<T> { using MT = MultiPrecisionType<T>; public: void Compute(const framework::ExecutionContext& ctx) const override { int num_blocks_per_sm = 0; bool multi_precision = ctx.Attr<bool>("multi_precision"); auto& cuda_ctx = ctx.template device_context<phi::GPUContext>(); int sm_num = cuda_ctx.GetSMCount(); phi::DenseTensor tmp_buffer_t = ctx.AllocateTmpTensor<MT, phi::GPUContext>( {LARS_BLOCK_SIZE << 1}, cuda_ctx); auto* p_buffer = tmp_buffer_t.mutable_data<MT>(ctx.GetPlace()); auto* g_buffer = p_buffer + LARS_BLOCK_SIZE; MT mu = static_cast<MT>(ctx.Attr<float>("mu")); MT lars_coeff = static_cast<MT>(ctx.Attr<float>("lars_coeff")); MT epsilon = static_cast<MT>(ctx.Attr<float>("epsilon")); MT rescale_grad = static_cast<MT>(ctx.Attr<float>("rescale_grad")); auto weight_decay_arr = ctx.Attr<std::vector<float>>("lars_weight_decay"); auto grad = ctx.MultiInput<phi::DenseTensor>("Grad"); auto param = ctx.MultiInput<phi::DenseTensor>("Param"); auto velocity = ctx.MultiInput<phi::DenseTensor>("Velocity"); auto param_out = ctx.MultiOutput<phi::DenseTensor>("ParamOut"); auto velocity_out = ctx.MultiOutput<phi::DenseTensor>("VelocityOut"); auto learning_rate = ctx.MultiInput<phi::DenseTensor>("LearningRate"); auto master_param = ctx.MultiInput<phi::DenseTensor>("MasterParam"); auto master_param_out = ctx.MultiOutput<phi::DenseTensor>("MasterParamOut"); int op_num = grad.size(); #if TORCH_HIP_VERSION >= 11000 if (op_num > 1) { LarsParamWarpper<T, MT> lars_warpper; PADDLE_ENFORCE_LT( op_num, LARS_MAX_MERGED_OPS, platform::errors::InvalidArgument( "The maximum number of merged-ops supported is (%d), but" "lars op required for trainning this model is (%d)\n", LARS_MAX_MERGED_OPS, op_num)); /* Implementation of lars optimizer consists of following two steps: 1. Figure out the L2 norm statistic result of grad data and param data. 2. 
Update param and velocity with usage of L2 norm statistic result. Step1 and step2 can be merged with api provided by nvida hipLaunchCooperativeKernel: - The thread quantity shall less than pyhsical SM limited threads - Launche as thread-block can synchronizlly execute. */ hipOccupancyMaxActiveBlocksPerMultiprocessor( &num_blocks_per_sm, MergedMomentumLarsKernel<T, MT>, LARS_BLOCK_SIZE, sizeof(MT) << 1); size_t total_numel = 0; for (int i = 0; i < op_num; ++i) { size_t temp_numel = param[i]->numel(); total_numel += temp_numel; lars_warpper.numel_arr[i] = temp_numel; lars_warpper.g_arr[i] = grad[i]->data<T>(); lars_warpper.lr_arr[i] = learning_rate[i]->data<MT>(); lars_warpper.p_out_arr[i] = param_out[i]->mutable_data<T>(ctx.GetPlace()); lars_warpper.v_out_arr[i] = velocity_out[i]->mutable_data<MT>(ctx.GetPlace()); lars_warpper.weight_decay_arr[i] = static_cast<MT>(weight_decay_arr[i]); PADDLE_ENFORCE_EQ( param[i]->data<T>(), lars_warpper.p_out_arr[i], platform::errors::InvalidArgument( "Input(Param) and Output(ParamOut) must be the same Tensors.")); PADDLE_ENFORCE_EQ(velocity[i]->data<MT>(), lars_warpper.v_out_arr[i], platform::errors::InvalidArgument( "Input(Velocity) and Output(VelocityOut) must be " "the same Tensors.")); } int64_t avg_numel = total_numel / op_num; LarsThreadConfig<float> lars_thread_config( avg_numel, sm_num, num_blocks_per_sm); for (int i = 0; i < op_num; ++i) { lars_warpper.repeat_arr[i] = lars_thread_config.GetRepeatTimes(lars_warpper.numel_arr[i]); } if (multi_precision) { for (int i = 0; i < op_num; ++i) { lars_warpper.master_p_out_arr[i] = master_param_out[i]->mutable_data<MT>(ctx.GetPlace()); PADDLE_ENFORCE_EQ(master_param[i]->data<MT>(), lars_warpper.master_p_out_arr[i], platform::errors::InvalidArgument( "Input(MasterParam) and Output(MasterParamOut) " "must be the same Tensors.")); } } void* cuda_param[] = {reinterpret_cast<void*>(&lars_warpper), reinterpret_cast<void*>(&p_buffer), reinterpret_cast<void*>(&g_buffer), reinterpret_cast<void*>(&op_num), reinterpret_cast<void*>(&mu), reinterpret_cast<void*>(&lars_coeff), reinterpret_cast<void*>(&epsilon), reinterpret_cast<void*>(&rescale_grad), reinterpret_cast<void*>(&multi_precision)}; // Lanuch all sm theads, and thead of each block synchronizedly cooperate. hipLaunchCooperativeKernel( reinterpret_cast<void*>(MergedMomentumLarsKernel<T, MT>), lars_thread_config.grid_for_lars, LARS_BLOCK_SIZE, cuda_param, 0, cuda_ctx.stream()); } else { auto* param_data = param[0]->data<T>(); auto* grad_data = grad[0]->data<T>(); auto* velocity_data = velocity[0]->data<MT>(); auto* lr = learning_rate[0]->data<MT>(); auto* param_out_data = param_out[0]->mutable_data<T>(ctx.GetPlace()); auto* velocity_out_data = velocity_out[0]->mutable_data<MT>(ctx.GetPlace()); const MT* master_param_data = multi_precision ? master_param[0]->data<MT>() : nullptr; MT* master_param_out_data = multi_precision ? master_param_out[0]->mutable_data<MT>(ctx.GetPlace()) : nullptr; int64_t numel = param[0]->numel(); MT lars_weight_decay = weight_decay_arr[0]; // Figure out how many blocks can be active in each sm. 
hipOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks_per_sm, MomentumLarsKernel<T, MT>, LARS_BLOCK_SIZE, sizeof(MT) << 1); LarsThreadConfig<float> lars_thread_config( numel, sm_num, num_blocks_per_sm); int repeat_times = lars_thread_config.GetRepeatTimes(numel); int thresh = 0; void* cuda_param[] = { reinterpret_cast<void*>(&param_data), reinterpret_cast<void*>(&grad_data), reinterpret_cast<void*>(&velocity_data), reinterpret_cast<void*>(&param_out_data), reinterpret_cast<void*>(&velocity_out_data), reinterpret_cast<void*>(&master_param_data), reinterpret_cast<void*>(&master_param_out_data), reinterpret_cast<void*>(&lr), reinterpret_cast<void*>(&p_buffer), reinterpret_cast<void*>(&g_buffer), reinterpret_cast<void*>(&mu), reinterpret_cast<void*>(&lars_coeff), reinterpret_cast<void*>(&lars_weight_decay), reinterpret_cast<void*>(&epsilon), reinterpret_cast<void*>(&rescale_grad), reinterpret_cast<void*>(&repeat_times), reinterpret_cast<void*>(&thresh), // Just a placeholder reinterpret_cast<void*>(&numel), reinterpret_cast<void*>(&multi_precision)}; // Lanuch all sm theads. hipLaunchCooperativeKernel( reinterpret_cast<void*>(MomentumLarsKernel<T, MT>), lars_thread_config.grid_for_lars, LARS_BLOCK_SIZE, cuda_param, 0, cuda_ctx.stream()); } #else for (int i = 0; i < op_num; ++i) { const MT* master_param_data = multi_precision ? master_param[i]->data<MT>() : nullptr; MT* master_param_out_data = multi_precision ? master_param_out[i]->mutable_data<MT>(ctx.GetPlace()) : nullptr; SeparatedLarsMomentumOpCUDAKernel<T, MT>( cuda_ctx, param[i]->data<T>(), param_out[i]->mutable_data<T>(ctx.GetPlace()), velocity[i]->data<MT>(), velocity_out[i]->mutable_data<MT>(ctx.GetPlace()), grad[i]->data<T>(), learning_rate[i]->data<MT>(), p_buffer, g_buffer, mu, lars_coeff, weight_decay_arr[i], epsilon, rescale_grad, param[i]->numel(), master_param_data, master_param_out_data, multi_precision); } #endif } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; PD_REGISTER_STRUCT_KERNEL(lars_momentum, GPU, ALL_LAYOUT, ops::LarsMomentumOpCUDAKernel, float, double, plat::float16) {}
f8b8e5b6a576533cf0545cedbe145f7eaa869197.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/optimizers/lars_momentum_op.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/amp/fp16_type_traits.h" #include "paddle/phi/kernels/funcs/aligned_vector.h" #include "paddle/phi/kernels/funcs/math_cuda_utils.h" #if CUDA_VERSION >= 11000 #include <cooperative_groups.h> #endif #ifdef __HIPCC__ #define LARS_BLOCK_SIZE 256 #else #define LARS_BLOCK_SIZE 512 #endif #define LARS_MAX_MERGED_OPS 60 namespace paddle { namespace operators { template <typename T> using MultiPrecisionType = typename details::MPTypeTrait<T>::Type; __device__ __forceinline__ float Sqrt(float x) { return sqrtf(x); } __device__ __forceinline__ double Sqrt(double x) { return sqrt(x); } __device__ __forceinline__ float Fma(float x, float y, float z) { return fmaf(x, y, z); } __device__ __forceinline__ double Fma(double x, double y, double z) { return fma(x, y, z); } template <typename T> class LarsThreadConfig { public: int grid_for_norm; int grid_for_lars; #if CUDA_VERSION >= 11000 private: int grid_stride; public: explicit LarsThreadConfig(int64_t numel, int sm_num, int num_blocks_per_sm) { int grid = (numel + LARS_BLOCK_SIZE - 1) / LARS_BLOCK_SIZE; grid_for_lars = std::min(std::min(sm_num * num_blocks_per_sm, grid), LARS_BLOCK_SIZE); grid_stride = LARS_BLOCK_SIZE * grid_for_lars; } int GetRepeatTimes(int64_t numel) { return (numel + grid_stride - 1) / grid_stride - 1; } #else int repeat_times; explicit LarsThreadConfig(const int64_t numel) { int grid = (numel + LARS_BLOCK_SIZE - 1) / LARS_BLOCK_SIZE; grid_for_norm = std::min(grid, LARS_BLOCK_SIZE); const int grid_stride = grid_for_norm * LARS_BLOCK_SIZE; repeat_times = (numel + grid_stride - 1) / grid_stride - 1; // Determine to read 4 fp16 or float data once, but 2 double data once. grid_for_lars = std::is_same<double, T>::value ? 
(numel + (LARS_BLOCK_SIZE << 1) - 1) / (LARS_BLOCK_SIZE << 1) : (numel + (LARS_BLOCK_SIZE << 2) - 1) / (LARS_BLOCK_SIZE << 2); } #endif }; template <typename T, typename MT, int VecSize, bool IsAmp = false> __device__ inline void VectorizeLarsUpdate(const T* __restrict__ grad, const MT* param, const MT* velocity, T* param_out, MT* velocity_out, const MT mu, MT local_lr, const MT lars_weight_decay, const MT rescale_grad, const int tid, const int grid_stride, const int numel, MT* master_param_out = nullptr) { using VecType = phi::AlignedVector<T, VecSize>; using VecMType = phi::AlignedVector<MT, VecSize>; int main = numel >> (VecSize >> 1); int tail_offset = main * VecSize; const VecType* grad_vec = reinterpret_cast<const VecType*>(grad); const VecMType* param_vec = reinterpret_cast<const VecMType*>(param); const VecMType* velocity_vec = reinterpret_cast<const VecMType*>(velocity); VecType* param_out_vec = reinterpret_cast<VecType*>(param_out); VecMType* velocity_out_vec = reinterpret_cast<VecMType*>(velocity_out); VecMType* master_param_out_vec; if (IsAmp) { master_param_out_vec = reinterpret_cast<VecMType*>(master_param_out); } for (int i = tid; i < main; i += grid_stride) { VecType param_out_tmp; VecMType velocity_tmp, param_tmp; VecType grad_data = grad_vec[i]; VecMType param_data = param_vec[i]; VecMType velocity_data = velocity_vec[i]; #pragma unroll for (int j = 0; j < VecSize; ++j) { MT grad_val = static_cast<MT>(grad_data[j]) * rescale_grad; velocity_tmp[j] = Fma(velocity_data[j], mu, local_lr * Fma(lars_weight_decay, param_data[j], grad_val)); param_tmp[j] = param_data[j] - velocity_tmp[j]; param_out_tmp[j] = static_cast<T>(param_tmp[j]); } param_out_vec[i] = param_out_tmp; velocity_out_vec[i] = velocity_tmp; if (IsAmp) { master_param_out_vec[i] = param_tmp; } } for (int i = tid + tail_offset; i < numel; i += grid_stride) { MT grad_val = static_cast<MT>(grad[i]) * rescale_grad; MT param_val = param[i]; MT velocity_tmp = Fma(velocity[i], mu, local_lr * Fma(lars_weight_decay, param_val, grad_val)); MT param_tmp = param_val - velocity_tmp; param_out[i] = static_cast<T>(param_tmp); velocity_out[i] = velocity_tmp; if (IsAmp) { master_param_out[i] = param_tmp; } } } #if CUDA_VERSION >= 11000 /* Once CUDA_VERSION is beyond 11, cooperative_groups can be involved in without --rdc=true compile flag, then L2_norm kernel can be set with __device__ and cooperative_groups::grid_group also can be involved. Otherwise, adding this flag may affect much, L2_norm kernel shall be set with __global__.*/ // TODO(limingshu): declaration of cooperative_groups wapper is invalid in host. 
template <typename T, typename MT> __forceinline__ __device__ void L2NormKernel( const cooperative_groups::grid_group* cg, #else template <typename T, typename MT> __global__ void L2NormKernel( #endif const T* p_data, const T* __restrict__ g_data, MT* __restrict__ p_buffer, MT* __restrict__ g_buffer, const int64_t numel, const int repeat_times, const MT rescale_grad, const int thresh = 0, MT* __restrict__ p_n = nullptr, MT* __restrict__ g_n = nullptr) { __shared__ MT s_buffer[2]; int tid = threadIdx.x + blockDim.x * blockIdx.x; int grid_stride = LARS_BLOCK_SIZE * gridDim.x; MT p_tmp = static_cast<MT>(0); MT g_tmp = static_cast<MT>(0); while (tid < numel) { MT tmp0 = static_cast<MT>(p_data[tid]); MT tmp1 = static_cast<MT>(g_data[tid]); p_tmp += (tmp0 * tmp0); g_tmp += (tmp1 * tmp1); tid += grid_stride; } p_tmp = phi::funcs::BlockReduceSum<MT>(p_tmp, FINAL_MASK); g_tmp = phi::funcs::BlockReduceSum<MT>(g_tmp, FINAL_MASK); if (threadIdx.x == 0) { p_buffer[blockIdx.x] = p_tmp; g_buffer[blockIdx.x] = g_tmp; } #if CUDA_VERSION >= 11000 cg->sync(); // Grid sync for writring partial result to gloabl memory MT p_part_sum = threadIdx.x < gridDim.x ? p_buffer[threadIdx.x] : 0; MT g_part_sum = threadIdx.x < gridDim.x ? g_buffer[threadIdx.x] : 0; MT tmp0 = phi::funcs::BlockReduceSum<MT>(p_part_sum, FINAL_MASK); MT tmp1 = phi::funcs::BlockReduceSum<MT>(g_part_sum, FINAL_MASK); if (threadIdx.x == 0) { s_buffer[0] = tmp0; s_buffer[1] = tmp1; } __syncthreads(); *p_n = Sqrt(s_buffer[0]); *g_n = rescale_grad * Sqrt(s_buffer[1]); #endif } template <typename T, typename MT> __forceinline__ __device__ void MomentumUpdate( const T* param, const T* __restrict__ grad, const MT* velocity, T* param_out, MT* velocity_out, const MT* master_param, MT* master_param_out, const MT* __restrict__ learning_rate, const MT mu, const MT lars_weight_decay, const MT lars_coeff, const MT epsilon, const MT rescale_grad, const MT param_norm, const MT grad_norm, const int tid, const int grid_stride, const int64_t numel, const bool is_amp) { const MT lr = learning_rate[0]; MT local_lr = lr; if (param_norm > static_cast<MT>(0) && grad_norm > static_cast<MT>(0)) { local_lr = lr * lars_coeff * param_norm / (fma(lars_weight_decay, param_norm, grad_norm) + epsilon); } if (is_amp) { VectorizeLarsUpdate<T, MT, /*VecSize=*/4, /*IsAmp=*/true>(grad, master_param, velocity, param_out, velocity_out, mu, local_lr, lars_weight_decay, rescale_grad, tid, grid_stride, numel, master_param_out); } else { if (std::is_same<T, float>::value || std::is_same<T, paddle::platform::float16>::value) { /* TODO(limingshu): pointer cast may damage memory accessing for fp16 */ VectorizeLarsUpdate<T, MT, /*VecSize=*/4, /*IsAmp=*/false>( grad, reinterpret_cast<const MT*>(param), velocity, param_out, velocity_out, mu, local_lr, lars_weight_decay, rescale_grad, tid, grid_stride, numel); } else { VectorizeLarsUpdate<T, MT, /*VecSize=*/2, /*IsAmp=*/false>( grad, reinterpret_cast<const MT*>(param), velocity, param_out, velocity_out, mu, local_lr, lars_weight_decay, rescale_grad, tid, grid_stride, numel); } } } #if CUDA_VERSION >= 11000 template <typename T, typename MT> struct LarsParamWarpper { int64_t numel_arr[LARS_MAX_MERGED_OPS]; int repeat_arr[LARS_MAX_MERGED_OPS]; const T* __restrict__ g_arr[LARS_MAX_MERGED_OPS]; const MT* __restrict__ lr_arr[LARS_MAX_MERGED_OPS]; T* __restrict__ p_out_arr[LARS_MAX_MERGED_OPS]; MT* __restrict__ v_out_arr[LARS_MAX_MERGED_OPS]; MT* __restrict__ master_p_out_arr[LARS_MAX_MERGED_OPS]; MT weight_decay_arr[LARS_MAX_MERGED_OPS]; }; template 
<typename T, typename MT> __global__ void MergedMomentumLarsKernel(LarsParamWarpper<T, MT> lars_warpper, MT* __restrict__ p_buffer, MT* __restrict__ g_buffer, const int op_num, const MT mu, const MT lars_coeff, const MT epsilon, const MT rescale_grad, const bool is_amp) { int grid_stride = gridDim.x * LARS_BLOCK_SIZE; int tid = threadIdx.x + blockIdx.x * blockDim.x; const cooperative_groups::grid_group cg = cooperative_groups::this_grid(); for (int i = 0; i < op_num; ++i) { int numel = lars_warpper.numel_arr[i]; MT param_norm = static_cast<MT>(0); MT grad_norm = static_cast<MT>(0); L2NormKernel<T, MT>(&cg, lars_warpper.p_out_arr[i], lars_warpper.g_arr[i], p_buffer, g_buffer, numel, lars_warpper.repeat_arr[i], rescale_grad, 0, &param_norm, &grad_norm); MomentumUpdate<T, MT>(lars_warpper.p_out_arr[i], lars_warpper.g_arr[i], lars_warpper.v_out_arr[i], lars_warpper.p_out_arr[i], lars_warpper.v_out_arr[i], lars_warpper.master_p_out_arr[i], lars_warpper.master_p_out_arr[i], lars_warpper.lr_arr[i], mu, lars_warpper.weight_decay_arr[i], lars_coeff, epsilon, rescale_grad, param_norm, grad_norm, tid, grid_stride, numel, is_amp); } } #endif template <typename T, typename MT> __global__ void MomentumLarsKernel(const T* param, const T* __restrict__ grad, const MT* velocity, T* param_out, MT* velocity_out, const MT* master_param, MT* master_param_out, const MT* __restrict__ learning_rate, MT* __restrict__ p_buffer, MT* __restrict__ g_buffer, const MT mu, const MT lars_coeff, const MT lars_weight_decay, const MT epsilon, const MT rescale_grad, const int repeat_times, const int thresh, const int64_t numel, const bool is_amp) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int grid_stride = gridDim.x * LARS_BLOCK_SIZE; #if CUDA_VERSION >= 11000 const cooperative_groups::grid_group cg = cooperative_groups::this_grid(); MT param_norm = static_cast<MT>(0); MT grad_norm = static_cast<MT>(0); L2NormKernel<T, MT>(&cg, param, grad, p_buffer, g_buffer, numel, repeat_times, rescale_grad, gridDim.x, &param_norm, &grad_norm); #else const MT rescale_grad_pow = rescale_grad * rescale_grad; MT param_part_norm = threadIdx.x < thresh ? p_buffer[threadIdx.x] : 0; MT grad_part_norm = threadIdx.x < thresh ? 
g_buffer[threadIdx.x] : 0; __syncthreads(); MT param_norm = Sqrt(phi::funcs::BlockReduceSum<MT>(param_part_norm, FINAL_MASK)); MT grad_norm = Sqrt(rescale_grad_pow * phi::funcs::BlockReduceSum<MT>( grad_part_norm, FINAL_MASK)); #endif MomentumUpdate<T, MT>(param, grad, velocity, param_out, velocity_out, master_param, master_param_out, learning_rate, mu, lars_weight_decay, lars_coeff, epsilon, rescale_grad, param_norm, grad_norm, tid, grid_stride, numel, is_amp); } template <typename T, typename MT> inline void SeparatedLarsMomentumOpCUDAKernel(const phi::GPUContext& cuda_ctx, const T* param_data, T* param_out_data, const MT* velocity_data, MT* velocity_out_data, const T* grad_data, const MT* lr, MT* p_buffer, MT* g_buffer, const MT mu, const MT lars_coeff, const MT weight_decay, const MT epsilon, const MT rescale_grad, const int64_t numel, const MT* master_param_data, MT* master_out_data, const bool is_amp) { LarsThreadConfig<T> lars_thread_config(numel); L2NormKernel<T, MT><<<lars_thread_config.grid_for_norm, LARS_BLOCK_SIZE, 0, cuda_ctx.stream()>>>(param_data, grad_data, p_buffer, g_buffer, numel, lars_thread_config.repeat_times, rescale_grad); MomentumLarsKernel<T, MT> <<<lars_thread_config.grid_for_lars, LARS_BLOCK_SIZE, 0, cuda_ctx.stream()>>>(param_data, grad_data, velocity_data, param_out_data, velocity_out_data, master_param_data, master_out_data, lr, p_buffer, g_buffer, mu, lars_coeff, weight_decay, epsilon, rescale_grad, 0, lars_thread_config.grid_for_norm, numel, is_amp); } template <typename T, typename DeviceContext> class LarsMomentumOpCUDAKernel : public framework::OpKernel<T> { using MT = MultiPrecisionType<T>; public: void Compute(const framework::ExecutionContext& ctx) const override { int num_blocks_per_sm = 0; bool multi_precision = ctx.Attr<bool>("multi_precision"); auto& cuda_ctx = ctx.template device_context<phi::GPUContext>(); int sm_num = cuda_ctx.GetSMCount(); phi::DenseTensor tmp_buffer_t = ctx.AllocateTmpTensor<MT, phi::GPUContext>( {LARS_BLOCK_SIZE << 1}, cuda_ctx); auto* p_buffer = tmp_buffer_t.mutable_data<MT>(ctx.GetPlace()); auto* g_buffer = p_buffer + LARS_BLOCK_SIZE; MT mu = static_cast<MT>(ctx.Attr<float>("mu")); MT lars_coeff = static_cast<MT>(ctx.Attr<float>("lars_coeff")); MT epsilon = static_cast<MT>(ctx.Attr<float>("epsilon")); MT rescale_grad = static_cast<MT>(ctx.Attr<float>("rescale_grad")); auto weight_decay_arr = ctx.Attr<std::vector<float>>("lars_weight_decay"); auto grad = ctx.MultiInput<phi::DenseTensor>("Grad"); auto param = ctx.MultiInput<phi::DenseTensor>("Param"); auto velocity = ctx.MultiInput<phi::DenseTensor>("Velocity"); auto param_out = ctx.MultiOutput<phi::DenseTensor>("ParamOut"); auto velocity_out = ctx.MultiOutput<phi::DenseTensor>("VelocityOut"); auto learning_rate = ctx.MultiInput<phi::DenseTensor>("LearningRate"); auto master_param = ctx.MultiInput<phi::DenseTensor>("MasterParam"); auto master_param_out = ctx.MultiOutput<phi::DenseTensor>("MasterParamOut"); int op_num = grad.size(); #if CUDA_VERSION >= 11000 if (op_num > 1) { LarsParamWarpper<T, MT> lars_warpper; PADDLE_ENFORCE_LT( op_num, LARS_MAX_MERGED_OPS, platform::errors::InvalidArgument( "The maximum number of merged-ops supported is (%d), but" "lars op required for trainning this model is (%d)\n", LARS_MAX_MERGED_OPS, op_num)); /* Implementation of lars optimizer consists of following two steps: 1. Figure out the L2 norm statistic result of grad data and param data. 2. Update param and velocity with usage of L2 norm statistic result. 
Step1 and step2 can be merged with api provided by nvida cudaLaunchCooperativeKernel: - The thread quantity shall less than pyhsical SM limited threads - Launche as thread-block can synchronizlly execute. */ cudaOccupancyMaxActiveBlocksPerMultiprocessor( &num_blocks_per_sm, MergedMomentumLarsKernel<T, MT>, LARS_BLOCK_SIZE, sizeof(MT) << 1); size_t total_numel = 0; for (int i = 0; i < op_num; ++i) { size_t temp_numel = param[i]->numel(); total_numel += temp_numel; lars_warpper.numel_arr[i] = temp_numel; lars_warpper.g_arr[i] = grad[i]->data<T>(); lars_warpper.lr_arr[i] = learning_rate[i]->data<MT>(); lars_warpper.p_out_arr[i] = param_out[i]->mutable_data<T>(ctx.GetPlace()); lars_warpper.v_out_arr[i] = velocity_out[i]->mutable_data<MT>(ctx.GetPlace()); lars_warpper.weight_decay_arr[i] = static_cast<MT>(weight_decay_arr[i]); PADDLE_ENFORCE_EQ( param[i]->data<T>(), lars_warpper.p_out_arr[i], platform::errors::InvalidArgument( "Input(Param) and Output(ParamOut) must be the same Tensors.")); PADDLE_ENFORCE_EQ(velocity[i]->data<MT>(), lars_warpper.v_out_arr[i], platform::errors::InvalidArgument( "Input(Velocity) and Output(VelocityOut) must be " "the same Tensors.")); } int64_t avg_numel = total_numel / op_num; LarsThreadConfig<float> lars_thread_config( avg_numel, sm_num, num_blocks_per_sm); for (int i = 0; i < op_num; ++i) { lars_warpper.repeat_arr[i] = lars_thread_config.GetRepeatTimes(lars_warpper.numel_arr[i]); } if (multi_precision) { for (int i = 0; i < op_num; ++i) { lars_warpper.master_p_out_arr[i] = master_param_out[i]->mutable_data<MT>(ctx.GetPlace()); PADDLE_ENFORCE_EQ(master_param[i]->data<MT>(), lars_warpper.master_p_out_arr[i], platform::errors::InvalidArgument( "Input(MasterParam) and Output(MasterParamOut) " "must be the same Tensors.")); } } void* cuda_param[] = {reinterpret_cast<void*>(&lars_warpper), reinterpret_cast<void*>(&p_buffer), reinterpret_cast<void*>(&g_buffer), reinterpret_cast<void*>(&op_num), reinterpret_cast<void*>(&mu), reinterpret_cast<void*>(&lars_coeff), reinterpret_cast<void*>(&epsilon), reinterpret_cast<void*>(&rescale_grad), reinterpret_cast<void*>(&multi_precision)}; // Lanuch all sm theads, and thead of each block synchronizedly cooperate. cudaLaunchCooperativeKernel( reinterpret_cast<void*>(MergedMomentumLarsKernel<T, MT>), lars_thread_config.grid_for_lars, LARS_BLOCK_SIZE, cuda_param, 0, cuda_ctx.stream()); } else { auto* param_data = param[0]->data<T>(); auto* grad_data = grad[0]->data<T>(); auto* velocity_data = velocity[0]->data<MT>(); auto* lr = learning_rate[0]->data<MT>(); auto* param_out_data = param_out[0]->mutable_data<T>(ctx.GetPlace()); auto* velocity_out_data = velocity_out[0]->mutable_data<MT>(ctx.GetPlace()); const MT* master_param_data = multi_precision ? master_param[0]->data<MT>() : nullptr; MT* master_param_out_data = multi_precision ? master_param_out[0]->mutable_data<MT>(ctx.GetPlace()) : nullptr; int64_t numel = param[0]->numel(); MT lars_weight_decay = weight_decay_arr[0]; // Figure out how many blocks can be active in each sm. 
cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks_per_sm, MomentumLarsKernel<T, MT>, LARS_BLOCK_SIZE, sizeof(MT) << 1); LarsThreadConfig<float> lars_thread_config( numel, sm_num, num_blocks_per_sm); int repeat_times = lars_thread_config.GetRepeatTimes(numel); int thresh = 0; void* cuda_param[] = { reinterpret_cast<void*>(&param_data), reinterpret_cast<void*>(&grad_data), reinterpret_cast<void*>(&velocity_data), reinterpret_cast<void*>(&param_out_data), reinterpret_cast<void*>(&velocity_out_data), reinterpret_cast<void*>(&master_param_data), reinterpret_cast<void*>(&master_param_out_data), reinterpret_cast<void*>(&lr), reinterpret_cast<void*>(&p_buffer), reinterpret_cast<void*>(&g_buffer), reinterpret_cast<void*>(&mu), reinterpret_cast<void*>(&lars_coeff), reinterpret_cast<void*>(&lars_weight_decay), reinterpret_cast<void*>(&epsilon), reinterpret_cast<void*>(&rescale_grad), reinterpret_cast<void*>(&repeat_times), reinterpret_cast<void*>(&thresh), // Just a placeholder reinterpret_cast<void*>(&numel), reinterpret_cast<void*>(&multi_precision)}; // Lanuch all sm theads. cudaLaunchCooperativeKernel( reinterpret_cast<void*>(MomentumLarsKernel<T, MT>), lars_thread_config.grid_for_lars, LARS_BLOCK_SIZE, cuda_param, 0, cuda_ctx.stream()); } #else for (int i = 0; i < op_num; ++i) { const MT* master_param_data = multi_precision ? master_param[i]->data<MT>() : nullptr; MT* master_param_out_data = multi_precision ? master_param_out[i]->mutable_data<MT>(ctx.GetPlace()) : nullptr; SeparatedLarsMomentumOpCUDAKernel<T, MT>( cuda_ctx, param[i]->data<T>(), param_out[i]->mutable_data<T>(ctx.GetPlace()), velocity[i]->data<MT>(), velocity_out[i]->mutable_data<MT>(ctx.GetPlace()), grad[i]->data<T>(), learning_rate[i]->data<MT>(), p_buffer, g_buffer, mu, lars_coeff, weight_decay_arr[i], epsilon, rescale_grad, param[i]->numel(), master_param_data, master_param_out_data, multi_precision); } #endif } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; PD_REGISTER_STRUCT_KERNEL(lars_momentum, GPU, ALL_LAYOUT, ops::LarsMomentumOpCUDAKernel, float, double, plat::float16) {}
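/* Illustrative sketch only; not part of the Paddle sources above. The lars_momentum
   kernels in both the HIP and CUDA variants first reduce the L2 norms of the parameter
   and gradient, then apply a momentum update scaled by a local trust-ratio learning
   rate. The host-side helpers below (names such as lars_local_lr are assumptions made
   for this sketch) restate the per-element math that MomentumUpdate and
   VectorizeLarsUpdate perform on the GPU. */
#include <cmath>

// local_lr = lr * lars_coeff * ||p|| / (fma(weight_decay, ||p||, ||g||) + epsilon),
// falling back to the plain learning rate when either norm is zero.
double lars_local_lr(double lr, double lars_coeff, double weight_decay,
                     double epsilon, double param_norm, double grad_norm) {
  if (param_norm > 0.0 && grad_norm > 0.0) {
    return lr * lars_coeff * param_norm /
           (std::fma(weight_decay, param_norm, grad_norm) + epsilon);
  }
  return lr;
}

// Element-wise update used by the kernels:
//   v_new = mu * v + local_lr * (weight_decay * p + rescale_grad * g)
//   p_new = p - v_new
void lars_update(double* p, double* v, const double* g, int n, double mu,
                 double local_lr, double weight_decay, double rescale_grad) {
  for (int i = 0; i < n; ++i) {
    double g_val = rescale_grad * g[i];
    double v_new = std::fma(v[i], mu, local_lr * std::fma(weight_decay, p[i], g_val));
    p[i] -= v_new;
    v[i] = v_new;
  }
}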
bd559b44ff2083272d0ccef62b58f75d4e14658e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <THHUNN/THHUNN.h> #include <THH/THHTensor.hpp> #include <THHUNN/common.h> #include <TH/THHalf.h> #include <THHUNN/THHHalfAutoNumerics.cuh> // Kernel for fast unfold+copy // Borrowed from Theano // Authors: Arjun Jain, Frdric Bastien, Jan Schlter, Nicolas Ballas template <typename Dtype> __global__ void __launch_bounds__(CUDA_NUM_THREADS) // ensure that at least 1 block can be resident im3d2col_kernel(const int64_t n, const Dtype* data_im, const int64_t height, const int64_t width, const int64_t depth, const int64_t kernel_h, const int64_t kernel_w, const int64_t kernel_d, const int64_t pad_h, const int64_t pad_w, const int64_t pad_d, const int64_t stride_h, const int64_t stride_w, const int64_t stride_d, const int64_t height_col, const int64_t width_col, const int64_t depth_col, Dtype* data_col) { CUDA_KERNEL_LOOP(index, n) { int64_t d_out = index % depth_col; int64_t w_index = index / depth_col; int64_t w_out = w_index % width_col; int64_t h_index = w_index / width_col; int64_t h_out = h_index % height_col; int64_t channel_in = h_index / height_col; //channel_in = 1; int64_t channel_out = channel_in * kernel_h * kernel_w * kernel_d; int64_t h_in = h_out * stride_h - pad_h; int64_t w_in = w_out * stride_w - pad_w; int64_t d_in = d_out * stride_d - pad_d; Dtype* data_col_ptr = data_col; data_col_ptr += channel_out * (height_col * width_col * depth_col) + h_out * (width_col * depth_col) + w_out * depth_col + d_out; const Dtype* data_im_ptr = data_im; data_im_ptr += channel_in * (height * width * depth) + h_in * (width * depth) + w_in * depth + d_in; for (int64_t i = 0; i < kernel_h; ++i) { int64_t h = h_in + i; for (int64_t j = 0; j < kernel_w; ++j) { int64_t w = w_in + j; for (int64_t k = 0; k < kernel_d; ++k) { int64_t d = d_in + k; *data_col_ptr = (h >= 0 && w >= 0 && d >= 0 && h < height && w < width && d < depth) ? data_im_ptr[i * (width * depth) + j *depth + k] : ScalarConvert<int, Dtype>::to(0); data_col_ptr += height_col * width_col * depth_col; } } } } } template <typename Dtype> void im3d2col(hipStream_t stream, const Dtype* data_im, const int64_t channels, const int64_t height, const int64_t width, const int64_t depth, const int64_t kernel_h, const int64_t kernel_w, const int64_t kernel_d, const int64_t pad_h, const int64_t pad_w, const int64_t pad_d, const int64_t stride_h, const int64_t stride_w, const int64_t stride_d, Dtype* data_col) { // We are going to launch channels * height_col * width_col * depth_col kernels, each // kernel responsible for copying a single-channel grid. 
int64_t height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1; int64_t width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1; int64_t depth_col = (depth + 2 * pad_d - kernel_d) / stride_d + 1; int64_t num_kernels = channels * height_col * width_col * depth_col; hipLaunchKernelGGL(( im3d2col_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_im, height, width, depth, kernel_h, kernel_w, kernel_d, pad_h, pad_w, pad_d, stride_h, stride_w, stride_d, height_col, width_col, depth_col, data_col); THCudaCheck(hipGetLastError()); } template <typename Dtype, typename Acctype> __global__ void __launch_bounds__(CUDA_NUM_THREADS) // ensure that at least 1 block can be resident col2im3d_kernel(const int64_t n, const Dtype* data_col, const int64_t height, const int64_t width, const int64_t depth, const int64_t channels, const int64_t patch_h, const int64_t patch_w, const int64_t patch_d, const int64_t pad_h, const int64_t pad_w, const int64_t pad_d, const int64_t stride_h, const int64_t stride_w, const int64_t stride_d, const int64_t height_col, const int64_t width_col, const int64_t depth_col, Dtype* data_im) { CUDA_KERNEL_LOOP(index, n) { Acctype val = 0; int64_t d = index % depth + pad_d; int64_t w_index = index / depth; int64_t w = w_index % width + pad_w; int64_t h_index = w_index / width; int64_t h = h_index % height + pad_h; int64_t c = h_index / height; // compute the start and end of the output int64_t d_col_start = (d < patch_d) ? 0 : (d - patch_d) / stride_d + 1; int64_t d_col_end = min(d / stride_d + 1, depth_col); int64_t w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1; int64_t w_col_end = min(w / stride_w + 1, width_col); int64_t h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1; int64_t h_col_end = min(h / stride_h + 1, height_col); int64_t offset = (c * patch_h * patch_w * patch_d + h * patch_w * patch_d + w * patch_d + d) * height_col * width_col * depth_col; int64_t coeff_h_col = (1 - stride_h * patch_w * patch_d * height_col) * width_col * depth_col; int64_t coeff_w_col = (1 - stride_w * patch_d * height_col * width_col) * depth_col; int64_t coeff_d_col = (1 - stride_d * height_col * width_col * depth_col); for (int64_t d_col = d_col_start; d_col < d_col_end; ++d_col) for (int64_t h_col = h_col_start; h_col < h_col_end; ++h_col) { for (int64_t w_col = w_col_start; w_col < w_col_end; ++w_col) { val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col + d_col * coeff_d_col]; } } data_im[index] = ScalarConvert<Acctype, Dtype>::to(val); } } template <typename Dtype, typename Acctype> void col2im3d(hipStream_t stream, const Dtype* data_col, const int64_t channels, const int64_t height, const int64_t width, const int64_t depth, const int64_t patch_h, const int64_t patch_w, const int64_t patch_d, const int64_t pad_h, const int64_t pad_w, const int64_t pad_d, const int64_t stride_h, const int64_t stride_w, const int64_t stride_d, Dtype* data_im) { int64_t height_col = (height + 2 * pad_h - patch_h) / stride_h + 1; int64_t width_col = (width + 2 * pad_w - patch_w) / stride_w + 1; int64_t depth_col = (depth + 2 * pad_d - patch_d) / stride_d + 1; int64_t num_kernels = channels * height * width * depth; // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. 
hipLaunchKernelGGL(( col2im3d_kernel<Dtype, Acctype>), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_col, height, width, depth, channels, patch_h, patch_w, patch_d, pad_h, pad_w, pad_d, stride_h, stride_w, stride_d, height_col, width_col, depth_col, data_im); THCudaCheck(hipGetLastError()); } #include <THHUNN/generic/VolumetricConvolution.hip> #include <THH/THHGenerateFloatTypes.h>
bd559b44ff2083272d0ccef62b58f75d4e14658e.cu
#include <THCUNN/THCUNN.h> #include <THC/THCTensor.hpp> #include <THCUNN/common.h> #include <TH/THHalf.h> #include <THCUNN/THCHalfAutoNumerics.cuh> // Kernel for fast unfold+copy // Borrowed from Theano // Authors: Arjun Jain, Frédéric Bastien, Jan Schlüter, Nicolas Ballas template <typename Dtype> __global__ void __launch_bounds__(CUDA_NUM_THREADS) // ensure that at least 1 block can be resident im3d2col_kernel(const int64_t n, const Dtype* data_im, const int64_t height, const int64_t width, const int64_t depth, const int64_t kernel_h, const int64_t kernel_w, const int64_t kernel_d, const int64_t pad_h, const int64_t pad_w, const int64_t pad_d, const int64_t stride_h, const int64_t stride_w, const int64_t stride_d, const int64_t height_col, const int64_t width_col, const int64_t depth_col, Dtype* data_col) { CUDA_KERNEL_LOOP(index, n) { int64_t d_out = index % depth_col; int64_t w_index = index / depth_col; int64_t w_out = w_index % width_col; int64_t h_index = w_index / width_col; int64_t h_out = h_index % height_col; int64_t channel_in = h_index / height_col; //channel_in = 1; int64_t channel_out = channel_in * kernel_h * kernel_w * kernel_d; int64_t h_in = h_out * stride_h - pad_h; int64_t w_in = w_out * stride_w - pad_w; int64_t d_in = d_out * stride_d - pad_d; Dtype* data_col_ptr = data_col; data_col_ptr += channel_out * (height_col * width_col * depth_col) + h_out * (width_col * depth_col) + w_out * depth_col + d_out; const Dtype* data_im_ptr = data_im; data_im_ptr += channel_in * (height * width * depth) + h_in * (width * depth) + w_in * depth + d_in; for (int64_t i = 0; i < kernel_h; ++i) { int64_t h = h_in + i; for (int64_t j = 0; j < kernel_w; ++j) { int64_t w = w_in + j; for (int64_t k = 0; k < kernel_d; ++k) { int64_t d = d_in + k; *data_col_ptr = (h >= 0 && w >= 0 && d >= 0 && h < height && w < width && d < depth) ? data_im_ptr[i * (width * depth) + j *depth + k] : ScalarConvert<int, Dtype>::to(0); data_col_ptr += height_col * width_col * depth_col; } } } } } template <typename Dtype> void im3d2col(cudaStream_t stream, const Dtype* data_im, const int64_t channels, const int64_t height, const int64_t width, const int64_t depth, const int64_t kernel_h, const int64_t kernel_w, const int64_t kernel_d, const int64_t pad_h, const int64_t pad_w, const int64_t pad_d, const int64_t stride_h, const int64_t stride_w, const int64_t stride_d, Dtype* data_col) { // We are going to launch channels * height_col * width_col * depth_col kernels, each // kernel responsible for copying a single-channel grid. 
int64_t height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1; int64_t width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1; int64_t depth_col = (depth + 2 * pad_d - kernel_d) / stride_d + 1; int64_t num_kernels = channels * height_col * width_col * depth_col; im3d2col_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>(num_kernels, data_im, height, width, depth, kernel_h, kernel_w, kernel_d, pad_h, pad_w, pad_d, stride_h, stride_w, stride_d, height_col, width_col, depth_col, data_col); THCudaCheck(cudaGetLastError()); } template <typename Dtype, typename Acctype> __global__ void __launch_bounds__(CUDA_NUM_THREADS) // ensure that at least 1 block can be resident col2im3d_kernel(const int64_t n, const Dtype* data_col, const int64_t height, const int64_t width, const int64_t depth, const int64_t channels, const int64_t patch_h, const int64_t patch_w, const int64_t patch_d, const int64_t pad_h, const int64_t pad_w, const int64_t pad_d, const int64_t stride_h, const int64_t stride_w, const int64_t stride_d, const int64_t height_col, const int64_t width_col, const int64_t depth_col, Dtype* data_im) { CUDA_KERNEL_LOOP(index, n) { Acctype val = 0; int64_t d = index % depth + pad_d; int64_t w_index = index / depth; int64_t w = w_index % width + pad_w; int64_t h_index = w_index / width; int64_t h = h_index % height + pad_h; int64_t c = h_index / height; // compute the start and end of the output int64_t d_col_start = (d < patch_d) ? 0 : (d - patch_d) / stride_d + 1; int64_t d_col_end = min(d / stride_d + 1, depth_col); int64_t w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1; int64_t w_col_end = min(w / stride_w + 1, width_col); int64_t h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1; int64_t h_col_end = min(h / stride_h + 1, height_col); int64_t offset = (c * patch_h * patch_w * patch_d + h * patch_w * patch_d + w * patch_d + d) * height_col * width_col * depth_col; int64_t coeff_h_col = (1 - stride_h * patch_w * patch_d * height_col) * width_col * depth_col; int64_t coeff_w_col = (1 - stride_w * patch_d * height_col * width_col) * depth_col; int64_t coeff_d_col = (1 - stride_d * height_col * width_col * depth_col); for (int64_t d_col = d_col_start; d_col < d_col_end; ++d_col) for (int64_t h_col = h_col_start; h_col < h_col_end; ++h_col) { for (int64_t w_col = w_col_start; w_col < w_col_end; ++w_col) { val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col + d_col * coeff_d_col]; } } data_im[index] = ScalarConvert<Acctype, Dtype>::to(val); } } template <typename Dtype, typename Acctype> void col2im3d(cudaStream_t stream, const Dtype* data_col, const int64_t channels, const int64_t height, const int64_t width, const int64_t depth, const int64_t patch_h, const int64_t patch_w, const int64_t patch_d, const int64_t pad_h, const int64_t pad_w, const int64_t pad_d, const int64_t stride_h, const int64_t stride_w, const int64_t stride_d, Dtype* data_im) { int64_t height_col = (height + 2 * pad_h - patch_h) / stride_h + 1; int64_t width_col = (width + 2 * pad_w - patch_w) / stride_w + 1; int64_t depth_col = (depth + 2 * pad_d - patch_d) / stride_d + 1; int64_t num_kernels = channels * height * width * depth; // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. 
col2im3d_kernel<Dtype, Acctype><<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>(num_kernels, data_col, height, width, depth, channels, patch_h, patch_w, patch_d, pad_h, pad_w, pad_d, stride_h, stride_w, stride_d, height_col, width_col, depth_col, data_im); THCudaCheck(cudaGetLastError()); } #include <THCUNN/generic/VolumetricConvolution.cu> #include <THC/THCGenerateFloatTypes.h>
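/* Illustrative sketch only; not part of the THCUNN sources above. im3d2col launches one
   copy thread per output element (channels * height_col * width_col * depth_col), with
   each output extent given by the usual convolution shape formula. The helpers below
   (names are assumptions made for this sketch) spell out that launch-size arithmetic. */
#include <cstdint>

// Output positions along one dimension: (in + 2*pad - kernel) / stride + 1
inline int64_t conv_out_size(int64_t in, int64_t kernel, int64_t pad, int64_t stride) {
  return (in + 2 * pad - kernel) / stride + 1;
}

// Total number of copy threads requested by im3d2col for one image.
inline int64_t im3d2col_num_kernels(int64_t channels,
                                    int64_t height, int64_t width, int64_t depth,
                                    int64_t kernel_h, int64_t kernel_w, int64_t kernel_d,
                                    int64_t pad_h, int64_t pad_w, int64_t pad_d,
                                    int64_t stride_h, int64_t stride_w, int64_t stride_d) {
  return channels * conv_out_size(height, kernel_h, pad_h, stride_h)
                  * conv_out_size(width,  kernel_w, pad_w, stride_w)
                  * conv_out_size(depth,  kernel_d, pad_d, stride_d);
}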
bab46a331a0dc9bb2df8fbd73af1d212476eb03b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <LinearHeightField.cuh>
#include <LinearHeightField_Kernel.cuh>
/****************************************************************************************************************************/
/****************************************************************************************************************************/
extern "C"
{
/****************************************************************************************************************************/
/****************************************************************************************************************************/
void LinearHeightField_calculateHeight_CUDA(double* m_pos, double a, double b, uint nbPos)
{
    int numThreadsX, numBlocksX;
    computeGridSize(nbPos, numBlocksX, numThreadsX);
    hipLaunchKernelGGL((LinearHeightField_calculateHeight_Kernel), dim3(numBlocksX), dim3(numThreadsX), 0, 0,
                       (double3*) m_pos, a, b, nbPos);
}
/****************************************************************************************************************************/
/****************************************************************************************************************************/
void LinearHeightField_calculateHeight_Normales_CUDA(double* m_pos, double* nx0, double* nx1, double* nz0, double* nz1,
                                                     double a, double b, uint nbPos)
{
    int numThreadsX, numBlocksX;
    computeGridSize(nbPos, numBlocksX, numThreadsX);
    hipLaunchKernelGGL((LinearHeightField_calculateHeight_Normales_Kernel), dim3(numBlocksX), dim3(numThreadsX), 0, 0,
                       (double3*) m_pos, (double3*) nx0, (double3*) nx1, (double3*) nz0, (double3*) nz1, a, b, nbPos);
}
/****************************************************************************************************************************/
/****************************************************************************************************************************/
}
/****************************************************************************************************************************/
/****************************************************************************************************************************/
bab46a331a0dc9bb2df8fbd73af1d212476eb03b.cu
#include <LinearHeightField.cuh>
#include <LinearHeightField_Kernel.cuh>
/****************************************************************************************************************************/
/****************************************************************************************************************************/
extern "C"
{
/****************************************************************************************************************************/
/****************************************************************************************************************************/
void LinearHeightField_calculateHeight_CUDA(double* m_pos, double a, double b, uint nbPos)
{
    int numThreadsX, numBlocksX;
    computeGridSize(nbPos, numBlocksX, numThreadsX);
    LinearHeightField_calculateHeight_Kernel<<<numBlocksX, numThreadsX>>>((double3*) m_pos, a, b, nbPos);
}
/****************************************************************************************************************************/
/****************************************************************************************************************************/
void LinearHeightField_calculateHeight_Normales_CUDA(double* m_pos, double* nx0, double* nx1, double* nz0, double* nz1,
                                                     double a, double b, uint nbPos)
{
    int numThreadsX, numBlocksX;
    computeGridSize(nbPos, numBlocksX, numThreadsX);
    LinearHeightField_calculateHeight_Normales_Kernel<<<numBlocksX, numThreadsX>>>((double3*) m_pos, (double3*) nx0,
                                                                                   (double3*) nx1, (double3*) nz0,
                                                                                   (double3*) nz1, a, b, nbPos);
}
/****************************************************************************************************************************/
/****************************************************************************************************************************/
}
/****************************************************************************************************************************/
/****************************************************************************************************************************/
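/* Illustrative sketch only; not part of the LinearHeightField sources above, whose
   computeGridSize helper is defined elsewhere. A common way to derive such a 1-D launch
   configuration (an assumption, not the project's actual implementation) is to fix a
   block size and round the element count up to whole blocks: */

// Hypothetical stand-in for computeGridSize: ceil(n / 256) blocks of 256 threads.
inline void computeGridSizeSketch(unsigned int n, int& numBlocks, int& numThreads) {
  numThreads = 256;
  numBlocks  = static_cast<int>((n + numThreads - 1) / numThreads);
}
// Example: n = 1000 gives numBlocks = 4, numThreads = 256.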
0d00205a141aee2051c53ff7236d198000887f36.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <iostream> #include <limits> #include <cudf/column/column_factories.hpp> #include <cudf/detail/sequence.hpp> #include <cudf/scalar/scalar_factories.hpp> #include <cudf/table/table.hpp> #include <cudf/utilities/bit.hpp> #include <cudf/utilities/error.hpp> #include <cudf/utilities/traits.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include "row_conversion.hpp" namespace cudf { namespace java { /** * Copy a simple vector to device memory asynchronously. Be sure to read * the data on the same stream as is used to copy it. */ template <typename T> std::unique_ptr<rmm::device_uvector<T>> copy_to_dev_async(const std::vector<T> &input, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) { std::unique_ptr<rmm::device_uvector<T>> ret(new rmm::device_uvector<T>(input.size(), stream, mr)); CUDA_TRY(hipMemcpyAsync(ret->data(), input.data(), sizeof(T) * input.size(), hipMemcpyHostToDevice, stream.value())); return ret; } __global__ void copy_to_fixed_width_columns(const cudf::size_type num_rows, const cudf::size_type num_columns, const cudf::size_type row_size, const cudf::size_type *input_offset_in_row, const cudf::size_type *num_bytes, int8_t **output_data, cudf::bitmask_type **output_nm, const int8_t *input_data) { // We are going to copy the data in two passes. // The first pass copies a chunk of data into shared memory. // The second pass copies that chunk from shared memory out to the final location. // Because shared memory is limited we copy a subset of the rows at a time. // For simplicity we will refer to this as a row_group // In practice we have found writing more than 4 columns of data per thread // results in performance loss. As such we are using a 2 dimensional // kernel in terms of threads, but not in terms of blocks. Columns are // controlled by the y dimension (there is no y dimension in blocks). Rows // are controlled by the x dimension (there are multiple blocks in the x // dimension). 
cudf::size_type rows_per_group = blockDim.x; cudf::size_type row_group_start = blockIdx.x; cudf::size_type row_group_stride = gridDim.x; cudf::size_type row_group_end = (num_rows + rows_per_group - 1) / rows_per_group + 1; extern __shared__ int8_t shared_data[]; // Because we are copying fixed width only data and we stride the rows // this thread will always start copying from shared data in the same place int8_t *row_tmp = &shared_data[row_size * threadIdx.x]; int8_t *row_vld_tmp = &row_tmp[input_offset_in_row[num_columns - 1] + num_bytes[num_columns - 1]]; for (cudf::size_type row_group_index = row_group_start; row_group_index < row_group_end; row_group_index += row_group_stride) { // Step 1: Copy the data into shared memory // We know row_size is always aligned with and a multiple of int64_t; int64_t *long_shared = reinterpret_cast<int64_t *>(shared_data); const int64_t *long_input = reinterpret_cast<int64_t const *>(input_data); cudf::size_type shared_output_index = threadIdx.x + (threadIdx.y * blockDim.x); cudf::size_type shared_output_stride = blockDim.x * blockDim.y; cudf::size_type row_index_end = ((row_group_index + 1) * rows_per_group); if (row_index_end > num_rows) { row_index_end = num_rows; } cudf::size_type num_rows_in_group = row_index_end - (row_group_index * rows_per_group); cudf::size_type shared_length = row_size * num_rows_in_group; cudf::size_type shared_output_end = shared_length / sizeof(int64_t); cudf::size_type start_input_index = (row_size * row_group_index * rows_per_group) / sizeof(int64_t); for (cudf::size_type shared_index = shared_output_index; shared_index < shared_output_end; shared_index += shared_output_stride) { long_shared[shared_index] = long_input[start_input_index + shared_index]; } // Wait for all of the data to be in shared memory __syncthreads(); // Step 2 copy the data back out // Within the row group there should be 1 thread for each row. This is a // requirement for launching the kernel cudf::size_type row_index = (row_group_index * rows_per_group) + threadIdx.x; // But we might not use all of the threads if the number of rows does not go // evenly into the thread count. We don't want those threads to exit yet // because we may need them to copy data in for the next row group. uint32_t active_mask = __ballot_sync(0xffffffff, row_index < num_rows); if (row_index < num_rows) { cudf::size_type col_index_start = threadIdx.y; cudf::size_type col_index_stride = blockDim.y; for (cudf::size_type col_index = col_index_start; col_index < num_columns; col_index += col_index_stride) { cudf::size_type col_size = num_bytes[col_index]; const int8_t *col_tmp = &(row_tmp[input_offset_in_row[col_index]]); int8_t *col_output = output_data[col_index]; switch (col_size) { case 1: { col_output[row_index] = *col_tmp; break; } case 2: { int16_t *short_col_output = reinterpret_cast<int16_t *>(col_output); short_col_output[row_index] = *reinterpret_cast<const int16_t *>(col_tmp); break; } case 4: { int32_t *int_col_output = reinterpret_cast<int32_t *>(col_output); int_col_output[row_index] = *reinterpret_cast<const int32_t *>(col_tmp); break; } case 8: { int64_t *long_col_output = reinterpret_cast<int64_t *>(col_output); long_col_output[row_index] = *reinterpret_cast<const int64_t *>(col_tmp); break; } default: { cudf::size_type output_offset = col_size * row_index; // TODO this should just not be supported for fixed width columns, but just in case... 
for (cudf::size_type b = 0; b < col_size; b++) { col_output[b + output_offset] = col_tmp[b]; } break; } } cudf::bitmask_type *nm = output_nm[col_index]; int8_t *valid_byte = &row_vld_tmp[col_index / 8]; cudf::size_type byte_bit_offset = col_index % 8; int predicate = *valid_byte & (1 << byte_bit_offset); uint32_t bitmask = __ballot_sync(active_mask, predicate); if (row_index % 32 == 0) { nm[word_index(row_index)] = bitmask; } } // end column loop } // end row copy // wait for the row_group to be totally copied before starting on the next row group __syncthreads(); } } __global__ void copy_from_fixed_width_columns(const cudf::size_type start_row, const cudf::size_type num_rows, const cudf::size_type num_columns, const cudf::size_type row_size, const cudf::size_type *output_offset_in_row, const cudf::size_type *num_bytes, const int8_t **input_data, const cudf::bitmask_type **input_nm, int8_t *output_data) { // We are going to copy the data in two passes. // The first pass copies a chunk of data into shared memory. // The second pass copies that chunk from shared memory out to the final location. // Because shared memory is limited we copy a subset of the rows at a time. // We do not support copying a subset of the columns in a row yet, so we don't // currently support a row that is wider than shared memory. // For simplicity we will refer to this as a row_group // In practice we have found reading more than 4 columns of data per thread // results in performance loss. As such we are using a 2 dimensional // kernel in terms of threads, but not in terms of blocks. Columns are // controlled by the y dimension (there is no y dimension in blocks). Rows // are controlled by the x dimension (there are multiple blocks in the x // dimension). cudf::size_type rows_per_group = blockDim.x; cudf::size_type row_group_start = blockIdx.x; cudf::size_type row_group_stride = gridDim.x; cudf::size_type row_group_end = (num_rows + rows_per_group - 1) / rows_per_group + 1; extern __shared__ int8_t shared_data[]; // Because we are copying fixed width only data and we stride the rows // this thread will always start copying to shared data in the same place int8_t *row_tmp = &shared_data[row_size * threadIdx.x]; int8_t *row_vld_tmp = &row_tmp[output_offset_in_row[num_columns - 1] + num_bytes[num_columns - 1]]; for (cudf::size_type row_group_index = row_group_start; row_group_index < row_group_end; row_group_index += row_group_stride) { // Within the row group there should be 1 thread for each row. This is a // requirement for launching the kernel cudf::size_type row_index = start_row + (row_group_index * rows_per_group) + threadIdx.x; // But we might not use all of the threads if the number of rows does not go // evenly into the thread count. We don't want those threads to exit yet // because we may need them to copy data back out. 
if (row_index < (start_row + num_rows)) { cudf::size_type col_index_start = threadIdx.y; cudf::size_type col_index_stride = blockDim.y; for (cudf::size_type col_index = col_index_start; col_index < num_columns; col_index += col_index_stride) { cudf::size_type col_size = num_bytes[col_index]; int8_t *col_tmp = &(row_tmp[output_offset_in_row[col_index]]); const int8_t *col_input = input_data[col_index]; switch (col_size) { case 1: { *col_tmp = col_input[row_index]; break; } case 2: { const int16_t *short_col_input = reinterpret_cast<const int16_t *>(col_input); *reinterpret_cast<int16_t *>(col_tmp) = short_col_input[row_index]; break; } case 4: { const int32_t *int_col_input = reinterpret_cast<const int32_t *>(col_input); *reinterpret_cast<int32_t *>(col_tmp) = int_col_input[row_index]; break; } case 8: { const int64_t *long_col_input = reinterpret_cast<const int64_t *>(col_input); *reinterpret_cast<int64_t *>(col_tmp) = long_col_input[row_index]; break; } default: { cudf::size_type input_offset = col_size * row_index; // TODO this should just not be supported for fixed width columns, but just in case... for (cudf::size_type b = 0; b < col_size; b++) { col_tmp[b] = col_input[b + input_offset]; } break; } } // atomicOr only works on 32 bit or 64 bit aligned values, and not byte aligned // so we have to rewrite the addresses to make sure that it is 4 byte aligned int8_t *valid_byte = &row_vld_tmp[col_index / 8]; cudf::size_type byte_bit_offset = col_index % 8; uint64_t fixup_bytes = reinterpret_cast<uint64_t>(valid_byte) % 4; int32_t *valid_int = reinterpret_cast<int32_t *>(valid_byte - fixup_bytes); cudf::size_type int_bit_offset = byte_bit_offset + (fixup_bytes * 8); // Now copy validity for the column if (input_nm[col_index]) { if (bit_is_set(input_nm[col_index], row_index)) { atomicOr_block(valid_int, 1 << int_bit_offset); } else { atomicAnd_block(valid_int, ~(1 << int_bit_offset)); } } else { // It is valid so just set the bit atomicOr_block(valid_int, 1 << int_bit_offset); } } // end column loop } // end row copy // wait for the row_group to be totally copied into shared memory __syncthreads(); // Step 2: Copy the data back out // We know row_size is always aligned with and a multiple of int64_t; int64_t *long_shared = reinterpret_cast<int64_t *>(shared_data); int64_t *long_output = reinterpret_cast<int64_t *>(output_data); cudf::size_type shared_input_index = threadIdx.x + (threadIdx.y * blockDim.x); cudf::size_type shared_input_stride = blockDim.x * blockDim.y; cudf::size_type row_index_end = ((row_group_index + 1) * rows_per_group); if (row_index_end > num_rows) { row_index_end = num_rows; } cudf::size_type num_rows_in_group = row_index_end - (row_group_index * rows_per_group); cudf::size_type shared_length = row_size * num_rows_in_group; cudf::size_type shared_input_end = shared_length / sizeof(int64_t); cudf::size_type start_output_index = (row_size * row_group_index * rows_per_group) / sizeof(int64_t); for (cudf::size_type shared_index = shared_input_index; shared_index < shared_input_end; shared_index += shared_input_stride) { long_output[start_output_index + shared_index] = long_shared[shared_index]; } __syncthreads(); // Go for the next round } } /** * Calculate the dimensions of the kernel for fixed width only columns. * @param [in] num_columns the number of columns being copied. * @param [in] num_rows the number of rows being copied. * @param [in] size_per_row the size each row takes up when padded. 
* @param [out] blocks the size of the blocks for the kernel * @param [out] threads the size of the threads for the kernel * @return the size in bytes of shared memory needed for each block. */ static int calc_fixed_width_kernel_dims(const cudf::size_type num_columns, const cudf::size_type num_rows, const cudf::size_type size_per_row, dim3 &blocks, dim3 &threads) { // We have found speed degrades when a thread handles more than 4 columns. // Each block is 2 dimensional. The y dimension indicates the columns. // We limit this to 32 threads in the y dimension so we can still // have at least 32 threads in the x dimension (1 warp) which should // result in better coalescing of memory operations. We also // want to guarantee that we are processing a multiple of 32 threads // in the x dimension because we use atomic operations at the block // level when writing validity data out to main memory, and that would // need to change if we split a word of validity data between blocks. int y_block_size = (num_columns + 3) / 4; if (y_block_size > 32) { y_block_size = 32; } int x_possible_block_size = 1024 / y_block_size; // 48KB is the default setting for shared memory per block according to the cuda tutorials // If someone configures the GPU to only have 16 KB this might not work. int max_shared_size = 48 * 1024; int max_block_size = max_shared_size / size_per_row; // If we don't have enough shared memory there is no point in having more threads // per block that will just sit idle max_block_size = max_block_size > x_possible_block_size ? x_possible_block_size : max_block_size; // Make sure that the x dimension is a multiple of 32 this not only helps // coalesce memory access it also lets us do a ballot sync for validity to write // the data back out the warp level. If x is a multiple of 32 then each thread in the y // dimension is associated with one or more warps, that should correspond to the validity // words directly. int block_size = (max_block_size / 32) * 32; CUDF_EXPECTS(block_size != 0, "Row size is too large to fit in shared memory"); int num_blocks = (num_rows + block_size - 1) / block_size; if (num_blocks < 1) { num_blocks = 1; } else if (num_blocks > 10240) { // The maximum number of blocks supported in the x dimension is 2 ^ 31 - 1 // but in practice haveing too many can cause some overhead that I don't totally // understand. Playing around with this haveing as little as 600 blocks appears // to be able to saturate memory on V100, so this is an order of magnitude higher // to try and future proof this a bit. num_blocks = 10240; } blocks.x = num_blocks; blocks.y = 1; blocks.z = 1; threads.x = block_size; threads.y = y_block_size; threads.z = 1; return size_per_row * block_size; } /** * When converting to rows it is possible that the size of the table was too big to fit * in a single column. This creates an output column for a subset of the rows in a table * going from start row and containing the next num_rows. Most of the parameters passed * into this function are common between runs and should be calculated once. 
*/ static std::unique_ptr<cudf::column> fixed_width_convert_to_rows( const cudf::size_type start_row, const cudf::size_type num_rows, const cudf::size_type num_columns, const cudf::size_type size_per_row, std::unique_ptr<rmm::device_uvector<cudf::size_type>> &column_start, std::unique_ptr<rmm::device_uvector<cudf::size_type>> &column_size, std::unique_ptr<rmm::device_uvector<const int8_t *>> &input_data, std::unique_ptr<rmm::device_uvector<const cudf::bitmask_type *>> &input_nm, const cudf::scalar &zero, const cudf::scalar &scalar_size_per_row, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) { int64_t total_allocation = size_per_row * num_rows; // We made a mistake in the split somehow CUDF_EXPECTS(total_allocation < std::numeric_limits<int>::max(), "Table is too large to fit!"); // Allocate and set the offsets row for the byte array std::unique_ptr<cudf::column> offsets = cudf::detail::sequence(num_rows + 1, zero, scalar_size_per_row, stream); std::unique_ptr<cudf::column> data = cudf::make_numeric_column( cudf::data_type(cudf::type_id::INT8), static_cast<cudf::size_type>(total_allocation), cudf::mask_state::UNALLOCATED, stream, mr); dim3 blocks; dim3 threads; int shared_size = calc_fixed_width_kernel_dims(num_columns, num_rows, size_per_row, blocks, threads); hipLaunchKernelGGL(( copy_from_fixed_width_columns), dim3(blocks), dim3(threads), shared_size, stream.value(), start_row, num_rows, num_columns, size_per_row, column_start->data(), column_size->data(), input_data->data(), input_nm->data(), data->mutable_view().data<int8_t>()); return cudf::make_lists_column(num_rows, std::move(offsets), std::move(data), 0, rmm::device_buffer{0, rmm::cuda_stream_default, mr}, stream, mr); } static cudf::data_type get_data_type(const cudf::column_view &v) { return v.type(); } static bool is_fixed_width(const cudf::data_type &t) { return cudf::is_fixed_width(t); } static inline int32_t align_offset(int32_t offset, std::size_t alignment) { return (offset + alignment - 1) & ~(alignment - 1); } static inline bool are_all_fixed_width(std::vector<cudf::data_type> const &schema) { return std::all_of(schema.begin(), schema.end(), cudf::java::is_fixed_width); } /** * Given a set of fixed width columns, calculate how the data will be laid out in memory. * @param [in] schema the types of columns that need to be laid out. * @param [out] column_start the byte offset where each column starts in the row. * @param [out] column_size the size in bytes of the data for each columns in the row. * @return the size in bytes each row needs. */ static inline int32_t compute_fixed_width_layout(std::vector<cudf::data_type> const &schema, std::vector<cudf::size_type> &column_start, std::vector<cudf::size_type> &column_size) { // We guarantee that the start of each column is 64-bit aligned so anything can go // there, but to make the code simple we will still do an alignment for it. 
int32_t at_offset = 0; for (auto col = schema.begin(); col < schema.end(); col++) { cudf::size_type s = cudf::size_of(*col); column_size.emplace_back(s); std::size_t allocation_needed = s; std::size_t alignment_needed = allocation_needed; // They are the same for fixed width types at_offset = align_offset(at_offset, alignment_needed); column_start.emplace_back(at_offset); at_offset += allocation_needed; } // Now we need to add in space for validity // Eventually we can think about nullable vs not nullable, but for now we will just always add it // in int32_t validity_bytes_needed = (schema.size() + 7) / 8; // validity comes at the end and is byte aligned so we can pack more in. at_offset += validity_bytes_needed; // Now we need to pad the end so all rows are 64 bit aligned return align_offset(at_offset, 8); // 8 bytes (64 bits) } std::vector<std::unique_ptr<cudf::column>> convert_to_rows(cudf::table_view const &tbl, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) { const cudf::size_type num_columns = tbl.num_columns(); std::vector<cudf::data_type> schema; schema.resize(num_columns); std::transform(tbl.begin(), tbl.end(), schema.begin(), cudf::java::get_data_type); if (are_all_fixed_width(schema)) { std::vector<cudf::size_type> column_start; std::vector<cudf::size_type> column_size; int32_t size_per_row = compute_fixed_width_layout(schema, column_start, column_size); auto dev_column_start = copy_to_dev_async(column_start, stream, mr); auto dev_column_size = copy_to_dev_async(column_size, stream, mr); int32_t max_rows_per_batch = std::numeric_limits<int>::max() / size_per_row; // Make the number of rows per batch a multiple of 32 so we don't have to worry about // splitting validity at a specific row offset. This might change in the future. max_rows_per_batch = (max_rows_per_batch / 32) * 32; cudf::size_type num_rows = tbl.num_rows(); // Get the pointers to the input columnar data ready std::vector<const int8_t *> input_data; std::vector<cudf::bitmask_type const *> input_nm; for (cudf::size_type column_number = 0; column_number < num_columns; column_number++) { cudf::column_view cv = tbl.column(column_number); input_data.emplace_back(cv.data<int8_t>()); input_nm.emplace_back(cv.null_mask()); } auto dev_input_data = copy_to_dev_async(input_data, stream, mr); auto dev_input_nm = copy_to_dev_async(input_nm, stream, mr); using ScalarType = cudf::scalar_type_t<cudf::size_type>; auto zero = cudf::make_numeric_scalar(cudf::data_type(cudf::type_id::INT32), stream.value()); zero->set_valid(true, stream); static_cast<ScalarType *>(zero.get())->set_value(0, stream); auto step = cudf::make_numeric_scalar(cudf::data_type(cudf::type_id::INT32), stream.value()); step->set_valid(true, stream); static_cast<ScalarType *>(step.get()) ->set_value(static_cast<cudf::size_type>(size_per_row), stream); std::vector<std::unique_ptr<cudf::column>> ret; for (cudf::size_type row_start = 0; row_start < num_rows; row_start += max_rows_per_batch) { cudf::size_type row_count = num_rows - row_start; row_count = row_count > max_rows_per_batch ? 
max_rows_per_batch : row_count; ret.emplace_back(fixed_width_convert_to_rows( row_start, row_count, num_columns, size_per_row, dev_column_start, dev_column_size, dev_input_data, dev_input_nm, *zero, *step, stream, mr)); } return ret; } else { CUDF_FAIL("Only fixed width types are currently supported"); } } std::unique_ptr<cudf::table> convert_from_rows(cudf::lists_column_view const &input, std::vector<cudf::data_type> const &schema, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) { // verify that the types are what we expect cudf::column_view child = input.child(); cudf::type_id list_type = child.type().id(); CUDF_EXPECTS(list_type == cudf::type_id::INT8 || list_type == cudf::type_id::UINT8, "Only a list of bytes is supported as input"); cudf::size_type num_columns = schema.size(); if (are_all_fixed_width(schema)) { std::vector<cudf::size_type> column_start; std::vector<cudf::size_type> column_size; cudf::size_type num_rows = input.parent().size(); int32_t size_per_row = compute_fixed_width_layout(schema, column_start, column_size); // Ideally we would check that the offsets are all the same, etc. but for now // this is probably fine CUDF_EXPECTS(size_per_row * num_rows == child.size(), "The layout of the data appears to be off"); auto dev_column_start = copy_to_dev_async(column_start, stream, mr); auto dev_column_size = copy_to_dev_async(column_size, stream, mr); // Allocate the columns we are going to write into std::vector<std::unique_ptr<cudf::column>> output_columns; std::vector<int8_t *> output_data; std::vector<cudf::bitmask_type *> output_nm; for (cudf::size_type i = 0; i < num_columns; i++) { auto column = cudf::make_fixed_width_column(schema[i], num_rows, cudf::mask_state::UNINITIALIZED, stream, mr); auto mut = column->mutable_view(); output_data.emplace_back(mut.data<int8_t>()); output_nm.emplace_back(mut.null_mask()); output_columns.emplace_back(std::move(column)); } auto dev_output_data = copy_to_dev_async(output_data, stream, mr); auto dev_output_nm = copy_to_dev_async(output_nm, stream, mr); dim3 blocks; dim3 threads; int shared_size = calc_fixed_width_kernel_dims(num_columns, num_rows, size_per_row, blocks, threads); hipLaunchKernelGGL(( copy_to_fixed_width_columns), dim3(blocks), dim3(threads), shared_size, stream.value(), num_rows, num_columns, size_per_row, dev_column_start->data(), dev_column_size->data(), dev_output_data->data(), dev_output_nm->data(), child.data<int8_t>()); return std::make_unique<cudf::table>(std::move(output_columns)); } else { CUDF_FAIL("Only fixed width types are currently supported"); } } } // namespace java } // namespace cudf
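// Hedged sketch: a minimal host-only illustration of how compute_fixed_width_layout() above
// packs fixed-width columns into one row: each column is aligned to its own size, one validity
// bit per column is appended at the end, and the row is padded to 64 bits. The column sizes
// used here (4, 8 and 1 bytes) are hypothetical examples, not values taken from the file above.
#include <cstdint>
#include <cstdio>
#include <vector>

static int32_t align_offset_sketch(int32_t offset, std::size_t alignment) {
  return (offset + alignment - 1) & ~(alignment - 1); // same formula as align_offset() above
}

int main() {
  std::vector<int32_t> sizes = {4, 8, 1}; // hypothetical column widths in bytes
  int32_t at_offset = 0;
  for (int32_t s : sizes) {
    at_offset = align_offset_sketch(at_offset, s); // fixed-width types: alignment == size
    std::printf("column of %d bytes starts at offset %d\n", s, at_offset);
    at_offset += s;
  }
  at_offset += (int32_t)((sizes.size() + 7) / 8);  // one validity bit per column, byte aligned
  std::printf("row size padded to 64 bits: %d\n", align_offset_sketch(at_offset, 8));
  return 0;
}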
0d00205a141aee2051c53ff7236d198000887f36.cu
/* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <iostream> #include <limits> #include <cudf/column/column_factories.hpp> #include <cudf/detail/sequence.hpp> #include <cudf/scalar/scalar_factories.hpp> #include <cudf/table/table.hpp> #include <cudf/utilities/bit.hpp> #include <cudf/utilities/error.hpp> #include <cudf/utilities/traits.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include "row_conversion.hpp" namespace cudf { namespace java { /** * Copy a simple vector to device memory asynchronously. Be sure to read * the data on the same stream as is used to copy it. */ template <typename T> std::unique_ptr<rmm::device_uvector<T>> copy_to_dev_async(const std::vector<T> &input, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) { std::unique_ptr<rmm::device_uvector<T>> ret(new rmm::device_uvector<T>(input.size(), stream, mr)); CUDA_TRY(cudaMemcpyAsync(ret->data(), input.data(), sizeof(T) * input.size(), cudaMemcpyHostToDevice, stream.value())); return ret; } __global__ void copy_to_fixed_width_columns(const cudf::size_type num_rows, const cudf::size_type num_columns, const cudf::size_type row_size, const cudf::size_type *input_offset_in_row, const cudf::size_type *num_bytes, int8_t **output_data, cudf::bitmask_type **output_nm, const int8_t *input_data) { // We are going to copy the data in two passes. // The first pass copies a chunk of data into shared memory. // The second pass copies that chunk from shared memory out to the final location. // Because shared memory is limited we copy a subset of the rows at a time. // For simplicity we will refer to this as a row_group // In practice we have found writing more than 4 columns of data per thread // results in performance loss. As such we are using a 2 dimensional // kernel in terms of threads, but not in terms of blocks. Columns are // controlled by the y dimension (there is no y dimension in blocks). Rows // are controlled by the x dimension (there are multiple blocks in the x // dimension). 
cudf::size_type rows_per_group = blockDim.x; cudf::size_type row_group_start = blockIdx.x; cudf::size_type row_group_stride = gridDim.x; cudf::size_type row_group_end = (num_rows + rows_per_group - 1) / rows_per_group + 1; extern __shared__ int8_t shared_data[]; // Because we are copying fixed width only data and we stride the rows // this thread will always start copying from shared data in the same place int8_t *row_tmp = &shared_data[row_size * threadIdx.x]; int8_t *row_vld_tmp = &row_tmp[input_offset_in_row[num_columns - 1] + num_bytes[num_columns - 1]]; for (cudf::size_type row_group_index = row_group_start; row_group_index < row_group_end; row_group_index += row_group_stride) { // Step 1: Copy the data into shared memory // We know row_size is always aligned with and a multiple of int64_t; int64_t *long_shared = reinterpret_cast<int64_t *>(shared_data); const int64_t *long_input = reinterpret_cast<int64_t const *>(input_data); cudf::size_type shared_output_index = threadIdx.x + (threadIdx.y * blockDim.x); cudf::size_type shared_output_stride = blockDim.x * blockDim.y; cudf::size_type row_index_end = ((row_group_index + 1) * rows_per_group); if (row_index_end > num_rows) { row_index_end = num_rows; } cudf::size_type num_rows_in_group = row_index_end - (row_group_index * rows_per_group); cudf::size_type shared_length = row_size * num_rows_in_group; cudf::size_type shared_output_end = shared_length / sizeof(int64_t); cudf::size_type start_input_index = (row_size * row_group_index * rows_per_group) / sizeof(int64_t); for (cudf::size_type shared_index = shared_output_index; shared_index < shared_output_end; shared_index += shared_output_stride) { long_shared[shared_index] = long_input[start_input_index + shared_index]; } // Wait for all of the data to be in shared memory __syncthreads(); // Step 2 copy the data back out // Within the row group there should be 1 thread for each row. This is a // requirement for launching the kernel cudf::size_type row_index = (row_group_index * rows_per_group) + threadIdx.x; // But we might not use all of the threads if the number of rows does not go // evenly into the thread count. We don't want those threads to exit yet // because we may need them to copy data in for the next row group. uint32_t active_mask = __ballot_sync(0xffffffff, row_index < num_rows); if (row_index < num_rows) { cudf::size_type col_index_start = threadIdx.y; cudf::size_type col_index_stride = blockDim.y; for (cudf::size_type col_index = col_index_start; col_index < num_columns; col_index += col_index_stride) { cudf::size_type col_size = num_bytes[col_index]; const int8_t *col_tmp = &(row_tmp[input_offset_in_row[col_index]]); int8_t *col_output = output_data[col_index]; switch (col_size) { case 1: { col_output[row_index] = *col_tmp; break; } case 2: { int16_t *short_col_output = reinterpret_cast<int16_t *>(col_output); short_col_output[row_index] = *reinterpret_cast<const int16_t *>(col_tmp); break; } case 4: { int32_t *int_col_output = reinterpret_cast<int32_t *>(col_output); int_col_output[row_index] = *reinterpret_cast<const int32_t *>(col_tmp); break; } case 8: { int64_t *long_col_output = reinterpret_cast<int64_t *>(col_output); long_col_output[row_index] = *reinterpret_cast<const int64_t *>(col_tmp); break; } default: { cudf::size_type output_offset = col_size * row_index; // TODO this should just not be supported for fixed width columns, but just in case... 
for (cudf::size_type b = 0; b < col_size; b++) { col_output[b + output_offset] = col_tmp[b]; } break; } } cudf::bitmask_type *nm = output_nm[col_index]; int8_t *valid_byte = &row_vld_tmp[col_index / 8]; cudf::size_type byte_bit_offset = col_index % 8; int predicate = *valid_byte & (1 << byte_bit_offset); uint32_t bitmask = __ballot_sync(active_mask, predicate); if (row_index % 32 == 0) { nm[word_index(row_index)] = bitmask; } } // end column loop } // end row copy // wait for the row_group to be totally copied before starting on the next row group __syncthreads(); } } __global__ void copy_from_fixed_width_columns(const cudf::size_type start_row, const cudf::size_type num_rows, const cudf::size_type num_columns, const cudf::size_type row_size, const cudf::size_type *output_offset_in_row, const cudf::size_type *num_bytes, const int8_t **input_data, const cudf::bitmask_type **input_nm, int8_t *output_data) { // We are going to copy the data in two passes. // The first pass copies a chunk of data into shared memory. // The second pass copies that chunk from shared memory out to the final location. // Because shared memory is limited we copy a subset of the rows at a time. // We do not support copying a subset of the columns in a row yet, so we don't // currently support a row that is wider than shared memory. // For simplicity we will refer to this as a row_group // In practice we have found reading more than 4 columns of data per thread // results in performance loss. As such we are using a 2 dimensional // kernel in terms of threads, but not in terms of blocks. Columns are // controlled by the y dimension (there is no y dimension in blocks). Rows // are controlled by the x dimension (there are multiple blocks in the x // dimension). cudf::size_type rows_per_group = blockDim.x; cudf::size_type row_group_start = blockIdx.x; cudf::size_type row_group_stride = gridDim.x; cudf::size_type row_group_end = (num_rows + rows_per_group - 1) / rows_per_group + 1; extern __shared__ int8_t shared_data[]; // Because we are copying fixed width only data and we stride the rows // this thread will always start copying to shared data in the same place int8_t *row_tmp = &shared_data[row_size * threadIdx.x]; int8_t *row_vld_tmp = &row_tmp[output_offset_in_row[num_columns - 1] + num_bytes[num_columns - 1]]; for (cudf::size_type row_group_index = row_group_start; row_group_index < row_group_end; row_group_index += row_group_stride) { // Within the row group there should be 1 thread for each row. This is a // requirement for launching the kernel cudf::size_type row_index = start_row + (row_group_index * rows_per_group) + threadIdx.x; // But we might not use all of the threads if the number of rows does not go // evenly into the thread count. We don't want those threads to exit yet // because we may need them to copy data back out. 
if (row_index < (start_row + num_rows)) { cudf::size_type col_index_start = threadIdx.y; cudf::size_type col_index_stride = blockDim.y; for (cudf::size_type col_index = col_index_start; col_index < num_columns; col_index += col_index_stride) { cudf::size_type col_size = num_bytes[col_index]; int8_t *col_tmp = &(row_tmp[output_offset_in_row[col_index]]); const int8_t *col_input = input_data[col_index]; switch (col_size) { case 1: { *col_tmp = col_input[row_index]; break; } case 2: { const int16_t *short_col_input = reinterpret_cast<const int16_t *>(col_input); *reinterpret_cast<int16_t *>(col_tmp) = short_col_input[row_index]; break; } case 4: { const int32_t *int_col_input = reinterpret_cast<const int32_t *>(col_input); *reinterpret_cast<int32_t *>(col_tmp) = int_col_input[row_index]; break; } case 8: { const int64_t *long_col_input = reinterpret_cast<const int64_t *>(col_input); *reinterpret_cast<int64_t *>(col_tmp) = long_col_input[row_index]; break; } default: { cudf::size_type input_offset = col_size * row_index; // TODO this should just not be supported for fixed width columns, but just in case... for (cudf::size_type b = 0; b < col_size; b++) { col_tmp[b] = col_input[b + input_offset]; } break; } } // atomicOr only works on 32 bit or 64 bit aligned values, and not byte aligned // so we have to rewrite the addresses to make sure that it is 4 byte aligned int8_t *valid_byte = &row_vld_tmp[col_index / 8]; cudf::size_type byte_bit_offset = col_index % 8; uint64_t fixup_bytes = reinterpret_cast<uint64_t>(valid_byte) % 4; int32_t *valid_int = reinterpret_cast<int32_t *>(valid_byte - fixup_bytes); cudf::size_type int_bit_offset = byte_bit_offset + (fixup_bytes * 8); // Now copy validity for the column if (input_nm[col_index]) { if (bit_is_set(input_nm[col_index], row_index)) { atomicOr_block(valid_int, 1 << int_bit_offset); } else { atomicAnd_block(valid_int, ~(1 << int_bit_offset)); } } else { // It is valid so just set the bit atomicOr_block(valid_int, 1 << int_bit_offset); } } // end column loop } // end row copy // wait for the row_group to be totally copied into shared memory __syncthreads(); // Step 2: Copy the data back out // We know row_size is always aligned with and a multiple of int64_t; int64_t *long_shared = reinterpret_cast<int64_t *>(shared_data); int64_t *long_output = reinterpret_cast<int64_t *>(output_data); cudf::size_type shared_input_index = threadIdx.x + (threadIdx.y * blockDim.x); cudf::size_type shared_input_stride = blockDim.x * blockDim.y; cudf::size_type row_index_end = ((row_group_index + 1) * rows_per_group); if (row_index_end > num_rows) { row_index_end = num_rows; } cudf::size_type num_rows_in_group = row_index_end - (row_group_index * rows_per_group); cudf::size_type shared_length = row_size * num_rows_in_group; cudf::size_type shared_input_end = shared_length / sizeof(int64_t); cudf::size_type start_output_index = (row_size * row_group_index * rows_per_group) / sizeof(int64_t); for (cudf::size_type shared_index = shared_input_index; shared_index < shared_input_end; shared_index += shared_input_stride) { long_output[start_output_index + shared_index] = long_shared[shared_index]; } __syncthreads(); // Go for the next round } } /** * Calculate the dimensions of the kernel for fixed width only columns. * @param [in] num_columns the number of columns being copied. * @param [in] num_rows the number of rows being copied. * @param [in] size_per_row the size each row takes up when padded. 
* @param [out] blocks the size of the blocks for the kernel * @param [out] threads the size of the threads for the kernel * @return the size in bytes of shared memory needed for each block. */ static int calc_fixed_width_kernel_dims(const cudf::size_type num_columns, const cudf::size_type num_rows, const cudf::size_type size_per_row, dim3 &blocks, dim3 &threads) { // We have found speed degrades when a thread handles more than 4 columns. // Each block is 2 dimensional. The y dimension indicates the columns. // We limit this to 32 threads in the y dimension so we can still // have at least 32 threads in the x dimension (1 warp) which should // result in better coalescing of memory operations. We also // want to guarantee that we are processing a multiple of 32 threads // in the x dimension because we use atomic operations at the block // level when writing validity data out to main memory, and that would // need to change if we split a word of validity data between blocks. int y_block_size = (num_columns + 3) / 4; if (y_block_size > 32) { y_block_size = 32; } int x_possible_block_size = 1024 / y_block_size; // 48KB is the default setting for shared memory per block according to the cuda tutorials // If someone configures the GPU to only have 16 KB this might not work. int max_shared_size = 48 * 1024; int max_block_size = max_shared_size / size_per_row; // If we don't have enough shared memory there is no point in having more threads // per block that will just sit idle max_block_size = max_block_size > x_possible_block_size ? x_possible_block_size : max_block_size; // Make sure that the x dimension is a multiple of 32 this not only helps // coalesce memory access it also lets us do a ballot sync for validity to write // the data back out the warp level. If x is a multiple of 32 then each thread in the y // dimension is associated with one or more warps, that should correspond to the validity // words directly. int block_size = (max_block_size / 32) * 32; CUDF_EXPECTS(block_size != 0, "Row size is too large to fit in shared memory"); int num_blocks = (num_rows + block_size - 1) / block_size; if (num_blocks < 1) { num_blocks = 1; } else if (num_blocks > 10240) { // The maximum number of blocks supported in the x dimension is 2 ^ 31 - 1 // but in practice haveing too many can cause some overhead that I don't totally // understand. Playing around with this haveing as little as 600 blocks appears // to be able to saturate memory on V100, so this is an order of magnitude higher // to try and future proof this a bit. num_blocks = 10240; } blocks.x = num_blocks; blocks.y = 1; blocks.z = 1; threads.x = block_size; threads.y = y_block_size; threads.z = 1; return size_per_row * block_size; } /** * When converting to rows it is possible that the size of the table was too big to fit * in a single column. This creates an output column for a subset of the rows in a table * going from start row and containing the next num_rows. Most of the parameters passed * into this function are common between runs and should be calculated once. 
*/ static std::unique_ptr<cudf::column> fixed_width_convert_to_rows( const cudf::size_type start_row, const cudf::size_type num_rows, const cudf::size_type num_columns, const cudf::size_type size_per_row, std::unique_ptr<rmm::device_uvector<cudf::size_type>> &column_start, std::unique_ptr<rmm::device_uvector<cudf::size_type>> &column_size, std::unique_ptr<rmm::device_uvector<const int8_t *>> &input_data, std::unique_ptr<rmm::device_uvector<const cudf::bitmask_type *>> &input_nm, const cudf::scalar &zero, const cudf::scalar &scalar_size_per_row, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) { int64_t total_allocation = size_per_row * num_rows; // We made a mistake in the split somehow CUDF_EXPECTS(total_allocation < std::numeric_limits<int>::max(), "Table is too large to fit!"); // Allocate and set the offsets row for the byte array std::unique_ptr<cudf::column> offsets = cudf::detail::sequence(num_rows + 1, zero, scalar_size_per_row, stream); std::unique_ptr<cudf::column> data = cudf::make_numeric_column( cudf::data_type(cudf::type_id::INT8), static_cast<cudf::size_type>(total_allocation), cudf::mask_state::UNALLOCATED, stream, mr); dim3 blocks; dim3 threads; int shared_size = calc_fixed_width_kernel_dims(num_columns, num_rows, size_per_row, blocks, threads); copy_from_fixed_width_columns<<<blocks, threads, shared_size, stream.value()>>>( start_row, num_rows, num_columns, size_per_row, column_start->data(), column_size->data(), input_data->data(), input_nm->data(), data->mutable_view().data<int8_t>()); return cudf::make_lists_column(num_rows, std::move(offsets), std::move(data), 0, rmm::device_buffer{0, rmm::cuda_stream_default, mr}, stream, mr); } static cudf::data_type get_data_type(const cudf::column_view &v) { return v.type(); } static bool is_fixed_width(const cudf::data_type &t) { return cudf::is_fixed_width(t); } static inline int32_t align_offset(int32_t offset, std::size_t alignment) { return (offset + alignment - 1) & ~(alignment - 1); } static inline bool are_all_fixed_width(std::vector<cudf::data_type> const &schema) { return std::all_of(schema.begin(), schema.end(), cudf::java::is_fixed_width); } /** * Given a set of fixed width columns, calculate how the data will be laid out in memory. * @param [in] schema the types of columns that need to be laid out. * @param [out] column_start the byte offset where each column starts in the row. * @param [out] column_size the size in bytes of the data for each columns in the row. * @return the size in bytes each row needs. */ static inline int32_t compute_fixed_width_layout(std::vector<cudf::data_type> const &schema, std::vector<cudf::size_type> &column_start, std::vector<cudf::size_type> &column_size) { // We guarantee that the start of each column is 64-bit aligned so anything can go // there, but to make the code simple we will still do an alignment for it. 
int32_t at_offset = 0; for (auto col = schema.begin(); col < schema.end(); col++) { cudf::size_type s = cudf::size_of(*col); column_size.emplace_back(s); std::size_t allocation_needed = s; std::size_t alignment_needed = allocation_needed; // They are the same for fixed width types at_offset = align_offset(at_offset, alignment_needed); column_start.emplace_back(at_offset); at_offset += allocation_needed; } // Now we need to add in space for validity // Eventually we can think about nullable vs not nullable, but for now we will just always add it // in int32_t validity_bytes_needed = (schema.size() + 7) / 8; // validity comes at the end and is byte aligned so we can pack more in. at_offset += validity_bytes_needed; // Now we need to pad the end so all rows are 64 bit aligned return align_offset(at_offset, 8); // 8 bytes (64 bits) } std::vector<std::unique_ptr<cudf::column>> convert_to_rows(cudf::table_view const &tbl, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) { const cudf::size_type num_columns = tbl.num_columns(); std::vector<cudf::data_type> schema; schema.resize(num_columns); std::transform(tbl.begin(), tbl.end(), schema.begin(), cudf::java::get_data_type); if (are_all_fixed_width(schema)) { std::vector<cudf::size_type> column_start; std::vector<cudf::size_type> column_size; int32_t size_per_row = compute_fixed_width_layout(schema, column_start, column_size); auto dev_column_start = copy_to_dev_async(column_start, stream, mr); auto dev_column_size = copy_to_dev_async(column_size, stream, mr); int32_t max_rows_per_batch = std::numeric_limits<int>::max() / size_per_row; // Make the number of rows per batch a multiple of 32 so we don't have to worry about // splitting validity at a specific row offset. This might change in the future. max_rows_per_batch = (max_rows_per_batch / 32) * 32; cudf::size_type num_rows = tbl.num_rows(); // Get the pointers to the input columnar data ready std::vector<const int8_t *> input_data; std::vector<cudf::bitmask_type const *> input_nm; for (cudf::size_type column_number = 0; column_number < num_columns; column_number++) { cudf::column_view cv = tbl.column(column_number); input_data.emplace_back(cv.data<int8_t>()); input_nm.emplace_back(cv.null_mask()); } auto dev_input_data = copy_to_dev_async(input_data, stream, mr); auto dev_input_nm = copy_to_dev_async(input_nm, stream, mr); using ScalarType = cudf::scalar_type_t<cudf::size_type>; auto zero = cudf::make_numeric_scalar(cudf::data_type(cudf::type_id::INT32), stream.value()); zero->set_valid(true, stream); static_cast<ScalarType *>(zero.get())->set_value(0, stream); auto step = cudf::make_numeric_scalar(cudf::data_type(cudf::type_id::INT32), stream.value()); step->set_valid(true, stream); static_cast<ScalarType *>(step.get()) ->set_value(static_cast<cudf::size_type>(size_per_row), stream); std::vector<std::unique_ptr<cudf::column>> ret; for (cudf::size_type row_start = 0; row_start < num_rows; row_start += max_rows_per_batch) { cudf::size_type row_count = num_rows - row_start; row_count = row_count > max_rows_per_batch ? 
max_rows_per_batch : row_count; ret.emplace_back(fixed_width_convert_to_rows( row_start, row_count, num_columns, size_per_row, dev_column_start, dev_column_size, dev_input_data, dev_input_nm, *zero, *step, stream, mr)); } return ret; } else { CUDF_FAIL("Only fixed width types are currently supported"); } } std::unique_ptr<cudf::table> convert_from_rows(cudf::lists_column_view const &input, std::vector<cudf::data_type> const &schema, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) { // verify that the types are what we expect cudf::column_view child = input.child(); cudf::type_id list_type = child.type().id(); CUDF_EXPECTS(list_type == cudf::type_id::INT8 || list_type == cudf::type_id::UINT8, "Only a list of bytes is supported as input"); cudf::size_type num_columns = schema.size(); if (are_all_fixed_width(schema)) { std::vector<cudf::size_type> column_start; std::vector<cudf::size_type> column_size; cudf::size_type num_rows = input.parent().size(); int32_t size_per_row = compute_fixed_width_layout(schema, column_start, column_size); // Ideally we would check that the offsets are all the same, etc. but for now // this is probably fine CUDF_EXPECTS(size_per_row * num_rows == child.size(), "The layout of the data appears to be off"); auto dev_column_start = copy_to_dev_async(column_start, stream, mr); auto dev_column_size = copy_to_dev_async(column_size, stream, mr); // Allocate the columns we are going to write into std::vector<std::unique_ptr<cudf::column>> output_columns; std::vector<int8_t *> output_data; std::vector<cudf::bitmask_type *> output_nm; for (cudf::size_type i = 0; i < num_columns; i++) { auto column = cudf::make_fixed_width_column(schema[i], num_rows, cudf::mask_state::UNINITIALIZED, stream, mr); auto mut = column->mutable_view(); output_data.emplace_back(mut.data<int8_t>()); output_nm.emplace_back(mut.null_mask()); output_columns.emplace_back(std::move(column)); } auto dev_output_data = copy_to_dev_async(output_data, stream, mr); auto dev_output_nm = copy_to_dev_async(output_nm, stream, mr); dim3 blocks; dim3 threads; int shared_size = calc_fixed_width_kernel_dims(num_columns, num_rows, size_per_row, blocks, threads); copy_to_fixed_width_columns<<<blocks, threads, shared_size, stream.value()>>>( num_rows, num_columns, size_per_row, dev_column_start->data(), dev_column_size->data(), dev_output_data->data(), dev_output_nm->data(), child.data<int8_t>()); return std::make_unique<cudf::table>(std::move(output_columns)); } else { CUDF_FAIL("Only fixed width types are currently supported"); } } } // namespace java } // namespace cudf
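// Hedged sketch: host-side arithmetic mirroring the 4-byte alignment fixup performed before
// atomicOr_block()/atomicAnd_block() in copy_from_fixed_width_columns above. The address and
// bit position below are made-up example values used only to show the offset math.
#include <cstdint>
#include <cstdio>

int main() {
  uint64_t valid_byte_addr = 0x1006;  // hypothetical address of a validity byte (not 4-byte aligned)
  int byte_bit_offset = 3;            // bit within that byte

  uint64_t fixup_bytes    = valid_byte_addr % 4;            // 2 bytes past the aligned word
  uint64_t aligned_addr   = valid_byte_addr - fixup_bytes;  // 0x1004, safe for a 32-bit atomic
  int      int_bit_offset = byte_bit_offset + (int)(fixup_bytes * 8); // 19

  std::printf("atomic word at 0x%llx, bit %d\n", (unsigned long long)aligned_addr, int_bit_offset);
  return 0;
}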
dc47296f949674cd1cd9a486d1cc80f39da0d7c9.hip
// !!! This is a file automatically generated by hipify!!! // includes, system #include <stdio.h> #include <math.h> #include "KernelUtils.h" #include "HostUtils.h" #include "BuildGraphKernelSequential.h" /***************************************************** * * * SEQUENTIAL ALGORITHMS * * - Execute sequential algorithms for gene network analisys * - Implementation base for time compare. * *****************************************************/ void kernelBuildGraphStatesSequentialByDinamicProgramming( int *regulationMatrix, int regMatrixSizeX, int regMatrixSizeY, long long int *countSolutions, long long int sizeCountSolutions ) { int *cache = (int*) malloc( sizeof(int) * regMatrixSizeX * regMatrixSizeY ); int *cacheSum = (int*) malloc( sizeof(int) * regMatrixSizeY ); for (int i = 0; i < (regMatrixSizeX * regMatrixSizeY); i++) { cache[i] = 0; if( i < regMatrixSizeY ) { cacheSum[i] = 0; } } // Dimanic Programming algorithm: buildGraphStates for (long long int i = 0; i < sizeCountSolutions; i++) { // ^ is XOR bit operation long long int v1 = i ^ (i >> 1); long long int graycodeBef = (i - 1) ^ ((i - 1) >> 1); long int bitChanged = (graycodeBef ^ v1); long int bitChangedIdx = (bitChanged == 0 ? 0 : log2( (float) bitChanged )); long long int v2 = 0; long long int ONE = 1; long long int col = (regMatrixSizeX - bitChangedIdx - 1); // bitQPermanent = (v1 / (long long int) powf( 2, row )) % 2; long long int bit = (v1 / (ONE << (regMatrixSizeX - col - 1))) & ONE; long long int bitBef = (graycodeBef / (ONE << (regMatrixSizeX - col - 1))) & ONE; long long int number = v1; for (int row = 0; row < regMatrixSizeX && number > 0; row++) { int idxMatrix = row * regMatrixSizeY + col; // cacheSum[row] -= cache[idxMatrix]; // // cache[idxMatrix] = regulationMatrix[idxMatrix] * bit; // // cacheSum[row] += cache[idxMatrix]; cacheSum[row] += (regulationMatrix[idxMatrix] * bit) - (regulationMatrix[idxMatrix] * bitBef); } // Normalization for (int row = 0; row < regMatrixSizeX && number > 0; row++) { long long int bitPathernV2 = 0; long long int bitQPermanent = (v1 / (ONE << (regMatrixSizeX - row - 1))) & 1; bitPathernV2 = cacheSum[row] == 0 ? bitQPermanent : cacheSum[row] < 0 ? 0 : 1; long long int one = 1; // v2 += bitPathernV2 * (long long int) powf( 2, (regMatrixSizeX - row // - 1) ); v2 += bitPathernV2 * (one << (regMatrixSizeX - row - 1)); } // Exists a vertex from v1 to v2 countSolutions[v1] = v2; } free( cache ); free( cacheSum ); } void kernelBuildGraphStatesSequential( int *regulationMatriz, int regMatrixSizeX, int regMatrixSizeY, long long int *countSolutions, long long int sizeCountSolutions ) { for (int i = 0; i < sizeCountSolutions; i++) { long long int v1 = i; long long int v2 = 0; for (int row = 0; row < regMatrixSizeY; row++) { long long int number = v1; long long int bitPathernV2 = 0; long long int bitQPermanent = 0; for (int col = regMatrixSizeX - 1; (col >= 0) && (number > 0); col--) { int idxMatrix = row * regMatrixSizeY + col; // Generate digits in reverse order // long long int bitQ = number % 2; long long int bitQ = number & 1; if( row == col ) { bitQPermanent = bitQ; } bitPathernV2 += regulationMatriz[idxMatrix] * bitQ; number /= 2; } // Normalization bitPathernV2 = bitPathernV2 == 0 ? bitQPermanent : bitPathernV2 < 0 ? 
0 : 1; // Exists an arc v1 -> outV2 long long int one = 1; // v2 += bitPathernV2 * (long long int) powf( 2, (regMatrixSizeX - row // - 1) ); v2 += bitPathernV2 * (one << (regMatrixSizeX - row - 1)); } // Exists a vertex from v1 to v2 countSolutions[i] = v2; } } long long int * executeGeneNetworkKernelSequential( int *regulationMatrix, int m, int n, long long int *outSizeCountSolutions ) { printf( "\n----------- Start: executeGeneNetworkKernelSequential --------- \n\n" ); long long int sizeCountSolutions = (long long int) pow( 2.0, m ); *(outSizeCountSolutions) = sizeCountSolutions; long long int memCountSolutions = sizeCountSolutions * (long long int) sizeof(long long int); long long int *countSolutions; countSolutions = NULL; countSolutions = (long long int*) malloc( memCountSolutions ); if( countSolutions == NULL || countSolutions <= 0 ) { printf( "Host error: countSolutions Memory Allocation \n" ); exit( 0 ); } // Calculate total time for algorithm execution hipEvent_t startTotalTime, stopTotalTime; float totalTime; hipEventCreate( &startTotalTime ); hipEventCreate( &stopTotalTime ); hipEventRecord( startTotalTime, 0 ); kernelBuildGraphStatesSequentialByDinamicProgramming( regulationMatrix, m, n, countSolutions, sizeCountSolutions ); hipEventRecord( stopTotalTime, 0 ); hipEventSynchronize( stopTotalTime ); hipEventElapsedTime( &totalTime, startTotalTime, stopTotalTime ); hipEventDestroy( startTotalTime ); hipEventDestroy( stopTotalTime ); printf( "kernelBuildGraphStatesSequential time %f s \n", (totalTime / 1000) ); printf( "............End: executeGeneNetworkKernelSequential \n\n" ); return countSolutions; } long long int * executeGeneNetworkKernelSequentialByDinamicalProgramming( int *regulationMatrix, int m, int n, long long int *outSizeCountSolutions ) { printf( "\n----------- Start: executeGeneNetworkKernelSequential --------- \n\n" ); long long int sizeCountSolutions = (long long int) pow( 2.0, m ); *(outSizeCountSolutions) = sizeCountSolutions; long long int memCountSolutions = sizeCountSolutions * (long long int) sizeof(long long int); long long int *countSolutions; countSolutions = NULL; countSolutions = (long long int*) malloc( memCountSolutions ); if( countSolutions == NULL || countSolutions <= 0 ) { printf( "Host error: countSolutions Memory Allocation \n" ); exit( 0 ); } // Calculate total time for algorithm execution hipEvent_t startTotalTime, stopTotalTime; float totalTime; hipEventCreate( &startTotalTime ); hipEventCreate( &stopTotalTime ); hipEventRecord( startTotalTime, 0 ); kernelBuildGraphStatesSequentialByDinamicProgramming( regulationMatrix, m, n, countSolutions, sizeCountSolutions ); hipEventRecord( stopTotalTime, 0 ); hipEventSynchronize( stopTotalTime ); hipEventElapsedTime( &totalTime, startTotalTime, stopTotalTime ); hipEventDestroy( startTotalTime ); hipEventDestroy( stopTotalTime ); printf( "kernelBuildGraphStatesSequential time %f s \n", (totalTime / 1000) ); printf( "............End: executeGeneNetworkKernelSequential \n\n" ); return countSolutions; }
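// Hedged sketch: a standalone host illustration of the Gray-code stepping used by
// kernelBuildGraphStatesSequentialByDinamicProgramming above (v1 = i ^ (i >> 1), with the
// single changed bit located via log2). Only the loop bound (8) is an arbitrary choice here.
#include <cstdio>
#include <cmath>

int main() {
  for (long long i = 1; i < 8; i++) {
    long long v1  = i ^ (i >> 1);              // current Gray code
    long long bef = (i - 1) ^ ((i - 1) >> 1);  // previous Gray code
    long long changed = v1 ^ bef;              // exactly one bit differs between neighbours
    int idx = (int)std::log2((double)changed); // same idea as the kernel's bitChangedIdx
    std::printf("i=%lld gray=%lld flipped bit %d\n", i, v1, idx);
  }
  return 0;
}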
dc47296f949674cd1cd9a486d1cc80f39da0d7c9.cu
// includes, system #include <stdio.h> #include <math.h> #include "KernelUtils.h" #include "HostUtils.h" #include "BuildGraphKernelSequential.h" /***************************************************** * * * SEQUENTIAL ALGORITHMS * * - Execute sequential algorithms for gene network analisys * - Implementation base for time compare. * *****************************************************/ void kernelBuildGraphStatesSequentialByDinamicProgramming( int *regulationMatrix, int regMatrixSizeX, int regMatrixSizeY, long long int *countSolutions, long long int sizeCountSolutions ) { int *cache = (int*) malloc( sizeof(int) * regMatrixSizeX * regMatrixSizeY ); int *cacheSum = (int*) malloc( sizeof(int) * regMatrixSizeY ); for (int i = 0; i < (regMatrixSizeX * regMatrixSizeY); i++) { cache[i] = 0; if( i < regMatrixSizeY ) { cacheSum[i] = 0; } } // Dimanic Programming algorithm: buildGraphStates for (long long int i = 0; i < sizeCountSolutions; i++) { // ^ is XOR bit operation long long int v1 = i ^ (i >> 1); long long int graycodeBef = (i - 1) ^ ((i - 1) >> 1); long int bitChanged = (graycodeBef ^ v1); long int bitChangedIdx = (bitChanged == 0 ? 0 : log2( (float) bitChanged )); long long int v2 = 0; long long int ONE = 1; long long int col = (regMatrixSizeX - bitChangedIdx - 1); // bitQPermanent = (v1 / (long long int) powf( 2, row )) % 2; long long int bit = (v1 / (ONE << (regMatrixSizeX - col - 1))) & ONE; long long int bitBef = (graycodeBef / (ONE << (regMatrixSizeX - col - 1))) & ONE; long long int number = v1; for (int row = 0; row < regMatrixSizeX && number > 0; row++) { int idxMatrix = row * regMatrixSizeY + col; // cacheSum[row] -= cache[idxMatrix]; // // cache[idxMatrix] = regulationMatrix[idxMatrix] * bit; // // cacheSum[row] += cache[idxMatrix]; cacheSum[row] += (regulationMatrix[idxMatrix] * bit) - (regulationMatrix[idxMatrix] * bitBef); } // Normalization for (int row = 0; row < regMatrixSizeX && number > 0; row++) { long long int bitPathernV2 = 0; long long int bitQPermanent = (v1 / (ONE << (regMatrixSizeX - row - 1))) & 1; bitPathernV2 = cacheSum[row] == 0 ? bitQPermanent : cacheSum[row] < 0 ? 0 : 1; long long int one = 1; // v2 += bitPathernV2 * (long long int) powf( 2, (regMatrixSizeX - row // - 1) ); v2 += bitPathernV2 * (one << (regMatrixSizeX - row - 1)); } // Exists a vertex from v1 to v2 countSolutions[v1] = v2; } free( cache ); free( cacheSum ); } void kernelBuildGraphStatesSequential( int *regulationMatriz, int regMatrixSizeX, int regMatrixSizeY, long long int *countSolutions, long long int sizeCountSolutions ) { for (int i = 0; i < sizeCountSolutions; i++) { long long int v1 = i; long long int v2 = 0; for (int row = 0; row < regMatrixSizeY; row++) { long long int number = v1; long long int bitPathernV2 = 0; long long int bitQPermanent = 0; for (int col = regMatrixSizeX - 1; (col >= 0) && (number > 0); col--) { int idxMatrix = row * regMatrixSizeY + col; // Generate digits in reverse order // long long int bitQ = number % 2; long long int bitQ = number & 1; if( row == col ) { bitQPermanent = bitQ; } bitPathernV2 += regulationMatriz[idxMatrix] * bitQ; number /= 2; } // Normalization bitPathernV2 = bitPathernV2 == 0 ? bitQPermanent : bitPathernV2 < 0 ? 
0 : 1; // Exists an arc v1 -> outV2 long long int one = 1; // v2 += bitPathernV2 * (long long int) powf( 2, (regMatrixSizeX - row // - 1) ); v2 += bitPathernV2 * (one << (regMatrixSizeX - row - 1)); } // Exists a vertex from v1 to v2 countSolutions[i] = v2; } } long long int * executeGeneNetworkKernelSequential( int *regulationMatrix, int m, int n, long long int *outSizeCountSolutions ) { printf( "\n----------- Start: executeGeneNetworkKernelSequential --------- \n\n" ); long long int sizeCountSolutions = (long long int) pow( 2.0, m ); *(outSizeCountSolutions) = sizeCountSolutions; long long int memCountSolutions = sizeCountSolutions * (long long int) sizeof(long long int); long long int *countSolutions; countSolutions = NULL; countSolutions = (long long int*) malloc( memCountSolutions ); if( countSolutions == NULL || countSolutions <= 0 ) { printf( "Host error: countSolutions Memory Allocation \n" ); exit( 0 ); } // Calculate total time for algorithm execution cudaEvent_t startTotalTime, stopTotalTime; float totalTime; cudaEventCreate( &startTotalTime ); cudaEventCreate( &stopTotalTime ); cudaEventRecord( startTotalTime, 0 ); kernelBuildGraphStatesSequentialByDinamicProgramming( regulationMatrix, m, n, countSolutions, sizeCountSolutions ); cudaEventRecord( stopTotalTime, 0 ); cudaEventSynchronize( stopTotalTime ); cudaEventElapsedTime( &totalTime, startTotalTime, stopTotalTime ); cudaEventDestroy( startTotalTime ); cudaEventDestroy( stopTotalTime ); printf( "kernelBuildGraphStatesSequential time %f s \n", (totalTime / 1000) ); printf( "............End: executeGeneNetworkKernelSequential \n\n" ); return countSolutions; } long long int * executeGeneNetworkKernelSequentialByDinamicalProgramming( int *regulationMatrix, int m, int n, long long int *outSizeCountSolutions ) { printf( "\n----------- Start: executeGeneNetworkKernelSequential --------- \n\n" ); long long int sizeCountSolutions = (long long int) pow( 2.0, m ); *(outSizeCountSolutions) = sizeCountSolutions; long long int memCountSolutions = sizeCountSolutions * (long long int) sizeof(long long int); long long int *countSolutions; countSolutions = NULL; countSolutions = (long long int*) malloc( memCountSolutions ); if( countSolutions == NULL || countSolutions <= 0 ) { printf( "Host error: countSolutions Memory Allocation \n" ); exit( 0 ); } // Calculate total time for algorithm execution cudaEvent_t startTotalTime, stopTotalTime; float totalTime; cudaEventCreate( &startTotalTime ); cudaEventCreate( &stopTotalTime ); cudaEventRecord( startTotalTime, 0 ); kernelBuildGraphStatesSequentialByDinamicProgramming( regulationMatrix, m, n, countSolutions, sizeCountSolutions ); cudaEventRecord( stopTotalTime, 0 ); cudaEventSynchronize( stopTotalTime ); cudaEventElapsedTime( &totalTime, startTotalTime, stopTotalTime ); cudaEventDestroy( startTotalTime ); cudaEventDestroy( stopTotalTime ); printf( "kernelBuildGraphStatesSequential time %f s \n", (totalTime / 1000) ); printf( "............End: executeGeneNetworkKernelSequential \n\n" ); return countSolutions; }
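// Hedged sketch: a minimal, self-contained version of the cudaEvent timing pattern used by
// executeGeneNetworkKernelSequential above. The empty kernel and the <<<1, 32>>> launch are
// placeholders standing in for the real work; the event calls follow the same API as the file.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy_kernel() { /* placeholder for the timed work */ }

int main() {
  cudaEvent_t start, stop;
  float ms = 0.0f;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  cudaEventRecord(start, 0);
  dummy_kernel<<<1, 32>>>();        // timed region
  cudaEventRecord(stop, 0);
  cudaEventSynchronize(stop);       // block until the stop event has completed

  cudaEventElapsedTime(&ms, start, stop);    // reported in milliseconds
  std::printf("elapsed %f s\n", ms / 1000);  // the file above divides by 1000 to print seconds
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  return 0;
}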
a07f417e58fcc4e2fa11080798c9e9bce3d2f914.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "gpuIt3.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *tNew = NULL; hipMalloc(&tNew, XSIZE*YSIZE); float *tOld = NULL; hipMalloc(&tOld, XSIZE*YSIZE); float *tOrig = NULL; hipMalloc(&tOrig, XSIZE*YSIZE); int x = 1; int y = 1; int z = 1; float k = 1; float st = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( gpuIt3), dim3(gridBlock),dim3(threadBlock), 0, 0, tNew,tOld,tOrig,x,y,z,k,st); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( gpuIt3), dim3(gridBlock),dim3(threadBlock), 0, 0, tNew,tOld,tOrig,x,y,z,k,st); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( gpuIt3), dim3(gridBlock),dim3(threadBlock), 0, 0, tNew,tOld,tOrig,x,y,z,k,st); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
a07f417e58fcc4e2fa11080798c9e9bce3d2f914.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "gpuIt3.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *tNew = NULL; cudaMalloc(&tNew, XSIZE*YSIZE); float *tOld = NULL; cudaMalloc(&tOld, XSIZE*YSIZE); float *tOrig = NULL; cudaMalloc(&tOrig, XSIZE*YSIZE); int x = 1; int y = 1; int z = 1; float k = 1; float st = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); gpuIt3<<<gridBlock,threadBlock>>>(tNew,tOld,tOrig,x,y,z,k,st); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { gpuIt3<<<gridBlock,threadBlock>>>(tNew,tOld,tOrig,x,y,z,k,st); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { gpuIt3<<<gridBlock,threadBlock>>>(tNew,tOld,tOrig,x,y,z,k,st); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
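// As this .hip/.cu pair shows, the two files differ only in API spelling: the hipify pass
// rewrites the CUDA triple-chevron launch into hipLaunchKernelGGL and maps cuda* runtime
// calls to their hip* counterparts. A minimal, self-contained CUDA sketch of that
// correspondence (the scaleBy2 kernel is ours, not part of the benchmark above), with the
// hipified spelling of each call noted in trailing comments, mirroring this pair:
#include <cstdio>
#include <cuda_runtime.h>
__global__ void scaleBy2(float *v, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) v[i] *= 2.0f;
}
int main() {
    const int n = 256;
    float *d = nullptr;
    cudaMalloc(&d, n * sizeof(float));        // hipify: hipMalloc(&d, n * sizeof(float));
    scaleBy2<<<(n + 127) / 128, 128>>>(d, n); // hipify: hipLaunchKernelGGL(scaleBy2, dim3((n + 127) / 128), dim3(128), 0, 0, d, n);
    cudaDeviceSynchronize();                  // hipify: hipDeviceSynchronize();
    std::printf("launch status: %s\n", cudaGetErrorString(cudaGetLastError()));
    cudaFree(d);                              // hipify: hipFree(d);
    return 0;
}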
9063e072c24a48bf5a22425f32b065c9b266b3d2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <fstream> #include <cmath> #include <cstdlib> #include <string> #include <iomanip> #define T_P_B 1024 ///////////////////////////// Global variables ///////////////////////////////// std::string dimension; // grid dimension float k; // k-step int timesteps; // num of timesteps int width, height, depth; // grid size float startTemp, fixedTemp; // node start temp int heat_x, heat_y, heat_z, // fixed heater vars heat_w, heat_h, heat_d; float *d_old, *d_new, *d_heaters, // grids for values *g_old, *g_new, *heaters; ///////////////////////////// CUDA Functions /////////////////////////////////// __global__ void heat_sim(float *oldg, float * newg, float *fixed, int width, int height, int depth, float k) { int idx = threadIdx.x + blockIdx.x * blockDim.x; float left, right, up, down, above, below; float old = oldg[idx]; if (idx < (width*height*depth)) { if (fixed[idx] != 0) newg[idx] = fixed[idx]; else if (fixed[idx] == 0) { // x-, x+ ((idx%width) == 0) ? (left = old) : (left = oldg[idx-1]); ((idx%width) == (width-1)) ? (right = old) : (right = oldg[idx+1]); // y-, y+ (idx%(width*height) < width) ? (up = old) : (up = oldg[idx - width]); (idx%(width*height) >= ((height-1)*width)) ? (down = old) : (down = oldg[idx + width]); // z-, z+ if (depth <= 1) { above = 0.0; below = 0.0; newg[idx] = oldg[idx] + k*(up+down+left+right-(4.0*oldg[idx])); } else if (depth > 1) { if (idx < (width*height)) above = old; else above = oldg[idx - (width*height)]; if (idx >= ((depth-1)*(width*height))) below = old; else below = oldg[idx + (width*height)]; newg[idx] = oldg[idx] + k*(up+down+left +right+above+below-(6.0*oldg[idx])); } } } } __global__ void grid_cpy(float *oldg, float *newg, int size) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < size) oldg[idx] = newg[idx]; } ///////////////////////////// End CUDA Functions /////////////////////////////// int main(int argc, char * argv[]) { ///////////////////////////// Config file parser /////////////////////////////// std::ifstream conf(argv[1]); if (conf.is_open()) { std::string line; while (getline(conf, line)){ if ((line[0] == '#') || line.empty() || line[0] == '\r') continue; // get dimension while ((line[0] == '#') || line.empty() || line[0] == '\r') getline(conf,line); dimension = line.substr(0,2); // get k value getline(conf, line); while ((line[0] == '#') || line.empty() || line[0] == '\r') getline(conf,line); k = std::stof(line); // get timesteps getline(conf, line); while ((line[0] == '#') || line.empty() || line[0] == '\r') getline(conf,line); timesteps = std::stoi(line); // get grid size getline(conf, line); while ((line[0] == '#') || line.empty() || line[0] == '\r') getline(conf,line); int comma = line.find(','); width = std::stoi(line.substr(0, comma)); line = line.substr(comma+1); if (dimension == "2D"){ height = std::stoi(line); depth = 1; } else if (dimension == "3D"){ comma = line.find(','); height = std::stoi(line.substr(0, comma)); depth = std::stoi(line.substr(comma+1)); } // get block start temp getline(conf, line); while ((line[0] == '#') || line.empty() || line[0] == '\r') getline(conf,line); startTemp = std::stof(line); // create heaters heaters = new float[width*height*depth]; std::fill(heaters, heaters+(width*height*depth), 0); while(getline(conf, line)){ if (line[0] == '#' || line.empty() || line[0] == '\r') continue; int comma = line.find(','); heat_x = std::stoi(line.substr(0, comma)); line = 
line.substr(comma+1); comma = line.find(','); heat_y = std::stoi(line.substr(0, comma)); line = line.substr(comma+1); comma = line.find(','); if (dimension == "2D"){ heat_w = std::stoi(line.substr(0, comma)); line = line.substr(comma+1); comma = line.find(','); heat_h = std::stoi(line.substr(0, comma)); line = line.substr(comma+1); heat_d = 1; heat_z = 0; fixedTemp = std::stof(line); } else if (dimension == "3D"){ heat_z = std::stoi(line.substr(0, comma)); line = line.substr(comma+1); comma = line.find(','); heat_w = std::stoi(line.substr(0, comma)); line = line.substr(comma+1); comma = line.find(','); heat_h = std::stoi(line.substr(0, comma)); line = line.substr(comma+1); comma = line.find(','); heat_d = std::stoi(line.substr(0, comma)); line = line.substr(comma+1); fixedTemp = std::stof(line); } for (int i = heat_x+width*heat_y; i < heat_x+heat_w+width*heat_y; i++) for (int j = 0; j < heat_h; j++) for (int k = heat_z; k < heat_z+heat_d; k++) heaters[i+(j*width)+(k*width*height)] = fixedTemp; } } } else std::cerr << "Couldn't open config file."; ////////////////////////// End config file parser ////////////////////////////// int dim = width*height*depth; // set up host grids g_old = new float[dim]; g_new = new float[dim]; std::fill(g_new, g_new+dim, 0); std::fill(g_old, g_old+dim, 0); for (int i = 0; i < dim; i++) { g_old[i] = startTemp; if (heaters[i] != 0) g_old[i] = heaters[i]; } // allocate blockSize - must be at least one block int blockSize = ceil(float(dim)/float(T_P_B)); // allocate device memory in 1D array hipMalloc((void**)&d_new, dim*sizeof(float)); hipMalloc((void**)&d_old, dim*sizeof(float)); hipMalloc((void**)&d_heaters, dim*sizeof(float)); // copy filled arrays from host to device hipMemcpy(d_old, g_old, dim*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_new, g_new, dim*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_heaters, heaters, dim*sizeof(float), hipMemcpyHostToDevice); // run kernels for (int t = 0; t < timesteps; t++) { hipLaunchKernelGGL(( heat_sim), dim3(blockSize), dim3(T_P_B), 0, 0, d_old, d_new, d_heaters, width, height, depth, k); hipDeviceSynchronize(); hipLaunchKernelGGL(( grid_cpy), dim3(blockSize), dim3(T_P_B), 0, 0, d_old, d_new, dim); hipDeviceSynchronize(); } // copy data back from device to host hipMemcpy(g_new, d_new, dim*sizeof(float), hipMemcpyDeviceToHost); // print out to csv std::ofstream csv("../heatOutput.csv", std::ios::out); if (csv.is_open()){ for (int i = 0; i < dim; i++) { if (i%width == width-1) csv << g_new[i] << std::endl; else csv << g_new[i] << ", "; if (i%(width*height) == (width*height)-1) csv << std::endl; } } else std::cout << "Unable to open file, try again." << std::endl; csv.close(); // deallocate all memory delete[] g_old; delete[] g_new; delete[] heaters; hipFree(d_old); hipFree(d_new); hipFree(d_heaters); }
9063e072c24a48bf5a22425f32b065c9b266b3d2.cu
#include <iostream> #include <fstream> #include <cmath> #include <cstdlib> #include <string> #include <iomanip> #define T_P_B 1024 ///////////////////////////// Global variables ///////////////////////////////// std::string dimension; // grid dimension float k; // k-step int timesteps; // num of timesteps int width, height, depth; // grid size float startTemp, fixedTemp; // node start temp int heat_x, heat_y, heat_z, // fixed heater vars heat_w, heat_h, heat_d; float *d_old, *d_new, *d_heaters, // grids for values *g_old, *g_new, *heaters; ///////////////////////////// CUDA Functions /////////////////////////////////// __global__ void heat_sim(float *oldg, float * newg, float *fixed, int width, int height, int depth, float k) { int idx = threadIdx.x + blockIdx.x * blockDim.x; float left, right, up, down, above, below; float old = oldg[idx]; if (idx < (width*height*depth)) { if (fixed[idx] != 0) newg[idx] = fixed[idx]; else if (fixed[idx] == 0) { // x-, x+ ((idx%width) == 0) ? (left = old) : (left = oldg[idx-1]); ((idx%width) == (width-1)) ? (right = old) : (right = oldg[idx+1]); // y-, y+ (idx%(width*height) < width) ? (up = old) : (up = oldg[idx - width]); (idx%(width*height) >= ((height-1)*width)) ? (down = old) : (down = oldg[idx + width]); // z-, z+ if (depth <= 1) { above = 0.0; below = 0.0; newg[idx] = oldg[idx] + k*(up+down+left+right-(4.0*oldg[idx])); } else if (depth > 1) { if (idx < (width*height)) above = old; else above = oldg[idx - (width*height)]; if (idx >= ((depth-1)*(width*height))) below = old; else below = oldg[idx + (width*height)]; newg[idx] = oldg[idx] + k*(up+down+left +right+above+below-(6.0*oldg[idx])); } } } } __global__ void grid_cpy(float *oldg, float *newg, int size) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < size) oldg[idx] = newg[idx]; } ///////////////////////////// End CUDA Functions /////////////////////////////// int main(int argc, char * argv[]) { ///////////////////////////// Config file parser /////////////////////////////// std::ifstream conf(argv[1]); if (conf.is_open()) { std::string line; while (getline(conf, line)){ if ((line[0] == '#') || line.empty() || line[0] == '\r') continue; // get dimension while ((line[0] == '#') || line.empty() || line[0] == '\r') getline(conf,line); dimension = line.substr(0,2); // get k value getline(conf, line); while ((line[0] == '#') || line.empty() || line[0] == '\r') getline(conf,line); k = std::stof(line); // get timesteps getline(conf, line); while ((line[0] == '#') || line.empty() || line[0] == '\r') getline(conf,line); timesteps = std::stoi(line); // get grid size getline(conf, line); while ((line[0] == '#') || line.empty() || line[0] == '\r') getline(conf,line); int comma = line.find(','); width = std::stoi(line.substr(0, comma)); line = line.substr(comma+1); if (dimension == "2D"){ height = std::stoi(line); depth = 1; } else if (dimension == "3D"){ comma = line.find(','); height = std::stoi(line.substr(0, comma)); depth = std::stoi(line.substr(comma+1)); } // get block start temp getline(conf, line); while ((line[0] == '#') || line.empty() || line[0] == '\r') getline(conf,line); startTemp = std::stof(line); // create heaters heaters = new float[width*height*depth]; std::fill(heaters, heaters+(width*height*depth), 0); while(getline(conf, line)){ if (line[0] == '#' || line.empty() || line[0] == '\r') continue; int comma = line.find(','); heat_x = std::stoi(line.substr(0, comma)); line = line.substr(comma+1); comma = line.find(','); heat_y = std::stoi(line.substr(0, comma)); line = 
line.substr(comma+1); comma = line.find(','); if (dimension == "2D"){ heat_w = std::stoi(line.substr(0, comma)); line = line.substr(comma+1); comma = line.find(','); heat_h = std::stoi(line.substr(0, comma)); line = line.substr(comma+1); heat_d = 1; heat_z = 0; fixedTemp = std::stof(line); } else if (dimension == "3D"){ heat_z = std::stoi(line.substr(0, comma)); line = line.substr(comma+1); comma = line.find(','); heat_w = std::stoi(line.substr(0, comma)); line = line.substr(comma+1); comma = line.find(','); heat_h = std::stoi(line.substr(0, comma)); line = line.substr(comma+1); comma = line.find(','); heat_d = std::stoi(line.substr(0, comma)); line = line.substr(comma+1); fixedTemp = std::stof(line); } for (int i = heat_x+width*heat_y; i < heat_x+heat_w+width*heat_y; i++) for (int j = 0; j < heat_h; j++) for (int k = heat_z; k < heat_z+heat_d; k++) heaters[i+(j*width)+(k*width*height)] = fixedTemp; } } } else std::cerr << "Couldn't open config file."; ////////////////////////// End config file parser ////////////////////////////// int dim = width*height*depth; // set up host grids g_old = new float[dim]; g_new = new float[dim]; std::fill(g_new, g_new+dim, 0); std::fill(g_old, g_old+dim, 0); for (int i = 0; i < dim; i++) { g_old[i] = startTemp; if (heaters[i] != 0) g_old[i] = heaters[i]; } // allocate blockSize - must be at least one block int blockSize = ceil(float(dim)/float(T_P_B)); // allocate device memory in 1D array cudaMalloc((void**)&d_new, dim*sizeof(float)); cudaMalloc((void**)&d_old, dim*sizeof(float)); cudaMalloc((void**)&d_heaters, dim*sizeof(float)); // copy filled arrays from host to device cudaMemcpy(d_old, g_old, dim*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_new, g_new, dim*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_heaters, heaters, dim*sizeof(float), cudaMemcpyHostToDevice); // run kernels for (int t = 0; t < timesteps; t++) { heat_sim<<<blockSize, T_P_B>>> (d_old, d_new, d_heaters, width, height, depth, k); cudaDeviceSynchronize(); grid_cpy<<< blockSize, T_P_B>>> (d_old, d_new, dim); cudaDeviceSynchronize(); } // copy data back from device to host cudaMemcpy(g_new, d_new, dim*sizeof(float), cudaMemcpyDeviceToHost); // print out to csv std::ofstream csv("../heatOutput.csv", std::ios::out); if (csv.is_open()){ for (int i = 0; i < dim; i++) { if (i%width == width-1) csv << g_new[i] << std::endl; else csv << g_new[i] << ", "; if (i%(width*height) == (width*height)-1) csv << std::endl; } } else std::cout << "Unable to open file, try again." << std::endl; csv.close(); // deallocate all memory delete[] g_old; delete[] g_new; delete[] heaters; cudaFree(d_old); cudaFree(d_new); cudaFree(d_heaters); }
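// The heat_sim kernel in the pair above performs an explicit update of the heat equation:
// each non-heater node moves toward its neighbours via t_new = t_old + k*(sum(neighbours) - 4*t_old)
// in 2D (6*t_old in 3D), with out-of-range neighbours replaced by the centre value and heater
// nodes held at their fixed temperature. A small host-side 2D reference of one step, usable to
// spot-check the GPU result on tiny grids (names here are illustrative, not from the file):
#include <vector>
std::vector<float> heatStep2D(const std::vector<float> &told,
                              const std::vector<float> &fixed,
                              int width, int height, float k) {
    std::vector<float> tnew(told.size());
    for (int y = 0; y < height; ++y) {
        for (int x = 0; x < width; ++x) {
            int i = y * width + x;
            if (fixed[i] != 0.0f) { tnew[i] = fixed[i]; continue; } // heater stays fixed
            float c = told[i];
            float left  = (x == 0)          ? c : told[i - 1];
            float right = (x == width - 1)  ? c : told[i + 1];
            float up    = (y == 0)          ? c : told[i - width];
            float down  = (y == height - 1) ? c : told[i + width];
            tnew[i] = c + k * (up + down + left + right - 4.0f * c);
        }
    }
    return tnew;
}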
62bd9bf05ba18c5cd4ecc2bf920072c9a2c6ea8c.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <iostream> #include <iomanip> #include <vector> #include <tuple> #include <iostream> #include <limits> #include <float.h> #include <assert.h> // CPU #include "libCSV/csv.hpp" #include "libalg/CPUMatrix.hpp" #include "libalg/CPUView.hpp" #include "libalg/alg.hpp" #include "libalg/print.hpp" #include "error.hpp" // GPU #include "libgpualg/mean.cuh" #include "error.cuh" #include "libgpuicp/corresp.cuh" #include "libgpuicp/dist.cuh" #include "libgpuicp/corresp.cuh" #include "libgpuicp/crosscov.cuh" int main(int argc, char **argv) { std::string f1Header{}; size_t Qlines, Qcols, Plines, Pcols; double* Pt = readCSV(argv[1], f1Header, Plines, Pcols); double* Qt = readCSV(argv[2], f1Header, Qlines, Qcols); double* d_P, * d_Q, *d_cov; hipMalloc(&d_P, sizeof(double) * Plines * Pcols); hipMalloc(&d_Q, sizeof(double) * Qlines * Qcols); hipMemcpy(d_P, Pt, sizeof(double) * Pcols * Plines, hipMemcpyHostToDevice); hipMemcpy(d_Q, Qt, sizeof(double) * Qcols * Qlines, hipMemcpyHostToDevice); unsigned int* d_array_correspondances; hipMalloc(&d_array_correspondances, sizeof(unsigned int) * Plines); get_array_correspondences(d_array_correspondances, d_P, d_Q, Plines, Pcols, Qlines, Qcols); hipMalloc(&d_cov, sizeof(double) * Qcols * Pcols); get_array_cross_cov(d_cov, d_array_correspondances, d_P, d_Q, Plines, Pcols, Qlines, Qcols); double* h_cov = (double*)malloc(Pcols * Qcols * sizeof(double)); hipMemcpy(h_cov, d_cov, Pcols * Qcols * sizeof(double), hipMemcpyDeviceToHost); for (int i = 0; i < Pcols * Qcols; i++) { std::cerr << h_cov[i] << " "; if (i % 3) std::cerr << std::endl; } hipFree(d_P); hipFree(d_Q); hipFree(d_array_correspondances); hipFree(d_cov); free(h_cov); free(Pt); free(Qt); }
62bd9bf05ba18c5cd4ecc2bf920072c9a2c6ea8c.cu
#include <stdio.h> #include <iostream> #include <iomanip> #include <vector> #include <tuple> #include <iostream> #include <limits> #include <float.h> #include <assert.h> // CPU #include "libCSV/csv.hpp" #include "libalg/CPUMatrix.hpp" #include "libalg/CPUView.hpp" #include "libalg/alg.hpp" #include "libalg/print.hpp" #include "error.hpp" // GPU #include "libgpualg/mean.cuh" #include "error.cuh" #include "libgpuicp/corresp.cuh" #include "libgpuicp/dist.cuh" #include "libgpuicp/corresp.cuh" #include "libgpuicp/crosscov.cuh" int main(int argc, char **argv) { std::string f1Header{}; size_t Qlines, Qcols, Plines, Pcols; double* Pt = readCSV(argv[1], f1Header, Plines, Pcols); double* Qt = readCSV(argv[2], f1Header, Qlines, Qcols); double* d_P, * d_Q, *d_cov; cudaMalloc(&d_P, sizeof(double) * Plines * Pcols); cudaMalloc(&d_Q, sizeof(double) * Qlines * Qcols); cudaMemcpy(d_P, Pt, sizeof(double) * Pcols * Plines, cudaMemcpyHostToDevice); cudaMemcpy(d_Q, Qt, sizeof(double) * Qcols * Qlines, cudaMemcpyHostToDevice); unsigned int* d_array_correspondances; cudaMalloc(&d_array_correspondances, sizeof(unsigned int) * Plines); get_array_correspondences(d_array_correspondances, d_P, d_Q, Plines, Pcols, Qlines, Qcols); cudaMalloc(&d_cov, sizeof(double) * Qcols * Pcols); get_array_cross_cov(d_cov, d_array_correspondances, d_P, d_Q, Plines, Pcols, Qlines, Qcols); double* h_cov = (double*)malloc(Pcols * Qcols * sizeof(double)); cudaMemcpy(h_cov, d_cov, Pcols * Qcols * sizeof(double), cudaMemcpyDeviceToHost); for (int i = 0; i < Pcols * Qcols; i++) { std::cerr << h_cov[i] << " "; if (i % 3) std::cerr << std::endl; } cudaFree(d_P); cudaFree(d_Q); cudaFree(d_array_correspondances); cudaFree(d_cov); free(h_cov); free(Pt); free(Qt); }
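// get_array_correspondences and get_array_cross_cov come from this project's libgpuicp
// headers; assuming they implement the usual ICP step (match each point P[i] to Q[corr[i]]
// and accumulate the Pcols x Qcols cross-covariance sum_i P[i]^T * Q[corr[i]]), a host-side
// reference for checking h_cov could look like the sketch below. The exact convention
// (ordering, centering) is an assumption, not taken from the library.
#include <cstddef>
#include <vector>
std::vector<double> crossCovRef(const double *P, const double *Q,
                                const unsigned int *corr,
                                size_t Plines, size_t Pcols, size_t Qcols) {
    std::vector<double> cov(Pcols * Qcols, 0.0);       // row-major Pcols x Qcols
    for (size_t i = 0; i < Plines; ++i) {
        const double *p = P + i * Pcols;
        const double *q = Q + (size_t)corr[i] * Qcols;
        for (size_t r = 0; r < Pcols; ++r)
            for (size_t c = 0; c < Qcols; ++c)
                cov[r * Qcols + c] += p[r] * q[c];     // outer-product accumulation
    }
    return cov;
}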
94b3d30933675acf31febd766a78084a95f5aa94.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "deform_conv.hpp" namespace Shadow { namespace Vision { __device__ float deform_im2col_bilinear(const float* data, float x, float y, int width, int height) { auto h_low = static_cast<int>(floorf(y)); auto w_low = static_cast<int>(floorf(x)); int h_high = h_low + 1, w_high = w_low + 1; float lh = y - h_low, lw = x - w_low; float hh = 1 - lh, hw = 1 - lw; float v1 = (h_low >= 0 && w_low >= 0) ? data[h_low * width + w_low] : 0; float v2 = (h_low >= 0 && w_high <= width - 1) ? data[h_low * width + w_high] : 0; float v3 = (h_high <= height - 1 && w_low >= 0) ? data[h_high * width + w_low] : 0; float v4 = (h_high <= height - 1 && w_high <= width - 1) ? data[h_high * width + w_high] : 0; float w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; return (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); } __global__ void deform_im2col_gpu_kernel( int count, const float* in_data, const float* offset_data, const float* mask_data, int in_h, int in_w, int kernel_size_h, int kernel_size_w, int stride_h, int stride_w, int pad_h, int pad_w, int dilation_h, int dilation_w, int channel_per_deform_group, int out_h, int out_w, bool use_mask, float* col_data) { CUDA_KERNEL_LOOP(globalid, count) { int temp = globalid / out_w; int w_out = globalid % out_w; int h_out = temp % out_h; int c_in = temp / out_h; int c_out = c_in * kernel_size_h * kernel_size_w; int deform_group_idx = c_in / channel_per_deform_group; int om_offset = deform_group_idx * kernel_size_h * kernel_size_w * out_h; int h_offset = h_out * stride_h - pad_h; int w_offset = w_out * stride_w - pad_w; int out_spatial = out_h * out_w; in_data += c_in * in_h * in_w; offset_data += (2 * om_offset + h_out) * out_w + w_out; if (use_mask) { mask_data += (om_offset + h_out) * out_w + w_out; } col_data += (c_out * out_h + h_out) * out_w + w_out; for (int kh = 0; kh < kernel_size_h; ++kh) { for (int kw = 0; kw < kernel_size_w; ++kw, offset_data += 2 * out_spatial) { auto h_in = h_offset + kh * dilation_h + offset_data[0]; auto w_in = w_offset + kw * dilation_w + offset_data[out_spatial]; auto mask_val = 1.f; if (use_mask) { mask_val = *mask_data; mask_data += out_spatial; } if (h_in > -1 && h_in < in_h && w_in > -1 && w_in < in_w) { *col_data = mask_val * deform_im2col_bilinear(in_data, w_in, h_in, in_w, in_h); } else { *col_data = 0.f; } col_data += out_spatial; } } } } template <> void DeformIm2Col2D<DeviceType::kGPU, float>( const float* in_data, const VecInt& in_shape, const float* offset_data, const float* mask_data, int kernel_size_h, int kernel_size_w, int stride_h, int stride_w, int pad_h, int pad_w, int dilation_h, int dilation_w, int deform_group, bool use_mask, const VecInt& out_shape, float* col_data, Context* context) { int in_c = in_shape[1], in_h = in_shape[2], in_w = in_shape[3]; int out_h = out_shape[2], out_w = out_shape[3]; int count = in_c * out_h * out_w; hipLaunchKernelGGL(( deform_im2col_gpu_kernel), dim3(GetBlocks(count)), dim3(NumThreads), 0, hipStream_t(context->stream()), count, in_data, offset_data, mask_data, in_h, in_w, kernel_size_h, kernel_size_w, stride_h, stride_w, pad_h, pad_w, dilation_h, dilation_w, in_c / deform_group, out_h, out_w, use_mask, col_data); CUDA_CHECK(hipPeekAtLastError()); } } // namespace Vision } // namespace Shadow namespace Shadow { REGISTER_OP_KERNEL_DEFAULT(DeformConvGPU, DeformConvKernelDefault<DeviceType::kGPU>); } // namespace Shadow
94b3d30933675acf31febd766a78084a95f5aa94.cu
#include "deform_conv.hpp" namespace Shadow { namespace Vision { __device__ float deform_im2col_bilinear(const float* data, float x, float y, int width, int height) { auto h_low = static_cast<int>(floorf(y)); auto w_low = static_cast<int>(floorf(x)); int h_high = h_low + 1, w_high = w_low + 1; float lh = y - h_low, lw = x - w_low; float hh = 1 - lh, hw = 1 - lw; float v1 = (h_low >= 0 && w_low >= 0) ? data[h_low * width + w_low] : 0; float v2 = (h_low >= 0 && w_high <= width - 1) ? data[h_low * width + w_high] : 0; float v3 = (h_high <= height - 1 && w_low >= 0) ? data[h_high * width + w_low] : 0; float v4 = (h_high <= height - 1 && w_high <= width - 1) ? data[h_high * width + w_high] : 0; float w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; return (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); } __global__ void deform_im2col_gpu_kernel( int count, const float* in_data, const float* offset_data, const float* mask_data, int in_h, int in_w, int kernel_size_h, int kernel_size_w, int stride_h, int stride_w, int pad_h, int pad_w, int dilation_h, int dilation_w, int channel_per_deform_group, int out_h, int out_w, bool use_mask, float* col_data) { CUDA_KERNEL_LOOP(globalid, count) { int temp = globalid / out_w; int w_out = globalid % out_w; int h_out = temp % out_h; int c_in = temp / out_h; int c_out = c_in * kernel_size_h * kernel_size_w; int deform_group_idx = c_in / channel_per_deform_group; int om_offset = deform_group_idx * kernel_size_h * kernel_size_w * out_h; int h_offset = h_out * stride_h - pad_h; int w_offset = w_out * stride_w - pad_w; int out_spatial = out_h * out_w; in_data += c_in * in_h * in_w; offset_data += (2 * om_offset + h_out) * out_w + w_out; if (use_mask) { mask_data += (om_offset + h_out) * out_w + w_out; } col_data += (c_out * out_h + h_out) * out_w + w_out; for (int kh = 0; kh < kernel_size_h; ++kh) { for (int kw = 0; kw < kernel_size_w; ++kw, offset_data += 2 * out_spatial) { auto h_in = h_offset + kh * dilation_h + offset_data[0]; auto w_in = w_offset + kw * dilation_w + offset_data[out_spatial]; auto mask_val = 1.f; if (use_mask) { mask_val = *mask_data; mask_data += out_spatial; } if (h_in > -1 && h_in < in_h && w_in > -1 && w_in < in_w) { *col_data = mask_val * deform_im2col_bilinear(in_data, w_in, h_in, in_w, in_h); } else { *col_data = 0.f; } col_data += out_spatial; } } } } template <> void DeformIm2Col2D<DeviceType::kGPU, float>( const float* in_data, const VecInt& in_shape, const float* offset_data, const float* mask_data, int kernel_size_h, int kernel_size_w, int stride_h, int stride_w, int pad_h, int pad_w, int dilation_h, int dilation_w, int deform_group, bool use_mask, const VecInt& out_shape, float* col_data, Context* context) { int in_c = in_shape[1], in_h = in_shape[2], in_w = in_shape[3]; int out_h = out_shape[2], out_w = out_shape[3]; int count = in_c * out_h * out_w; deform_im2col_gpu_kernel<<<GetBlocks(count), NumThreads, 0, cudaStream_t(context->stream())>>>( count, in_data, offset_data, mask_data, in_h, in_w, kernel_size_h, kernel_size_w, stride_h, stride_w, pad_h, pad_w, dilation_h, dilation_w, in_c / deform_group, out_h, out_w, use_mask, col_data); CUDA_CHECK(cudaPeekAtLastError()); } } // namespace Vision } // namespace Shadow namespace Shadow { REGISTER_OP_KERNEL_DEFAULT(DeformConvGPU, DeformConvKernelDefault<DeviceType::kGPU>); } // namespace Shadow
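// deform_im2col_bilinear above samples the input at a fractional (x, y) location by blending
// the four surrounding texels, with out-of-range corners contributing zero. Below is a
// host-side C++ mirror of that device helper (same corner weights and zero-padding
// behaviour), handy for unit-testing the kernel on small inputs; it is a reference sketch,
// not part of the Shadow library.
#include <cmath>
#include <cstdio>
float bilinearRef(const float *data, float x, float y, int width, int height) {
    int h_low = (int)std::floor(y), w_low = (int)std::floor(x);
    int h_high = h_low + 1, w_high = w_low + 1;
    float lh = y - h_low, lw = x - w_low;
    float hh = 1.0f - lh, hw = 1.0f - lw;
    float v1 = (h_low >= 0 && w_low >= 0) ? data[h_low * width + w_low] : 0.0f;
    float v2 = (h_low >= 0 && w_high <= width - 1) ? data[h_low * width + w_high] : 0.0f;
    float v3 = (h_high <= height - 1 && w_low >= 0) ? data[h_high * width + w_low] : 0.0f;
    float v4 = (h_high <= height - 1 && w_high <= width - 1) ? data[h_high * width + w_high] : 0.0f;
    return hh * hw * v1 + hh * lw * v2 + lh * hw * v3 + lh * lw * v4;
}
int main() {
    float img[4] = {1.0f, 2.0f, 3.0f, 4.0f};           // 2x2 image
    std::printf("%f\n", bilinearRef(img, 0.5f, 0.5f, 2, 2)); // prints 2.5 (average of all four)
    return 0;
}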
6b2c9447aa4a452b995ebe6211a415d845835330.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. * * See COPYRIGHT.txt for license information */ #include <stdio.h> #include "nvshmem.h" #include "nvshmemx.h" #ifdef ENABLE_MPI_SUPPORT #include "mpi.h" #endif #define THRESHOLD 42 #define CORRECTION 7 #undef CUDA_CHECK #define CUDA_CHECK(stmt) \ do { \ hipError_t result = (stmt); \ if (hipSuccess != result) { \ fprintf(stderr, "[%s:%d] cuda failed with %s \n", __FILE__, __LINE__, \ hipGetErrorString(result)); \ exit(-1); \ } \ } while (0) __global__ void accumulate(int *input, int *partial_sum) { int index = threadIdx.x; if (0 == index) *partial_sum = 0; __syncthreads(); atomicAdd(partial_sum, input[index]); } __global__ void correct_accumulate(int *input, int *partial_sum, int *full_sum) { int index = threadIdx.x; if (*full_sum > THRESHOLD) { input[index] = input[index] - CORRECTION; } if (0 == index) *partial_sum = 0; __syncthreads(); atomicAdd(partial_sum, input[index]); } int main(int c, char *v[]) { int mype, npes; int *input; int *partial_sum; int *full_sum; int input_nelems = 512; int to_all_nelems = 1; int PE_start = 0; int PE_size = 0; int logPE_stride = 0; long *pSync; int *pWrk; hipStream_t stream; #ifdef ENABLE_MPI_SUPPORT bool use_mpi = false; char *value = getenv("NVSHMEMTEST_USE_MPI_LAUNCHER"); if (value) use_mpi = atoi(value); #endif #ifdef ENABLE_MPI_SUPPORT if (use_mpi) { MPI_Init(&c, &v); int rank, nranks; MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &nranks); MPI_Comm mpi_comm = MPI_COMM_WORLD; nvshmemx_init_attr_t attr; attr.mpi_comm = &mpi_comm; nvshmemx_init_attr(NVSHMEMX_INIT_WITH_MPI_COMM, &attr); } else nvshmem_init(); #else nvshmem_init(); #endif PE_size = nvshmem_n_pes(); mype = nvshmem_my_pe(); npes = nvshmem_n_pes(); CUDA_CHECK(hipSetDevice(mype)); CUDA_CHECK(hipStreamCreate(&stream)); input = (int *)nvshmem_malloc(sizeof(int) * input_nelems); partial_sum = (int *)nvshmem_malloc(sizeof(int)); full_sum = (int *)nvshmem_malloc(sizeof(int)); pWrk = (int *)nvshmem_malloc(sizeof(int) * NVSHMEM_REDUCE_MIN_WRKDATA_SIZE); pSync = (long *)nvshmem_malloc(sizeof(long) * NVSHMEM_REDUCE_SYNC_SIZE); hipLaunchKernelGGL(( accumulate), dim3(1), dim3(input_nelems), 0, stream, input, partial_sum); nvshmemx_int_sum_to_all_on_stream(full_sum, partial_sum, to_all_nelems, PE_start, logPE_stride, PE_size, pWrk, pSync, stream); hipLaunchKernelGGL(( correct_accumulate), dim3(1), dim3(input_nelems), 0, stream, input, partial_sum, full_sum); CUDA_CHECK(hipStreamSynchronize(stream)); printf("[%d of %d] run complete \n", mype, npes); CUDA_CHECK(hipStreamDestroy(stream)); nvshmem_free(input); nvshmem_free(partial_sum); nvshmem_free(full_sum); nvshmem_free(pWrk); nvshmem_free(pSync); nvshmem_finalize(); #ifdef ENABLE_MPI_SUPPORT if (use_mpi) MPI_Finalize(); #endif return 0; }
6b2c9447aa4a452b995ebe6211a415d845835330.cu
/* * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. * * See COPYRIGHT.txt for license information */ #include <stdio.h> #include "nvshmem.h" #include "nvshmemx.h" #ifdef ENABLE_MPI_SUPPORT #include "mpi.h" #endif #define THRESHOLD 42 #define CORRECTION 7 #undef CUDA_CHECK #define CUDA_CHECK(stmt) \ do { \ cudaError_t result = (stmt); \ if (cudaSuccess != result) { \ fprintf(stderr, "[%s:%d] cuda failed with %s \n", __FILE__, __LINE__, \ cudaGetErrorString(result)); \ exit(-1); \ } \ } while (0) __global__ void accumulate(int *input, int *partial_sum) { int index = threadIdx.x; if (0 == index) *partial_sum = 0; __syncthreads(); atomicAdd(partial_sum, input[index]); } __global__ void correct_accumulate(int *input, int *partial_sum, int *full_sum) { int index = threadIdx.x; if (*full_sum > THRESHOLD) { input[index] = input[index] - CORRECTION; } if (0 == index) *partial_sum = 0; __syncthreads(); atomicAdd(partial_sum, input[index]); } int main(int c, char *v[]) { int mype, npes; int *input; int *partial_sum; int *full_sum; int input_nelems = 512; int to_all_nelems = 1; int PE_start = 0; int PE_size = 0; int logPE_stride = 0; long *pSync; int *pWrk; cudaStream_t stream; #ifdef ENABLE_MPI_SUPPORT bool use_mpi = false; char *value = getenv("NVSHMEMTEST_USE_MPI_LAUNCHER"); if (value) use_mpi = atoi(value); #endif #ifdef ENABLE_MPI_SUPPORT if (use_mpi) { MPI_Init(&c, &v); int rank, nranks; MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &nranks); MPI_Comm mpi_comm = MPI_COMM_WORLD; nvshmemx_init_attr_t attr; attr.mpi_comm = &mpi_comm; nvshmemx_init_attr(NVSHMEMX_INIT_WITH_MPI_COMM, &attr); } else nvshmem_init(); #else nvshmem_init(); #endif PE_size = nvshmem_n_pes(); mype = nvshmem_my_pe(); npes = nvshmem_n_pes(); CUDA_CHECK(cudaSetDevice(mype)); CUDA_CHECK(cudaStreamCreate(&stream)); input = (int *)nvshmem_malloc(sizeof(int) * input_nelems); partial_sum = (int *)nvshmem_malloc(sizeof(int)); full_sum = (int *)nvshmem_malloc(sizeof(int)); pWrk = (int *)nvshmem_malloc(sizeof(int) * NVSHMEM_REDUCE_MIN_WRKDATA_SIZE); pSync = (long *)nvshmem_malloc(sizeof(long) * NVSHMEM_REDUCE_SYNC_SIZE); accumulate<<<1, input_nelems, 0, stream>>>(input, partial_sum); nvshmemx_int_sum_to_all_on_stream(full_sum, partial_sum, to_all_nelems, PE_start, logPE_stride, PE_size, pWrk, pSync, stream); correct_accumulate<<<1, input_nelems, 0, stream>>>(input, partial_sum, full_sum); CUDA_CHECK(cudaStreamSynchronize(stream)); printf("[%d of %d] run complete \n", mype, npes); CUDA_CHECK(cudaStreamDestroy(stream)); nvshmem_free(input); nvshmem_free(partial_sum); nvshmem_free(full_sum); nvshmem_free(pWrk); nvshmem_free(pSync); nvshmem_finalize(); #ifdef ENABLE_MPI_SUPPORT if (use_mpi) MPI_Finalize(); #endif return 0; }
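// The nvshmem example above follows a common pattern: reduce locally on the GPU, combine the
// per-PE partial sums with nvshmemx_int_sum_to_all_on_stream, then let every PE react to the
// global value (here: subtract CORRECTION from each element once the global sum exceeds
// THRESHOLD, then re-accumulate). A single-process host sketch of that control flow, purely
// illustrative and with no nvshmem calls:
#include <cstdio>
#include <numeric>
#include <vector>
int main() {
    const int kThreshold = 42, kCorrection = 7;
    std::vector<int> input(512, 1);                    // stand-in for the symmetric buffer
    int partial = std::accumulate(input.begin(), input.end(), 0);
    int full = partial;                                // "sum_to_all" over a single PE
    if (full > kThreshold)
        for (int &v : input) v -= kCorrection;         // correction step from correct_accumulate
    partial = std::accumulate(input.begin(), input.end(), 0);
    std::printf("full=%d corrected partial=%d\n", full, partial);
    return 0;
}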
c402499b0bb68477942108ac1b74d1796cefe184.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include "caffe/common.hpp" #include "caffe/util/deformable_im2col.hpp" namespace caffe { template <typename Dtype> __device__ Dtype deformable_im2col_bilinear(const Dtype* bottom_data, const int data_width, const int height, const int width, Dtype h, Dtype w) { int h_low = floor(h); int w_low = floor(w); int h_high; int w_high; if (h_low >= height - 1) { h_high = h_low = height - 1; h = (Dtype)h_low; } else { h_high = h_low + 1; } if (w_low >= width - 1) { w_high = w_low = width - 1; w = (Dtype)w_low; } else { w_high = w_low + 1; } Dtype lh = h - h_low; Dtype lw = w - w_low; Dtype hh = 1 - lh, hw = 1 - lw; Dtype v1 = bottom_data[h_low * data_width + w_low]; Dtype v2 = bottom_data[h_low * data_width + w_high]; Dtype v3 = bottom_data[h_high * data_width + w_low]; Dtype v4 = bottom_data[h_high * data_width + w_high]; Dtype w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; Dtype val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename Dtype> __device__ Dtype get_gradient_weight(Dtype argmax_h, Dtype argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width) { //empty return 0; } argmax_h = max(argmax_h, (Dtype)0.0f); argmax_w = max(argmax_w, (Dtype)0.0f); int argmax_h_low = (int)argmax_h; int argmax_w_low = (int)argmax_w; int argmax_h_high; int argmax_w_high; if (argmax_h_low >= height - 1) { argmax_h_high = argmax_h_low = height - 1; argmax_h = (Dtype)argmax_h_low; } else { argmax_h_high = argmax_h_low + 1; } if (argmax_w_low >= width - 1) { argmax_w_high = argmax_w_low = width - 1; argmax_w = (Dtype)argmax_w_low; } else { argmax_w_high = argmax_w_low + 1; } Dtype weight = 0; if (h == argmax_h_low) { if (w == argmax_w_low) { weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); } else if (w == argmax_w_high) { weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); } } else if (h == argmax_h_high) { if (w == argmax_w_low) { weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); } else if (w == argmax_w_high) { weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); } } return weight; } template <typename Dtype> __device__ Dtype get_coordinate_weight(Dtype argmax_h, Dtype argmax_w, const int height, const int width, const Dtype* im_data, const int data_width, const int bp_dir) { if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width) { //empty return 0; } if (argmax_h < 0) argmax_h = 0; if (argmax_w < 0) argmax_w = 0; int argmax_h_low = (int)argmax_h; int argmax_w_low = (int)argmax_w; int argmax_h_high; int argmax_w_high; if (argmax_h_low >= height - 1) { argmax_h_high = argmax_h_low = height - 1; argmax_h = (Dtype)argmax_h_low; } else { argmax_h_high = argmax_h_low + 1; } if (argmax_w_low >= width - 1) { argmax_w_high = argmax_w_low = width - 1; argmax_w = (Dtype)argmax_w_low; } else { argmax_w_high = argmax_w_low + 1; } Dtype weight = 0; if (bp_dir == 0) { weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } else if (bp_dir == 1) { weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; weight += 
(argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } /*! * \brief deformable_im2col gpu kernel. * DO NOT call this directly. Use wrapper function im2col() instead; */ template <typename Dtype> __global__ void deformable_im2col_gpu_kernel(const int n, const Dtype* data_im, const Dtype* data_offset, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int height_col, const int width_col, Dtype* data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int c_im = (index / width_col) / height_col; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; Dtype* data_col_ptr = data_col + (c_col * height_col + h_col) * width_col + w_col; const Dtype* data_im_ptr = data_im + (c_im * height + h_in) * width + w_in; const Dtype* data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const Dtype offset_h = data_offset_ptr[data_offset_h_ptr]; const Dtype offset_w = data_offset_ptr[data_offset_w_ptr]; Dtype val = static_cast<Dtype>(0); const Dtype h_im = h_in + i * dilation_h + offset_h; const Dtype w_im = w_in + j * dilation_w + offset_w; if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { const Dtype map_h = i * dilation_h + offset_h; const Dtype map_w = j * dilation_w + offset_w; const int cur_height = height - h_in; const int cur_width = width - w_in; val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w); } *data_col_ptr = val; data_col_ptr += height_col * width_col; } } } } template <typename Dtype> void deformable_im2col_gpu(const Dtype* data_im, const Dtype* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const uint32_t deformable_group, Dtype* data_col) { // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. 
int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * height_col * width_col; int channel_per_deformable_group = height / deformable_group; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( deformable_im2col_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, data_im, data_offset, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, height_col, width_col, data_col); CUDA_POST_KERNEL_CHECK; } template void deformable_im2col_gpu<float>(const float* data_im, const float* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const uint32_t deformable_group, float* data_col); template void deformable_im2col_gpu<double>(const double* data_im, const double* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const uint32_t deformable_group, double* data_col); template <typename Dtype> __global__ void deformable_col2im_gpu_kernel(const int n, const Dtype* data_col, const Dtype* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int height_col, const int width_col, Dtype* grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col) % kernel_w; const int i = (index / width_col / height_col / kernel_w) % kernel_h; const int c = index / width_col / height_col / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const Dtype* data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const Dtype offset_h = data_offset_ptr[data_offset_h_ptr]; const Dtype offset_w = data_offset_ptr[data_offset_w_ptr]; const Dtype cur_inv_h_data = h_in + i * dilation_h + offset_h; const Dtype cur_inv_w_data = w_in + j * dilation_w + offset_w; const Dtype cur_top_grad = data_col[index]; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1 ) { int cur_bottom_grad_pos = (c * height + cur_h + dy) * width + cur_w + dx; Dtype weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } template <typename Dtype> void 
deformable_col2im_gpu(const Dtype* data_col, const Dtype* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const uint32_t deformable_group, Dtype* grad_im) { int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * height * width; int channel_per_deformable_group = height / deformable_group; // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( deformable_col2im_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, data_col, data_offset, channels, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, height_col, width_col, grad_im); CUDA_POST_KERNEL_CHECK; } template void deformable_col2im_gpu<float>(const float* data_col, const float* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const uint32_t deformable_group, float* grad_im); template void deformable_col2im_gpu<double>(const double* data_col, const double* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const uint32_t deformable_group, double* grad_im); template <typename Dtype> __global__ void deformable_col2im_coord_gpu_kernel(const int n, const Dtype* data_col, const Dtype* data_im, const Dtype* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int height_col, const int width_col, Dtype* grad_offset) { CUDA_KERNEL_LOOP(index, n) { Dtype val = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = index / width_col / height_col; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const Dtype* data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * width_col * height_col; const Dtype* data_im_ptr = data_im + deformable_group_index * channel_per_deformable_group / kernel_h / kernel_w * height * width; const Dtype* data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = ((col_c * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col) % kernel_w; int i = (col_pos / width_col / height_col / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out *
stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const Dtype offset_h = data_offset_ptr[data_offset_h_ptr]; const Dtype offset_w = data_offset_ptr[data_offset_w_ptr]; Dtype inv_h = h_in + i * dilation_h + offset_h; Dtype inv_w = w_in + j * dilation_w + offset_w; if (inv_h < 0 || inv_w < 0 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -1; } const Dtype weight = get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos]; cnt += 1; } grad_offset[index] = val; } } template <typename Dtype> void deformable_col2im_coord_gpu(const Dtype* data_col, const Dtype* data_im, const Dtype* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const uint32_t deformable_group, Dtype* grad_offset) { int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; int num_kernels = height_col * width_col * 2 * kernel_h * kernel_w * deformable_group; int channel_per_deformable_group = height / deformable_group; // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( deformable_col2im_coord_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, data_col, data_im, data_offset, channels, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, height_col, width_col, grad_offset); CUDA_POST_KERNEL_CHECK; } template void deformable_col2im_coord_gpu<float>(const float* data_col, const float* data_im, const float* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const uint32_t deformable_group, float* grad_offset); template void deformable_col2im_coord_gpu<double>(const double* data_col, const double* data_im, const double* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const uint32_t deformable_group, double* grad_offset); }
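// Every wrapper in the file above recomputes the convolution output size with the same
// expression, out = (in + 2*pad - (dilation*(kernel-1)+1)) / stride + 1. A tiny host helper
// plus one worked value, useful when sizing the data_col buffer these wrappers fill (the
// helper name is ours, not from the file):
#include <cstdio>
inline int convOutSize(int in, int kernel, int pad, int stride, int dilation) {
    return (in + 2 * pad - (dilation * (kernel - 1) + 1)) / stride + 1;
}
int main() {
    // 224 input, 3x3 kernel, pad 1, stride 1, dilation 1 -> 224 (padding preserves the size)
    std::printf("%d\n", convOutSize(224, 3, 1, 1, 1));
    return 0;
}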
c402499b0bb68477942108ac1b74d1796cefe184.cu
#include <algorithm> #include "caffe/common.hpp" #include "caffe/util/deformable_im2col.hpp" namespace caffe { template <typename Dtype> __device__ Dtype deformable_im2col_bilinear(const Dtype* bottom_data, const int data_width, const int height, const int width, Dtype h, Dtype w) { int h_low = floor(h); int w_low = floor(w); int h_high; int w_high; if (h_low >= height - 1) { h_high = h_low = height - 1; h = (Dtype)h_low; } else { h_high = h_low + 1; } if (w_low >= width - 1) { w_high = w_low = width - 1; w = (Dtype)w_low; } else { w_high = w_low + 1; } Dtype lh = h - h_low; Dtype lw = w - w_low; Dtype hh = 1 - lh, hw = 1 - lw; Dtype v1 = bottom_data[h_low * data_width + w_low]; Dtype v2 = bottom_data[h_low * data_width + w_high]; Dtype v3 = bottom_data[h_high * data_width + w_low]; Dtype v4 = bottom_data[h_high * data_width + w_high]; Dtype w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; Dtype val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename Dtype> __device__ Dtype get_gradient_weight(Dtype argmax_h, Dtype argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width) { //empty return 0; } argmax_h = max(argmax_h, (Dtype)0.0f); argmax_w = max(argmax_w, (Dtype)0.0f); int argmax_h_low = (int)argmax_h; int argmax_w_low = (int)argmax_w; int argmax_h_high; int argmax_w_high; if (argmax_h_low >= height - 1) { argmax_h_high = argmax_h_low = height - 1; argmax_h = (Dtype)argmax_h_low; } else { argmax_h_high = argmax_h_low + 1; } if (argmax_w_low >= width - 1) { argmax_w_high = argmax_w_low = width - 1; argmax_w = (Dtype)argmax_w_low; } else { argmax_w_high = argmax_w_low + 1; } Dtype weight = 0; if (h == argmax_h_low) { if (w == argmax_w_low) { weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); } else if (w == argmax_w_high) { weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); } } else if (h == argmax_h_high) { if (w == argmax_w_low) { weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); } else if (w == argmax_w_high) { weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); } } return weight; } template <typename Dtype> __device__ Dtype get_coordinate_weight(Dtype argmax_h, Dtype argmax_w, const int height, const int width, const Dtype* im_data, const int data_width, const int bp_dir) { if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width) { //empty return 0; } if (argmax_h < 0) argmax_h = 0; if (argmax_w < 0) argmax_w = 0; int argmax_h_low = (int)argmax_h; int argmax_w_low = (int)argmax_w; int argmax_h_high; int argmax_w_high; if (argmax_h_low >= height - 1) { argmax_h_high = argmax_h_low = height - 1; argmax_h = (Dtype)argmax_h_low; } else { argmax_h_high = argmax_h_low + 1; } if (argmax_w_low >= width - 1) { argmax_w_high = argmax_w_low = width - 1; argmax_w = (Dtype)argmax_w_low; } else { argmax_w_high = argmax_w_low + 1; } Dtype weight = 0; if (bp_dir == 0) { weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } else if (bp_dir == 1) { weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; weight += -1 * 
(argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } /*! * \brief deformable_im2col gpu kernel. * DO NOT call this directly. Use wrapper function im2col() instead; */ template <typename Dtype> __global__ void deformable_im2col_gpu_kernel(const int n, const Dtype* data_im, const Dtype* data_offset, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int height_col, const int width_col, Dtype* data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int c_im = (index / width_col) / height_col; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; Dtype* data_col_ptr = data_col + (c_col * height_col + h_col) * width_col + w_col; const Dtype* data_im_ptr = data_im + (c_im * height + h_in) * width + w_in; const Dtype* data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const Dtype offset_h = data_offset_ptr[data_offset_h_ptr]; const Dtype offset_w = data_offset_ptr[data_offset_w_ptr]; Dtype val = static_cast<Dtype>(0); const Dtype h_im = h_in + i * dilation_h + offset_h; const Dtype w_im = w_in + j * dilation_w + offset_w; if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { const Dtype map_h = i * dilation_h + offset_h; const Dtype map_w = j * dilation_w + offset_w; const int cur_height = height - h_in; const int cur_width = width - w_in; val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w); } *data_col_ptr = val; data_col_ptr += height_col * width_col; } } } } template <typename Dtype> void deformable_im2col_gpu(const Dtype* data_im, const Dtype* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const uint32_t deformable_group, Dtype* data_col) { // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. 
int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * height_col * width_col; int channel_per_deformable_group = height / deformable_group; // NOLINT_NEXT_LINE(whitespace/operators) deformable_im2col_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, data_im, data_offset, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, height_col, width_col, data_col); CUDA_POST_KERNEL_CHECK; } template void deformable_im2col_gpu<float>(const float* data_im, const float* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const uint32_t deformable_group, float* data_col); template void deformable_im2col_gpu<double>(const double* data_im, const double* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const uint32_t deformable_group, double* data_col); template <typename Dtype> __global__ void deformable_col2im_gpu_kernel(const int n, const Dtype* data_col, const Dtype* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int height_col, const int width_col, Dtype* grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col) % kernel_w; const int i = (index / width_col / height_col / kernel_w) % kernel_h; const int c = index / width_col / height_col / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const Dtype* data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const Dtype offset_h = data_offset_ptr[data_offset_h_ptr]; const Dtype offset_w = data_offset_ptr[data_offset_w_ptr]; const Dtype cur_inv_h_data = h_in + i * dilation_h + offset_h; const Dtype cur_inv_w_data = w_in + j * dilation_w + offset_w; const Dtype cur_top_grad = data_col[index]; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1 ) { int cur_bottom_grad_pos = (c * height + cur_h + dy) * width + cur_w + dx; Dtype weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } template <typename Dtype> void deformable_col2im_gpu(const Dtype* 
data_col, const Dtype* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const uint32_t deformable_group, Dtype* grad_im) { int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * kernel_h * kernel_w * height_col * width_col; int channel_per_deformable_group = channels / deformable_group; // Each thread handles one column-buffer element and scatters its contribution // into grad_im with atomicAdd. // NOLINT_NEXT_LINE(whitespace/operators) deformable_col2im_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, data_col, data_offset, channels, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, height_col, width_col, grad_im); CUDA_POST_KERNEL_CHECK; } template void deformable_col2im_gpu<float>(const float* data_col, const float* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const uint32_t deformable_group, float* grad_im); template void deformable_col2im_gpu<double>(const double* data_col, const double* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const uint32_t deformable_group, double* grad_im); template <typename Dtype> __global__ void deformable_col2im_coord_gpu_kernel(const int n, const Dtype* data_col, const Dtype* data_im, const Dtype* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int height_col, const int width_col, Dtype* grad_offset) { CUDA_KERNEL_LOOP(index, n) { Dtype val = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = index / width_col / height_col; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const Dtype* data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * width_col * height_col; const Dtype* data_im_ptr = data_im + deformable_group_index * channel_per_deformable_group / kernel_h / kernel_w * height * width; const Dtype* data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = ((col_c * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col) % kernel_w; int i = (col_pos / width_col / height_col / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w +
j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const Dtype offset_h = data_offset_ptr[data_offset_h_ptr]; const Dtype offset_w = data_offset_ptr[data_offset_w_ptr]; Dtype inv_h = h_in + i * dilation_h + offset_h; Dtype inv_w = w_in + j * dilation_w + offset_w; if (inv_h < 0 || inv_w < 0 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -1; } const Dtype weight = get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos]; cnt += 1; } grad_offset[index] = val; } } template <typename Dtype> void deformable_col2im_coord_gpu(const Dtype* data_col, const Dtype* data_im, const Dtype* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const uint32_t deformable_group, Dtype* grad_offset) { int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; int num_kernels = height_col * width_col * 2 * kernel_h * kernel_w * deformable_group; int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group; // Each thread accumulates the gradient for one offset-channel element; no atomics are needed here. // NOLINT_NEXT_LINE(whitespace/operators) deformable_col2im_coord_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, data_col, data_im, data_offset, channels, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, height_col, width_col, grad_offset); CUDA_POST_KERNEL_CHECK; } template void deformable_col2im_coord_gpu<float>(const float* data_col, const float* data_im, const float* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const uint32_t deformable_group, float* grad_offset); template void deformable_col2im_coord_gpu<double>(const double* data_col, const double* data_im, const double* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const uint32_t deformable_group, double* grad_offset); }
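The deformable-convolution wrappers above leave the column-buffer sizing implicit. Below is a minimal host-side usage sketch, assuming the layout implied by the kernels (channels * kernel_h * kernel_w * height_col * width_col elements); the function name example_deformable_im2col, the pointers d_im and d_offset, and every numeric size are made-up illustrations, not taken from the source.

#include <cuda_runtime.h>
#include <cstdint>

// Hypothetical example: d_im and d_offset are assumed to already hold the input
// feature map and the predicted offsets on the device.
void example_deformable_im2col(const float* d_im, const float* d_offset) {
  const int channels = 64, height = 32, width = 32;                // example sizes (assumed)
  const int kernel_h = 3, kernel_w = 3, pad_h = 1, pad_w = 1;
  const int stride_h = 1, stride_w = 1, dilation_h = 1, dilation_w = 1;
  const uint32_t deformable_group = 1;
  // Same output-size formula used by the wrappers above.
  const int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
  const int width_col  = (width  + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
  float* d_col = nullptr;
  cudaMalloc(&d_col, sizeof(float) * channels * kernel_h * kernel_w * height_col * width_col);
  deformable_im2col_gpu<float>(d_im, d_offset, channels, height, width,
                               kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
                               dilation_h, dilation_w, deformable_group, d_col);
  cudaFree(d_col);
}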
bb8fcfa5252f01a6b03eea007b666b6c9d46a5a4.hip
// !!! This is a file automatically generated by hipify!!! #include "../common.h" #include <stdio.h> #include "../include/cuda_runtime.h" #include "../include/device_launch_parameters.h" #include <stdio.h> #define MEGABYTE (1024 * 1024) int main(int argc, char **argv) { // set up device int dev = 0; CHECK(hipSetDevice(dev)); if (argc != 2) { printf("usage: %s <size-in-mbs>\n", argv[0]); return 1; } // memory size int n_mbs = atoi(argv[1]); unsigned int nbytes = n_mbs * MEGABYTE; // get device information hipDeviceProp_t deviceProp; CHECK(hipGetDeviceProperties(&deviceProp, dev)); if (!deviceProp.canMapHostMemory) { printf("Device %d does not support mapping CPU host memory!\n", dev); CHECK(hipDeviceReset()); exit(EXIT_SUCCESS); } printf("%s starting at ", argv[0]); printf("device %d: %s nbyte %5.2fMB canMap %d\n", dev, deviceProp.name, nbytes / (1024.0f * 1024.0f), deviceProp.canMapHostMemory); // allocate pinned host memory float *h_a; double start = seconds(); CHECK(hipHostMalloc ((float **)&h_a, nbytes)); double elapsed = seconds() - start; printf("Host memory allocation took %2.10f us\n", elapsed * 1000000.0); // allocate device memory start = seconds(); float *d_a; CHECK(hipMalloc((float **)&d_a, nbytes)); elapsed = seconds() - start; printf("Allocate device memory allocation took %2.10f us\n", elapsed * 1000000.0); // initialize host memory memset(h_a, 0, nbytes); for (int i = 0; i < nbytes / sizeof(float); i++) h_a[i] = 100.10f; // transfer data from the host to the device // transfer data from the host to the device start = seconds(); CHECK(hipMemcpy(d_a, h_a, nbytes, hipMemcpyHostToDevice)); elapsed = seconds() - start; printf("Transfer data from the host to the device took %2.10f us\n", elapsed * 1000000.0); // transfer data from the device to the host start = seconds(); CHECK(hipMemcpy(h_a, d_a, nbytes, hipMemcpyDeviceToHost)); elapsed = seconds() - start; printf("Transfer data from the device to the host took %2.10f us\n", elapsed * 1000000.0); // free memory start = seconds(); CHECK(hipFree(d_a)); elapsed = seconds() - start; printf("Free cuda memory took %2.10f us\n", elapsed * 1000000.0); // reset device CHECK(hipDeviceReset()); return EXIT_SUCCESS; }
bb8fcfa5252f01a6b03eea007b666b6c9d46a5a4.cu
#include "../common.h" #include <stdio.h> #include "../include/cuda_runtime.h" #include "../include/device_launch_parameters.h" #include <stdio.h> #define MEGABYTE (1024 * 1024) int main(int argc, char **argv) { // set up device int dev = 0; CHECK(cudaSetDevice(dev)); if (argc != 2) { printf("usage: %s <size-in-mbs>\n", argv[0]); return 1; } // memory size int n_mbs = atoi(argv[1]); unsigned int nbytes = n_mbs * MEGABYTE; // get device information cudaDeviceProp deviceProp; CHECK(cudaGetDeviceProperties(&deviceProp, dev)); if (!deviceProp.canMapHostMemory) { printf("Device %d does not support mapping CPU host memory!\n", dev); CHECK(cudaDeviceReset()); exit(EXIT_SUCCESS); } printf("%s starting at ", argv[0]); printf("device %d: %s nbyte %5.2fMB canMap %d\n", dev, deviceProp.name, nbytes / (1024.0f * 1024.0f), deviceProp.canMapHostMemory); // allocate pinned host memory float *h_a; double start = seconds(); CHECK(cudaMallocHost ((float **)&h_a, nbytes)); double elapsed = seconds() - start; printf("Host memory allocation took %2.10f us\n", elapsed * 1000000.0); // allocate device memory start = seconds(); float *d_a; CHECK(cudaMalloc((float **)&d_a, nbytes)); elapsed = seconds() - start; printf("Allocate device memory allocation took %2.10f us\n", elapsed * 1000000.0); // initialize host memory memset(h_a, 0, nbytes); for (int i = 0; i < nbytes / sizeof(float); i++) h_a[i] = 100.10f; // transfer data from the host to the device // transfer data from the host to the device start = seconds(); CHECK(cudaMemcpy(d_a, h_a, nbytes, cudaMemcpyHostToDevice)); elapsed = seconds() - start; printf("Transfer data from the host to the device took %2.10f us\n", elapsed * 1000000.0); // transfer data from the device to the host start = seconds(); CHECK(cudaMemcpy(h_a, d_a, nbytes, cudaMemcpyDeviceToHost)); elapsed = seconds() - start; printf("Transfer data from the device to the host took %2.10f us\n", elapsed * 1000000.0); // free memory start = seconds(); CHECK(cudaFree(d_a)); elapsed = seconds() - start; printf("Free cuda memory took %2.10f us\n", elapsed * 1000000.0); // reset device CHECK(cudaDeviceReset()); return EXIT_SUCCESS; }
6ffe34deedde06df557d162169fbd60ad0ce3152.hip
// !!! This is a file automatically generated by hipify!!! /* This function takes a pointer to the file pointer so that it can update the position of the file pointer */ #include <hip/hip_vector_types.h> #include <driver_functions.h> #include <hip/hip_runtime.h> // CUDA utilities and system includes #include <hip/hip_vector_types.h> #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <omp.h> #include <hip/hip_runtime.h> #include "AstroAccelerate/params.h" void allocate_memory_cpu_input(FILE **fp, size_t gpu_memory, int maxshift, int num_tchunks, int max_ndms, int total_ndms, int nsamp, int nchans, int nbits, int range, int *ndms, int **t_processed, unsigned short **input_buffer, float ****output_buffer, unsigned short **d_input, float **d_output, size_t *gpu_inputsize, size_t *gpu_outputsize, size_t *inputsize, size_t *outputsize) { *inputsize = nsamp * (size_t) nchans * sizeof(unsigned short); *input_buffer = (unsigned short *) malloc(*inputsize); } void allocate_memory_cpu_output(FILE **fp, size_t gpu_memory, int maxshift, int num_tchunks, int max_ndms, int total_ndms, int nsamp, int nchans, int nbits, int range, int *ndms, int **t_processed, unsigned short **input_buffer, float ****output_buffer, unsigned short **d_input, float **d_output, size_t *gpu_inputsize, size_t *gpu_outputsize, size_t *inputsize, size_t *outputsize) { *outputsize = 0; *output_buffer = (float ***) malloc(range * sizeof(float **)); for (int i = 0; i < range; i++) { int total_samps = 0; for (int k = 0; k < num_tchunks; k++) total_samps += t_processed[i][k]; //printf("\nTOTSAMPS:\t%d %d", total_samps, i); ( *output_buffer )[i] = (float **) malloc(ndms[i] * sizeof(float *)); //if((*output_buffer)[i]) printf("\n FAILED! Could not allocate %zu bytes", ndms[i]*sizeof(float *)); for (int j = 0; j < ndms[i]; j++) { ( *output_buffer )[i][j] = (float *) malloc(( total_samps ) * sizeof(float)); //if((*output_buffer)[i][j]) printf("\n FAILED! Could not allocate %zu bytes", ndms[i]*sizeof(float *)); // memset((*output_buffer)[i][j],0.0f,(total_samps)*sizeof(float)); } *outputsize += ( total_samps ) * ndms[i] * sizeof(float); printf("\noutput size: %llu", (unsigned long long) sizeof( *output_buffer ) / 1024 / 1024 / 1024); } } void allocate_memory_gpu(FILE **fp, size_t gpu_memory, int maxshift, int num_tchunks, int max_ndms, int total_ndms, int nsamp, int nchans, int nbits, int range, int *ndms, int **t_processed, unsigned short **input_buffer, float ****output_buffer, unsigned short **d_input, float **d_output, size_t *gpu_inputsize, size_t *gpu_outputsize, size_t *inputsize, size_t *outputsize) { int time_samps = t_processed[0][0] + maxshift; *gpu_inputsize = time_samps * (size_t) nchans * sizeof(unsigned short); ( hipMalloc((void **) d_input, *gpu_inputsize) ); if (nchans < max_ndms) { *gpu_outputsize = time_samps * max_ndms * sizeof(float); } else { *gpu_outputsize = time_samps * nchans * sizeof(float); } ( hipMalloc((void **) d_output, *gpu_outputsize) ); //end_t=omp_get_wtime(); //time = (float)(end_t-start_t); //printf("\nGPU Malloc in: %f ", time); ( hipMemset(*d_output, 0, *gpu_outputsize) ); }
6ffe34deedde06df557d162169fbd60ad0ce3152.cu
/* This function takes a pointer to the file pointer so that it can update the position of the file pointer */ #include <vector_types.h> #include <driver_functions.h> #include <cuda_runtime.h> // CUDA utilities and system includes #include <vector_types.h> #include <stdio.h> #include <cuda_runtime.h> #include <cuda_runtime_api.h> #include <omp.h> #include <cuda.h> #include "AstroAccelerate/params.h" void allocate_memory_cpu_input(FILE **fp, size_t gpu_memory, int maxshift, int num_tchunks, int max_ndms, int total_ndms, int nsamp, int nchans, int nbits, int range, int *ndms, int **t_processed, unsigned short **input_buffer, float ****output_buffer, unsigned short **d_input, float **d_output, size_t *gpu_inputsize, size_t *gpu_outputsize, size_t *inputsize, size_t *outputsize) { *inputsize = nsamp * (size_t) nchans * sizeof(unsigned short); *input_buffer = (unsigned short *) malloc(*inputsize); } void allocate_memory_cpu_output(FILE **fp, size_t gpu_memory, int maxshift, int num_tchunks, int max_ndms, int total_ndms, int nsamp, int nchans, int nbits, int range, int *ndms, int **t_processed, unsigned short **input_buffer, float ****output_buffer, unsigned short **d_input, float **d_output, size_t *gpu_inputsize, size_t *gpu_outputsize, size_t *inputsize, size_t *outputsize) { *outputsize = 0; *output_buffer = (float ***) malloc(range * sizeof(float **)); for (int i = 0; i < range; i++) { int total_samps = 0; for (int k = 0; k < num_tchunks; k++) total_samps += t_processed[i][k]; //printf("\nTOTSAMPS:\t%d %d", total_samps, i); ( *output_buffer )[i] = (float **) malloc(ndms[i] * sizeof(float *)); //if((*output_buffer)[i]) printf("\n FAILED! Could not allocate %zu bytes", ndms[i]*sizeof(float *)); for (int j = 0; j < ndms[i]; j++) { ( *output_buffer )[i][j] = (float *) malloc(( total_samps ) * sizeof(float)); //if((*output_buffer)[i][j]) printf("\n FAILED! Could not allocate %zu bytes", ndms[i]*sizeof(float *)); // memset((*output_buffer)[i][j],0.0f,(total_samps)*sizeof(float)); } *outputsize += ( total_samps ) * ndms[i] * sizeof(float); printf("\noutput size: %llu", (unsigned long long) sizeof( *output_buffer ) / 1024 / 1024 / 1024); } } void allocate_memory_gpu(FILE **fp, size_t gpu_memory, int maxshift, int num_tchunks, int max_ndms, int total_ndms, int nsamp, int nchans, int nbits, int range, int *ndms, int **t_processed, unsigned short **input_buffer, float ****output_buffer, unsigned short **d_input, float **d_output, size_t *gpu_inputsize, size_t *gpu_outputsize, size_t *inputsize, size_t *outputsize) { int time_samps = t_processed[0][0] + maxshift; *gpu_inputsize = time_samps * (size_t) nchans * sizeof(unsigned short); ( cudaMalloc((void **) d_input, *gpu_inputsize) ); if (nchans < max_ndms) { *gpu_outputsize = time_samps * max_ndms * sizeof(float); } else { *gpu_outputsize = time_samps * nchans * sizeof(float); } ( cudaMalloc((void **) d_output, *gpu_outputsize) ); //end_t=omp_get_wtime(); //time = (float)(end_t-start_t); //printf("\nGPU Malloc in: %f ", time); ( cudaMemset(*d_output, 0, *gpu_outputsize) ); }
3a36b680c69709af98dfd4215c1264a21e00069d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //nvcc fractalAnim2.0.cu -o temp -lglut -lGL -lm -run #include <GL/glut.h> #include <stdlib.h> #include <iostream> #include <stdio.h> #include <math.h> #include <time.h> #include <signal.h> using namespace std; float *pixelsJulia_CPU, *pixelsMandel_CPU, *pixels_CPU; float *pixelsJulia_GPU, *pixelsMandel_GPU, *pixels_GPU; //dim3 dimBlock; /*float A = -0.624; float B = 0.4351; // */ float A = 0; float B = 0.75; // */ float t = 0; float tmod = 100.0; float titer = 1.0; float moveiter = 1.0; int N = 100; int mandelID; int juliaID; unsigned int window_height = 960; unsigned int window_width = 2*window_height; float xMin = -2.0; float xMax = 2.0; float yMin = -2.0; float yMax = 2.0; float stepSizeX = (xMax - xMin)/((float)window_width); float stepSizeY = (yMax - yMin)/((float)window_height); void AllocateMemory(){ hipMalloc((void**)&pixelsJulia_GPU, window_width/2*window_height*3*sizeof(float)); pixelsJulia_CPU = (float *)malloc(window_width/2*window_height*3*sizeof(float)); hipMalloc((void**)&pixelsMandel_GPU, window_width/2*window_height*3*sizeof(float)); pixelsMandel_CPU = (float *)malloc(window_width/2*window_height*3*sizeof(float)); hipMalloc((void**)&pixels_GPU, window_width*window_height*3*sizeof(float)); pixels_CPU = (float *)malloc(window_width*window_height*3*sizeof(float)); } // */ //Saves the appropriate memory chunks for later use. //References the globally defined variables. float color (float x, float y) //hopefully centered on (0,0)? { float mag,maxMag,t1; float maxCount = 200; float count = 0; maxMag = 10; mag = 0.0; while (mag < maxMag && count < maxCount) { t1 = x; x = x*x - y*y + A; y = (2.0 * t1 * y) + B; mag = sqrt(x*x + y*y); count++; } if(count < maxCount) { return(1.0); } else { return(0.0); }// */ } __global__ void cudaWeave(float *pixelsMandel_GPU, float *pixelsJulia_GPU, float *pixels_GPU){ //red pixels_GPU[(2*blockIdx.x*blockDim.x + threadIdx.x)*3] = pixelsJulia_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3]; //First 600 on each row should be from the Julia set. pixels_GPU[((2*blockIdx.x+1)*blockDim.x + threadIdx.x)*3] = pixelsMandel_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3]; //601-1200 on each row should be from the Mandelbrot set. 
//green pixels_GPU[(2*blockIdx.x*blockDim.x + threadIdx.x)*3+1] = pixelsJulia_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3+1]; //"" pixels_GPU[((2*blockIdx.x+1)*blockDim.x + threadIdx.x)*3+1] = pixelsMandel_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3+1]; //"" //blue pixels_GPU[(2*blockIdx.x*blockDim.x + threadIdx.x)*3+2] = pixelsJulia_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3+2]; //"" pixels_GPU[((2*blockIdx.x+1)*blockDim.x + threadIdx.x)*3+2] = pixelsMandel_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3+2]; //"" }// */ __global__ void cudaColorJulia(float *pixelsJulia_GPU, float X, float iY){ float x = (((float)threadIdx.x)/(blockDim.x))*4-2; float y = (((float)blockIdx.x)/(gridDim.x))*4-2; float mag,maxMag, t1; int maxCount = 200; int count = 0; maxMag = 10; mag = 0.0; while (mag < maxMag && count < maxCount) { t1 = x; x = x*x - y*y + X; y = (2.0 * t1 * y) + iY; mag = sqrt(x*x + y*y); count++; } if(count < maxCount) { pixelsJulia_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3] = 0.5*log((double)count)/log((double)maxCount); pixelsJulia_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3 + 1] = 1.0*log((double)count)/log((double)maxCount); pixelsJulia_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3 + 2] = 0.4; } else { pixelsJulia_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3] = 0.0; pixelsJulia_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3 + 1] = 0.0; pixelsJulia_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3 + 2] = 0.0; }// */ } __global__ void cudaColorMandelbrot(float *pixelsMandel_GPU, float xseed, float yseed){ float x = 0; float y = 0; float X = (((float)threadIdx.x)/(blockDim.x))*4-2.5; float iY = (((float)blockIdx.x)/(gridDim.x))*4-2; float mag,maxMag, t1; int maxCount = 200; int count = 0; maxMag = 10; mag = 0.0; if ((abs(xseed - (float)threadIdx.x/blockDim.x*4+2.5) <= 2.0/blockDim.x) && (abs(yseed - (float)blockIdx.x/gridDim.x*4+2)) <=2.0/gridDim.x){ //If this pixel corresponds to the seed for the Julia set being generated, pixelsMandel_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3] = 1.0; pixelsMandel_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3 + 1] = 0.0; pixelsMandel_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3 + 2] = 0.0; //... make this pixel red. } else{ //Otherwise, find the color the way we normally would with the Mandelbrot set. while (mag < maxMag && count < maxCount){ //As long as the complex number doesn't get farther than a certain distance, // and as long as we haven't iterated this enough times, t1 = x; x = x*x - y*y + X; y = (2.0 * t1 * y) + iY; mag = sqrt(x*x + y*y); count++; //... find the next point in the sequence and update to it. } if(count < maxCount){ //If we broke the above loop before iterating as many times as we want, // then the sequence diverges, pixelsMandel_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3] = 0.5*log((double)count)/log((double)maxCount); pixelsMandel_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3 + 1] = 1.0*log((double)count)/log((double)maxCount); pixelsMandel_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3 + 2] = 0.4; //... and we color it prettily according to how quickly it diverged. } else //Otherwise, the point is in the mandelbrot set (or close enough to it), { pixelsMandel_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3] = 0.0; pixelsMandel_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3 + 1] = 0.0; pixelsMandel_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3 + 2] = 0.0; //... and we color it black. 
} } }// */ void update(int value){ //t = t + titer; /*A = -pow(sin(t/tmod),2); B = sin(2*t/tmod)/2;// */ /*A = -pow(sin((2*t/5)/50.0),2); B = sin((2*t/3)/50.0)/2;// */ /*A = -pow(sin((2.0*t/5)/tmod),2); B = sin((sqrt(5)+1)/2*(t)/tmod)/2;// */ glutPostRedisplay(); glutTimerFunc(16,update, 0); } /*static void signalHandler(int signum) { int command; bool exitMenu = 0; //cout << "I handled it :)" << endl; while (exitMenu == 0) { cout << "Enter 0 to exit the program." << endl; cout << "Enter 1 to continue." << endl; cin >> command; if(command == 0) { exitMenu = 1; } else if (command == 1){ exitMenu = 1; cout << "resuming..." << endl; } else { cout << "Invalid Command!" << endl; } cout << endl; } }// */ /*void processSpecialKeys(int key, int x, int y) { switch(key) { case GLUT_KEY_RIGHT : t = t + titer*100; break; case GLUT_KEY_LEFT : t = t - titer*100; break; case GLUT_KEY_UP : titer = titer*1.1; break; case GLUT_KEY_DOWN : titer = titer/1.1; break; } }// */ void processSpecialKeys(int key, int x, int y) { switch(key) { case GLUT_KEY_RIGHT : A = A + moveiter/window_width; break; case GLUT_KEY_LEFT : A = A - moveiter/window_width; break; case GLUT_KEY_UP : B = B + moveiter/window_height; break; case GLUT_KEY_DOWN : B = B - moveiter/window_height; break; } }// */ void processNormalKeys(unsigned char key, int x, int y) { if(key == 43){ //Plus sign key, '+' moveiter = moveiter * 1.2; }else if(key == 45){ //Minus sign key, '-' moveiter = moveiter/1.2; } }// */ void mouseClicks(int button, int state, int x, int y) { /*switch(button) { case GLUT_LEFT_BUTTON : A = ((float)x-window_width/2)/window_width*2.0-2.5; B = -(float)y/window_height*2.0-2.0; break; case GLUT_RIGHT_BUTTON : break; }*/ switch(button) { case GLUT_LEFT_BUTTON : A = ((float)x)/window_width*8.0-6.5; B = -(float)y/window_height*4.0+2.0; break; case GLUT_RIGHT_BUTTON : break; } }// */ /*void displayJulia(void) { glutSetWindow(juliaID); cudaColorJulia<<<window_width, window_height>>>(pixelsJulia_GPU, A, B); hipMemcpy(pixelsJulia_CPU, pixelsJulia_GPU, window_width*window_height*3*sizeof(float), hipMemcpyDeviceToHost); glDrawPixels(window_width, window_height, GL_RGB, GL_FLOAT, pixelsJulia_CPU); glFlush(); } void displayMandelbrot(void) { glutSetWindow(mandelID); cudaColorMandelbrot<<<window_width, window_height>>>(pixelsMandel_GPU, A, B); hipMemcpy(pixelsMandel_CPU, pixelsMandel_GPU, window_width*window_height*3*sizeof(float), hipMemcpyDeviceToHost); glDrawPixels(window_width, window_height, GL_RGB, GL_FLOAT, pixelsMandel_CPU); glFlush(); }// */ void weavePixels(){ hipMemcpy(pixelsJulia_GPU, pixelsJulia_CPU, window_width/2*window_height*3*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(pixelsMandel_GPU, pixelsMandel_CPU, window_width/2*window_height*3*sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( cudaWeave), dim3(window_width/2),dim3(window_height), 0, 0, pixelsMandel_GPU, pixelsJulia_GPU, pixels_GPU); hipMemcpy(pixels_CPU, pixels_GPU, window_width*window_height*3*sizeof(float), hipMemcpyDeviceToHost); }// */ void display(void){ hipLaunchKernelGGL(( cudaColorJulia), dim3(window_width/2), dim3(window_height), 0, 0, pixelsJulia_GPU, A, B); hipMemcpy(pixelsJulia_CPU, pixelsJulia_GPU, window_width/2*window_height*3*sizeof(float), hipMemcpyDeviceToHost); hipLaunchKernelGGL(( cudaColorMandelbrot), dim3(window_width/2), dim3(window_height), 0, 0, pixelsMandel_GPU, A, B); hipMemcpy(pixelsMandel_CPU, pixelsMandel_GPU, window_width/2*window_height*3*sizeof(float), hipMemcpyDeviceToHost); weavePixels(); /* //glRasterPos2i(0,0); 
glDrawPixels(window_width/2, window_height, GL_RGB, GL_FLOAT, pixelsJulia_CPU); //glRasterPos2i(window_width/2,0); glDrawPixels(window_width/2, window_height, GL_RGB, GL_FLOAT, pixelsMandel_CPU); // */ glDrawPixels(window_width, window_height, GL_RGB, GL_FLOAT, pixels_CPU); glFlush(); } void CleanUp(float *A_CPU, float *B_CPU, float *C_CPU, float *A_GPU, float *B_GPU, float *C_GPU){ free(A_CPU); free(B_CPU); free(C_CPU); hipFree(A_GPU); hipFree(B_GPU); hipFree(C_GPU); } // */ int main(int argc, char *argv[]) { if(argc == 2){ char *ptr; N = strtol(argv[1], &ptr, 10); } else if(argc > 2){ printf("One or zero arguments expected."); return(1); } AllocateMemory(); glutInit(&argc, argv); glutInitDisplayMode(GLUT_RGB | GLUT_SINGLE); glutInitWindowSize(window_width, window_height); /*glutCreateWindow("Subwindow"); glutDisplayFunc(displayMandelbrot); mandelID = glutGetWindow(); glutMouseFunc(mouseClicks); glutSpecialFunc(processSpecialKeys); glutKeyboardFunc(processNormalKeys);// */ glutCreateWindow("Fractals man, fractals."); glutDisplayFunc(display); //juliaID = glutGetWindow(); glutMouseFunc(mouseClicks); //glutSpecialFunc(processSpecialKeys); glutKeyboardFunc(processNormalKeys); glutSpecialFunc(processSpecialKeys); glutTimerFunc(16, update, 0); glutMainLoop(); }
3a36b680c69709af98dfd4215c1264a21e00069d.cu
//nvcc fractalAnim2.0.cu -o temp -lglut -lGL -lm -run #include <GL/glut.h> #include <stdlib.h> #include <iostream> #include <stdio.h> #include <math.h> #include <time.h> #include <signal.h> using namespace std; float *pixelsJulia_CPU, *pixelsMandel_CPU, *pixels_CPU; float *pixelsJulia_GPU, *pixelsMandel_GPU, *pixels_GPU; //dim3 dimBlock; /*float A = -0.624; float B = 0.4351; // */ float A = 0; float B = 0.75; // */ float t = 0; float tmod = 100.0; float titer = 1.0; float moveiter = 1.0; int N = 100; int mandelID; int juliaID; unsigned int window_height = 960; unsigned int window_width = 2*window_height; float xMin = -2.0; float xMax = 2.0; float yMin = -2.0; float yMax = 2.0; float stepSizeX = (xMax - xMin)/((float)window_width); float stepSizeY = (yMax - yMin)/((float)window_height); void AllocateMemory(){ cudaMalloc((void**)&pixelsJulia_GPU, window_width/2*window_height*3*sizeof(float)); pixelsJulia_CPU = (float *)malloc(window_width/2*window_height*3*sizeof(float)); cudaMalloc((void**)&pixelsMandel_GPU, window_width/2*window_height*3*sizeof(float)); pixelsMandel_CPU = (float *)malloc(window_width/2*window_height*3*sizeof(float)); cudaMalloc((void**)&pixels_GPU, window_width*window_height*3*sizeof(float)); pixels_CPU = (float *)malloc(window_width*window_height*3*sizeof(float)); } // */ //Saves the appropriate memory chunks for later use. //References the globally defined variables. float color (float x, float y) //hopefully centered on (0,0)? { float mag,maxMag,t1; float maxCount = 200; float count = 0; maxMag = 10; mag = 0.0; while (mag < maxMag && count < maxCount) { t1 = x; x = x*x - y*y + A; y = (2.0 * t1 * y) + B; mag = sqrt(x*x + y*y); count++; } if(count < maxCount) { return(1.0); } else { return(0.0); }// */ } __global__ void cudaWeave(float *pixelsMandel_GPU, float *pixelsJulia_GPU, float *pixels_GPU){ //red pixels_GPU[(2*blockIdx.x*blockDim.x + threadIdx.x)*3] = pixelsJulia_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3]; //First 600 on each row should be from the Julia set. pixels_GPU[((2*blockIdx.x+1)*blockDim.x + threadIdx.x)*3] = pixelsMandel_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3]; //601-1200 on each row should be from the Mandelbrot set. 
//green pixels_GPU[(2*blockIdx.x*blockDim.x + threadIdx.x)*3+1] = pixelsJulia_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3+1]; //"" pixels_GPU[((2*blockIdx.x+1)*blockDim.x + threadIdx.x)*3+1] = pixelsMandel_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3+1]; //"" //blue pixels_GPU[(2*blockIdx.x*blockDim.x + threadIdx.x)*3+2] = pixelsJulia_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3+2]; //"" pixels_GPU[((2*blockIdx.x+1)*blockDim.x + threadIdx.x)*3+2] = pixelsMandel_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3+2]; //"" }// */ __global__ void cudaColorJulia(float *pixelsJulia_GPU, float X, float iY){ float x = (((float)threadIdx.x)/(blockDim.x))*4-2; float y = (((float)blockIdx.x)/(gridDim.x))*4-2; float mag,maxMag, t1; int maxCount = 200; int count = 0; maxMag = 10; mag = 0.0; while (mag < maxMag && count < maxCount) { t1 = x; x = x*x - y*y + X; y = (2.0 * t1 * y) + iY; mag = sqrt(x*x + y*y); count++; } if(count < maxCount) { pixelsJulia_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3] = 0.5*log((double)count)/log((double)maxCount); pixelsJulia_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3 + 1] = 1.0*log((double)count)/log((double)maxCount); pixelsJulia_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3 + 2] = 0.4; } else { pixelsJulia_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3] = 0.0; pixelsJulia_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3 + 1] = 0.0; pixelsJulia_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3 + 2] = 0.0; }// */ } __global__ void cudaColorMandelbrot(float *pixelsMandel_GPU, float xseed, float yseed){ float x = 0; float y = 0; float X = (((float)threadIdx.x)/(blockDim.x))*4-2.5; float iY = (((float)blockIdx.x)/(gridDim.x))*4-2; float mag,maxMag, t1; int maxCount = 200; int count = 0; maxMag = 10; mag = 0.0; if ((abs(xseed - (float)threadIdx.x/blockDim.x*4+2.5) <= 2.0/blockDim.x) && (abs(yseed - (float)blockIdx.x/gridDim.x*4+2)) <=2.0/gridDim.x){ //If this pixel corresponds to the seed for the Julia set being generated, pixelsMandel_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3] = 1.0; pixelsMandel_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3 + 1] = 0.0; pixelsMandel_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3 + 2] = 0.0; //... make this pixel red. } else{ //Otherwise, find the color the way we normally would with the Mandelbrot set. while (mag < maxMag && count < maxCount){ //As long as the complex number doesn't get farther than a certain distance, // and as long as we haven't iterated this enough times, t1 = x; x = x*x - y*y + X; y = (2.0 * t1 * y) + iY; mag = sqrt(x*x + y*y); count++; //... find the next point in the sequence and update to it. } if(count < maxCount){ //If we broke the above loop before iterating as many times as we want, // then the sequence diverges, pixelsMandel_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3] = 0.5*log((double)count)/log((double)maxCount); pixelsMandel_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3 + 1] = 1.0*log((double)count)/log((double)maxCount); pixelsMandel_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3 + 2] = 0.4; //... and we color it prettily according to how quickly it diverged. } else //Otherwise, the point is in the mandelbrot set (or close enough to it), { pixelsMandel_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3] = 0.0; pixelsMandel_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3 + 1] = 0.0; pixelsMandel_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3 + 2] = 0.0; //... and we color it black. 
} } }// */ void update(int value){ //t = t + titer; /*A = -pow(sin(t/tmod),2); B = sin(2*t/tmod)/2;// */ /*A = -pow(sin((2*t/5)/50.0),2); B = sin((2*t/3)/50.0)/2;// */ /*A = -pow(sin((2.0*t/5)/tmod),2); B = sin((sqrt(5)+1)/2*(t)/tmod)/2;// */ glutPostRedisplay(); glutTimerFunc(16,update, 0); } /*static void signalHandler(int signum) { int command; bool exitMenu = 0; //cout << "I handled it :)" << endl; while (exitMenu == 0) { cout << "Enter 0 to exit the program." << endl; cout << "Enter 1 to continue." << endl; cin >> command; if(command == 0) { exitMenu = 1; } else if (command == 1){ exitMenu = 1; cout << "resuming..." << endl; } else { cout << "Invalid Command!" << endl; } cout << endl; } }// */ /*void processSpecialKeys(int key, int x, int y) { switch(key) { case GLUT_KEY_RIGHT : t = t + titer*100; break; case GLUT_KEY_LEFT : t = t - titer*100; break; case GLUT_KEY_UP : titer = titer*1.1; break; case GLUT_KEY_DOWN : titer = titer/1.1; break; } }// */ void processSpecialKeys(int key, int x, int y) { switch(key) { case GLUT_KEY_RIGHT : A = A + moveiter/window_width; break; case GLUT_KEY_LEFT : A = A - moveiter/window_width; break; case GLUT_KEY_UP : B = B + moveiter/window_height; break; case GLUT_KEY_DOWN : B = B - moveiter/window_height; break; } }// */ void processNormalKeys(unsigned char key, int x, int y) { if(key == 43){ //Plus sign key, '+' moveiter = moveiter * 1.2; }else if(key == 45){ //Minus sign key, '-' moveiter = moveiter/1.2; } }// */ void mouseClicks(int button, int state, int x, int y) { /*switch(button) { case GLUT_LEFT_BUTTON : A = ((float)x-window_width/2)/window_width*2.0-2.5; B = -(float)y/window_height*2.0-2.0; break; case GLUT_RIGHT_BUTTON : break; }*/ switch(button) { case GLUT_LEFT_BUTTON : A = ((float)x)/window_width*8.0-6.5; B = -(float)y/window_height*4.0+2.0; break; case GLUT_RIGHT_BUTTON : break; } }// */ /*void displayJulia(void) { glutSetWindow(juliaID); cudaColorJulia<<<window_width, window_height>>>(pixelsJulia_GPU, A, B); cudaMemcpy(pixelsJulia_CPU, pixelsJulia_GPU, window_width*window_height*3*sizeof(float), cudaMemcpyDeviceToHost); glDrawPixels(window_width, window_height, GL_RGB, GL_FLOAT, pixelsJulia_CPU); glFlush(); } void displayMandelbrot(void) { glutSetWindow(mandelID); cudaColorMandelbrot<<<window_width, window_height>>>(pixelsMandel_GPU, A, B); cudaMemcpy(pixelsMandel_CPU, pixelsMandel_GPU, window_width*window_height*3*sizeof(float), cudaMemcpyDeviceToHost); glDrawPixels(window_width, window_height, GL_RGB, GL_FLOAT, pixelsMandel_CPU); glFlush(); }// */ void weavePixels(){ cudaMemcpy(pixelsJulia_GPU, pixelsJulia_CPU, window_width/2*window_height*3*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(pixelsMandel_GPU, pixelsMandel_CPU, window_width/2*window_height*3*sizeof(float), cudaMemcpyHostToDevice); cudaWeave<<<window_width/2,window_height>>>(pixelsMandel_GPU, pixelsJulia_GPU, pixels_GPU); cudaMemcpy(pixels_CPU, pixels_GPU, window_width*window_height*3*sizeof(float), cudaMemcpyDeviceToHost); }// */ void display(void){ cudaColorJulia<<<window_width/2, window_height>>>(pixelsJulia_GPU, A, B); cudaMemcpy(pixelsJulia_CPU, pixelsJulia_GPU, window_width/2*window_height*3*sizeof(float), cudaMemcpyDeviceToHost); cudaColorMandelbrot<<<window_width/2, window_height>>>(pixelsMandel_GPU, A, B); cudaMemcpy(pixelsMandel_CPU, pixelsMandel_GPU, window_width/2*window_height*3*sizeof(float), cudaMemcpyDeviceToHost); weavePixels(); /* //glRasterPos2i(0,0); glDrawPixels(window_width/2, window_height, GL_RGB, GL_FLOAT, pixelsJulia_CPU); 
//glRasterPos2i(window_width/2,0); glDrawPixels(window_width/2, window_height, GL_RGB, GL_FLOAT, pixelsMandel_CPU); // */ glDrawPixels(window_width, window_height, GL_RGB, GL_FLOAT, pixels_CPU); glFlush(); } void CleanUp(float *A_CPU, float *B_CPU, float *C_CPU, float *A_GPU, float *B_GPU, float *C_GPU){ free(A_CPU); free(B_CPU); free(C_CPU); cudaFree(A_GPU); cudaFree(B_GPU); cudaFree(C_GPU); } // */ int main(int argc, char *argv[]) { if(argc == 2){ char *ptr; N = strtol(argv[1], &ptr, 10); } else if(argc > 2){ printf("One or zero arguments expected."); return(1); } AllocateMemory(); glutInit(&argc, argv); glutInitDisplayMode(GLUT_RGB | GLUT_SINGLE); glutInitWindowSize(window_width, window_height); /*glutCreateWindow("Subwindow"); glutDisplayFunc(displayMandelbrot); mandelID = glutGetWindow(); glutMouseFunc(mouseClicks); glutSpecialFunc(processSpecialKeys); glutKeyboardFunc(processNormalKeys);// */ glutCreateWindow("Fractals man, fractals."); glutDisplayFunc(display); //juliaID = glutGetWindow(); glutMouseFunc(mouseClicks); //glutSpecialFunc(processSpecialKeys); glutKeyboardFunc(processNormalKeys); glutSpecialFunc(processSpecialKeys); glutTimerFunc(16, update, 0); glutMainLoop(); }
8b5fb87e73ec03ee99e1d791b1d46cf1d04c15b4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef BUILD_CUDA #include <cmath> #include <cstdio> #include "cudaFluid.cuh" #include "../kernel.h" __global__ void simulate_update_position_predict_position( int n, Vector3R *particle_positions, Vector3R *particle_preditced_position, Vector3R *particle_velocities, const Vector3R external_accelerations, REAL delta_t ) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { auto &positions_i = particle_positions[i]; auto &velocities_i = particle_velocities[i]; auto &preditced_positions_i = particle_preditced_position[i]; // if (i == 0) { // printf("Thread 0 position: (%lf, %lf, %lf), acc: (%lf, %lf, %lf), delta_t: %lf\n", // positions_i.x, positions_i.y, positions_i.z, // external_accelerations.x, external_accelerations.y, external_accelerations.z, // delta_t // ); // } velocities_i += external_accelerations * delta_t; preditced_positions_i = positions_i + velocities_i * delta_t; } } __global__ void calculate_lambda( int n, Vector3R *particle_positions, int *neighbor_search_results, int *neighbor_search_results_size_prefix_sum, REAL particle_mass, REAL density, REAL epsilon, REAL h, REAL *lambda ) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { const auto &p_i = particle_positions[i]; // line 10: calculate lambda const int neighbors_size_last = i == 0 ? 0 : neighbor_search_results_size_prefix_sum[i-1]; const int neighbors_size_i = neighbor_search_results_size_prefix_sum[i]-neighbors_size_last; const int *neighbors_i = &neighbor_search_results[neighbors_size_last]; // Eq 2 REAL rho_i = 0; for (int jj = 0; jj < neighbors_size_i; jj++) { int j = neighbors_i[jj]; const auto &p_j = particle_positions[j]; rho_i += W_poly6(p_i-p_j, h); } // add itself rho_i += W_poly6(p_i-p_i, h); rho_i *= particle_mass; // Eq 1 const REAL C_i = rho_i / density - 1.; REAL C_i_p_k_2_sum = 0; // Eq 8 // if k = j REAL C_i_p_k_j_2 = 0; // if k = i Vector3R C_i_p_k_i; for (int jj = 0; jj < neighbors_size_i; jj++) { int j = neighbors_i[jj]; const auto &p_j = particle_positions[j]; const auto W_spiky_gradient_i_j = W_spiky_gradient(p_i-p_j, h) * (p_i-p_j); C_i_p_k_i += W_spiky_gradient_i_j; C_i_p_k_j_2 += W_spiky_gradient_i_j.norm2(); } C_i_p_k_2_sum += C_i_p_k_i.norm2(); C_i_p_k_2_sum /= pow(density, 2); lambda[i] = - C_i / (C_i_p_k_2_sum+epsilon); } } __global__ void calculate_delta_pi_and_collision_response( int num_particles, Vector3R *particle_positions, int *neighbor_search_results, int *neighbor_search_results_size_prefix_sum, Vector3R *delta_p, REAL n, REAL k, REAL h, REAL density, REAL *lambda, int nObjs, Plane_cuda *collision_objects ) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_particles; i += blockDim.x * gridDim.x) { const auto &p_i = particle_positions[i]; // line 13: calculate delta p_i const int neighbors_size_last = i == 0 ? 
0 : neighbor_search_results_size_prefix_sum[i-1]; const int neighbors_size_i = neighbor_search_results_size_prefix_sum[i]-neighbors_size_last; const int *neighbors_i = &neighbor_search_results[neighbors_size_last]; delta_p[i] = 0; const auto lambda_i = lambda[i]; // Eq 12 for (int jj = 0; jj < neighbors_size_i; jj++) { int j = neighbors_i[jj]; const auto &p_j = particle_positions[j]; // Eq 13 double s_corr = -k*pow(W_poly6(p_i-p_j, h)/W_poly6(0.3*h, h), n); delta_p[i] += (lambda_i+lambda[j]+s_corr) * W_spiky_gradient(p_i-p_j, h) * (p_i-p_j); } delta_p[i] /= density; // line 14: collision detection and response // TODO: apply them for (int j = 0; j < nObjs; j++) { collision_objects[j].collide(particle_positions[i],delta_p[i]); } } } __global__ void update_predicted_positions( int num_particles, Vector3R *particle_predicted_positions, Vector3R *delta_p ) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_particles; i += blockDim.x * gridDim.x) { particle_predicted_positions[i] += delta_p[i]; } } __global__ void update_velocities( int num_particles, Vector3R *particle_positions, Vector3R *predicted_positions, Vector3R *particle_velocities, REAL delta_t ) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_particles; i += blockDim.x * gridDim.x) { particle_velocities[i] = (predicted_positions[i] - particle_positions[i]) / delta_t; } } __global__ void apply_XSPH_viscosity( int num_particles, Vector3R *particle_positions, Vector3R *particle_velocities, int *neighbor_search_results, int *neighbor_search_results_size_prefix_sum, REAL particle_mass, REAL h, REAL c, REAL delta_t ) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_particles; i += blockDim.x * gridDim.x) { const auto &p_i = particle_positions[i]; const int neighbors_size_last = i == 0 ? 
0 : neighbor_search_results_size_prefix_sum[i-1]; const int neighbors_size_i = neighbor_search_results_size_prefix_sum[i]-neighbors_size_last; const int *neighbors_i = &neighbor_search_results[neighbors_size_last]; // line 22: vorticity confinement and XSPH viscosity Vector3R f_vorticity; Vector3R omega_i; // Eq 17: Vector3R V_xsph; for (int jj = 0; jj < neighbors_size_i; jj++) { int j = neighbors_i[jj]; const auto &p_j = particle_positions[j]; const auto &p_ij = p_i-p_j; const auto &v_ij = particle_velocities[j] - particle_velocities[i]; // the smallest |p_ij| with h=0.1 gives >100 so c has to correct it to ~1 V_xsph += v_ij * W_poly6(p_ij, h); omega_i += cross(v_ij, W_spiky_gradient(p_ij, h)*p_ij); } // TODO: vorticity // const auto &eta = ; // const auto &N = eta.unit(); // f_vorticity = epsilon*cross(N, omega_i); V_xsph *= c; particle_velocities[i] += V_xsph + f_vorticity / particle_mass * delta_t; } } void copy_predicted_positions_to_position( REAL3 *particle_position, REAL3 *particle_preditced_position, size_t N ) { hipMemcpy(particle_position, particle_preditced_position, N, hipMemcpyDeviceToDevice); } Fluid_cuda::Fluid_cuda( unique_ptr<vector<REAL3>> &&particle_positions, unique_ptr<vector<REAL3>> &&particle_velocities, REAL h ): nsearch(h) { if (particle_positions == nullptr) { throw std::runtime_error("particle_positions == nullptr!"); } this->particle_positions = std::move(particle_positions); this->particle_velocities = std::move(particle_velocities); } Fluid_cuda::~Fluid_cuda(){ hipFree(particle_positions_device); hipFree(particle_velocities_device); hipFree(particle_predicted_positions_device); hipFree(delta_p_device); hipFree(lambda_device); hipFree(neighbor_search_results_dev); hipFree(neighbor_search_results_size_prefix_sum_dev); } void Fluid_cuda::init() { const auto num_particles = particle_positions->size(); const auto SIZE_REAL3_N = sizeof(REAL3) * num_particles; hipMalloc(&particle_positions_device, SIZE_REAL3_N); hipMemcpy(particle_positions_device, particle_positions->data(), SIZE_REAL3_N, hipMemcpyHostToDevice); hipMalloc(&particle_velocities_device, SIZE_REAL3_N); if (!particle_velocities) { hipMemset(particle_velocities_device, 0, SIZE_REAL3_N); } else { hipMemcpy(particle_velocities_device, particle_velocities->data(), SIZE_REAL3_N, hipMemcpyHostToDevice); } hipMalloc(&particle_predicted_positions_device, SIZE_REAL3_N); hipMalloc(&delta_p_device, SIZE_REAL3_N); hipMalloc(&lambda_device, sizeof(REAL)*num_particles); neighbor_search_results_dev_capacity = num_particles * default_capacity; hipMalloc(&neighbor_search_results_dev, sizeof(int) * neighbor_search_results_dev_capacity); hipMalloc(&neighbor_search_results_size_prefix_sum_dev, sizeof(int) * num_particles); neighbor_search_results_host.resize(neighbor_search_results_dev_capacity); neighbor_search_results_size_prefix_sum_host.resize(num_particles); hipDeviceSynchronize(); nsearch.add_point_set( this->particle_positions->front().data(), this->particle_positions->size(), true, true ); nsearch.find_neighbors(); } void Fluid_cuda::find_neighbors(){ int num_particles = particle_positions->size(); nsearch.find_neighbors(); // serial calculate prefix_sum for (int i = 0; i < num_particles; i++) { auto &pointSet = nsearch.point_set(0); auto count = pointSet.n_neighbors(0, i); const auto last_sum = i == 0 ? 
0 : neighbor_search_results_size_prefix_sum_host[i-1]; neighbor_search_results_size_prefix_sum_host[i] = last_sum + count; // range for result_i: [sum_{i-1}, sum_{i}) } // ensure capacity const auto minCapacity = neighbor_search_results_size_prefix_sum_host.back(); if (minCapacity > neighbor_search_results_dev_capacity) { neighbor_search_results_dev_capacity = minCapacity * 1.2; neighbor_search_results_host.resize(neighbor_search_results_dev_capacity); hipFree(neighbor_search_results_dev); hipMalloc(&neighbor_search_results_dev, sizeof(int) * neighbor_search_results_dev_capacity); } #pragma omp parallel for for (int i = 0; i < num_particles; i++) { // line 6: find neighboring particles auto &pointSet = nsearch.point_set(0); auto count = pointSet.n_neighbors(0, i); const int start = i == 0 ? 0 : neighbor_search_results_size_prefix_sum_host[i-1]; for (int j = 0; j < count; j++) { neighbor_search_results_host[start + j] = pointSet.neighbor(0, i, j); } } // copy neighbor results to device hipMemcpy( neighbor_search_results_dev, neighbor_search_results_host.data(), sizeof(int) * neighbor_search_results_dev_capacity, hipMemcpyHostToDevice ); // update size prefix sum to device hipMemcpy( neighbor_search_results_size_prefix_sum_dev, neighbor_search_results_size_prefix_sum_host.data(), sizeof(int) * num_particles, hipMemcpyHostToDevice ); hipDeviceSynchronize(); } void Fluid_cuda::simulate(REAL delta_t, const FluidParameters *fp, thrust::device_vector<Plane_cuda> &collision_objects) { int num_particles = particle_positions->size(); const auto particle_positions_dev = REAL3AsVector3R(particle_positions_device); const auto particle_predicted_positions = REAL3AsVector3R(particle_predicted_positions_device); const auto particle_velocities = REAL3AsVector3R(particle_velocities_device); const auto delta_p = REAL3AsVector3R(delta_p_device); const auto density = fp->density; const auto particle_mass = fp->particle_mass; const auto damping = fp->damping; const auto solverIterations = fp->solverIterations; const auto h = fp->h; const auto epsilon = fp->epsilon; const auto n = fp->n; const auto k = fp->k; const auto c = fp->c; const Vector3R &external_accelerations = fp->external_forces; hipLaunchKernelGGL(( simulate_update_position_predict_position), dim3(num_particles),dim3(1), 0, 0, num_particles, particle_positions_dev, particle_predicted_positions, particle_velocities, external_accelerations, delta_t ); find_neighbors(); for (int iter = 0; iter < solverIterations; iter++) { hipLaunchKernelGGL(( calculate_lambda), dim3(num_particles),dim3(1), 0, 0, num_particles, particle_positions_dev, neighbor_search_results_dev, neighbor_search_results_size_prefix_sum_dev, particle_mass, density, epsilon, h, lambda_device ); hipLaunchKernelGGL(( calculate_delta_pi_and_collision_response), dim3(num_particles),dim3(1), 0, 0, num_particles, particle_positions_dev, neighbor_search_results_dev, neighbor_search_results_size_prefix_sum_dev, delta_p, n, k, h, density, lambda_device, collision_objects.size(), thrust::raw_pointer_cast(collision_objects.data()) ); hipLaunchKernelGGL(( update_predicted_positions), dim3(num_particles),dim3(1), 0, 0, num_particles, particle_predicted_positions, delta_p ); } hipLaunchKernelGGL(( update_velocities), dim3(num_particles),dim3(1), 0, 0, num_particles, particle_positions_dev, particle_predicted_positions, particle_velocities, delta_t ); hipLaunchKernelGGL(( apply_XSPH_viscosity), dim3(num_particles),dim3(1), 0, 0, num_particles, particle_positions_dev, particle_velocities, 
neighbor_search_results_dev, neighbor_search_results_size_prefix_sum_dev, particle_mass, h, c, delta_t ); const auto SIZE_REAL3_N = sizeof(REAL3) * num_particles; copy_predicted_positions_to_position(particle_positions_device, particle_predicted_positions_device, SIZE_REAL3_N); // copy result back to host hipMemcpy(particle_positions->data(), particle_positions_device, SIZE_REAL3_N, hipMemcpyDeviceToHost); } #endif
8b5fb87e73ec03ee99e1d791b1d46cf1d04c15b4.cu
#ifdef BUILD_CUDA #include <cmath> #include <cstdio> #include "cudaFluid.cuh" #include "../kernel.h" __global__ void simulate_update_position_predict_position( int n, Vector3R *particle_positions, Vector3R *particle_preditced_position, Vector3R *particle_velocities, const Vector3R external_accelerations, REAL delta_t ) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { auto &positions_i = particle_positions[i]; auto &velocities_i = particle_velocities[i]; auto &preditced_positions_i = particle_preditced_position[i]; // if (i == 0) { // printf("Thread 0 position: (%lf, %lf, %lf), acc: (%lf, %lf, %lf), delta_t: %lf\n", // positions_i.x, positions_i.y, positions_i.z, // external_accelerations.x, external_accelerations.y, external_accelerations.z, // delta_t // ); // } velocities_i += external_accelerations * delta_t; preditced_positions_i = positions_i + velocities_i * delta_t; } } __global__ void calculate_lambda( int n, Vector3R *particle_positions, int *neighbor_search_results, int *neighbor_search_results_size_prefix_sum, REAL particle_mass, REAL density, REAL epsilon, REAL h, REAL *lambda ) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { const auto &p_i = particle_positions[i]; // line 10: calculate lambda const int neighbors_size_last = i == 0 ? 0 : neighbor_search_results_size_prefix_sum[i-1]; const int neighbors_size_i = neighbor_search_results_size_prefix_sum[i]-neighbors_size_last; const int *neighbors_i = &neighbor_search_results[neighbors_size_last]; // Eq 2 REAL rho_i = 0; for (int jj = 0; jj < neighbors_size_i; jj++) { int j = neighbors_i[jj]; const auto &p_j = particle_positions[j]; rho_i += W_poly6(p_i-p_j, h); } // add itself rho_i += W_poly6(p_i-p_i, h); rho_i *= particle_mass; // Eq 1 const REAL C_i = rho_i / density - 1.; REAL C_i_p_k_2_sum = 0; // Eq 8 // if k = j REAL C_i_p_k_j_2 = 0; // if k = i Vector3R C_i_p_k_i; for (int jj = 0; jj < neighbors_size_i; jj++) { int j = neighbors_i[jj]; const auto &p_j = particle_positions[j]; const auto W_spiky_gradient_i_j = W_spiky_gradient(p_i-p_j, h) * (p_i-p_j); C_i_p_k_i += W_spiky_gradient_i_j; C_i_p_k_j_2 += W_spiky_gradient_i_j.norm2(); } C_i_p_k_2_sum += C_i_p_k_i.norm2(); C_i_p_k_2_sum /= pow(density, 2); lambda[i] = - C_i / (C_i_p_k_2_sum+epsilon); } } __global__ void calculate_delta_pi_and_collision_response( int num_particles, Vector3R *particle_positions, int *neighbor_search_results, int *neighbor_search_results_size_prefix_sum, Vector3R *delta_p, REAL n, REAL k, REAL h, REAL density, REAL *lambda, int nObjs, Plane_cuda *collision_objects ) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_particles; i += blockDim.x * gridDim.x) { const auto &p_i = particle_positions[i]; // line 13: calculate delta p_i const int neighbors_size_last = i == 0 ? 
0 : neighbor_search_results_size_prefix_sum[i-1]; const int neighbors_size_i = neighbor_search_results_size_prefix_sum[i]-neighbors_size_last; const int *neighbors_i = &neighbor_search_results[neighbors_size_last]; delta_p[i] = 0; const auto lambda_i = lambda[i]; // Eq 12 for (int jj = 0; jj < neighbors_size_i; jj++) { int j = neighbors_i[jj]; const auto &p_j = particle_positions[j]; // Eq 13 double s_corr = -k*pow(W_poly6(p_i-p_j, h)/W_poly6(0.3*h, h), n); delta_p[i] += (lambda_i+lambda[j]+s_corr) * W_spiky_gradient(p_i-p_j, h) * (p_i-p_j); } delta_p[i] /= density; // line 14: collision detection and response // TODO: apply them for (int j = 0; j < nObjs; j++) { collision_objects[j].collide(particle_positions[i],delta_p[i]); } } } __global__ void update_predicted_positions( int num_particles, Vector3R *particle_predicted_positions, Vector3R *delta_p ) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_particles; i += blockDim.x * gridDim.x) { particle_predicted_positions[i] += delta_p[i]; } } __global__ void update_velocities( int num_particles, Vector3R *particle_positions, Vector3R *predicted_positions, Vector3R *particle_velocities, REAL delta_t ) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_particles; i += blockDim.x * gridDim.x) { particle_velocities[i] = (predicted_positions[i] - particle_positions[i]) / delta_t; } } __global__ void apply_XSPH_viscosity( int num_particles, Vector3R *particle_positions, Vector3R *particle_velocities, int *neighbor_search_results, int *neighbor_search_results_size_prefix_sum, REAL particle_mass, REAL h, REAL c, REAL delta_t ) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_particles; i += blockDim.x * gridDim.x) { const auto &p_i = particle_positions[i]; const int neighbors_size_last = i == 0 ? 
0 : neighbor_search_results_size_prefix_sum[i-1]; const int neighbors_size_i = neighbor_search_results_size_prefix_sum[i]-neighbors_size_last; const int *neighbors_i = &neighbor_search_results[neighbors_size_last]; // line 22: vorticity confinement and XSPH viscosity Vector3R f_vorticity; Vector3R omega_i; // Eq 17: Vector3R V_xsph; for (int jj = 0; jj < neighbors_size_i; jj++) { int j = neighbors_i[jj]; const auto &p_j = particle_positions[j]; const auto &p_ij = p_i-p_j; const auto &v_ij = particle_velocities[j] - particle_velocities[i]; // the smallest |p_ij| with h=0.1 gives >100 so c has to correct it to ~1 V_xsph += v_ij * W_poly6(p_ij, h); omega_i += cross(v_ij, W_spiky_gradient(p_ij, h)*p_ij); } // TODO: vorticity // const auto &eta = ; // const auto &N = eta.unit(); // f_vorticity = epsilon*cross(N, omega_i); V_xsph *= c; particle_velocities[i] += V_xsph + f_vorticity / particle_mass * delta_t; } } void copy_predicted_positions_to_position( REAL3 *particle_position, REAL3 *particle_preditced_position, size_t N ) { cudaMemcpy(particle_position, particle_preditced_position, N, cudaMemcpyDeviceToDevice); } Fluid_cuda::Fluid_cuda( unique_ptr<vector<REAL3>> &&particle_positions, unique_ptr<vector<REAL3>> &&particle_velocities, REAL h ): nsearch(h) { if (particle_positions == nullptr) { throw std::runtime_error("particle_positions == nullptr!"); } this->particle_positions = std::move(particle_positions); this->particle_velocities = std::move(particle_velocities); } Fluid_cuda::~Fluid_cuda(){ cudaFree(particle_positions_device); cudaFree(particle_velocities_device); cudaFree(particle_predicted_positions_device); cudaFree(delta_p_device); cudaFree(lambda_device); cudaFree(neighbor_search_results_dev); cudaFree(neighbor_search_results_size_prefix_sum_dev); } void Fluid_cuda::init() { const auto num_particles = particle_positions->size(); const auto SIZE_REAL3_N = sizeof(REAL3) * num_particles; cudaMalloc(&particle_positions_device, SIZE_REAL3_N); cudaMemcpy(particle_positions_device, particle_positions->data(), SIZE_REAL3_N, cudaMemcpyHostToDevice); cudaMalloc(&particle_velocities_device, SIZE_REAL3_N); if (!particle_velocities) { cudaMemset(particle_velocities_device, 0, SIZE_REAL3_N); } else { cudaMemcpy(particle_velocities_device, particle_velocities->data(), SIZE_REAL3_N, cudaMemcpyHostToDevice); } cudaMalloc(&particle_predicted_positions_device, SIZE_REAL3_N); cudaMalloc(&delta_p_device, SIZE_REAL3_N); cudaMalloc(&lambda_device, sizeof(REAL)*num_particles); neighbor_search_results_dev_capacity = num_particles * default_capacity; cudaMalloc(&neighbor_search_results_dev, sizeof(int) * neighbor_search_results_dev_capacity); cudaMalloc(&neighbor_search_results_size_prefix_sum_dev, sizeof(int) * num_particles); neighbor_search_results_host.resize(neighbor_search_results_dev_capacity); neighbor_search_results_size_prefix_sum_host.resize(num_particles); cudaDeviceSynchronize(); nsearch.add_point_set( this->particle_positions->front().data(), this->particle_positions->size(), true, true ); nsearch.find_neighbors(); } void Fluid_cuda::find_neighbors(){ int num_particles = particle_positions->size(); nsearch.find_neighbors(); // serial calculate prefix_sum for (int i = 0; i < num_particles; i++) { auto &pointSet = nsearch.point_set(0); auto count = pointSet.n_neighbors(0, i); const auto last_sum = i == 0 ? 
0 : neighbor_search_results_size_prefix_sum_host[i-1]; neighbor_search_results_size_prefix_sum_host[i] = last_sum + count; // range for result_i: [sum_{i-1}, sum_{i}) } // ensure capacity const auto minCapacity = neighbor_search_results_size_prefix_sum_host.back(); if (minCapacity > neighbor_search_results_dev_capacity) { neighbor_search_results_dev_capacity = minCapacity * 1.2; neighbor_search_results_host.resize(neighbor_search_results_dev_capacity); cudaFree(neighbor_search_results_dev); cudaMalloc(&neighbor_search_results_dev, sizeof(int) * neighbor_search_results_dev_capacity); } #pragma omp parallel for for (int i = 0; i < num_particles; i++) { // line 6: find neighboring particles auto &pointSet = nsearch.point_set(0); auto count = pointSet.n_neighbors(0, i); const int start = i == 0 ? 0 : neighbor_search_results_size_prefix_sum_host[i-1]; for (int j = 0; j < count; j++) { neighbor_search_results_host[start + j] = pointSet.neighbor(0, i, j); } } // copy neighbor results to device cudaMemcpy( neighbor_search_results_dev, neighbor_search_results_host.data(), sizeof(int) * neighbor_search_results_dev_capacity, cudaMemcpyHostToDevice ); // update size prefix sum to device cudaMemcpy( neighbor_search_results_size_prefix_sum_dev, neighbor_search_results_size_prefix_sum_host.data(), sizeof(int) * num_particles, cudaMemcpyHostToDevice ); cudaDeviceSynchronize(); } void Fluid_cuda::simulate(REAL delta_t, const FluidParameters *fp, thrust::device_vector<Plane_cuda> &collision_objects) { int num_particles = particle_positions->size(); const auto particle_positions_dev = REAL3AsVector3R(particle_positions_device); const auto particle_predicted_positions = REAL3AsVector3R(particle_predicted_positions_device); const auto particle_velocities = REAL3AsVector3R(particle_velocities_device); const auto delta_p = REAL3AsVector3R(delta_p_device); const auto density = fp->density; const auto particle_mass = fp->particle_mass; const auto damping = fp->damping; const auto solverIterations = fp->solverIterations; const auto h = fp->h; const auto epsilon = fp->epsilon; const auto n = fp->n; const auto k = fp->k; const auto c = fp->c; const Vector3R &external_accelerations = fp->external_forces; simulate_update_position_predict_position<<<num_particles,1>>>( num_particles, particle_positions_dev, particle_predicted_positions, particle_velocities, external_accelerations, delta_t ); find_neighbors(); for (int iter = 0; iter < solverIterations; iter++) { calculate_lambda<<<num_particles,1>>>( num_particles, particle_positions_dev, neighbor_search_results_dev, neighbor_search_results_size_prefix_sum_dev, particle_mass, density, epsilon, h, lambda_device ); calculate_delta_pi_and_collision_response<<<num_particles,1>>>( num_particles, particle_positions_dev, neighbor_search_results_dev, neighbor_search_results_size_prefix_sum_dev, delta_p, n, k, h, density, lambda_device, collision_objects.size(), thrust::raw_pointer_cast(collision_objects.data()) ); update_predicted_positions<<<num_particles,1>>>( num_particles, particle_predicted_positions, delta_p ); } update_velocities<<<num_particles,1>>>( num_particles, particle_positions_dev, particle_predicted_positions, particle_velocities, delta_t ); apply_XSPH_viscosity<<<num_particles,1>>>( num_particles, particle_positions_dev, particle_velocities, neighbor_search_results_dev, neighbor_search_results_size_prefix_sum_dev, particle_mass, h, c, delta_t ); const auto SIZE_REAL3_N = sizeof(REAL3) * num_particles; 
copy_predicted_positions_to_position(particle_positions_device, particle_predicted_positions_device, SIZE_REAL3_N);

  // copy result back to host
  cudaMemcpy(particle_positions->data(), particle_positions_device, SIZE_REAL3_N, cudaMemcpyDeviceToHost);
}

#endif
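A note on the solver above: its kernels are already written as grid-stride loops, yet every launch uses <<<num_particles, 1>>>, i.e. one thread per block. A minimal sketch of the more conventional launch configuration for the same kernels follows; the helper name, block size, and grid cap are illustrative assumptions, not part of the original file.

#include <algorithm>

// Hypothetical launch helper for the grid-stride kernels above.
constexpr int kBlockSize = 256;

inline dim3 grid_for(int num_particles, int max_blocks = 4096) {
    int blocks = (num_particles + kBlockSize - 1) / kBlockSize;   // ceil-divide
    return dim3(std::max(1, std::min(blocks, max_blocks)));       // cap the grid, let the stride loop cover the rest
}

// Usage, e.g.:
//   update_predicted_positions<<<grid_for(num_particles), kBlockSize>>>(
//       num_particles, particle_predicted_positions, delta_p);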
81c5f1a53aa695fd2aac3f89eceed4e15bc7c9bd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "chainerx/cuda/cuda_device.h" #include <algorithm> #include <cstdint> #include <memory> #include <tuple> #include <utility> #include <absl/types/optional.h> #include <cudnn.h> #include "chainerx/array.h" #include "chainerx/backend_util.h" #include "chainerx/constant.h" #include "chainerx/cuda/cuda_runtime.h" #include "chainerx/cuda/cuda_set_device_scope.h" #include "chainerx/cuda/cudnn.h" #include "chainerx/cuda/data_type.cuh" #include "chainerx/cuda/kernel_regist.h" #include "chainerx/device.h" #include "chainerx/dtype.h" #include "chainerx/error.h" #include "chainerx/indexable_array.h" #include "chainerx/indexer.h" #include "chainerx/kernels/pooling.h" #include "chainerx/macro.h" #include "chainerx/numeric_limits.h" #include "chainerx/routines/connection.h" #include "chainerx/routines/creation.h" #include "chainerx/routines/pooling.h" #include "chainerx/shape.h" #include "chainerx/stack_vector.h" namespace chainerx { namespace cuda { namespace { // Struct that allows passing StackVectors to CUDA kernels. struct CudaDims { explicit CudaDims(const Dims& stack_vector) { std::copy_n(stack_vector.begin(), stack_vector.size(), data); } int64_t data[kMaxNdim]; }; // Uses the previously computed out to find the indices for which the upstream gradients should be propagated. // It is faster than looking for the argmax again since we only have to do a single comparison. // TODO(hvy): Make the spatial dimensionality a template parameter to allow unrolling the loops. template <typename T> __global__ void MaxPoolDoubleBackwardKernel( IndexableArray<const T> ggx_iarray, IndexableArray<const T> x_iarray, IndexableArray<const T> out_iarray, IndexableArray<T> ggout_iarray, Indexer<> x_indexer, Indexer<> out_indexer, Indexer<> kernel_indexer, CudaDims stride, CudaDims pad) { auto it_kernel = kernel_indexer.It(kernel_indexer.total_size() - 1); auto it_x = x_indexer.It(0); for (auto it_out = out_indexer.It(blockIdx.x * blockDim.x + threadIdx.x, blockDim.x * gridDim.x); it_out; ++it_out) { it_x.index()[0] = it_out.index()[0]; // batch. it_x.index()[1] = it_out.index()[1]; // channel. cuda_internal::StorageType<T> out = out_iarray[it_out]; // Iterate over the kernel in the reverse order, since the resulting index should the be first match. for (it_kernel.Restart(); it_kernel.raw_index() >= 0; --it_kernel) { for (int8_t i = 2; i < x_indexer.ndim(); ++i) { int64_t idx = it_out.index()[i] * stride.data[i - 2] - pad.data[i - 2] + it_kernel.index()[i - 2]; idx = max(idx, int64_t{0}); idx = min(idx, x_indexer.shape()[i] - 1); it_x.index()[i] = idx; } if (out == x_iarray[it_x]) { ggout_iarray[it_out] = ggx_iarray[it_x]; } } } } Array Pool( cudnnPoolingMode_t cudnn_pooling_mode, const Array& x, Dims kernel_size, Dims stride, Dims pad, bool cover_all, const absl::optional<Array>& out) { CHAINERX_ASSERT(kernel_size.size() == static_cast<size_t>(x.ndim() - 2)); CHAINERX_ASSERT(stride.size() == static_cast<size_t>(x.ndim() - 2)); CHAINERX_ASSERT(pad.size() == static_cast<size_t>(x.ndim() - 2)); // TODO(hvy): Implement and test the `out` argument. 
if (out.has_value()) { throw NotImplementedError{"Passing out as an argument is not yet supported."}; } int8_t ndim = x.ndim() - 2; // Number of spatial dimensions if (ndim != 2 && ndim != 3) { throw DimensionError{"ChainerX cuDNN pooling supports only 2 and 3 spatial dimensions."}; } // out_shape = (batch_size, out_channels, out_1, out_2, ..., out_N) Shape out_shape{x.shape()[0], x.shape()[1]}; for (int8_t i = 0; i < ndim; ++i) { out_shape.emplace_back(internal::GetConvOutDim(x.shape()[i + 2], kernel_size[i], stride[i], pad[i], cover_all)); CHAINERX_ASSERT(out_shape.back() > 0); } CudaDevice& device = dynamic_cast<CudaDevice&>(x.device()); Dtype dtype = x.dtype(); CudaSetDeviceScope scope{device.index()}; Array actual_out = Empty(out_shape, dtype, device); Array x_cont = AsContiguousArray(x); cuda_internal::CudnnTensorDescriptor x_desc{x_cont}; cuda_internal::CudnnTensorDescriptor out_desc{actual_out}; cuda_internal::CudnnPoolingDescriptor pool_desc{cudnn_pooling_mode, CUDNN_NOT_PROPAGATE_NAN, kernel_size, pad, stride}; cuda_internal::DeviceInternals& device_internals = cuda_internal::GetDeviceInternals(device); device_internals.cudnn_handle().Call( cudnnPoolingForward, *pool_desc, cuda_internal::GetCudnnCoefficientPtr<1>(dtype), *x_desc, internal::GetRawOffsetData(x_cont), cuda_internal::GetCudnnCoefficientPtr<0>(dtype), *out_desc, internal::GetRawOffsetData(actual_out)); return actual_out; } Array PoolGrad( cudnnPoolingMode_t cudnn_pooling_mode, const Array& x, const Array& out, const Array& gout, Dims kernel_size, Dims stride, Dims pad, const absl::optional<Array>& gx) { CHAINERX_ASSERT(out.shape() == gout.shape()); CHAINERX_ASSERT(kernel_size.size() == static_cast<size_t>(x.ndim() - 2)); CHAINERX_ASSERT(stride.size() == static_cast<size_t>(x.ndim() - 2)); CHAINERX_ASSERT(pad.size() == static_cast<size_t>(x.ndim() - 2)); // TODO(hvy): Implement and test the `gx` argument. 
if (gx.has_value()) { throw NotImplementedError{"Passing gx as an argument is not yet supported."}; } int8_t ndim = x.ndim() - 2; // Number of spatial dimensions if (ndim < 2) { throw DimensionError{"CUDA pooling requires number of spatial dimensions to be greater than or equal to 2"}; } CudaDevice& device = dynamic_cast<CudaDevice&>(x.device()); Dtype dtype = x.dtype(); CudaSetDeviceScope scope{device.index()}; Array actual_gx = EmptyLike(x, device); Array out_cont = AsContiguousArray(out); Array gout_cont = AsContiguousArray(gout); Array x_cont = AsContiguousArray(x); cuda_internal::CudnnTensorDescriptor out_desc{out_cont}; cuda_internal::CudnnTensorDescriptor gout_desc{gout_cont}; cuda_internal::CudnnTensorDescriptor x_desc{x_cont}; cuda_internal::CudnnTensorDescriptor gx_desc{actual_gx}; cuda_internal::CudnnPoolingDescriptor pool_desc{cudnn_pooling_mode, CUDNN_NOT_PROPAGATE_NAN, kernel_size, pad, stride}; cuda_internal::DeviceInternals& device_internals = cuda_internal::GetDeviceInternals(device); device_internals.cudnn_handle().Call( cudnnPoolingBackward, *pool_desc, cuda_internal::GetCudnnCoefficientPtr<1>(dtype), *out_desc, internal::GetRawOffsetData(out_cont), *gout_desc, internal::GetRawOffsetData(gout_cont), *x_desc, internal::GetRawOffsetData(x_cont), cuda_internal::GetCudnnCoefficientPtr<0>(dtype), *gx_desc, internal::GetRawOffsetData(actual_gx)); return actual_gx; } Array MaxPoolGradGrad( const Array& x, const Array& out, const Array& ggx, Dims kernel_size, Dims stride, Dims pad, const absl::optional<Array>& ggout) { CHAINERX_ASSERT(x.shape() == ggx.shape()); CHAINERX_ASSERT(kernel_size.size() == static_cast<size_t>(x.ndim() - 2)); CHAINERX_ASSERT(stride.size() == static_cast<size_t>(x.ndim() - 2)); CHAINERX_ASSERT(pad.size() == static_cast<size_t>(x.ndim() - 2)); // TODO(hvy): Implement and test the `ggout` argument. 
if (ggout.has_value()) { throw NotImplementedError{"Passing ggout as an argument is not yet supported."}; } int8_t ndim = x.ndim() - 2; // Number of spatial dimensions if (ndim < 2) { throw DimensionError{"CUDA pooling requires number of spatial dimensions to be greater than or equal to 2"}; } Device& device = ggx.device(); CudaSetDeviceScope scope{device.index()}; Array actual_ggout = EmptyLike(out, device); VisitFloatingPointDtype(actual_ggout.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; IndexableArray<const T> ggx_iarray{ggx}; IndexableArray<const T> x_iarray{x}; IndexableArray<const T> out_iarray{out}; IndexableArray<T> ggout_iarray{actual_ggout}; Indexer<> x_indexer{x.shape()}; Indexer<> out_indexer{out.shape()}; Indexer<> kernel_indexer{Shape{kernel_size.begin(), kernel_size.end()}}; static const int kMaxBlockSize = CudaOccupancyMaxPotentialBlockSize(&MaxPoolDoubleBackwardKernel<T>).block_size; int64_t total_size = out_indexer.total_size(); int64_t grid_size = (total_size + kMaxBlockSize - 1) / kMaxBlockSize; int64_t block_size = std::min<int64_t>(total_size, kMaxBlockSize); hipLaunchKernelGGL(( MaxPoolDoubleBackwardKernel), dim3(grid_size), dim3(block_size), 0, 0, ggx_iarray, x_iarray, out_iarray, ggout_iarray, x_indexer, out_indexer, kernel_indexer, CudaDims{stride}, CudaDims{pad}); }); return actual_ggout; } class CudaMaxPoolKernel : public MaxPoolKernel { public: std::tuple<Array, std::unique_ptr<MaxPoolGradState>> Call( const Array& x, Dims kernel_size, Dims stride, Dims pad, bool cover_all, bool return_state, const absl::optional<Array>& out) override { CHAINERX_ASSERT(internal::GetArrayBody(x)->nodes().empty()); Array actual_out = Pool(CUDNN_POOLING_MAX, x, kernel_size, stride, pad, cover_all, out); std::unique_ptr<MaxPoolGradState> state = return_state ? std::make_unique<CudaMaxPoolGradState>(x, actual_out) : nullptr; return std::make_tuple(std::move(actual_out), std::move(state)); } }; CHAINERX_CUDA_REGISTER_KERNEL(MaxPoolKernel, CudaMaxPoolKernel); class CudaMaxPoolGradKernel : public MaxPoolGradKernel { public: std::tuple<Array, std::unique_ptr<MaxPoolGradGradState>> Call( const Array& gout, const Dims& kernel_size, const Dims& stride, const Dims& pad, const std::shared_ptr<MaxPoolGradState>& state, bool return_state, const absl::optional<Array>& gx) override { CHAINERX_ASSERT(internal::GetArrayBody(gout)->nodes().empty()); CHAINERX_ASSERT(state != nullptr); CudaMaxPoolGradState& cuda_state = dynamic_cast<CudaMaxPoolGradState&>(*state); const Array& x = cuda_state.x(); const Array& out = cuda_state.out(); Array actual_gx = PoolGrad(CUDNN_POOLING_MAX, x, out, gout, kernel_size, stride, pad, gx); std::unique_ptr<MaxPoolGradGradState> grad_grad_state = return_state ? 
std::make_unique<CudaMaxPoolGradGradState>(x, out) : nullptr; return std::make_tuple(std::move(actual_gx), std::move(grad_grad_state)); } }; CHAINERX_CUDA_REGISTER_KERNEL(MaxPoolGradKernel, CudaMaxPoolGradKernel); class CudaMaxPoolGradGradKernel : public MaxPoolGradGradKernel { public: Array Call( const Array& ggx, const Dims& kernel_size, const Dims& stride, const Dims& pad, bool /*cover_all*/, const std::shared_ptr<MaxPoolGradGradState>& state, const absl::optional<Array>& ggout) override { CHAINERX_ASSERT(internal::GetArrayBody(ggx)->nodes().empty()); CHAINERX_ASSERT(state != nullptr); CudaMaxPoolGradGradState& cuda_state = dynamic_cast<CudaMaxPoolGradGradState&>(*state); const Array& x = cuda_state.x(); const Array& out = cuda_state.out(); return MaxPoolGradGrad(x, out, ggx, kernel_size, stride, pad, ggout); } }; CHAINERX_CUDA_REGISTER_KERNEL(MaxPoolGradGradKernel, CudaMaxPoolGradGradKernel); cudnnPoolingMode_t GetCudnnPoolingMode(AveragePoolPadMode pad_mode) { switch (pad_mode) { case AveragePoolPadMode::kZero: return CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING; case AveragePoolPadMode::kIgnore: return CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING; default: CHAINERX_NEVER_REACH(); } } class CudaAveragePoolKernel : public AveragePoolKernel { public: std::tuple<Array, std::unique_ptr<AveragePoolGradState>> Call( const Array& x, const Dims& kernel_size, const Dims& stride, const Dims& pad, AveragePoolPadMode pad_mode, bool return_state, const absl::optional<Array>& out) override { CHAINERX_ASSERT(internal::GetArrayBody(x)->nodes().empty()); Array actual_out = Pool(GetCudnnPoolingMode(pad_mode), x, kernel_size, stride, pad, false, out); std::unique_ptr<AveragePoolGradState> state = return_state ? std::make_unique<CudaAveragePoolGradState>(x, actual_out) : nullptr; return std::make_tuple(std::move(actual_out), std::move(state)); } }; CHAINERX_CUDA_REGISTER_KERNEL(AveragePoolKernel, CudaAveragePoolKernel); class CudaAveragePoolGradKernel : public AveragePoolGradKernel { public: Array Call( const Array& gout, const Dims& kernel_size, const Dims& stride, const Dims& pad, AveragePoolPadMode pad_mode, const std::shared_ptr<AveragePoolGradState>& state, const absl::optional<Array>& gx) override { CHAINERX_ASSERT(internal::GetArrayBody(gout)->nodes().empty()); CHAINERX_ASSERT(state != nullptr); CudaAveragePoolGradState& cuda_state = dynamic_cast<CudaAveragePoolGradState&>(*state); const Array& x = cuda_state.x(); const Array& out = cuda_state.out(); return PoolGrad(GetCudnnPoolingMode(pad_mode), x, out, gout, kernel_size, stride, pad, gx); } }; CHAINERX_CUDA_REGISTER_KERNEL(AveragePoolGradKernel, CudaAveragePoolGradKernel); } // namespace } // namespace cuda } // namespace chainerx
81c5f1a53aa695fd2aac3f89eceed4e15bc7c9bd.cu
#include "chainerx/cuda/cuda_device.h" #include <algorithm> #include <cstdint> #include <memory> #include <tuple> #include <utility> #include <absl/types/optional.h> #include <cudnn.h> #include "chainerx/array.h" #include "chainerx/backend_util.h" #include "chainerx/constant.h" #include "chainerx/cuda/cuda_runtime.h" #include "chainerx/cuda/cuda_set_device_scope.h" #include "chainerx/cuda/cudnn.h" #include "chainerx/cuda/data_type.cuh" #include "chainerx/cuda/kernel_regist.h" #include "chainerx/device.h" #include "chainerx/dtype.h" #include "chainerx/error.h" #include "chainerx/indexable_array.h" #include "chainerx/indexer.h" #include "chainerx/kernels/pooling.h" #include "chainerx/macro.h" #include "chainerx/numeric_limits.h" #include "chainerx/routines/connection.h" #include "chainerx/routines/creation.h" #include "chainerx/routines/pooling.h" #include "chainerx/shape.h" #include "chainerx/stack_vector.h" namespace chainerx { namespace cuda { namespace { // Struct that allows passing StackVectors to CUDA kernels. struct CudaDims { explicit CudaDims(const Dims& stack_vector) { std::copy_n(stack_vector.begin(), stack_vector.size(), data); } int64_t data[kMaxNdim]; }; // Uses the previously computed out to find the indices for which the upstream gradients should be propagated. // It is faster than looking for the argmax again since we only have to do a single comparison. // TODO(hvy): Make the spatial dimensionality a template parameter to allow unrolling the loops. template <typename T> __global__ void MaxPoolDoubleBackwardKernel( IndexableArray<const T> ggx_iarray, IndexableArray<const T> x_iarray, IndexableArray<const T> out_iarray, IndexableArray<T> ggout_iarray, Indexer<> x_indexer, Indexer<> out_indexer, Indexer<> kernel_indexer, CudaDims stride, CudaDims pad) { auto it_kernel = kernel_indexer.It(kernel_indexer.total_size() - 1); auto it_x = x_indexer.It(0); for (auto it_out = out_indexer.It(blockIdx.x * blockDim.x + threadIdx.x, blockDim.x * gridDim.x); it_out; ++it_out) { it_x.index()[0] = it_out.index()[0]; // batch. it_x.index()[1] = it_out.index()[1]; // channel. cuda_internal::StorageType<T> out = out_iarray[it_out]; // Iterate over the kernel in the reverse order, since the resulting index should the be first match. for (it_kernel.Restart(); it_kernel.raw_index() >= 0; --it_kernel) { for (int8_t i = 2; i < x_indexer.ndim(); ++i) { int64_t idx = it_out.index()[i] * stride.data[i - 2] - pad.data[i - 2] + it_kernel.index()[i - 2]; idx = max(idx, int64_t{0}); idx = min(idx, x_indexer.shape()[i] - 1); it_x.index()[i] = idx; } if (out == x_iarray[it_x]) { ggout_iarray[it_out] = ggx_iarray[it_x]; } } } } Array Pool( cudnnPoolingMode_t cudnn_pooling_mode, const Array& x, Dims kernel_size, Dims stride, Dims pad, bool cover_all, const absl::optional<Array>& out) { CHAINERX_ASSERT(kernel_size.size() == static_cast<size_t>(x.ndim() - 2)); CHAINERX_ASSERT(stride.size() == static_cast<size_t>(x.ndim() - 2)); CHAINERX_ASSERT(pad.size() == static_cast<size_t>(x.ndim() - 2)); // TODO(hvy): Implement and test the `out` argument. 
if (out.has_value()) { throw NotImplementedError{"Passing out as an argument is not yet supported."}; } int8_t ndim = x.ndim() - 2; // Number of spatial dimensions if (ndim != 2 && ndim != 3) { throw DimensionError{"ChainerX cuDNN pooling supports only 2 and 3 spatial dimensions."}; } // out_shape = (batch_size, out_channels, out_1, out_2, ..., out_N) Shape out_shape{x.shape()[0], x.shape()[1]}; for (int8_t i = 0; i < ndim; ++i) { out_shape.emplace_back(internal::GetConvOutDim(x.shape()[i + 2], kernel_size[i], stride[i], pad[i], cover_all)); CHAINERX_ASSERT(out_shape.back() > 0); } CudaDevice& device = dynamic_cast<CudaDevice&>(x.device()); Dtype dtype = x.dtype(); CudaSetDeviceScope scope{device.index()}; Array actual_out = Empty(out_shape, dtype, device); Array x_cont = AsContiguousArray(x); cuda_internal::CudnnTensorDescriptor x_desc{x_cont}; cuda_internal::CudnnTensorDescriptor out_desc{actual_out}; cuda_internal::CudnnPoolingDescriptor pool_desc{cudnn_pooling_mode, CUDNN_NOT_PROPAGATE_NAN, kernel_size, pad, stride}; cuda_internal::DeviceInternals& device_internals = cuda_internal::GetDeviceInternals(device); device_internals.cudnn_handle().Call( cudnnPoolingForward, *pool_desc, cuda_internal::GetCudnnCoefficientPtr<1>(dtype), *x_desc, internal::GetRawOffsetData(x_cont), cuda_internal::GetCudnnCoefficientPtr<0>(dtype), *out_desc, internal::GetRawOffsetData(actual_out)); return actual_out; } Array PoolGrad( cudnnPoolingMode_t cudnn_pooling_mode, const Array& x, const Array& out, const Array& gout, Dims kernel_size, Dims stride, Dims pad, const absl::optional<Array>& gx) { CHAINERX_ASSERT(out.shape() == gout.shape()); CHAINERX_ASSERT(kernel_size.size() == static_cast<size_t>(x.ndim() - 2)); CHAINERX_ASSERT(stride.size() == static_cast<size_t>(x.ndim() - 2)); CHAINERX_ASSERT(pad.size() == static_cast<size_t>(x.ndim() - 2)); // TODO(hvy): Implement and test the `gx` argument. 
if (gx.has_value()) { throw NotImplementedError{"Passing gx as an argument is not yet supported."}; } int8_t ndim = x.ndim() - 2; // Number of spatial dimensions if (ndim < 2) { throw DimensionError{"CUDA pooling requires number of spatial dimensions to be greater than or equal to 2"}; } CudaDevice& device = dynamic_cast<CudaDevice&>(x.device()); Dtype dtype = x.dtype(); CudaSetDeviceScope scope{device.index()}; Array actual_gx = EmptyLike(x, device); Array out_cont = AsContiguousArray(out); Array gout_cont = AsContiguousArray(gout); Array x_cont = AsContiguousArray(x); cuda_internal::CudnnTensorDescriptor out_desc{out_cont}; cuda_internal::CudnnTensorDescriptor gout_desc{gout_cont}; cuda_internal::CudnnTensorDescriptor x_desc{x_cont}; cuda_internal::CudnnTensorDescriptor gx_desc{actual_gx}; cuda_internal::CudnnPoolingDescriptor pool_desc{cudnn_pooling_mode, CUDNN_NOT_PROPAGATE_NAN, kernel_size, pad, stride}; cuda_internal::DeviceInternals& device_internals = cuda_internal::GetDeviceInternals(device); device_internals.cudnn_handle().Call( cudnnPoolingBackward, *pool_desc, cuda_internal::GetCudnnCoefficientPtr<1>(dtype), *out_desc, internal::GetRawOffsetData(out_cont), *gout_desc, internal::GetRawOffsetData(gout_cont), *x_desc, internal::GetRawOffsetData(x_cont), cuda_internal::GetCudnnCoefficientPtr<0>(dtype), *gx_desc, internal::GetRawOffsetData(actual_gx)); return actual_gx; } Array MaxPoolGradGrad( const Array& x, const Array& out, const Array& ggx, Dims kernel_size, Dims stride, Dims pad, const absl::optional<Array>& ggout) { CHAINERX_ASSERT(x.shape() == ggx.shape()); CHAINERX_ASSERT(kernel_size.size() == static_cast<size_t>(x.ndim() - 2)); CHAINERX_ASSERT(stride.size() == static_cast<size_t>(x.ndim() - 2)); CHAINERX_ASSERT(pad.size() == static_cast<size_t>(x.ndim() - 2)); // TODO(hvy): Implement and test the `ggout` argument. 
if (ggout.has_value()) { throw NotImplementedError{"Passing ggout as an argument is not yet supported."}; } int8_t ndim = x.ndim() - 2; // Number of spatial dimensions if (ndim < 2) { throw DimensionError{"CUDA pooling requires number of spatial dimensions to be greater than or equal to 2"}; } Device& device = ggx.device(); CudaSetDeviceScope scope{device.index()}; Array actual_ggout = EmptyLike(out, device); VisitFloatingPointDtype(actual_ggout.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; IndexableArray<const T> ggx_iarray{ggx}; IndexableArray<const T> x_iarray{x}; IndexableArray<const T> out_iarray{out}; IndexableArray<T> ggout_iarray{actual_ggout}; Indexer<> x_indexer{x.shape()}; Indexer<> out_indexer{out.shape()}; Indexer<> kernel_indexer{Shape{kernel_size.begin(), kernel_size.end()}}; static const int kMaxBlockSize = CudaOccupancyMaxPotentialBlockSize(&MaxPoolDoubleBackwardKernel<T>).block_size; int64_t total_size = out_indexer.total_size(); int64_t grid_size = (total_size + kMaxBlockSize - 1) / kMaxBlockSize; int64_t block_size = std::min<int64_t>(total_size, kMaxBlockSize); MaxPoolDoubleBackwardKernel<<<grid_size, block_size>>>( ggx_iarray, x_iarray, out_iarray, ggout_iarray, x_indexer, out_indexer, kernel_indexer, CudaDims{stride}, CudaDims{pad}); }); return actual_ggout; } class CudaMaxPoolKernel : public MaxPoolKernel { public: std::tuple<Array, std::unique_ptr<MaxPoolGradState>> Call( const Array& x, Dims kernel_size, Dims stride, Dims pad, bool cover_all, bool return_state, const absl::optional<Array>& out) override { CHAINERX_ASSERT(internal::GetArrayBody(x)->nodes().empty()); Array actual_out = Pool(CUDNN_POOLING_MAX, x, kernel_size, stride, pad, cover_all, out); std::unique_ptr<MaxPoolGradState> state = return_state ? std::make_unique<CudaMaxPoolGradState>(x, actual_out) : nullptr; return std::make_tuple(std::move(actual_out), std::move(state)); } }; CHAINERX_CUDA_REGISTER_KERNEL(MaxPoolKernel, CudaMaxPoolKernel); class CudaMaxPoolGradKernel : public MaxPoolGradKernel { public: std::tuple<Array, std::unique_ptr<MaxPoolGradGradState>> Call( const Array& gout, const Dims& kernel_size, const Dims& stride, const Dims& pad, const std::shared_ptr<MaxPoolGradState>& state, bool return_state, const absl::optional<Array>& gx) override { CHAINERX_ASSERT(internal::GetArrayBody(gout)->nodes().empty()); CHAINERX_ASSERT(state != nullptr); CudaMaxPoolGradState& cuda_state = dynamic_cast<CudaMaxPoolGradState&>(*state); const Array& x = cuda_state.x(); const Array& out = cuda_state.out(); Array actual_gx = PoolGrad(CUDNN_POOLING_MAX, x, out, gout, kernel_size, stride, pad, gx); std::unique_ptr<MaxPoolGradGradState> grad_grad_state = return_state ? 
std::make_unique<CudaMaxPoolGradGradState>(x, out) : nullptr; return std::make_tuple(std::move(actual_gx), std::move(grad_grad_state)); } }; CHAINERX_CUDA_REGISTER_KERNEL(MaxPoolGradKernel, CudaMaxPoolGradKernel); class CudaMaxPoolGradGradKernel : public MaxPoolGradGradKernel { public: Array Call( const Array& ggx, const Dims& kernel_size, const Dims& stride, const Dims& pad, bool /*cover_all*/, const std::shared_ptr<MaxPoolGradGradState>& state, const absl::optional<Array>& ggout) override { CHAINERX_ASSERT(internal::GetArrayBody(ggx)->nodes().empty()); CHAINERX_ASSERT(state != nullptr); CudaMaxPoolGradGradState& cuda_state = dynamic_cast<CudaMaxPoolGradGradState&>(*state); const Array& x = cuda_state.x(); const Array& out = cuda_state.out(); return MaxPoolGradGrad(x, out, ggx, kernel_size, stride, pad, ggout); } }; CHAINERX_CUDA_REGISTER_KERNEL(MaxPoolGradGradKernel, CudaMaxPoolGradGradKernel); cudnnPoolingMode_t GetCudnnPoolingMode(AveragePoolPadMode pad_mode) { switch (pad_mode) { case AveragePoolPadMode::kZero: return CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING; case AveragePoolPadMode::kIgnore: return CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING; default: CHAINERX_NEVER_REACH(); } } class CudaAveragePoolKernel : public AveragePoolKernel { public: std::tuple<Array, std::unique_ptr<AveragePoolGradState>> Call( const Array& x, const Dims& kernel_size, const Dims& stride, const Dims& pad, AveragePoolPadMode pad_mode, bool return_state, const absl::optional<Array>& out) override { CHAINERX_ASSERT(internal::GetArrayBody(x)->nodes().empty()); Array actual_out = Pool(GetCudnnPoolingMode(pad_mode), x, kernel_size, stride, pad, false, out); std::unique_ptr<AveragePoolGradState> state = return_state ? std::make_unique<CudaAveragePoolGradState>(x, actual_out) : nullptr; return std::make_tuple(std::move(actual_out), std::move(state)); } }; CHAINERX_CUDA_REGISTER_KERNEL(AveragePoolKernel, CudaAveragePoolKernel); class CudaAveragePoolGradKernel : public AveragePoolGradKernel { public: Array Call( const Array& gout, const Dims& kernel_size, const Dims& stride, const Dims& pad, AveragePoolPadMode pad_mode, const std::shared_ptr<AveragePoolGradState>& state, const absl::optional<Array>& gx) override { CHAINERX_ASSERT(internal::GetArrayBody(gout)->nodes().empty()); CHAINERX_ASSERT(state != nullptr); CudaAveragePoolGradState& cuda_state = dynamic_cast<CudaAveragePoolGradState&>(*state); const Array& x = cuda_state.x(); const Array& out = cuda_state.out(); return PoolGrad(GetCudnnPoolingMode(pad_mode), x, out, gout, kernel_size, stride, pad, gx); } }; CHAINERX_CUDA_REGISTER_KERNEL(AveragePoolGradKernel, CudaAveragePoolGradKernel); } // namespace } // namespace cuda } // namespace chainerx
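A note on the pair above: the hipify output differs from the original .cu mainly in the added hip_runtime include, the generated banner comment, and the rewritten kernel launch. A minimal, self-contained sketch of that launch mapping on a toy grid-stride kernel (the kernel name and arguments here are illustrative only):

__global__ void scale(float* x, int n, float a) {
    // stands in for MaxPoolDoubleBackwardKernel in the files above
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
        x[i] *= a;
}

// CUDA launch, as written in the .cu file:
//   scale<<<grid_size, block_size>>>(x, n, a);
// hipify-generated launch, as written in the .hip file
// (the two extra 0 arguments are dynamic shared-memory bytes and the stream):
//   hipLaunchKernelGGL((scale), dim3(grid_size), dim3(block_size), 0, 0, x, n, a);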
814767ba3a6d48b8e6dd50f1a2c5c1035b66c333.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Adds up 1,000,000 times of the block ID to * a variable. * What to observe/ponder: * - Any difference between shared and global memory? * - Does the result differ between runs? */ #include <stdio.h> __device__ __managed__ volatile int global_counter[2]; void check_cuda_errors() { hipError_t rc; rc = hipGetLastError(); if (rc != hipSuccess) { printf("Last CUDA error %s\n", hipGetErrorString(rc)); } } __global__ void shared_mem(int times) { __shared__ int shared_counter[2]; int i; // Zero out both counters shared_counter[threadIdx.x] = 0; for (i = 0; i < times; i++) { shared_counter[threadIdx.x] += blockIdx.x; } printf("Shared (Blk: %d, Th: %d): %d\n", blockIdx.x, threadIdx.x, shared_counter[threadIdx.x]); } __global__ void global_mem(int times) { int i; // Zero out both counters global_counter[threadIdx.x] = 0; for (i = 0; i < times; i++) { global_counter[threadIdx.x] += blockIdx.x; } printf("Global (Blk: %d, Th: %d): %d\n", blockIdx.x, threadIdx.x, global_counter[threadIdx.x]); } int main(int argc, char **argv) { printf("Shared Memory version:\n"); hipLaunchKernelGGL(( shared_mem), dim3(10), dim3(2), 0, 0, 1000000); hipDeviceSynchronize(); check_cuda_errors(); printf("Global Memory version:\n"); hipLaunchKernelGGL(( global_mem), dim3(10), dim3(2), 0, 0, 1000000); hipDeviceSynchronize(); check_cuda_errors(); return 0; }
814767ba3a6d48b8e6dd50f1a2c5c1035b66c333.cu
/** * Adds up 1,000,000 times of the block ID to * a variable. * What to observe/ponder: * - Any difference between shared and global memory? * - Does the result differ between runs? */ #include <stdio.h> __device__ __managed__ volatile int global_counter[2]; void check_cuda_errors() { cudaError_t rc; rc = cudaGetLastError(); if (rc != cudaSuccess) { printf("Last CUDA error %s\n", cudaGetErrorString(rc)); } } __global__ void shared_mem(int times) { __shared__ int shared_counter[2]; int i; // Zero out both counters shared_counter[threadIdx.x] = 0; for (i = 0; i < times; i++) { shared_counter[threadIdx.x] += blockIdx.x; } printf("Shared (Blk: %d, Th: %d): %d\n", blockIdx.x, threadIdx.x, shared_counter[threadIdx.x]); } __global__ void global_mem(int times) { int i; // Zero out both counters global_counter[threadIdx.x] = 0; for (i = 0; i < times; i++) { global_counter[threadIdx.x] += blockIdx.x; } printf("Global (Blk: %d, Th: %d): %d\n", blockIdx.x, threadIdx.x, global_counter[threadIdx.x]); } int main(int argc, char **argv) { printf("Shared Memory version:\n"); shared_mem<<<10, 2>>>(1000000); cudaDeviceSynchronize(); check_cuda_errors(); printf("Global Memory version:\n"); global_mem<<<10, 2>>>(1000000); cudaDeviceSynchronize(); check_cuda_errors(); return 0; }
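A note on the pair above: the shared-memory kernel accumulates into per-block shared counters, so each block deterministically prints blockIdx.x * times, while the global-memory kernel has all 10 blocks zeroing and incrementing the same two managed counters with a plain +=, so the printed totals can vary between runs. A race-free sketch for comparison, assuming the global_counter definition from the file above and zeroing done on the host before launch (managed memory is host-accessible); with that, each counter should end at 45,000,000 (= (0+1+...+9) * 1,000,000):

// Host side, before the launch:
//   global_counter[0] = 0; global_counter[1] = 0;
__global__ void global_mem_atomic(int times) {
    for (int i = 0; i < times; i++) {
        // atomicAdd does not accept a volatile pointer, hence the cast
        atomicAdd((int *)&global_counter[threadIdx.x], blockIdx.x);
    }
}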
76877b941fea7852d4733cab692ecd6bf776037f.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "extractValues.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; void *fb = NULL; hipMalloc(&fb, XSIZE*YSIZE); int *voxels = NULL; hipMalloc(&voxels, XSIZE*YSIZE); int num_voxels = 1; int *values = NULL; hipMalloc(&values, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( extractValues), dim3(gridBlock),dim3(threadBlock), 0, 0, fb,voxels,num_voxels,values); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( extractValues), dim3(gridBlock),dim3(threadBlock), 0, 0, fb,voxels,num_voxels,values); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( extractValues), dim3(gridBlock),dim3(threadBlock), 0, 0, fb,voxels,num_voxels,values); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
76877b941fea7852d4733cab692ecd6bf776037f.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "extractValues.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; void *fb = NULL; cudaMalloc(&fb, XSIZE*YSIZE); int *voxels = NULL; cudaMalloc(&voxels, XSIZE*YSIZE); int num_voxels = 1; int *values = NULL; cudaMalloc(&values, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); extractValues<<<gridBlock,threadBlock>>>(fb,voxels,num_voxels,values); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { extractValues<<<gridBlock,threadBlock>>>(fb,voxels,num_voxels,values); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { extractValues<<<gridBlock,threadBlock>>>(fb,voxels,num_voxels,values); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
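A note on the benchmark above: kernel launches are asynchronous, and the timed loop reads steady_clock right after enqueuing 1000 launches without a final cudaDeviceSynchronize(), so the reported microseconds largely reflect enqueue overhead rather than kernel execution time. A minimal event-based timing sketch for the same loop, reusing the variable names from the file above:

cudaEvent_t start_ev, stop_ev;
cudaEventCreate(&start_ev);
cudaEventCreate(&stop_ev);

cudaEventRecord(start_ev);
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    extractValues<<<gridBlock, threadBlock>>>(fb, voxels, num_voxels, values);
}
cudaEventRecord(stop_ev);
cudaEventSynchronize(stop_ev);   // wait until all 1000 launches have actually finished

float elapsed_ms = 0.0f;
cudaEventElapsedTime(&elapsed_ms, start_ev, stop_ev);
cudaEventDestroy(start_ev);
cudaEventDestroy(stop_ev);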
b724f0b3ce50aea3465fe93e6297b69fcbe7d6be.hip
// !!! This is a file automatically generated by hipify!!! #include <torch/extension.h> #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THHAtomics.cuh> #include <THH/THHDeviceUtils.cuh> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <cfloat> #include <iostream> #include "common.hpp" using std::cout; using std::endl; #define BLOCKSIZE 512 // TODO: // at::numeric_limits<scalar_t>::lowest; // implement like pytorch-softmax: two kernels: one is for inner size to be 1, and the other is for spatial. Besides, in the spatial kernel method, we should use threadIdx.x and threadIdx.y for dimsize and inner size parallelization // define spatial kernel block like this: /* * inline dim3 SpatialSoftMax_getBlockSize( * uint64_t outer_size, uint64_t dim_size, uint64_t inner_size) { * uint32_t inner_threads = inner_size; const int max_threads = 1024; * inner_threads = ::min(inner_threads, static_cast<uint32_t>(max_threads)); * uint32_t dim_threads = 1; * if (inner_threads <= 64 && dim_size >= 64) { * while (inner_threads * dim_threads <= max_threads && dim_threads <= dim_size) * dim_threads *= 2; * dim_threads /= 2; * } * return dim3(dim_threads, inner_threads); * } * */ // consider max_active_blocks when assign grid blocks, the total number of blocks should not be greater than max_active_blocks which is multiProcessCount namespace large_margin_space { template<typename scalar_t> __forceinline__ __device__ void reduce_max(scalar_t* sdata, int tid) { __syncthreads(); for (unsigned int s{blockDim.x / 2}; s > 0; s >>= 1) { if (tid < s) { if (sdata[tid] < sdata[tid + s]) sdata[tid] = sdata[tid + s]; } __syncthreads(); } } template<typename scalar_t> __forceinline__ __device__ void reduce_sum(scalar_t* sdata, int tid) { __syncthreads(); for (unsigned int s{blockDim.x / 2}; s > 0; s >>= 1) { if (tid < s) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } } template<typename scalar_t> __forceinline__ __device__ void compute_reduce_values( const scalar_t* logits, scalar_t* sdata, const int dimsize, const int m_size, int n_idx, int m_idx, int64_t lb, int tid) { // b is max logits without target // b+1 is max logits with target // b+2 is sum of exp without target // b+3 is sum of exp with target // compute max with and without label index const scalar_t zero(0.); __syncthreads(); sdata[tid] = scalar_t(-10000.); __syncthreads(); for (int j{tid}; j < dimsize; j += blockDim.x) { if (j == lb) continue; int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; if (val > sdata[tid]) sdata[tid] = val; } reduce_max(sdata, tid); if (tid == 0) { sdata[blockDim.x] = sdata[0]; sdata[blockDim.x + 1] = sdata[0]; int idx = n_idx * dimsize * m_size + lb * m_size + m_idx; scalar_t val = logits[idx]; if (val > sdata[0]) sdata[blockDim.x + 1] = val; } __syncthreads(); // compute sum of exp with and without label index sdata[tid] = zero; __syncthreads(); for (int j{tid}; j < dimsize; j += blockDim.x) { if (j == lb) continue; int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; sdata[tid] += exp(val - sdata[blockDim.x]); } reduce_sum<scalar_t>(sdata, tid); if (tid == 0) sdata[blockDim.x + 2] = sdata[0]; __syncthreads(); sdata[tid] = zero; __syncthreads(); for (int j{tid}; j < dimsize; j += blockDim.x) { int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; sdata[tid] += exp(val - sdata[blockDim.x + 1]); } reduce_sum<scalar_t>(sdata, tid); if (tid == 0) sdata[blockDim.x + 3] = sdata[0]; } template<typename 
scalar_t> __forceinline__ __device__ void compute_sum_of_qx( const scalar_t* logits, scalar_t* sdata, const int dimsize, const int m_size, int n_idx, int m_idx, int64_t lb, int tid) { // compute sum of q * x to sdata[blockDim.x + 5] const scalar_t zero(0.); __syncthreads(); sdata[tid] = zero; __syncthreads(); for (int j{tid}; j < dimsize; j += blockDim.x) { if (j == lb) continue; int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; sdata[tid] += val * exp(val - sdata[blockDim.x]); } reduce_sum<scalar_t>(sdata, tid); if (tid == 0) { sdata[blockDim.x + 5] = sdata[0] / sdata[blockDim.x + 2]; } } } // kernel function for forward and backward template<typename scalar_t> __global__ void LMarginLossForward(const int n_size, const int dimsize, const int m_size, const scalar_t *logits, const int64_t *labels, scalar_t *losses, const int64_t ignore_index, const float lam) { // shared memory // b+4 is coeff of 1/(dimsize - 1) extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[]; scalar_t *sdata = reinterpret_cast<scalar_t*>(sdata_raw); sdata = sdata + (blockDim.x + 8) * threadIdx.y; scalar_t zero(0.f); int tid = threadIdx.x; int sample_id = blockIdx.x * blockDim.y + threadIdx.y; int sample_offset = gridDim.x * blockDim.y; if (tid == 0) { sdata[blockDim.x + 4] = scalar_t(1.) / (dimsize - 1); } int samplesize = n_size * m_size; for (int i{sample_id}; i < samplesize; i += sample_offset) { int64_t lb = labels[i]; if (lb == ignore_index) { if (tid == 0) losses[i] = zero; continue; } int n_idx = i / m_size; int m_idx = i % m_size; large_margin_space::compute_reduce_values<scalar_t>(logits, sdata, dimsize, m_size, n_idx, m_idx, lb, tid); sdata[tid] = zero; __syncthreads(); for (int j{tid}; j < dimsize; j+=blockDim.x) { int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t dval = logits[idx]; scalar_t term(0); if (j == lb) { term = -(dval - sdata[blockDim.x + 1]); term += log(sdata[blockDim.x + 3]); } else { dval -= sdata[blockDim.x]; term = exp(dval) / sdata[blockDim.x + 2]; term -= sdata[blockDim.x + 4]; term *= (dval - log(sdata[blockDim.x + 2])); term *= scalar_t(lam / 2.f); } sdata[tid] += term; } large_margin_space::reduce_sum<scalar_t>(sdata, tid); if (tid == 0) losses[i] = sdata[0]; } } template<typename scalar_t> __global__ void LMarginLossBackward(const int n_size, const int dimsize, const int m_size, scalar_t *grad_logits, const scalar_t *logits, const int64_t *labels, const int64_t ignore_index, const float lam) { extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[]; scalar_t *sdata = reinterpret_cast<scalar_t*>(sdata_raw); sdata = sdata + (blockDim.x + 8) * threadIdx.y; scalar_t zero(0.f); int tid = threadIdx.x; int sample_id = blockIdx.x * blockDim.y + threadIdx.y; int sample_offset = gridDim.x * blockDim.y; if (tid == 0) { sdata[blockDim.x + 4] = 1. 
/ (dimsize - 1); } int samplesize = n_size * m_size; for (int i{sample_id}; i < samplesize; i += sample_offset) { int64_t lb = labels[i]; int n_idx = i / m_size; int m_idx = i % m_size; if (lb == ignore_index) { for (int j{tid}; j < dimsize; j += blockDim.x) { int idx = n_idx * dimsize * m_size + j * m_size + m_idx; grad_logits[idx] = zero; } continue; } large_margin_space::compute_reduce_values<scalar_t>(logits, sdata, dimsize, m_size, n_idx, m_idx, lb, tid); large_margin_space::compute_sum_of_qx<scalar_t>(logits, sdata, dimsize, m_size, n_idx, m_idx, lb, tid); const scalar_t one(1.f); for (int j{tid}; j < dimsize; j += blockDim.x) { int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; scalar_t pc = exp(val - sdata[blockDim.x + 1]) / sdata[blockDim.x + 3]; scalar_t gval; if (j == lb) { gval = pc - one; } else { gval = val - sdata[blockDim.x + 5] + one; gval *= exp(val - sdata[blockDim.x]) / sdata[blockDim.x + 2]; gval = pc + (gval - sdata[blockDim.x + 4]) * scalar_t(lam / 2.); } grad_logits[idx] = gval; } } } template<typename scalar_t> __global__ void SpatialLMarginLossForward(const int n_size, const int dimsize, const int m_size, const scalar_t *logits, const int64_t *labels, scalar_t *losses, const int64_t ignore_index, const float lam) { // shared memory __shared__ int sdata[BLOCKSIZE]; sdata[0] = blockIdx.x * blockDim.x + threadIdx.x; //tid sdata[1] = n_size * m_size; // samplesize sdata[2] = gridDim.x * blockDim.x; // sample_offset for (int i{sdata[0]}; i < sdata[1]; i += sdata[2]) { int lb = static_cast<int>(labels[i]); if (lb == ignore_index) { losses[i] = scalar_t(0.f); continue; } int n_idx = i / m_size; int m_idx = i % m_size; // compute max scalar_t max_with_lb(-10000.f); scalar_t max_no_lb(-10000.f); for (int j{0}; j < dimsize; ++j) { int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; if (val > max_with_lb) max_with_lb = val; if (j == lb) continue; if (val > max_no_lb) max_no_lb = val; } // compute sum of exp scalar_t sum_with_lb(0.); scalar_t sum_no_lb(0.); for (int j{0}; j < dimsize; ++j) { int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; sum_with_lb += exp(val - max_with_lb); if (j == lb) continue; sum_no_lb += exp(val - max_no_lb); } // compute loss scalar_t loss_val(0.); for (int j{0}; j < dimsize; ++j) { int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; if (j == lb) { loss_val += - (val - max_with_lb) + log(sum_with_lb); } else { loss_val += scalar_t(lam / 2.) * (exp(val - max_no_lb) / sum_no_lb - (scalar_t(1.) 
/ (dimsize - 1))) * (val - max_no_lb - log(sum_no_lb)); } } losses[i] = loss_val; } } template<typename scalar_t> __global__ void SpatialLMarginLossBackward(const int n_size, const int dimsize, const int m_size, scalar_t *grad_logits, const scalar_t *logits, const int64_t *labels, const int64_t ignore_index, const float lam) { // shared memory __shared__ int sdata[BLOCKSIZE]; sdata[0] = blockIdx.x * blockDim.x + threadIdx.x; //tid sdata[1] = n_size * m_size; // samplesize sdata[2] = gridDim.x * blockDim.x; // sample_offset const scalar_t one(1.); for (int i{sdata[0]}; i < sdata[1]; i += sdata[2]) { int lb = static_cast<int>(labels[i]); int n_idx = i / m_size; int m_idx = i % m_size; if (lb == ignore_index) { for (int j{0}; j < dimsize; ++j) { int idx = n_idx * dimsize * m_size + j * m_size + m_idx; grad_logits[idx] = scalar_t(0.f); } continue; } // compute max scalar_t max_with_lb(-10000.); scalar_t max_no_lb(-10000.); for (int j{0}; j < dimsize; ++j) { int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; if (val > max_with_lb) max_with_lb = val; if (j == lb) continue; if (val > max_no_lb) max_no_lb = val; } // compute sum of exp scalar_t sum_with_lb(0.); scalar_t sum_no_lb(0.); for (int j{0}; j < dimsize; ++j) { int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; sum_with_lb += exp(val - max_with_lb); if (j == lb) continue; sum_no_lb += exp(val - max_no_lb); } // compute sum of qx scalar_t sum_qx(0.); for (int j{0}; j < dimsize; ++j) { if (j == lb) continue; int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; sum_qx += val * exp(val - max_no_lb) / sum_no_lb; } // compute grads for (int j{0}; j < dimsize; ++j) { int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; if (lb == j) { grad_logits[idx] = exp(val - max_with_lb) / sum_with_lb - one; } else { grad_logits[idx] = exp(val - max_with_lb) / sum_with_lb + scalar_t(lam / 2.) 
* ((val + one - sum_qx) * exp(val - max_no_lb) / sum_no_lb - (one / (dimsize - 1))); } } } } // cuda forward and backward at::Tensor large_margin_forward_cuda(const at::Tensor &logits, const at::Tensor &labels, const int64_t ignore_index, const float lam) { // CHECK type and shape AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda"); AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda"); const int n_size = logits.size(0); const int dimsize = logits.size(1); const int m_size = logits.numel() / (n_size * dimsize); const int samplesize = labels.numel(); // allocate memory and cuda grid/block auto losses = torch::empty_like(labels, logits.options()); if (losses.numel() == 0) { AT_CUDA_CHECK(hipGetLastError()); return losses; } // call kernel if (dimsize < 32 && samplesize > 4096) { int gridx = ::max(::min(4096, samplesize / BLOCKSIZE), 1); dim3 block(BLOCKSIZE); dim3 grid(gridx); AT_DISPATCH_FLOATING_TYPES_AND_HALF(losses.scalar_type(), "large margin forward", [&] { int shm_size = BLOCKSIZE * sizeof(scalar_t); hipLaunchKernelGGL(( SpatialLMarginLossForward<scalar_t>), dim3(grid), dim3(block), shm_size, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), n_size, dimsize, m_size, logits.contiguous().data_ptr<scalar_t>(), labels.contiguous().data_ptr<int64_t>(), losses.contiguous().data_ptr<scalar_t>(), ignore_index, lam ); }); } else { int blockx = 32; while (blockx < dimsize) blockx *= 2; blockx = ::max(::min(BLOCKSIZE, blockx / 2), 32); int blocky = ::max(::min(samplesize, BLOCKSIZE / blockx), 1); int gridx = ::max(::min(4096, samplesize / blocky), 1); int n_shm = (blockx + 8) * blocky; dim3 block(blockx, blocky); dim3 grid(gridx); AT_DISPATCH_FLOATING_TYPES_AND_HALF(losses.scalar_type(), "large margin forward", [&] { int shm_size = n_shm * sizeof(scalar_t); hipLaunchKernelGGL(( LMarginLossForward<scalar_t>), dim3(grid), dim3(block), shm_size, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), n_size, dimsize, m_size, logits.contiguous().data_ptr<scalar_t>(), labels.contiguous().data_ptr<int64_t>(), losses.contiguous().data_ptr<scalar_t>(), ignore_index, lam ); }); } AT_CUDA_CHECK(hipGetLastError()); return losses; } at::Tensor large_margin_backward_cuda(const at::Tensor &logits, const at::Tensor &labels, const int64_t ignore_index, const float lam) { // CHECK type and shape AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda"); AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda"); const int n_size = logits.size(0); const int dimsize = logits.size(1); const int m_size = logits.numel() / (n_size * dimsize); const int samplesize = labels.numel(); // allocate memory and cuda grid/block auto grad_logits = torch::empty_like(logits); if (grad_logits.numel() == 0) { AT_CUDA_CHECK(hipGetLastError()); return grad_logits; } if (dimsize < 32 && samplesize > 4096) { int gridx = ::max(::min(4096, samplesize / BLOCKSIZE), 1); dim3 block(BLOCKSIZE); dim3 grid(gridx); AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_logits.scalar_type(), "large margin backwrd", [&] { int shm_size = BLOCKSIZE * sizeof(scalar_t); hipLaunchKernelGGL(( SpatialLMarginLossBackward<scalar_t>), dim3(grid), dim3(block), shm_size, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), n_size, dimsize, m_size, grad_logits.contiguous().data_ptr<scalar_t>(), logits.contiguous().data_ptr<scalar_t>(), labels.contiguous().data_ptr<int64_t>(), ignore_index, lam ); }); } else { int blockx = 32; while (blockx < dimsize) blockx *= 2; blockx = ::max(::min(BLOCKSIZE, blockx / 2), 
32); int blocky = ::max(::min(samplesize, BLOCKSIZE / blockx), 1); int gridx = ::max(::min(4096, samplesize / blocky), 1); int n_shm = (blockx + 8) * blocky; dim3 block(blockx, blocky); dim3 grid(gridx); // call kernel AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_logits.scalar_type(), "large margin backwrd", [&] { int shm_size = n_shm * sizeof(scalar_t); hipLaunchKernelGGL(( LMarginLossBackward<scalar_t>), dim3(grid), dim3(block), shm_size, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), n_size, dimsize, m_size, grad_logits.contiguous().data_ptr<scalar_t>(), logits.contiguous().data_ptr<scalar_t>(), labels.contiguous().data_ptr<int64_t>(), ignore_index, lam ); }); } AT_CUDA_CHECK(hipGetLastError()); return grad_logits; } // python inferface at::Tensor large_margin_forward(const at::Tensor &logits, const at::Tensor &labels, const float lam, const int64_t ignore_index) { if ((logits.device().type() != c10::kCUDA) || (labels.device().type() != c10::kCUDA)) { AT_ERROR("this large margin loss only supports gpu mode\n"); } at::DeviceGuard guard(logits.device()); return large_margin_forward_cuda(logits, labels, ignore_index, lam); } at::Tensor large_margin_backward(const at::Tensor &logits, const at::Tensor &labels, const float lam, const int64_t ignore_index) { // TODO: try AT_ASSERTM if ((logits.device().type() != c10::kCUDA) || (labels.device().type() != c10::kCUDA)) { AT_ERROR("this large margin loss only supports gpu mode\n"); } at::DeviceGuard guard(logits.device()); return large_margin_backward_cuda(logits, labels, ignore_index, lam); } PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("l_margin_forward", &large_margin_forward, "large margin forward"); m.def("l_margin_backward", &large_margin_backward, "large margin backward"); }
b724f0b3ce50aea3465fe93e6297b69fcbe7d6be.cu
#include <torch/extension.h> #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THCAtomics.cuh> #include <THC/THCDeviceUtils.cuh> #include <cuda.h> #include <cuda_runtime.h> #include <cfloat> #include <iostream> #include "common.hpp" using std::cout; using std::endl; #define BLOCKSIZE 512 // TODO: // at::numeric_limits<scalar_t>::lowest; // implement like pytorch-softmax: two kernels: one is for inner size to be 1, and the other is for spatial. Besides, in the spatial kernel method, we should use threadIdx.x and threadIdx.y for dimsize and inner size parallelization // define spatial kernel block like this: /* * inline dim3 SpatialSoftMax_getBlockSize( * uint64_t outer_size, uint64_t dim_size, uint64_t inner_size) { * uint32_t inner_threads = inner_size; const int max_threads = 1024; * inner_threads = std::min(inner_threads, static_cast<uint32_t>(max_threads)); * uint32_t dim_threads = 1; * if (inner_threads <= 64 && dim_size >= 64) { * while (inner_threads * dim_threads <= max_threads && dim_threads <= dim_size) * dim_threads *= 2; * dim_threads /= 2; * } * return dim3(dim_threads, inner_threads); * } * */ // consider max_active_blocks when assign grid blocks, the total number of blocks should not be greater than max_active_blocks which is multiProcessCount namespace large_margin_space { template<typename scalar_t> __forceinline__ __device__ void reduce_max(scalar_t* sdata, int tid) { __syncthreads(); for (unsigned int s{blockDim.x / 2}; s > 0; s >>= 1) { if (tid < s) { if (sdata[tid] < sdata[tid + s]) sdata[tid] = sdata[tid + s]; } __syncthreads(); } } template<typename scalar_t> __forceinline__ __device__ void reduce_sum(scalar_t* sdata, int tid) { __syncthreads(); for (unsigned int s{blockDim.x / 2}; s > 0; s >>= 1) { if (tid < s) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } } template<typename scalar_t> __forceinline__ __device__ void compute_reduce_values( const scalar_t* logits, scalar_t* sdata, const int dimsize, const int m_size, int n_idx, int m_idx, int64_t lb, int tid) { // b is max logits without target // b+1 is max logits with target // b+2 is sum of exp without target // b+3 is sum of exp with target // compute max with and without label index const scalar_t zero(0.); __syncthreads(); sdata[tid] = scalar_t(-10000.); __syncthreads(); for (int j{tid}; j < dimsize; j += blockDim.x) { if (j == lb) continue; int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; if (val > sdata[tid]) sdata[tid] = val; } reduce_max(sdata, tid); if (tid == 0) { sdata[blockDim.x] = sdata[0]; sdata[blockDim.x + 1] = sdata[0]; int idx = n_idx * dimsize * m_size + lb * m_size + m_idx; scalar_t val = logits[idx]; if (val > sdata[0]) sdata[blockDim.x + 1] = val; } __syncthreads(); // compute sum of exp with and without label index sdata[tid] = zero; __syncthreads(); for (int j{tid}; j < dimsize; j += blockDim.x) { if (j == lb) continue; int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; sdata[tid] += exp(val - sdata[blockDim.x]); } reduce_sum<scalar_t>(sdata, tid); if (tid == 0) sdata[blockDim.x + 2] = sdata[0]; __syncthreads(); sdata[tid] = zero; __syncthreads(); for (int j{tid}; j < dimsize; j += blockDim.x) { int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; sdata[tid] += exp(val - sdata[blockDim.x + 1]); } reduce_sum<scalar_t>(sdata, tid); if (tid == 0) sdata[blockDim.x + 3] = sdata[0]; } template<typename scalar_t> __forceinline__ __device__ void compute_sum_of_qx( const 
scalar_t* logits, scalar_t* sdata, const int dimsize, const int m_size, int n_idx, int m_idx, int64_t lb, int tid) { // compute sum of q * x to sdata[blockDim.x + 5] const scalar_t zero(0.); __syncthreads(); sdata[tid] = zero; __syncthreads(); for (int j{tid}; j < dimsize; j += blockDim.x) { if (j == lb) continue; int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; sdata[tid] += val * exp(val - sdata[blockDim.x]); } reduce_sum<scalar_t>(sdata, tid); if (tid == 0) { sdata[blockDim.x + 5] = sdata[0] / sdata[blockDim.x + 2]; } } } // kernel function for forward and backward template<typename scalar_t> __global__ void LMarginLossForward(const int n_size, const int dimsize, const int m_size, const scalar_t *logits, const int64_t *labels, scalar_t *losses, const int64_t ignore_index, const float lam) { // shared memory // b+4 is coeff of 1/(dimsize - 1) extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[]; scalar_t *sdata = reinterpret_cast<scalar_t*>(sdata_raw); sdata = sdata + (blockDim.x + 8) * threadIdx.y; scalar_t zero(0.f); int tid = threadIdx.x; int sample_id = blockIdx.x * blockDim.y + threadIdx.y; int sample_offset = gridDim.x * blockDim.y; if (tid == 0) { sdata[blockDim.x + 4] = scalar_t(1.) / (dimsize - 1); } int samplesize = n_size * m_size; for (int i{sample_id}; i < samplesize; i += sample_offset) { int64_t lb = labels[i]; if (lb == ignore_index) { if (tid == 0) losses[i] = zero; continue; } int n_idx = i / m_size; int m_idx = i % m_size; large_margin_space::compute_reduce_values<scalar_t>(logits, sdata, dimsize, m_size, n_idx, m_idx, lb, tid); sdata[tid] = zero; __syncthreads(); for (int j{tid}; j < dimsize; j+=blockDim.x) { int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t dval = logits[idx]; scalar_t term(0); if (j == lb) { term = -(dval - sdata[blockDim.x + 1]); term += log(sdata[blockDim.x + 3]); } else { dval -= sdata[blockDim.x]; term = exp(dval) / sdata[blockDim.x + 2]; term -= sdata[blockDim.x + 4]; term *= (dval - log(sdata[blockDim.x + 2])); term *= scalar_t(lam / 2.f); } sdata[tid] += term; } large_margin_space::reduce_sum<scalar_t>(sdata, tid); if (tid == 0) losses[i] = sdata[0]; } } template<typename scalar_t> __global__ void LMarginLossBackward(const int n_size, const int dimsize, const int m_size, scalar_t *grad_logits, const scalar_t *logits, const int64_t *labels, const int64_t ignore_index, const float lam) { extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[]; scalar_t *sdata = reinterpret_cast<scalar_t*>(sdata_raw); sdata = sdata + (blockDim.x + 8) * threadIdx.y; scalar_t zero(0.f); int tid = threadIdx.x; int sample_id = blockIdx.x * blockDim.y + threadIdx.y; int sample_offset = gridDim.x * blockDim.y; if (tid == 0) { sdata[blockDim.x + 4] = 1. 
/ (dimsize - 1); } int samplesize = n_size * m_size; for (int i{sample_id}; i < samplesize; i += sample_offset) { int64_t lb = labels[i]; int n_idx = i / m_size; int m_idx = i % m_size; if (lb == ignore_index) { for (int j{tid}; j < dimsize; j += blockDim.x) { int idx = n_idx * dimsize * m_size + j * m_size + m_idx; grad_logits[idx] = zero; } continue; } large_margin_space::compute_reduce_values<scalar_t>(logits, sdata, dimsize, m_size, n_idx, m_idx, lb, tid); large_margin_space::compute_sum_of_qx<scalar_t>(logits, sdata, dimsize, m_size, n_idx, m_idx, lb, tid); const scalar_t one(1.f); for (int j{tid}; j < dimsize; j += blockDim.x) { int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; scalar_t pc = exp(val - sdata[blockDim.x + 1]) / sdata[blockDim.x + 3]; scalar_t gval; if (j == lb) { gval = pc - one; } else { gval = val - sdata[blockDim.x + 5] + one; gval *= exp(val - sdata[blockDim.x]) / sdata[blockDim.x + 2]; gval = pc + (gval - sdata[blockDim.x + 4]) * scalar_t(lam / 2.); } grad_logits[idx] = gval; } } } template<typename scalar_t> __global__ void SpatialLMarginLossForward(const int n_size, const int dimsize, const int m_size, const scalar_t *logits, const int64_t *labels, scalar_t *losses, const int64_t ignore_index, const float lam) { // shared memory __shared__ int sdata[BLOCKSIZE]; sdata[0] = blockIdx.x * blockDim.x + threadIdx.x; //tid sdata[1] = n_size * m_size; // samplesize sdata[2] = gridDim.x * blockDim.x; // sample_offset for (int i{sdata[0]}; i < sdata[1]; i += sdata[2]) { int lb = static_cast<int>(labels[i]); if (lb == ignore_index) { losses[i] = scalar_t(0.f); continue; } int n_idx = i / m_size; int m_idx = i % m_size; // compute max scalar_t max_with_lb(-10000.f); scalar_t max_no_lb(-10000.f); for (int j{0}; j < dimsize; ++j) { int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; if (val > max_with_lb) max_with_lb = val; if (j == lb) continue; if (val > max_no_lb) max_no_lb = val; } // compute sum of exp scalar_t sum_with_lb(0.); scalar_t sum_no_lb(0.); for (int j{0}; j < dimsize; ++j) { int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; sum_with_lb += exp(val - max_with_lb); if (j == lb) continue; sum_no_lb += exp(val - max_no_lb); } // compute loss scalar_t loss_val(0.); for (int j{0}; j < dimsize; ++j) { int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; if (j == lb) { loss_val += - (val - max_with_lb) + log(sum_with_lb); } else { loss_val += scalar_t(lam / 2.) * (exp(val - max_no_lb) / sum_no_lb - (scalar_t(1.) 
/ (dimsize - 1))) * (val - max_no_lb - log(sum_no_lb)); } } losses[i] = loss_val; } } template<typename scalar_t> __global__ void SpatialLMarginLossBackward(const int n_size, const int dimsize, const int m_size, scalar_t *grad_logits, const scalar_t *logits, const int64_t *labels, const int64_t ignore_index, const float lam) { // shared memory __shared__ int sdata[BLOCKSIZE]; sdata[0] = blockIdx.x * blockDim.x + threadIdx.x; //tid sdata[1] = n_size * m_size; // samplesize sdata[2] = gridDim.x * blockDim.x; // sample_offset const scalar_t one(1.); for (int i{sdata[0]}; i < sdata[1]; i += sdata[2]) { int lb = static_cast<int>(labels[i]); int n_idx = i / m_size; int m_idx = i % m_size; if (lb == ignore_index) { for (int j{0}; j < dimsize; ++j) { int idx = n_idx * dimsize * m_size + j * m_size + m_idx; grad_logits[idx] = scalar_t(0.f); } continue; } // compute max scalar_t max_with_lb(-10000.); scalar_t max_no_lb(-10000.); for (int j{0}; j < dimsize; ++j) { int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; if (val > max_with_lb) max_with_lb = val; if (j == lb) continue; if (val > max_no_lb) max_no_lb = val; } // compute sum of exp scalar_t sum_with_lb(0.); scalar_t sum_no_lb(0.); for (int j{0}; j < dimsize; ++j) { int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; sum_with_lb += exp(val - max_with_lb); if (j == lb) continue; sum_no_lb += exp(val - max_no_lb); } // compute sum of qx scalar_t sum_qx(0.); for (int j{0}; j < dimsize; ++j) { if (j == lb) continue; int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; sum_qx += val * exp(val - max_no_lb) / sum_no_lb; } // compute grads for (int j{0}; j < dimsize; ++j) { int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; if (lb == j) { grad_logits[idx] = exp(val - max_with_lb) / sum_with_lb - one; } else { grad_logits[idx] = exp(val - max_with_lb) / sum_with_lb + scalar_t(lam / 2.) 
* ((val + one - sum_qx) * exp(val - max_no_lb) / sum_no_lb - (one / (dimsize - 1))); } } } } // cuda forward and backward at::Tensor large_margin_forward_cuda(const at::Tensor &logits, const at::Tensor &labels, const int64_t ignore_index, const float lam) { // CHECK type and shape AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda"); AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda"); const int n_size = logits.size(0); const int dimsize = logits.size(1); const int m_size = logits.numel() / (n_size * dimsize); const int samplesize = labels.numel(); // allocate memory and cuda grid/block auto losses = torch::empty_like(labels, logits.options()); if (losses.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return losses; } // call kernel if (dimsize < 32 && samplesize > 4096) { int gridx = std::max(std::min(4096, samplesize / BLOCKSIZE), 1); dim3 block(BLOCKSIZE); dim3 grid(gridx); AT_DISPATCH_FLOATING_TYPES_AND_HALF(losses.scalar_type(), "large margin forward", [&] { int shm_size = BLOCKSIZE * sizeof(scalar_t); SpatialLMarginLossForward<scalar_t><<<grid, block, shm_size, at::cuda::getCurrentCUDAStream()>>>( n_size, dimsize, m_size, logits.contiguous().data_ptr<scalar_t>(), labels.contiguous().data_ptr<int64_t>(), losses.contiguous().data_ptr<scalar_t>(), ignore_index, lam ); }); } else { int blockx = 32; while (blockx < dimsize) blockx *= 2; blockx = std::max(std::min(BLOCKSIZE, blockx / 2), 32); int blocky = std::max(std::min(samplesize, BLOCKSIZE / blockx), 1); int gridx = std::max(std::min(4096, samplesize / blocky), 1); int n_shm = (blockx + 8) * blocky; dim3 block(blockx, blocky); dim3 grid(gridx); AT_DISPATCH_FLOATING_TYPES_AND_HALF(losses.scalar_type(), "large margin forward", [&] { int shm_size = n_shm * sizeof(scalar_t); LMarginLossForward<scalar_t><<<grid, block, shm_size, at::cuda::getCurrentCUDAStream()>>>( n_size, dimsize, m_size, logits.contiguous().data_ptr<scalar_t>(), labels.contiguous().data_ptr<int64_t>(), losses.contiguous().data_ptr<scalar_t>(), ignore_index, lam ); }); } AT_CUDA_CHECK(cudaGetLastError()); return losses; } at::Tensor large_margin_backward_cuda(const at::Tensor &logits, const at::Tensor &labels, const int64_t ignore_index, const float lam) { // CHECK type and shape AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda"); AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda"); const int n_size = logits.size(0); const int dimsize = logits.size(1); const int m_size = logits.numel() / (n_size * dimsize); const int samplesize = labels.numel(); // allocate memory and cuda grid/block auto grad_logits = torch::empty_like(logits); if (grad_logits.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return grad_logits; } if (dimsize < 32 && samplesize > 4096) { int gridx = std::max(std::min(4096, samplesize / BLOCKSIZE), 1); dim3 block(BLOCKSIZE); dim3 grid(gridx); AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_logits.scalar_type(), "large margin backwrd", [&] { int shm_size = BLOCKSIZE * sizeof(scalar_t); SpatialLMarginLossBackward<scalar_t><<<grid, block, shm_size, at::cuda::getCurrentCUDAStream()>>>( n_size, dimsize, m_size, grad_logits.contiguous().data_ptr<scalar_t>(), logits.contiguous().data_ptr<scalar_t>(), labels.contiguous().data_ptr<int64_t>(), ignore_index, lam ); }); } else { int blockx = 32; while (blockx < dimsize) blockx *= 2; blockx = std::max(std::min(BLOCKSIZE, blockx / 2), 32); int blocky = std::max(std::min(samplesize, BLOCKSIZE / blockx), 1); int gridx = 
std::max(std::min(4096, samplesize / blocky), 1); int n_shm = (blockx + 8) * blocky; dim3 block(blockx, blocky); dim3 grid(gridx); // call kernel AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_logits.scalar_type(), "large margin backward", [&] { int shm_size = n_shm * sizeof(scalar_t); LMarginLossBackward<scalar_t><<<grid, block, shm_size, at::cuda::getCurrentCUDAStream()>>>( n_size, dimsize, m_size, grad_logits.contiguous().data_ptr<scalar_t>(), logits.contiguous().data_ptr<scalar_t>(), labels.contiguous().data_ptr<int64_t>(), ignore_index, lam ); }); } AT_CUDA_CHECK(cudaGetLastError()); return grad_logits; } // python interface at::Tensor large_margin_forward(const at::Tensor &logits, const at::Tensor &labels, const float lam, const int64_t ignore_index) { if ((logits.device().type() != c10::kCUDA) || (labels.device().type() != c10::kCUDA)) { AT_ERROR("this large margin loss only supports gpu mode\n"); } at::DeviceGuard guard(logits.device()); return large_margin_forward_cuda(logits, labels, ignore_index, lam); } at::Tensor large_margin_backward(const at::Tensor &logits, const at::Tensor &labels, const float lam, const int64_t ignore_index) { // TODO: try AT_ASSERTM if ((logits.device().type() != c10::kCUDA) || (labels.device().type() != c10::kCUDA)) { AT_ERROR("this large margin loss only supports gpu mode\n"); } at::DeviceGuard guard(logits.device()); return large_margin_backward_cuda(logits, labels, ignore_index, lam); } PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("l_margin_forward", &large_margin_forward, "large margin forward"); m.def("l_margin_backward", &large_margin_backward, "large margin backward"); }
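The kernels in the extension above obtain a numerically stable log-sum-exp per sample from a block-wide shared-memory tree reduction (reduce_max / reduce_sum), parking the running maxima and sums in the slots past blockDim.x. The standalone kernel below is a minimal sketch of that same pattern outside the extension; the name row_logsumexp, the one-block-per-row launch, and the float-only types are illustrative assumptions, and, like the blockx sizing above, it assumes blockDim.x is a power of two.

#include <cuda_runtime.h>
#include <cfloat>
#include <cmath>

// One block per row; dynamic shared memory holds one float per thread.
__global__ void row_logsumexp(const float* x, float* out, int ncols) {
    extern __shared__ float sdata[];
    const int tid = threadIdx.x;
    const float* row = x + (size_t)blockIdx.x * ncols;

    // Pass 1: strided load + tree reduction for the row max (same shape as reduce_max above).
    float m = -FLT_MAX;
    for (int j = tid; j < ncols; j += blockDim.x) m = fmaxf(m, row[j]);
    sdata[tid] = m;
    __syncthreads();
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s) sdata[tid] = fmaxf(sdata[tid], sdata[tid + s]);
        __syncthreads();
    }
    const float rowmax = sdata[0];
    __syncthreads();  // every thread reads sdata[0] before the buffer is reused below

    // Pass 2: sum of exp(x - max), then the log-sum-exp (same shape as reduce_sum above).
    float acc = 0.f;
    for (int j = tid; j < ncols; j += blockDim.x) acc += expf(row[j] - rowmax);
    sdata[tid] = acc;
    __syncthreads();
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s) sdata[tid] += sdata[tid + s];
        __syncthreads();
    }
    if (tid == 0) out[blockIdx.x] = rowmax + logf(sdata[0]);
}

// Example launch: row_logsumexp<<<nrows, 256, 256 * sizeof(float)>>>(d_x, d_out, ncols);

The extra __syncthreads() after reading sdata[0] matters because the same buffer is reused for the sum pass; the extension above sidesteps that hazard by copying the block maxima into sdata[blockDim.x] and sdata[blockDim.x + 1] before the per-thread slots are reused.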
b3027ebd9044fda2389d40bce33380ce6a9a811d.hip
// !!! This is a file automatically generated by hipify!!! #include "vars.h" #include <hip/hip_runtime.h> extern int neuron; extern int layer; extern int batch; extern int input; extern float bias; extern int **csrdispl; extern INDPREC **csrindex; extern VALPREC **csrvalue; extern FEATPREC *currfeat; extern FEATPREC *nextfeat; extern int *active; extern int *categories; extern int *globalcategories; extern int myid; extern int numproc; extern int numthreads; extern int *numbatch; extern int *batchdispl; extern int mybatch; extern int extbatch; extern double timekernel; extern double timecopy; int **csrdispl_d; INDPREC **csrindex_d; VALPREC **csrvalue_d; int **buffdispl; int **mapdispl; int **warpdispl; MAPPREC **map; INDPREC **warpindex; VALPREC **warpvalue; int **buffdispl_d; int **mapdispl_d; int **warpdispl_d; MAPPREC *mapbuff_d; INDPREC *indbuff_d; VALPREC *valbuff_d;; #ifdef OUTOFCORE int weightsizemax; int mapsizemax; #ifdef OVERLAP MAPPREC *mapstream_d; INDPREC *indstream_d; VALPREC *valstream_d; #endif #else MAPPREC **map_d; INDPREC **warpindex_d; VALPREC **warpvalue_d; #endif FEATPREC *currfeat_d; FEATPREC *nextfeat_d; int *active_d; int *categories_d; int blocksize; int numblocks; int numwarp; int buffsize; hipEvent_t copystart, copystop; hipEvent_t kernelstart, kernelstop; hipStream_t copystream; hipStream_t kernelstream; float elapsedTime; __device__ float __ReLU(float x){ return x<0.0?0.0:x>32.0?32.0:x; }; __global__ void __launch_bounds__(256,4) dummy_kernel(FEATPREC *nextfeat, FEATPREC *currfeat, int buffsize, int *buffdispl, int *mapdispl, MAPPREC *map, int *displ, INDPREC *index, VALPREC *value, float bias , int neuron, int *categories, int *active){ extern __shared__ float shared[]; int wind = threadIdx.x%WARPSIZE; float reduce[MINIBATCH] = {0.0}; for(int buff = buffdispl[blockIdx.x]; buff < buffdispl[blockIdx.x+1]; buff++){ int mapnz = mapdispl[buff+1]-mapdispl[buff]; for(int n = threadIdx.x; n < mapnz; n += blockDim.x){ int ind = map[mapdispl[buff]+n]; for(int f = 0; f < MINIBATCH; f++) shared[f*buffsize+n] = currfeat[categories[blockIdx.y*MINIBATCH+f]*neuron+ind]; } __syncthreads(); int warp = (buff*blockDim.x+threadIdx.x)/WARPSIZE; for(int m = displ[warp]; m < displ[warp+1]; m++){ int ind = index[m*WARPSIZE+wind]; float val = value[m*WARPSIZE+wind]; for(int f = 0; f < MINIBATCH; f++) reduce[f] += shared[f*buffsize+ind]*val; } __syncthreads(); } int m = blockIdx.x*blockDim.x+threadIdx.x; for(int f = 0; f < MINIBATCH; f++) if(nextfeat[(blockIdx.y*MINIBATCH+f)*neuron+m]=__ReLU(reduce[f]+bias)) atomicAdd(active+blockIdx.y*MINIBATCH+f,1); }; void setup_gpu(){ hipSetDevice(myid%6); printf("myid %d mydevice %d\n",myid,myid%4); hipFuncSetAttribute(dummy_kernel,hipFuncAttributeMaxDynamicSharedMemorySize,98304); if(myid==0){ int deviceCount; hipGetDeviceCount(&deviceCount); printf("\n"); printf("Device Count: %d\n",deviceCount); int dev = 0; hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); printf("Device %d name: %s\n",dev,deviceProp.name); printf("Computational Capabilities: %d, %d\n",deviceProp.major,deviceProp.minor); printf("Maximum global memory size: %lu\n",deviceProp.totalGlobalMem); printf("Maximum constant memory size: %lu\n",deviceProp.totalConstMem); printf("Maximum shared memory size per block: %lu\n",deviceProp.sharedMemPerBlock); printf("Maximum block dimensions: %dx%dx%d\n",deviceProp.maxThreadsDim[0],deviceProp.maxThreadsDim[1],deviceProp.maxThreadsDim[2]); printf("Maximum grid dimensions: 
%dx%dx%d\n",deviceProp.maxGridSize[0],deviceProp.maxGridSize[1],deviceProp.maxGridSize[2]); printf("Maximum threads per block: %d\n",deviceProp.maxThreadsPerBlock); printf("Warp size: %d\n",deviceProp.warpSize); printf("\n"); } hipEventCreate(&kernelstart); hipEventCreate(&kernelstop); hipEventCreate(&copystart); hipEventCreate(&copystop); hipStreamCreate(&copystream); hipStreamCreate(&kernelstream); char *chartemp; chartemp = getenv("BLOCKSIZE"); blocksize = atoi(chartemp); chartemp = getenv("BUFFER"); buffsize = atoi(chartemp)*1024/sizeof(float)/MINIBATCH; numblocks = neuron/blocksize; numwarp = blocksize/WARPSIZE; if(myid==0){ printf("MINIBATCH SIZE: %d\n",MINIBATCH); printf("BLOCK SIZE: %d\n",blocksize); printf("WARP SIZE: %d\n",WARPSIZE); printf("NUM BLOCKS: %d\n",numblocks); printf("NUMWARPS: %d\n",numwarp); printf("BUFFER SIZE: %d (%f KB) PER FEATURE: %d (%f KB)\n",buffsize*MINIBATCH,buffsize*sizeof(float)/1024.0*MINIBATCH,buffsize,buffsize*sizeof(float)/1024.0); printf("\n"); } preproc(); double memother = 0.0; hipHostMalloc((void**)&globalcategories,sizeof(int)*mybatch); hipHostMalloc((void**)&categories,sizeof(int)*mybatch); hipHostMalloc((void**)&active,sizeof(int)*mybatch); hipMalloc((void**)&active_d,sizeof(int)*extbatch); hipMalloc((void**)&categories_d,sizeof(int)*extbatch); memother += sizeof(int)*extbatch/1.0e9; memother += sizeof(int)*extbatch/1.0e9; for(int k = 0; k < mybatch; k++){ active[k] = neuron; categories[k] = k; globalcategories[k] = batchdispl[myid]+k; } hipMemset(active_d,0,sizeof(int)*extbatch); hipMemset(categories_d,0,sizeof(int)*extbatch); hipMemcpy(active_d,active,sizeof(int)*mybatch,hipMemcpyHostToDevice); hipMemcpy(categories_d,categories,sizeof(int)*mybatch,hipMemcpyHostToDevice); #ifdef OUTOFCORE if(myid==0)printf("OUT OF CORE IS ENABLED\n"); #ifdef OVERLAP if(myid==0)printf("OVERLAPPING IS ENABLED\n"); #else if(myid==0)printf("OVERLAPPING IS DISABLED\n"); #endif #else if(myid==0)printf("OUT OF CORE IS DISABLED\n"); #endif double memweight = 0.0; double memdispl = 0.0; double memmap = 0.0; buffdispl_d = new int*[layer]; mapdispl_d = new int*[layer]; warpdispl_d = new int*[layer]; #ifdef OUTOFCORE weightsizemax = 0; mapsizemax = 0; #else map_d = new MAPPREC*[layer]; warpindex_d = new INDPREC*[layer]; warpvalue_d = new VALPREC*[layer]; #endif for(int l = 0; l < layer; l++){ hipMalloc((void**)&buffdispl_d[l],sizeof(int)*(numblocks+1)); hipMalloc((void**)&mapdispl_d[l],sizeof(int)*(buffdispl[l][numblocks]+1)); hipMalloc((void**)&warpdispl_d[l],sizeof(int)*(buffdispl[l][numblocks]*numwarp+1)); memdispl += sizeof(int)*(numblocks+1)/1.0e9; memdispl += sizeof(int)*(buffdispl[l][numblocks]+1)/1.0e9; memdispl += sizeof(int)*(buffdispl[l][numblocks]*numwarp+1)/1.0e9; hipMemcpy(buffdispl_d[l],buffdispl[l],sizeof(int)*(numblocks+1),hipMemcpyHostToDevice); hipMemcpy(mapdispl_d[l],mapdispl[l],sizeof(int)*(buffdispl[l][numblocks]+1),hipMemcpyHostToDevice); hipMemcpy(warpdispl_d[l],warpdispl[l],sizeof(int)*(buffdispl[l][numblocks]*numwarp+1),hipMemcpyHostToDevice); #ifdef OUTOFCORE int mapsize = mapdispl[l][buffdispl[l][numblocks]]; if(mapsize > mapsizemax) mapsizemax = mapsize; int weightsize = warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE; if(weightsize > weightsizemax) weightsizemax = weightsize; #else hipMalloc((void**)&map_d[l],sizeof(MAPPREC)*(mapdispl[l][buffdispl[l][numblocks]])); hipMalloc((void**)&warpindex_d[l],sizeof(INDPREC)*warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE); 
hipMalloc((void**)&warpvalue_d[l],sizeof(VALPREC)*warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE); memmap += sizeof(MAPPREC)*(mapdispl[l][buffdispl[l][numblocks]])/1.0e9; memweight += sizeof(INDPREC)*warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE/1.0e9; memweight += sizeof(VALPREC)*warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE/1.0e9; hipMemcpy(map_d[l],map[l],sizeof(MAPPREC)*(mapdispl[l][buffdispl[l][numblocks]]),hipMemcpyHostToDevice); hipMemcpy(warpindex_d[l],warpindex[l],sizeof(INDPREC)*warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE,hipMemcpyHostToDevice); hipMemcpy(warpvalue_d[l],warpvalue[l],sizeof(VALPREC)*warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE,hipMemcpyHostToDevice); #endif } #ifdef OUTOFCORE if(myid==0)printf("\n"); if(myid==0)printf("mapsizemax: %d (%f KB)\n",mapsizemax,sizeof(MAPPREC)*mapsizemax/1.0e6); if(myid==0)printf("weightsizemax: %d (%f KB)\n",weightsizemax,(sizeof(INDPREC)+sizeof(VALPREC))*weightsizemax/1.0e6); #ifdef OVERLAP hipMalloc((void**)&mapstream_d,sizeof(MAPPREC)*mapsizemax*2); hipMalloc((void**)&indstream_d,sizeof(INDPREC)*weightsizemax*2); hipMalloc((void**)&valstream_d,sizeof(VALPREC)*weightsizemax*2); memmap += 2*sizeof(MAPPREC)*mapsizemax/1.0e9; memweight += 2*sizeof(INDPREC)*weightsizemax/1.0e9; memweight += 2*sizeof(VALPREC)*weightsizemax/1.0e9; hipMemcpy(mapstream_d,map[0],sizeof(MAPPREC)*mapdispl[0][buffdispl[0][numblocks]],hipMemcpyHostToDevice); hipMemcpy(indstream_d,warpindex[0],sizeof(INDPREC)*warpdispl[0][buffdispl[0][numblocks]*numwarp]*WARPSIZE,hipMemcpyHostToDevice); hipMemcpy(valstream_d,warpvalue[0],sizeof(VALPREC)*warpdispl[0][buffdispl[0][numblocks]*numwarp]*WARPSIZE,hipMemcpyHostToDevice); #else hipMalloc((void**)&mapbuff_d,sizeof(MAPPREC)*mapsizemax); hipMalloc((void**)&indbuff_d,sizeof(INDPREC)*weightsizemax); hipMalloc((void**)&valbuff_d,sizeof(VALPREC)*weightsizemax); memmap += sizeof(MAPPREC)*mapsizemax/1.0e9; memweight += sizeof(INDPREC)*weightsizemax/1.0e9; memweight += sizeof(VALPREC)*weightsizemax/1.0e9; #endif #endif double memfeat = 0.0; hipMalloc((void**)&currfeat_d,sizeof(FEATPREC)*extbatch*neuron); hipMalloc((void**)&nextfeat_d,sizeof(FEATPREC)*extbatch*neuron); memfeat += sizeof(FEATPREC)*extbatch*neuron/1.0e9; memfeat += sizeof(FEATPREC)*extbatch*neuron/1.0e9; hipMemset(currfeat_d,0,sizeof(FEATPREC)*extbatch*neuron); hipMemset(nextfeat_d,0,sizeof(FEATPREC)*extbatch*neuron); hipMemcpy(currfeat_d,currfeat,sizeof(FEATPREC)*mybatch*neuron,hipMemcpyHostToDevice); double memothers[numproc]; double memweights[numproc]; double memdispls[numproc]; double memmaps[numproc]; double memfeats[numproc]; MPI_Allgather(&memother,1,MPI_DOUBLE,memothers,1,MPI_DOUBLE,MPI_COMM_WORLD); MPI_Allgather(&memweight,1,MPI_DOUBLE,memweights,1,MPI_DOUBLE,MPI_COMM_WORLD); MPI_Allgather(&memdispl,1,MPI_DOUBLE,memdispls,1,MPI_DOUBLE,MPI_COMM_WORLD); MPI_Allgather(&memmap,1,MPI_DOUBLE,memmaps,1,MPI_DOUBLE,MPI_COMM_WORLD); MPI_Allgather(&memfeat,1,MPI_DOUBLE,memfeats,1,MPI_DOUBLE,MPI_COMM_WORLD); if(myid==0){ double memmax = 0.0; printf("\n"); for(int p = 0; p < numproc; p++){ double memtot = memdispls[p]+memmaps[p]+memweights[p]+memfeats[p]; printf("GPU %d: OTHERS: %f DISPLS: %f MAPS: %f WEIGHTS: %f FEATURES: %f TOTAL: %f GB\n",p,memothers[p],memdispls[p],memmaps[p],memweights[p],memfeats[p],memtot); if(memtot>memmax)memmax=memtot; } printf("MAX GPU MEM: %f GB\n",memmax); } } /* Simultaneously launch the kernel and copy weights for the next layer. Two streams: kernelStream and copyStream. 
kernelStream contains the kernel, as well as the associated memset, and bookkeeping operations copyStream just has the copy operations for the next layer use copyStart / copyStop events to time the stream, and start/stop events to time the kernel */ void infer_gpu(int l){ /* if OUTOFCORE and OVERLAP, point at the right part of the double-buffer to get the weights from the previous iteration if OUTOFCORE and !OVERLAP, copy arguments into the kernel otherwise, just get the right layer pointers */ #ifdef OUTOFCORE #ifdef OVERLAP mapbuff_d = mapstream_d+(l%2)*mapsizemax; indbuff_d = indstream_d+(l%2)*weightsizemax; valbuff_d = valstream_d+(l%2)*weightsizemax; hipStreamSynchronize(copystream); #else hipEventRecord(copystart,kernelstream); int weightsize = warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE; hipMemcpyAsync(indbuff_d,warpindex[l],sizeof(INDPREC)*weightsize,hipMemcpyHostToDevice,kernelstream); hipMemcpyAsync(valbuff_d,warpvalue[l],sizeof(VALPREC)*weightsize,hipMemcpyHostToDevice,kernelstream); int mapsize = mapdispl[l][buffdispl[l][numblocks]]; hipMemcpyAsync(mapbuff_d,map[l],sizeof(MAPPREC)*mapsize,hipMemcpyHostToDevice,kernelstream); hipEventRecord(copystop,kernelstream); #endif #else mapbuff_d = map_d[l]; indbuff_d = warpindex_d[l]; valbuff_d = warpvalue_d[l]; #endif dim3 block(blocksize); dim3 grid(numblocks,(mybatch+MINIBATCH-1)/MINIBATCH); // initialize active features in the batch hipMemsetAsync(active_d,0,sizeof(int)*mybatch,kernelstream); hipEventRecord(kernelstart,kernelstream); hipLaunchKernelGGL(( dummy_kernel), dim3(grid),dim3(block),sizeof(float)*buffsize*MINIBATCH,kernelstream, nextfeat_d,currfeat_d,buffsize,buffdispl_d[l],mapdispl_d[l],mapbuff_d,warpdispl_d[l],indbuff_d,valbuff_d,bias,neuron,categories_d,active_d); hipEventRecord(kernelstop,kernelstream); hipMemcpyAsync(active,active_d,sizeof(int)*mybatch,hipMemcpyDeviceToHost,kernelstream); #ifdef OUTOFCORE #ifdef OVERLAP if(l+1 < layer){ hipMemcpyAsync(mapstream_d+((l+1)%2)*mapsizemax,map[l+1],sizeof(MAPPREC)*mapdispl[l+1][buffdispl[l+1][numblocks]],hipMemcpyHostToDevice,copystream); hipMemcpyAsync(indstream_d+((l+1)%2)*weightsizemax,warpindex[l+1],sizeof(INDPREC)*warpdispl[l+1][buffdispl[l+1][numblocks]*numwarp]*WARPSIZE,hipMemcpyHostToDevice,copystream); hipMemcpyAsync(valstream_d+((l+1)%2)*weightsizemax,warpvalue[l+1],sizeof(VALPREC)*warpdispl[l+1][buffdispl[l+1][numblocks]*numwarp]*WARPSIZE,hipMemcpyHostToDevice,copystream); } #else hipEventElapsedTime(&elapsedTime,copystart,copystop); timecopy += elapsedTime/1.0e3; #endif #endif hipStreamSynchronize(kernelstream); int feature = 0; for(int k = 0; k < mybatch; k++) if(active[k]){ globalcategories[feature] = globalcategories[k]; categories[feature] = k; feature++; } mybatch = feature; hipMemcpyAsync(categories_d,categories,sizeof(int)*feature,hipMemcpyHostToDevice,kernelstream); hipEventElapsedTime(&elapsedTime,kernelstart,kernelstop); timekernel += elapsedTime/1.0e3; FEATPREC *tempfeat_d = currfeat_d; currfeat_d = nextfeat_d; nextfeat_d = tempfeat_d; //int allfeature = 0; //MPI_Allreduce(&feature,&allfeature,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD); //if(myid==0)printf("layer %d features %d\n",l,allfeature); }; void preproc(){ buffdispl = new int*[layer]; mapdispl = new int*[layer]; warpdispl = new int*[layer]; map = new MAPPREC*[layer]; warpindex = new INDPREC*[layer]; warpvalue = new VALPREC*[layer]; int totbuff = 0; int totmapnz = 0; int totwarpnz = 0; int *temptag = new int[neuron*numthreads]; for(int l = 0; l < layer; l++){ //if(myid==0)printf("preprocessing 
layer %d\n",l); int *numbuff = new int[numblocks]; buffdispl[l] = new int[numblocks+1]; #pragma omp parallel for for(int b = 0; b < numblocks; b++){ int *temp = temptag+omp_get_thread_num()*neuron; for(int n = 0; n < neuron; n++) temp[n] = 0; for(int m = b*blocksize; m < (b+1)*blocksize; m++) for(int n = csrdispl[l][m]; n < csrdispl[l][m+1]; n++) temp[csrindex[l][n]]++; int footprint = 0; for(int n = 0; n < neuron; n++){ if(temp[n]) footprint++; } numbuff[b] = (footprint+buffsize-1)/buffsize; } buffdispl[l][0] = 0; for(int b = 0; b < numblocks; b++) buffdispl[l][b+1] = buffdispl[l][b]+numbuff[b]; totbuff += buffdispl[l][numblocks]; int *warpnz = new int[buffdispl[l][numblocks]*numwarp]; #pragma omp parallel for for(int n = 0; n < buffdispl[l][numblocks]*numwarp; n++) warpnz[n] = 0; int *mapnz = new int[buffdispl[l][numblocks]]; #pragma omp parallel for for(int n = 0; n < buffdispl[l][numblocks]; n++) mapnz[n] = 0; #pragma omp parallel for for(int b = 0; b < numblocks; b++){ int *temp = temptag+omp_get_thread_num()*neuron; for(int n = 0; n < neuron; n++) temp[n] = 0; for(int m = b*blocksize; m < (b+1)*blocksize; m++) for(int n = csrdispl[l][m]; n < csrdispl[l][m+1]; n++) temp[csrindex[l][n]]++; int footprint = 0; for(int n = 0; n < neuron; n++) if(temp[n]){ int buff = footprint/buffsize; mapnz[buffdispl[l][b]+buff]++; temp[n] = buff; footprint++; } for(int buff = 0; buff < numbuff[b]; buff++) for(int warp = 0; warp < numwarp; warp++){ int tempnz[WARPSIZE] = {0}; for(int t = 0; t < WARPSIZE; t++) for(int n = csrdispl[l][b*blocksize+warp*WARPSIZE+t]; n < csrdispl[l][b*blocksize+warp*WARPSIZE+t+1]; n++) if(temp[csrindex[l][n]]==buff) tempnz[t]++; int warpmax = 0; for(int t = 0; t < WARPSIZE; t++) if(tempnz[t]>warpmax) warpmax = tempnz[t]; warpnz[(buffdispl[l][b]+buff)*numwarp+warp] = warpmax; } } warpdispl[l] = new int[buffdispl[l][numblocks]*numwarp+1]; warpdispl[l][0] = 0; for(int warp = 0; warp < buffdispl[l][numblocks]*numwarp; warp++) warpdispl[l][warp+1] = warpdispl[l][warp]+warpnz[warp]; totwarpnz += warpdispl[l][buffdispl[l][numblocks]*numwarp]; hipHostMalloc((void**)&warpindex[l],sizeof(INDPREC)*warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE); hipHostMalloc((void**)&warpvalue[l],sizeof(VALPREC)*warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE); #pragma omp parallel for for(int n = 0; n < warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE; n++){ warpindex[l][n] = 0; warpvalue[l][n] = 0.0; } mapdispl[l] = new int[buffdispl[l][numblocks]+1]; mapdispl[l][0] = 0; for(int buff = 0; buff < buffdispl[l][numblocks]; buff++) mapdispl[l][buff+1] = mapdispl[l][buff] + mapnz[buff]; totmapnz += mapdispl[l][buffdispl[l][numblocks]]; hipHostMalloc((void**)&map[l],sizeof(MAPPREC)*mapdispl[l][buffdispl[l][numblocks]]); #pragma omp parallel for for(int n = 0; n < buffdispl[l][numblocks]; n++) mapnz[n] = 0; #pragma omp parallel for for(int b = 0; b < numblocks; b++){ int *temp = temptag+omp_get_thread_num()*neuron; for(int n = 0; n < neuron; n++) temp[n] = 0; for(int m = b*blocksize; m < (b+1)*blocksize; m++) for(int n = csrdispl[l][m]; n < csrdispl[l][m+1]; n++) temp[csrindex[l][n]]++; int footprint = 0; for(int n = 0; n < neuron; n++) if(temp[n]){ int buff = footprint/buffsize; map[l][mapdispl[l][buffdispl[l][b]+buff]+mapnz[buffdispl[l][b]+buff]] = n; mapnz[buffdispl[l][b]+buff]++; temp[n] = footprint; footprint++; } for(int buff = 0; buff < numbuff[b]; buff++) for(int warp = 0; warp < numwarp; warp++){ int tempnz[WARPSIZE] = {0}; for(int t = 0; t < WARPSIZE; t++) for(int n = 
csrdispl[l][b*blocksize+warp*WARPSIZE+t]; n < csrdispl[l][b*blocksize+warp*WARPSIZE+t+1]; n++) if(temp[csrindex[l][n]]/buffsize==buff){ int ind = (warpdispl[l][(buffdispl[l][b]+buff)*numwarp+warp]+tempnz[t])*WARPSIZE+t; warpindex[l][ind] = temp[csrindex[l][n]]%buffsize; warpvalue[l][ind] = csrvalue[l][n]; tempnz[t]++; } } } delete[] numbuff; delete[] mapnz; delete[] warpnz; delete[] csrdispl[l]; delete[] csrindex[l]; delete[] csrvalue[l]; } delete[] temptag; delete[] csrdispl; delete[] csrindex; delete[] csrvalue; if(myid==0)printf("total buffers: %d (%f per block)\n",totbuff,totbuff/(float)layer/numblocks); if(myid==0)printf("total map: %d (%f per block)\n",totmapnz,totmapnz/(float)layer/numblocks); if(myid==0)printf("total warpnz: %d (%f per buffer)\n",totwarpnz,totwarpnz/(float)totbuff); if(myid==0)printf("iefficiency: %f\n",totwarpnz*(float)WARPSIZE/(layer*(float)neuron*32)); if(myid==0)printf("\n"); /*if(myid==0) for(int l = 0; l < 5; l++) for(int buff = 0; buff < 15; buff++) for(int warp = 0; warp < numwarp; warp++){ int nz = warpdispl[l][buff*numwarp+warp+1]-warpdispl[l][buff*numwarp+warp]; printf("Layer %d buff %d warp %d nz %d\n",l,buff,buff*numwarp+warp,nz); for(int m = warpdispl[l][buff*numwarp+warp]; m < warpdispl[l][buff*numwarp+warp+1]; m++){ for(int t = 0; t < WARPSIZE; t++) printf("%e ",__half2float(warpvalue[l][m*WARPSIZE+t])); printf("\n"); } }*/ };
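infer_gpu above brackets each launch with the kernelstart/kernelstop events recorded on kernelstream and accumulates hipEventElapsedTime into timekernel once the stream has drained. The program below is a minimal sketch of that event-timing pattern in isolation, written against the CUDA runtime API (the hipified form is the same apart from the hip prefixes); the scale kernel and the sizes are placeholders, not anything from the file above.

#include <cuda_runtime.h>
#include <cstdio>

__global__ void scale(float* x, float a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

int main() {
    const int n = 1 << 20;
    float* d_x;
    cudaMalloc((void**)&d_x, n * sizeof(float));
    cudaMemset(d_x, 0, n * sizeof(float));

    cudaStream_t stream;
    cudaStreamCreate(&stream);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Record the events on the same stream as the launch so they bracket exactly this kernel.
    cudaEventRecord(start, stream);
    scale<<<(n + 255) / 256, 256, 0, stream>>>(d_x, 2.f, n);
    cudaEventRecord(stop, stream);

    // Both events must have completed before the elapsed time is valid; the file above
    // gets the same guarantee by synchronizing the kernel stream each layer.
    cudaEventSynchronize(stop);
    float ms = 0.f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("kernel time: %f ms\n", ms);

    cudaEventDestroy(start); cudaEventDestroy(stop);
    cudaStreamDestroy(stream); cudaFree(d_x);
    return 0;
}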
b3027ebd9044fda2389d40bce33380ce6a9a811d.cu
#include "vars.h" #include <cuda.h> extern int neuron; extern int layer; extern int batch; extern int input; extern float bias; extern int **csrdispl; extern INDPREC **csrindex; extern VALPREC **csrvalue; extern FEATPREC *currfeat; extern FEATPREC *nextfeat; extern int *active; extern int *categories; extern int *globalcategories; extern int myid; extern int numproc; extern int numthreads; extern int *numbatch; extern int *batchdispl; extern int mybatch; extern int extbatch; extern double timekernel; extern double timecopy; int **csrdispl_d; INDPREC **csrindex_d; VALPREC **csrvalue_d; int **buffdispl; int **mapdispl; int **warpdispl; MAPPREC **map; INDPREC **warpindex; VALPREC **warpvalue; int **buffdispl_d; int **mapdispl_d; int **warpdispl_d; MAPPREC *mapbuff_d; INDPREC *indbuff_d; VALPREC *valbuff_d;; #ifdef OUTOFCORE int weightsizemax; int mapsizemax; #ifdef OVERLAP MAPPREC *mapstream_d; INDPREC *indstream_d; VALPREC *valstream_d; #endif #else MAPPREC **map_d; INDPREC **warpindex_d; VALPREC **warpvalue_d; #endif FEATPREC *currfeat_d; FEATPREC *nextfeat_d; int *active_d; int *categories_d; int blocksize; int numblocks; int numwarp; int buffsize; cudaEvent_t copystart, copystop; cudaEvent_t kernelstart, kernelstop; cudaStream_t copystream; cudaStream_t kernelstream; float elapsedTime; __device__ float __ReLU(float x){ return x<0.0?0.0:x>32.0?32.0:x; }; __global__ void __launch_bounds__(256,4) dummy_kernel(FEATPREC *nextfeat, FEATPREC *currfeat, int buffsize, int *buffdispl, int *mapdispl, MAPPREC *map, int *displ, INDPREC *index, VALPREC *value, float bias , int neuron, int *categories, int *active){ extern __shared__ float shared[]; int wind = threadIdx.x%WARPSIZE; float reduce[MINIBATCH] = {0.0}; for(int buff = buffdispl[blockIdx.x]; buff < buffdispl[blockIdx.x+1]; buff++){ int mapnz = mapdispl[buff+1]-mapdispl[buff]; for(int n = threadIdx.x; n < mapnz; n += blockDim.x){ int ind = map[mapdispl[buff]+n]; for(int f = 0; f < MINIBATCH; f++) shared[f*buffsize+n] = currfeat[categories[blockIdx.y*MINIBATCH+f]*neuron+ind]; } __syncthreads(); int warp = (buff*blockDim.x+threadIdx.x)/WARPSIZE; for(int m = displ[warp]; m < displ[warp+1]; m++){ int ind = index[m*WARPSIZE+wind]; float val = value[m*WARPSIZE+wind]; for(int f = 0; f < MINIBATCH; f++) reduce[f] += shared[f*buffsize+ind]*val; } __syncthreads(); } int m = blockIdx.x*blockDim.x+threadIdx.x; for(int f = 0; f < MINIBATCH; f++) if(nextfeat[(blockIdx.y*MINIBATCH+f)*neuron+m]=__ReLU(reduce[f]+bias)) atomicAdd(active+blockIdx.y*MINIBATCH+f,1); }; void setup_gpu(){ cudaSetDevice(myid%6); printf("myid %d mydevice %d\n",myid,myid%4); cudaFuncSetAttribute(dummy_kernel,cudaFuncAttributeMaxDynamicSharedMemorySize,98304); if(myid==0){ int deviceCount; cudaGetDeviceCount(&deviceCount); printf("\n"); printf("Device Count: %d\n",deviceCount); int dev = 0; cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); printf("Device %d name: %s\n",dev,deviceProp.name); printf("Computational Capabilities: %d, %d\n",deviceProp.major,deviceProp.minor); printf("Maximum global memory size: %lu\n",deviceProp.totalGlobalMem); printf("Maximum constant memory size: %lu\n",deviceProp.totalConstMem); printf("Maximum shared memory size per block: %lu\n",deviceProp.sharedMemPerBlock); printf("Maximum block dimensions: %dx%dx%d\n",deviceProp.maxThreadsDim[0],deviceProp.maxThreadsDim[1],deviceProp.maxThreadsDim[2]); printf("Maximum grid dimensions: %dx%dx%d\n",deviceProp.maxGridSize[0],deviceProp.maxGridSize[1],deviceProp.maxGridSize[2]); printf("Maximum 
threads per block: %d\n",deviceProp.maxThreadsPerBlock); printf("Warp size: %d\n",deviceProp.warpSize); printf("\n"); } cudaEventCreate(&kernelstart); cudaEventCreate(&kernelstop); cudaEventCreate(&copystart); cudaEventCreate(&copystop); cudaStreamCreate(&copystream); cudaStreamCreate(&kernelstream); char *chartemp; chartemp = getenv("BLOCKSIZE"); blocksize = atoi(chartemp); chartemp = getenv("BUFFER"); buffsize = atoi(chartemp)*1024/sizeof(float)/MINIBATCH; numblocks = neuron/blocksize; numwarp = blocksize/WARPSIZE; if(myid==0){ printf("MINIBATCH SIZE: %d\n",MINIBATCH); printf("BLOCK SIZE: %d\n",blocksize); printf("WARP SIZE: %d\n",WARPSIZE); printf("NUM BLOCKS: %d\n",numblocks); printf("NUMWARPS: %d\n",numwarp); printf("BUFFER SIZE: %d (%f KB) PER FEATURE: %d (%f KB)\n",buffsize*MINIBATCH,buffsize*sizeof(float)/1024.0*MINIBATCH,buffsize,buffsize*sizeof(float)/1024.0); printf("\n"); } preproc(); double memother = 0.0; cudaMallocHost((void**)&globalcategories,sizeof(int)*mybatch); cudaMallocHost((void**)&categories,sizeof(int)*mybatch); cudaMallocHost((void**)&active,sizeof(int)*mybatch); cudaMalloc((void**)&active_d,sizeof(int)*extbatch); cudaMalloc((void**)&categories_d,sizeof(int)*extbatch); memother += sizeof(int)*extbatch/1.0e9; memother += sizeof(int)*extbatch/1.0e9; for(int k = 0; k < mybatch; k++){ active[k] = neuron; categories[k] = k; globalcategories[k] = batchdispl[myid]+k; } cudaMemset(active_d,0,sizeof(int)*extbatch); cudaMemset(categories_d,0,sizeof(int)*extbatch); cudaMemcpy(active_d,active,sizeof(int)*mybatch,cudaMemcpyHostToDevice); cudaMemcpy(categories_d,categories,sizeof(int)*mybatch,cudaMemcpyHostToDevice); #ifdef OUTOFCORE if(myid==0)printf("OUT OF CORE IS ENABLED\n"); #ifdef OVERLAP if(myid==0)printf("OVERLAPPING IS ENABLED\n"); #else if(myid==0)printf("OVERLAPPING IS DISABLED\n"); #endif #else if(myid==0)printf("OUT OF CORE IS DISABLED\n"); #endif double memweight = 0.0; double memdispl = 0.0; double memmap = 0.0; buffdispl_d = new int*[layer]; mapdispl_d = new int*[layer]; warpdispl_d = new int*[layer]; #ifdef OUTOFCORE weightsizemax = 0; mapsizemax = 0; #else map_d = new MAPPREC*[layer]; warpindex_d = new INDPREC*[layer]; warpvalue_d = new VALPREC*[layer]; #endif for(int l = 0; l < layer; l++){ cudaMalloc((void**)&buffdispl_d[l],sizeof(int)*(numblocks+1)); cudaMalloc((void**)&mapdispl_d[l],sizeof(int)*(buffdispl[l][numblocks]+1)); cudaMalloc((void**)&warpdispl_d[l],sizeof(int)*(buffdispl[l][numblocks]*numwarp+1)); memdispl += sizeof(int)*(numblocks+1)/1.0e9; memdispl += sizeof(int)*(buffdispl[l][numblocks]+1)/1.0e9; memdispl += sizeof(int)*(buffdispl[l][numblocks]*numwarp+1)/1.0e9; cudaMemcpy(buffdispl_d[l],buffdispl[l],sizeof(int)*(numblocks+1),cudaMemcpyHostToDevice); cudaMemcpy(mapdispl_d[l],mapdispl[l],sizeof(int)*(buffdispl[l][numblocks]+1),cudaMemcpyHostToDevice); cudaMemcpy(warpdispl_d[l],warpdispl[l],sizeof(int)*(buffdispl[l][numblocks]*numwarp+1),cudaMemcpyHostToDevice); #ifdef OUTOFCORE int mapsize = mapdispl[l][buffdispl[l][numblocks]]; if(mapsize > mapsizemax) mapsizemax = mapsize; int weightsize = warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE; if(weightsize > weightsizemax) weightsizemax = weightsize; #else cudaMalloc((void**)&map_d[l],sizeof(MAPPREC)*(mapdispl[l][buffdispl[l][numblocks]])); cudaMalloc((void**)&warpindex_d[l],sizeof(INDPREC)*warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE); cudaMalloc((void**)&warpvalue_d[l],sizeof(VALPREC)*warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE); memmap += 
sizeof(MAPPREC)*(mapdispl[l][buffdispl[l][numblocks]])/1.0e9; memweight += sizeof(INDPREC)*warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE/1.0e9; memweight += sizeof(VALPREC)*warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE/1.0e9; cudaMemcpy(map_d[l],map[l],sizeof(MAPPREC)*(mapdispl[l][buffdispl[l][numblocks]]),cudaMemcpyHostToDevice); cudaMemcpy(warpindex_d[l],warpindex[l],sizeof(INDPREC)*warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE,cudaMemcpyHostToDevice); cudaMemcpy(warpvalue_d[l],warpvalue[l],sizeof(VALPREC)*warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE,cudaMemcpyHostToDevice); #endif } #ifdef OUTOFCORE if(myid==0)printf("\n"); if(myid==0)printf("mapsizemax: %d (%f KB)\n",mapsizemax,sizeof(MAPPREC)*mapsizemax/1.0e6); if(myid==0)printf("weightsizemax: %d (%f KB)\n",weightsizemax,(sizeof(INDPREC)+sizeof(VALPREC))*weightsizemax/1.0e6); #ifdef OVERLAP cudaMalloc((void**)&mapstream_d,sizeof(MAPPREC)*mapsizemax*2); cudaMalloc((void**)&indstream_d,sizeof(INDPREC)*weightsizemax*2); cudaMalloc((void**)&valstream_d,sizeof(VALPREC)*weightsizemax*2); memmap += 2*sizeof(MAPPREC)*mapsizemax/1.0e9; memweight += 2*sizeof(INDPREC)*weightsizemax/1.0e9; memweight += 2*sizeof(VALPREC)*weightsizemax/1.0e9; cudaMemcpy(mapstream_d,map[0],sizeof(MAPPREC)*mapdispl[0][buffdispl[0][numblocks]],cudaMemcpyHostToDevice); cudaMemcpy(indstream_d,warpindex[0],sizeof(INDPREC)*warpdispl[0][buffdispl[0][numblocks]*numwarp]*WARPSIZE,cudaMemcpyHostToDevice); cudaMemcpy(valstream_d,warpvalue[0],sizeof(VALPREC)*warpdispl[0][buffdispl[0][numblocks]*numwarp]*WARPSIZE,cudaMemcpyHostToDevice); #else cudaMalloc((void**)&mapbuff_d,sizeof(MAPPREC)*mapsizemax); cudaMalloc((void**)&indbuff_d,sizeof(INDPREC)*weightsizemax); cudaMalloc((void**)&valbuff_d,sizeof(VALPREC)*weightsizemax); memmap += sizeof(MAPPREC)*mapsizemax/1.0e9; memweight += sizeof(INDPREC)*weightsizemax/1.0e9; memweight += sizeof(VALPREC)*weightsizemax/1.0e9; #endif #endif double memfeat = 0.0; cudaMalloc((void**)&currfeat_d,sizeof(FEATPREC)*extbatch*neuron); cudaMalloc((void**)&nextfeat_d,sizeof(FEATPREC)*extbatch*neuron); memfeat += sizeof(FEATPREC)*extbatch*neuron/1.0e9; memfeat += sizeof(FEATPREC)*extbatch*neuron/1.0e9; cudaMemset(currfeat_d,0,sizeof(FEATPREC)*extbatch*neuron); cudaMemset(nextfeat_d,0,sizeof(FEATPREC)*extbatch*neuron); cudaMemcpy(currfeat_d,currfeat,sizeof(FEATPREC)*mybatch*neuron,cudaMemcpyHostToDevice); double memothers[numproc]; double memweights[numproc]; double memdispls[numproc]; double memmaps[numproc]; double memfeats[numproc]; MPI_Allgather(&memother,1,MPI_DOUBLE,memothers,1,MPI_DOUBLE,MPI_COMM_WORLD); MPI_Allgather(&memweight,1,MPI_DOUBLE,memweights,1,MPI_DOUBLE,MPI_COMM_WORLD); MPI_Allgather(&memdispl,1,MPI_DOUBLE,memdispls,1,MPI_DOUBLE,MPI_COMM_WORLD); MPI_Allgather(&memmap,1,MPI_DOUBLE,memmaps,1,MPI_DOUBLE,MPI_COMM_WORLD); MPI_Allgather(&memfeat,1,MPI_DOUBLE,memfeats,1,MPI_DOUBLE,MPI_COMM_WORLD); if(myid==0){ double memmax = 0.0; printf("\n"); for(int p = 0; p < numproc; p++){ double memtot = memdispls[p]+memmaps[p]+memweights[p]+memfeats[p]; printf("GPU %d: OTHERS: %f DISPLS: %f MAPS: %f WEIGHTS: %f FEATURES: %f TOTAL: %f GB\n",p,memothers[p],memdispls[p],memmaps[p],memweights[p],memfeats[p],memtot); if(memtot>memmax)memmax=memtot; } printf("MAX GPU MEM: %f GB\n",memmax); } } /* Simultaneously launch the kernel and copy weights for the next layer. Two streams: kernelStream and copyStream. 
kernelStream contains the kernel, as well as the associated memset, and bookkeeping operations copyStream just has the copy operations for the next layer use copyStart / copyStop events to time the stream, and start/stop events to time the kernel */ void infer_gpu(int l){ /* if OUTOFCORE and OVERLAP, point at the right part of the double-buffer to get the weights from the previous iteration if OUTOFCORE and !OVERLAP, copy arguments into the kernel otherwise, just get the right layer pointers */ #ifdef OUTOFCORE #ifdef OVERLAP mapbuff_d = mapstream_d+(l%2)*mapsizemax; indbuff_d = indstream_d+(l%2)*weightsizemax; valbuff_d = valstream_d+(l%2)*weightsizemax; cudaStreamSynchronize(copystream); #else cudaEventRecord(copystart,kernelstream); int weightsize = warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE; cudaMemcpyAsync(indbuff_d,warpindex[l],sizeof(INDPREC)*weightsize,cudaMemcpyHostToDevice,kernelstream); cudaMemcpyAsync(valbuff_d,warpvalue[l],sizeof(VALPREC)*weightsize,cudaMemcpyHostToDevice,kernelstream); int mapsize = mapdispl[l][buffdispl[l][numblocks]]; cudaMemcpyAsync(mapbuff_d,map[l],sizeof(MAPPREC)*mapsize,cudaMemcpyHostToDevice,kernelstream); cudaEventRecord(copystop,kernelstream); #endif #else mapbuff_d = map_d[l]; indbuff_d = warpindex_d[l]; valbuff_d = warpvalue_d[l]; #endif dim3 block(blocksize); dim3 grid(numblocks,(mybatch+MINIBATCH-1)/MINIBATCH); // initialize active features in the batch cudaMemsetAsync(active_d,0,sizeof(int)*mybatch,kernelstream); cudaEventRecord(kernelstart,kernelstream); dummy_kernel<<<grid,block,sizeof(float)*buffsize*MINIBATCH,kernelstream>>>(nextfeat_d,currfeat_d,buffsize,buffdispl_d[l],mapdispl_d[l],mapbuff_d,warpdispl_d[l],indbuff_d,valbuff_d,bias,neuron,categories_d,active_d); cudaEventRecord(kernelstop,kernelstream); cudaMemcpyAsync(active,active_d,sizeof(int)*mybatch,cudaMemcpyDeviceToHost,kernelstream); #ifdef OUTOFCORE #ifdef OVERLAP if(l+1 < layer){ cudaMemcpyAsync(mapstream_d+((l+1)%2)*mapsizemax,map[l+1],sizeof(MAPPREC)*mapdispl[l+1][buffdispl[l+1][numblocks]],cudaMemcpyHostToDevice,copystream); cudaMemcpyAsync(indstream_d+((l+1)%2)*weightsizemax,warpindex[l+1],sizeof(INDPREC)*warpdispl[l+1][buffdispl[l+1][numblocks]*numwarp]*WARPSIZE,cudaMemcpyHostToDevice,copystream); cudaMemcpyAsync(valstream_d+((l+1)%2)*weightsizemax,warpvalue[l+1],sizeof(VALPREC)*warpdispl[l+1][buffdispl[l+1][numblocks]*numwarp]*WARPSIZE,cudaMemcpyHostToDevice,copystream); } #else cudaEventElapsedTime(&elapsedTime,copystart,copystop); timecopy += elapsedTime/1.0e3; #endif #endif cudaStreamSynchronize(kernelstream); int feature = 0; for(int k = 0; k < mybatch; k++) if(active[k]){ globalcategories[feature] = globalcategories[k]; categories[feature] = k; feature++; } mybatch = feature; cudaMemcpyAsync(categories_d,categories,sizeof(int)*feature,cudaMemcpyHostToDevice,kernelstream); cudaEventElapsedTime(&elapsedTime,kernelstart,kernelstop); timekernel += elapsedTime/1.0e3; FEATPREC *tempfeat_d = currfeat_d; currfeat_d = nextfeat_d; nextfeat_d = tempfeat_d; //int allfeature = 0; //MPI_Allreduce(&feature,&allfeature,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD); //if(myid==0)printf("layer %d features %d\n",l,allfeature); }; void preproc(){ buffdispl = new int*[layer]; mapdispl = new int*[layer]; warpdispl = new int*[layer]; map = new MAPPREC*[layer]; warpindex = new INDPREC*[layer]; warpvalue = new VALPREC*[layer]; int totbuff = 0; int totmapnz = 0; int totwarpnz = 0; int *temptag = new int[neuron*numthreads]; for(int l = 0; l < layer; l++){ //if(myid==0)printf("preprocessing layer 
%d\n",l); int *numbuff = new int[numblocks]; buffdispl[l] = new int[numblocks+1]; #pragma omp parallel for for(int b = 0; b < numblocks; b++){ int *temp = temptag+omp_get_thread_num()*neuron; for(int n = 0; n < neuron; n++) temp[n] = 0; for(int m = b*blocksize; m < (b+1)*blocksize; m++) for(int n = csrdispl[l][m]; n < csrdispl[l][m+1]; n++) temp[csrindex[l][n]]++; int footprint = 0; for(int n = 0; n < neuron; n++){ if(temp[n]) footprint++; } numbuff[b] = (footprint+buffsize-1)/buffsize; } buffdispl[l][0] = 0; for(int b = 0; b < numblocks; b++) buffdispl[l][b+1] = buffdispl[l][b]+numbuff[b]; totbuff += buffdispl[l][numblocks]; int *warpnz = new int[buffdispl[l][numblocks]*numwarp]; #pragma omp parallel for for(int n = 0; n < buffdispl[l][numblocks]*numwarp; n++) warpnz[n] = 0; int *mapnz = new int[buffdispl[l][numblocks]]; #pragma omp parallel for for(int n = 0; n < buffdispl[l][numblocks]; n++) mapnz[n] = 0; #pragma omp parallel for for(int b = 0; b < numblocks; b++){ int *temp = temptag+omp_get_thread_num()*neuron; for(int n = 0; n < neuron; n++) temp[n] = 0; for(int m = b*blocksize; m < (b+1)*blocksize; m++) for(int n = csrdispl[l][m]; n < csrdispl[l][m+1]; n++) temp[csrindex[l][n]]++; int footprint = 0; for(int n = 0; n < neuron; n++) if(temp[n]){ int buff = footprint/buffsize; mapnz[buffdispl[l][b]+buff]++; temp[n] = buff; footprint++; } for(int buff = 0; buff < numbuff[b]; buff++) for(int warp = 0; warp < numwarp; warp++){ int tempnz[WARPSIZE] = {0}; for(int t = 0; t < WARPSIZE; t++) for(int n = csrdispl[l][b*blocksize+warp*WARPSIZE+t]; n < csrdispl[l][b*blocksize+warp*WARPSIZE+t+1]; n++) if(temp[csrindex[l][n]]==buff) tempnz[t]++; int warpmax = 0; for(int t = 0; t < WARPSIZE; t++) if(tempnz[t]>warpmax) warpmax = tempnz[t]; warpnz[(buffdispl[l][b]+buff)*numwarp+warp] = warpmax; } } warpdispl[l] = new int[buffdispl[l][numblocks]*numwarp+1]; warpdispl[l][0] = 0; for(int warp = 0; warp < buffdispl[l][numblocks]*numwarp; warp++) warpdispl[l][warp+1] = warpdispl[l][warp]+warpnz[warp]; totwarpnz += warpdispl[l][buffdispl[l][numblocks]*numwarp]; cudaMallocHost((void**)&warpindex[l],sizeof(INDPREC)*warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE); cudaMallocHost((void**)&warpvalue[l],sizeof(VALPREC)*warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE); #pragma omp parallel for for(int n = 0; n < warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE; n++){ warpindex[l][n] = 0; warpvalue[l][n] = 0.0; } mapdispl[l] = new int[buffdispl[l][numblocks]+1]; mapdispl[l][0] = 0; for(int buff = 0; buff < buffdispl[l][numblocks]; buff++) mapdispl[l][buff+1] = mapdispl[l][buff] + mapnz[buff]; totmapnz += mapdispl[l][buffdispl[l][numblocks]]; cudaMallocHost((void**)&map[l],sizeof(MAPPREC)*mapdispl[l][buffdispl[l][numblocks]]); #pragma omp parallel for for(int n = 0; n < buffdispl[l][numblocks]; n++) mapnz[n] = 0; #pragma omp parallel for for(int b = 0; b < numblocks; b++){ int *temp = temptag+omp_get_thread_num()*neuron; for(int n = 0; n < neuron; n++) temp[n] = 0; for(int m = b*blocksize; m < (b+1)*blocksize; m++) for(int n = csrdispl[l][m]; n < csrdispl[l][m+1]; n++) temp[csrindex[l][n]]++; int footprint = 0; for(int n = 0; n < neuron; n++) if(temp[n]){ int buff = footprint/buffsize; map[l][mapdispl[l][buffdispl[l][b]+buff]+mapnz[buffdispl[l][b]+buff]] = n; mapnz[buffdispl[l][b]+buff]++; temp[n] = footprint; footprint++; } for(int buff = 0; buff < numbuff[b]; buff++) for(int warp = 0; warp < numwarp; warp++){ int tempnz[WARPSIZE] = {0}; for(int t = 0; t < WARPSIZE; t++) for(int n = 
csrdispl[l][b*blocksize+warp*WARPSIZE+t]; n < csrdispl[l][b*blocksize+warp*WARPSIZE+t+1]; n++) if(temp[csrindex[l][n]]/buffsize==buff){ int ind = (warpdispl[l][(buffdispl[l][b]+buff)*numwarp+warp]+tempnz[t])*WARPSIZE+t; warpindex[l][ind] = temp[csrindex[l][n]]%buffsize; warpvalue[l][ind] = csrvalue[l][n]; tempnz[t]++; } } } delete[] numbuff; delete[] mapnz; delete[] warpnz; delete[] csrdispl[l]; delete[] csrindex[l]; delete[] csrvalue[l]; } delete[] temptag; delete[] csrdispl; delete[] csrindex; delete[] csrvalue; if(myid==0)printf("total buffers: %d (%f per block)\n",totbuff,totbuff/(float)layer/numblocks); if(myid==0)printf("total map: %d (%f per block)\n",totmapnz,totmapnz/(float)layer/numblocks); if(myid==0)printf("total warpnz: %d (%f per buffer)\n",totwarpnz,totwarpnz/(float)totbuff); if(myid==0)printf("iefficiency: %f\n",totwarpnz*(float)WARPSIZE/(layer*(float)neuron*32)); if(myid==0)printf("\n"); /*if(myid==0) for(int l = 0; l < 5; l++) for(int buff = 0; buff < 15; buff++) for(int warp = 0; warp < numwarp; warp++){ int nz = warpdispl[l][buff*numwarp+warp+1]-warpdispl[l][buff*numwarp+warp]; printf("Layer %d buff %d warp %d nz %d\n",l,buff,buff*numwarp+warp,nz); for(int m = warpdispl[l][buff*numwarp+warp]; m < warpdispl[l][buff*numwarp+warp+1]; m++){ for(int t = 0; t < WARPSIZE; t++) printf("%e ",__half2float(warpvalue[l][m*WARPSIZE+t])); printf("\n"); } }*/ };
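The OUTOFCORE/OVERLAP path described in the comments above keeps two layer-sized halves of the weight buffers on the device: while layer l runs on kernelstream, layer l+1 is prefetched into the other half on copystream, and each iteration ends by draining kernelstream. Below is a minimal CUDA sketch of that double-buffering shape with a dummy payload; process_layer, the fixed per-layer size n, and the uniform host weights are assumptions for illustration and say nothing about the real buffer layout.

#include <cuda_runtime.h>

__global__ void process_layer(const float* w, float* out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] += w[i];  // stand-in for the real per-layer work
}

int main() {
    const int layers = 8;
    const int n = 1 << 20;  // elements per layer, playing the role of weightsizemax

    // Pinned host weights so the asynchronous H2D copies can overlap with kernels.
    float* h_w;
    cudaMallocHost((void**)&h_w, (size_t)layers * n * sizeof(float));
    for (size_t i = 0; i < (size_t)layers * n; ++i) h_w[i] = 1.f;

    float *d_w, *d_out;  // d_w holds the two layer-sized halves of the double buffer
    cudaMalloc((void**)&d_w, 2 * (size_t)n * sizeof(float));
    cudaMalloc((void**)&d_out, (size_t)n * sizeof(float));
    cudaMemset(d_out, 0, (size_t)n * sizeof(float));

    cudaStream_t kernelstream, copystream;
    cudaStreamCreate(&kernelstream);
    cudaStreamCreate(&copystream);

    // Preload layer 0 into half 0, as the setup code above does before the first layer.
    cudaMemcpy(d_w, h_w, (size_t)n * sizeof(float), cudaMemcpyHostToDevice);

    for (int l = 0; l < layers; ++l) {
        float* cur = d_w + (size_t)(l % 2) * n;
        cudaStreamSynchronize(copystream);  // the prefetch of layer l has landed
        process_layer<<<(n + 255) / 256, 256, 0, kernelstream>>>(cur, d_out, n);
        if (l + 1 < layers) {  // stage layer l+1 into the other half while layer l runs
            float* nxt = d_w + (size_t)((l + 1) % 2) * n;
            cudaMemcpyAsync(nxt, h_w + (size_t)(l + 1) * n, (size_t)n * sizeof(float),
                            cudaMemcpyHostToDevice, copystream);
        }
        // The real loop drains the kernel stream here anyway (to read back per-sample activity);
        // that also guarantees the next prefetch never races with a kernel still reading its half.
        cudaStreamSynchronize(kernelstream);
    }

    cudaFreeHost(h_w); cudaFree(d_w); cudaFree(d_out);
    cudaStreamDestroy(kernelstream); cudaStreamDestroy(copystream);
    return 0;
}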
cb9df2124b6458589a2787029127f41e2c67c256.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/weighted_softmax_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void WeightedSoftmaxLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, Dtype* loss, const Dtype pos_mult_, const int pos_cid_, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); Dtype w = (label_value == pos_cid_) ? pos_mult_ : 1; if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -w * log(max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN))); counts[index] = 1; } } } template <typename Dtype> void WeightedSoftmaxWithLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( WeightedSoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data, pos_mult_, pos_cid_, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, valid_count); if (top.size() == 2) { top[1]->ShareData(prob_); } } template <typename Dtype> __global__ void WeightedSoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, const Dtype* label, Dtype* bottom_diff, const Dtype pos_mult_, const int pos_cid_, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); Dtype w = (label_value == pos_cid_) ? 
pos_mult_ : 1; if (has_ignore_label_ && label_value == ignore_label_) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; counts[index] = 1; for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] *= w; } } } } template <typename Dtype> void WeightedSoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is never used for anything else, // we use it to avoid allocating new GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( WeightedSoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, bottom_diff, pos_mult_, pos_cid_, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count); caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(WeightedSoftmaxWithLossLayer); } // namespace caffe
cb9df2124b6458589a2787029127f41e2c67c256.cu
#include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/weighted_softmax_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void WeightedSoftmaxLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, Dtype* loss, const Dtype pos_mult_, const int pos_cid_, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); Dtype w = (label_value == pos_cid_) ? pos_mult_ : 1; if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -w * log(max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN))); counts[index] = 1; } } } template <typename Dtype> void WeightedSoftmaxWithLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) WeightedSoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data, pos_mult_, pos_cid_, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, valid_count); if (top.size() == 2) { top[1]->ShareData(prob_); } } template <typename Dtype> __global__ void WeightedSoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, const Dtype* label, Dtype* bottom_diff, const Dtype pos_mult_, const int pos_cid_, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); Dtype w = (label_value == pos_cid_) ? 
pos_mult_ : 1; if (has_ignore_label_ && label_value == ignore_label_) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; counts[index] = 1; for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] *= w; } } } } template <typename Dtype> void WeightedSoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is never used for anything else, // we use it to avoid allocating new GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) WeightedSoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff, pos_mult_, pos_cid_, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count); caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(WeightedSoftmaxWithLossLayer); } // namespace caffe
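Both Caffe kernels above walk their work items with CUDA_KERNEL_LOOP, Caffe's grid-stride loop macro, so a fixed launch of CAFFE_GET_BLOCKS(nthreads) blocks covers any nthreads. The kernel below is a minimal sketch of that pattern written out by hand; the element-wise weighted log-loss it computes is a simplified stand-in (flat arrays instead of the [n, c, h, w] indexing above), and the expansion shown in the comment is the conventional Caffe definition.

#include <cuda_runtime.h>
#include <cfloat>

// CUDA_KERNEL_LOOP(i, n) conventionally expands to exactly this kind of grid-stride loop:
//   for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)
__global__ void weighted_log_loss(const float* prob, const int* label, float* loss,
                                  float pos_mult, int pos_cid, int n) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x) {
        // Per-element weight, mirroring the pos_cid_/pos_mult_ logic above.
        float w = (label[i] == pos_cid) ? pos_mult : 1.f;
        loss[i] = -w * logf(fmaxf(prob[i], FLT_MIN));  // clamp as the forward kernel does
    }
}

// Example launch with CAFFE_GET_BLOCKS-style sizing (threads chosen here as 512):
//   weighted_log_loss<<<(n + 511) / 512, 512>>>(d_prob, d_label, d_loss, 2.f, 1, n);

Because the stride is blockDim.x * gridDim.x, the same kernel stays correct whether the launch covers every index once or loops several times per thread.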
bfee24cdd7efc64b941285906ab8b2853af65daf.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <cutil.h> // Includes #include <stdio.h> #include "../include/ContAcq-IntClk.h" // includes, project #include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples //#include <shrQATest.h> //#include <shrUtils.h> // includes CUDA #include <hip/hip_runtime.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 60 #define ITERATIONS 40 // Variables unsigned* h_A; unsigned* h_B; unsigned* h_C; unsigned* d_A; unsigned* d_B; unsigned* d_C; bool noprompt = false; unsigned int my_timer; // Functions void CleanupResources(void); void RandomInit(unsigned*, int); void ParseArguments(int, char**); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(hipError_t err, const char *file, const int line ) { if(hipSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling hipGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) { hipError_t err = hipGetLastError(); if (hipSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions __global__ void PowerKernal2(const unsigned* A, const unsigned* B, unsigned* C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation unsigned Value1=0; unsigned Value2=0; unsigned Value3=0; unsigned Value=0; unsigned I1=A[i]; unsigned I2=B[i]; // Excessive Addition access if( ((i%2)==0) ){ for(unsigned k=0; k<ITERATIONS;k++) { Value2= I1+I2; Value3=I1-I2; Value1-=Value2; Value3+=Value1; Value2-=Value3; Value1+=Value3; // Value2= I1+I2; // Value3=I1-I2; // Value1=I1-Value2; // Value3+=Value1; // Value2-=Value3; // Value1+=Value3; } } __syncthreads(); Value=Value1; C[i]=Value; __syncthreads(); } int main() { printf("Power Microbenchmarks\n"); int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS; size_t size = N * sizeof(unsigned); // Allocate input vectors h_A and h_B in host memory h_A = (unsigned*)malloc(size); if (h_A == 0) CleanupResources(); h_B = (unsigned*)malloc(size); if (h_B == 0) CleanupResources(); h_C = (unsigned*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); RandomInit(h_B, N); // Allocate vectors in device memory checkCudaErrors( hipMalloc((void**)&d_A, size) ); checkCudaErrors( hipMalloc((void**)&d_B, size) ); checkCudaErrors( hipMalloc((void**)&d_C, size) ); // Copy vectors from host memory to device memory checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) ); //VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); dim3 dimGrid2(1,1); dim3 dimBlock2(1,1); CUT_SAFE_CALL(cutCreateTimer(&my_timer)); TaskHandle taskhandle = LaunchDAQ(); CUT_SAFE_CALL(cutStartTimer(my_timer)); printf("execution time = %f\n", cutGetTimerValue(my_timer)); hipLaunchKernelGGL(( 
PowerKernal2), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, N); CUDA_SAFE_CALL( hipDeviceSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); getLastCudaError("kernel launch failure"); CUDA_SAFE_CALL( hipDeviceSynchronize() ); CUT_SAFE_CALL(cutStopTimer(my_timer)); TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer)); printf("execution time = %f\n", cutGetTimerValue(my_timer)); CUT_SAFE_CALL(cutDeleteTimer(my_timer)); #ifdef _DEBUG checkCudaErrors( hipDeviceSynchronize() ); #endif // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) ); CleanupResources(); return 0; } void CleanupResources(void) { // Free device memory if (d_A) hipFree(d_A); if (d_B) hipFree(d_B); if (d_C) hipFree(d_C); // Free host memory if (h_A) free(h_A); if (h_B) free(h_B); if (h_C) free(h_C); } // Allocates an array with random float entries. void RandomInit(unsigned* data, int n) { for (int i = 0; i < n; ++i){ srand((unsigned)time(0)); data[i] = rand() / RAND_MAX; } }
bfee24cdd7efc64b941285906ab8b2853af65daf.cu
#include <stdio.h> #include <stdlib.h> #include <cutil.h> // Includes #include <stdio.h> #include "../include/ContAcq-IntClk.h" // includes, project #include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples //#include <shrQATest.h> //#include <shrUtils.h> // includes CUDA #include <cuda_runtime.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 60 #define ITERATIONS 40 // Variables unsigned* h_A; unsigned* h_B; unsigned* h_C; unsigned* d_A; unsigned* d_B; unsigned* d_C; bool noprompt = false; unsigned int my_timer; // Functions void CleanupResources(void); void RandomInit(unsigned*, int); void ParseArguments(int, char**); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(cudaError err, const char *file, const int line ) { if(cudaSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling cudaGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions __global__ void PowerKernal2(const unsigned* A, const unsigned* B, unsigned* C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation unsigned Value1=0; unsigned Value2=0; unsigned Value3=0; unsigned Value=0; unsigned I1=A[i]; unsigned I2=B[i]; // Excessive Addition access if( ((i%2)==0) ){ for(unsigned k=0; k<ITERATIONS;k++) { Value2= I1+I2; Value3=I1-I2; Value1-=Value2; Value3+=Value1; Value2-=Value3; Value1+=Value3; // Value2= I1+I2; // Value3=I1-I2; // Value1=I1-Value2; // Value3+=Value1; // Value2-=Value3; // Value1+=Value3; } } __syncthreads(); Value=Value1; C[i]=Value; __syncthreads(); } int main() { printf("Power Microbenchmarks\n"); int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS; size_t size = N * sizeof(unsigned); // Allocate input vectors h_A and h_B in host memory h_A = (unsigned*)malloc(size); if (h_A == 0) CleanupResources(); h_B = (unsigned*)malloc(size); if (h_B == 0) CleanupResources(); h_C = (unsigned*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); RandomInit(h_B, N); // Allocate vectors in device memory checkCudaErrors( cudaMalloc((void**)&d_A, size) ); checkCudaErrors( cudaMalloc((void**)&d_B, size) ); checkCudaErrors( cudaMalloc((void**)&d_C, size) ); // Copy vectors from host memory to device memory checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) ); //VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); dim3 dimGrid2(1,1); dim3 dimBlock2(1,1); CUT_SAFE_CALL(cutCreateTimer(&my_timer)); TaskHandle taskhandle = LaunchDAQ(); CUT_SAFE_CALL(cutStartTimer(my_timer)); printf("execution time = %f\n", cutGetTimerValue(my_timer)); PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N); CUDA_SAFE_CALL( 
cudaThreadSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); getLastCudaError("kernel launch failure"); CUDA_SAFE_CALL( cudaThreadSynchronize() ); CUT_SAFE_CALL(cutStopTimer(my_timer)); TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer)); printf("execution time = %f\n", cutGetTimerValue(my_timer)); CUT_SAFE_CALL(cutDeleteTimer(my_timer)); #ifdef _DEBUG checkCudaErrors( cudaDeviceSynchronize() ); #endif // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) ); CleanupResources(); return 0; } void CleanupResources(void) { // Free device memory if (d_A) cudaFree(d_A); if (d_B) cudaFree(d_B); if (d_C) cudaFree(d_C); // Free host memory if (h_A) free(h_A); if (h_B) free(h_B); if (h_C) free(h_C); } // Allocates an array with random float entries. void RandomInit(unsigned* data, int n) { for (int i = 0; i < n; ++i){ srand((unsigned)time(0)); data[i] = rand() / RAND_MAX; } }
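In both the HIP and CUDA versions of this microbenchmark, RandomInit reseeds the generator on every loop iteration and computes rand() / RAND_MAX with integer division, so each element ends up 0 (the comment also says "float entries" although the buffers are unsigned). The kernel's behaviour does not depend on the values, but if genuinely random inputs are wanted, a minimal corrected sketch under that assumption would be:

#include <stdlib.h>
#include <time.h>

// Seed once, then store raw rand() values; the original reseeded with the same
// time(0) inside the loop and truncated rand() / RAND_MAX to zero.
void RandomInit(unsigned* data, int n)
{
    srand((unsigned)time(0));
    for (int i = 0; i < n; ++i)
        data[i] = (unsigned)rand();
}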
71db30cec034c1b1c289072f6541840608eceaea.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

// Issues eight strided reads from (uninitialized) shared memory per iteration;
// only the generated access pattern appears to matter for this microbenchmark.
// Note: with dummy > 0 the indices run far past A[1000], so the reads are out
// of bounds unless the program is invoked with dummy == 0.
__global__ void saxpy(unsigned num_streams, unsigned addr1, unsigned addr2, unsigned addr3,
                      unsigned addr4, unsigned addr5, unsigned addr6, unsigned addr7,
                      unsigned addr8, int dummy, float *x)
{
    __shared__ float A[1000];
    int id = blockIdx.x*blockDim.x + threadIdx.x;
    float a = 0, b = 0, c = 0, d = 0, e = 0, f = 0, g = 0, h = 0;
    for (int i = 0; i < 1000 - 8; i += 8) {
        a = A[id + 8*i*dummy];
        b = A[id + 1*i*dummy];
        c = A[id + 2*i*dummy];
        d = A[id + 3*i*dummy];
        e = A[id + 4*i*dummy];
        f = A[id + 5*i*dummy];
        g = A[id + 6*i*dummy];
        h = A[id + 7*i*dummy];
    }
    x[id] = a + b + c + d + e + f + g + h;
}

int main(int argc, char *argv[])
{
    int N = 1000;  // Perform SAXPY on 1000 elements
    float *h_x = (float *)malloc(N*sizeof(float));
    // d_x is a hard-coded device address (100), not an allocation; the copies
    // below therefore target an arbitrary address and are not checked.
    float *d_x = (float *)100;
    float *d_x_copy;
    hipMalloc(&d_x_copy, N*sizeof(float));
    // hipMalloc(&d_x, 2*sizeof(float));
    for (int i = 1 ; i <= N ; i++)
        h_x[i-1] = (float)i;
    hipMemcpy(d_x, h_x, N*sizeof(float), hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( saxpy), dim3(1), dim3(8), 0, 0, 8, 100, 100, 100, 100, 100, 100, 100, 100, atoi(argv[1]), d_x);
    hipMemcpy(h_x, d_x, sizeof(float), hipMemcpyDeviceToHost);
    printf("%f\n", *h_x);
}
71db30cec034c1b1c289072f6541840608eceaea.cu
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

// Issues eight strided reads from (uninitialized) shared memory per iteration;
// only the generated access pattern appears to matter for this microbenchmark.
// Note: with dummy > 0 the indices run far past A[1000], so the reads are out
// of bounds unless the program is invoked with dummy == 0.
__global__ void saxpy(unsigned num_streams, unsigned addr1, unsigned addr2, unsigned addr3,
                      unsigned addr4, unsigned addr5, unsigned addr6, unsigned addr7,
                      unsigned addr8, int dummy, float *x)
{
    __shared__ float A[1000];
    int id = blockIdx.x*blockDim.x + threadIdx.x;
    float a = 0, b = 0, c = 0, d = 0, e = 0, f = 0, g = 0, h = 0;
    for (int i = 0; i < 1000 - 8; i += 8) {
        a = A[id + 8*i*dummy];
        b = A[id + 1*i*dummy];
        c = A[id + 2*i*dummy];
        d = A[id + 3*i*dummy];
        e = A[id + 4*i*dummy];
        f = A[id + 5*i*dummy];
        g = A[id + 6*i*dummy];
        h = A[id + 7*i*dummy];
    }
    x[id] = a + b + c + d + e + f + g + h;
}

int main(int argc, char *argv[])
{
    int N = 1000;  // Perform SAXPY on 1000 elements
    float *h_x = (float *)malloc(N*sizeof(float));
    // d_x is a hard-coded device address (100), not an allocation; the copies
    // below therefore target an arbitrary address and are not checked.
    float *d_x = (float *)100;
    float *d_x_copy;
    cudaMalloc(&d_x_copy, N*sizeof(float));
    // cudaMalloc(&d_x, 2*sizeof(float));
    for (int i = 1 ; i <= N ; i++)
        h_x[i-1] = (float)i;
    cudaMemcpy(d_x, h_x, N*sizeof(float), cudaMemcpyHostToDevice);
    saxpy<<<1, 8>>>(8, 100, 100, 100, 100, 100, 100, 100, 100, atoi(argv[1]), d_x);
    cudaMemcpy(h_x, d_x, sizeof(float), cudaMemcpyDeviceToHost);
    printf("%f\n", *h_x);
}
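Neither version of this program checks CUDA/HIP return codes, so copies through the hard-coded pointer (float *)100 will typically fail and go unnoticed. A small self-contained sketch of the error-checking pattern that would surface such failures (the CHECK macro and the cudaMemset call are illustrations, not part of the original benchmark):

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Abort with file/line and the runtime's error string if a CUDA call fails.
#define CHECK(call)                                                      \
    do {                                                                 \
        cudaError_t err_ = (call);                                       \
        if (err_ != cudaSuccess) {                                       \
            fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,           \
                    cudaGetErrorString(err_));                           \
            exit(EXIT_FAILURE);                                          \
        }                                                                \
    } while (0)

int main()
{
    const int N = 1000;
    float *d_x = nullptr;
    CHECK(cudaMalloc(&d_x, N * sizeof(float)));   // allocate instead of casting a literal address
    CHECK(cudaMemset(d_x, 0, N * sizeof(float)));
    CHECK(cudaFree(d_x));
    return 0;
}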
40455cb2b519e9d3f928c16e2674b80863153121.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ /** * Copyright (c) 2017 Darius Rckert * Licensed under the MIT License. * See LICENSE file for more information. 
*/ #include "saiga/cuda/device_helper.h" #include "Fast.h" namespace Saiga { namespace CUDA { __constant__ unsigned char c_table[] = { 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 
0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0xc0, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; // 1 -> v > x + th // 2 -> v < x - th // 0 -> x - th <= v <= x + th __device__ __forceinline__ int diffType(const int v, const int x, const int th) { const int diff = x - v; return static_cast<int>(diff < -th) + (static_cast<int>(diff > th) << 1); } __device__ void calcMask(const uint C[4], const int v, const int th, int& mask1, int& mask2) { mask1 = 0; mask2 = 0; int d1, d2; d1 = diffType(v, C[0] & 0xff, th); d2 = diffType(v, C[2] & 0xff, th); if ((d1 | d2) == 0) return; mask1 |= (d1 & 1) << 0; mask2 |= ((d1 & 2) >> 1) << 0; mask1 |= (d2 & 1) << 8; mask2 |= ((d2 & 2) >> 1) << 8; d1 = diffType(v, C[1] & 0xff, th); d2 = diffType(v, C[3] & 0xff, th); if ((d1 | d2) == 0) return; mask1 |= (d1 & 1) << 4; mask2 |= ((d1 & 2) >> 1) << 4; mask1 |= (d2 & 1) << 12; mask2 |= ((d2 & 2) >> 1) << 12; d1 = diffType(v, (C[0] >> (2 * 8)) & 0xff, th); d2 = diffType(v, (C[2] >> (2 * 8)) & 0xff, th); if ((d1 | d2) == 0) return; mask1 |= (d1 & 1) << 2; mask2 |= ((d1 & 2) >> 1) << 2; mask1 |= (d2 & 1) << 10; mask2 |= ((d2 & 2) >> 1) << 10; d1 = diffType(v, (C[1] >> (2 * 8)) & 0xff, th); d2 = diffType(v, (C[3] >> (2 * 8)) & 0xff, th); if ((d1 | d2) == 0) return; mask1 |= (d1 & 1) << 6; mask2 |= ((d1 & 2) >> 1) << 6; mask1 |= (d2 & 1) << 14; mask2 |= ((d2 & 2) >> 1) << 14; d1 = diffType(v, (C[0] >> (1 * 8)) & 0xff, th); d2 
= diffType(v, (C[2] >> (1 * 8)) & 0xff, th); /*if ((d1 | d2) == 0) return;*/ mask1 |= (d1 & 1) << 1; mask2 |= ((d1 & 2) >> 1) << 1; mask1 |= (d2 & 1) << 9; mask2 |= ((d2 & 2) >> 1) << 9; d1 = diffType(v, (C[0] >> (3 * 8)) & 0xff, th); d2 = diffType(v, (C[2] >> (3 * 8)) & 0xff, th); /*if ((d1 | d2) == 0) return;*/ mask1 |= (d1 & 1) << 3; mask2 |= ((d1 & 2) >> 1) << 3; mask1 |= (d2 & 1) << 11; mask2 |= ((d2 & 2) >> 1) << 11; d1 = diffType(v, (C[1] >> (1 * 8)) & 0xff, th); d2 = diffType(v, (C[3] >> (1 * 8)) & 0xff, th); /*if ((d1 | d2) == 0) return;*/ mask1 |= (d1 & 1) << 5; mask2 |= ((d1 & 2) >> 1) << 5; mask1 |= (d2 & 1) << 13; mask2 |= ((d2 & 2) >> 1) << 13; d1 = diffType(v, (C[1] >> (3 * 8)) & 0xff, th); d2 = diffType(v, (C[3] >> (3 * 8)) & 0xff, th); mask1 |= (d1 & 1) << 7; mask2 |= ((d1 & 2) >> 1) << 7; mask1 |= (d2 & 1) << 15; mask2 |= ((d2 & 2) >> 1) << 15; } // 1 -> v > x + th // 2 -> v < x - th // 0 -> not a keypoint __device__ __forceinline__ bool isKeyPoint(int mask1, int mask2) { return (__popc(mask1) > 8 && (c_table[(mask1 >> 3) - 63] & (1 << (mask1 & 7)))) || (__popc(mask2) > 8 && (c_table[(mask2 >> 3) - 63] & (1 << (mask2 & 7)))); } __device__ int cornerScore(const uint C[4], const int v, const int threshold) { // binary search in [threshold + 1, 255] int min = threshold + 1; int max = 255; while (min <= max) { const int mid = (min + max) >> 1; int mask1 = 0; int mask2 = 0; calcMask(C, v, mid, mask1, mask2); int isKp = static_cast<int>(isKeyPoint(mask1, mask2)); min = isKp * (mid + 1) + (isKp ^ 1) * min; max = (isKp ^ 1) * (mid - 1) + isKp * max; } return min - 1; } __device__ int isKeyPoint2(Saiga::ImageView<unsigned char> img, const int i, const int j, const int threshold) { int v; uint C[4] = {0, 0, 0, 0}; C[2] |= static_cast<uint>(img(i - 3, j - 1)) << 8; C[2] |= static_cast<uint>(img(i - 3, j)); C[1] |= static_cast<uint>(img(i - 3, j + 1)) << (3 * 8); C[2] |= static_cast<uint>(img(i - 2, j - 2)) << (2 * 8); C[1] |= static_cast<uint>(img(i - 2, j + 2)) << (2 * 8); C[2] |= static_cast<uint>(img(i - 1, j - 3)) << (3 * 8); C[1] |= static_cast<uint>(img(i - 1, j + 3)) << 8; C[3] |= static_cast<uint>(img(i, j - 3)); v = static_cast<int>(img(i, j)); C[1] |= static_cast<uint>(img(i, j + 3)); int d1 = diffType(v, C[1] & 0xff, threshold); int d2 = diffType(v, C[3] & 0xff, threshold); if ((d1 | d2) == 0) { return 0; } C[3] |= static_cast<uint>(img(i + 1, j - 3)) << 8; C[0] |= static_cast<uint>(img(i + 1, j + 3)) << (3 * 8); C[3] |= static_cast<uint>(img(i + 2, j - 2)) << (2 * 8); C[0] |= static_cast<uint>(img(i + 2, j + 2)) << (2 * 8); C[3] |= static_cast<uint>(img(i + 3, j - 1)) << (3 * 8); C[0] |= static_cast<uint>(img(i + 3, j)); C[0] |= static_cast<uint>(img(i + 3, j + 1)) << 8; int mask1 = 0; int mask2 = 0; calcMask(C, v, threshold, mask1, mask2); if (isKeyPoint(mask1, mask2)) { return cornerScore(C, v, threshold); } return 0; } __device__ bool isMax(int2 loc, Saiga::ImageView<int> scoreMat) { int score = scoreMat(loc.y, loc.x); bool ismax = score > scoreMat(loc.y - 1, loc.x - 1) && score > scoreMat(loc.y - 1, loc.x) && score > scoreMat(loc.y - 1, loc.x + 1) && score > scoreMat(loc.y, loc.x - 1) && score > scoreMat(loc.y, loc.x + 1) && score > scoreMat(loc.y + 1, loc.x - 1) && score > scoreMat(loc.y + 1, loc.x) && score > scoreMat(loc.y + 1, loc.x + 1); return ismax; } template <int TILE_SIZE_X, int TILE_SIZE_Y> __global__ void tileCalcKeypoints_kernel(Saiga::ImageView<unsigned char> img_, short2* kpLoc, float* kpScore, const unsigned int maxKeypoints, const int highThreshold, 
const int lowThreshold, unsigned int* counter_ptr) { int max_kps_high = 50; int max_kps_low = 50; const int required_border = 4; const int local_image_w = TILE_SIZE_X + 2 * required_border; const int local_image_h = TILE_SIZE_Y + 2 * required_border; static_assert(local_image_w % 4 == 0, "sdjf"); static_assert(local_image_h % 4 == 0, "sdjf"); CUDA_ASSERT(img_.pitchBytes % 4 == 0); __shared__ int local_image_i[local_image_h][local_image_w / 4]; __shared__ int local_score[local_image_h][local_image_w]; __shared__ unsigned int num_kps; const int2 global_inner_start = {int(blockIdx.x * blockDim.x), int((blockIdx.y * blockDim.y) * 4)}; const int2 global_outer_start = {global_inner_start.x - 4, global_inner_start.y - 4}; const int block_start_x = blockIdx.x * blockDim.x; const int block_start_y = (blockIdx.y * blockDim.y) * 4; const int linear_local_tid = threadIdx.y * blockDim.x + threadIdx.x; for (int t = linear_local_tid; t < (local_image_w / 4) * local_image_h; t += blockDim.x * blockDim.y) { int local_x = t % (local_image_w / 4); int local_y = t / (local_image_w / 4); int x = global_outer_start.x + local_x * 4; int y = global_outer_start.y + local_y; CUDA_ASSERT(x % 4 == 0); // clamp to border is better than conditional reads x = max(0, min(x, (int)img_.pitchBytes - 4)); y = max(0, min(y, img_.rows - 1)); CUDA_ASSERT(x % 4 == 0); reinterpret_cast<int*>(&local_image_i[local_y][local_x])[0] = reinterpret_cast<const int*>(&img_(y, x))[0]; } __syncthreads(); Saiga::ImageView<unsigned char> img; img.w = local_image_w; img.h = local_image_h; img.pitchBytes = local_image_w; img.dataT = reinterpret_cast<unsigned char*>(&local_image_i[0][0]); Saiga::ImageView<int> scoreMat; scoreMat.w = local_image_w; scoreMat.h = local_image_h; scoreMat.pitchBytes = local_image_w * 4; scoreMat.dataT = reinterpret_cast<int*>(&local_score[0][0]); if (linear_local_tid == 0) { num_kps = 0; } // compute score for (int t = linear_local_tid; t < (32 + 2) * (32 + 2); t += blockDim.x * blockDim.y) { int local_x = t % (32 + 2); int local_y = t / (32 + 2); int x = local_x + 3; int y = local_y + 3; scoreMat(y, x) = isKeyPoint2(img, y, x, highThreshold); } __syncthreads(); for (int t = 0; t < 4; ++t) { int inner_x = threadIdx.x; int inner_y = threadIdx.y + t * 8; int x = inner_x + 4; int y = inner_y + 4; int global_x = inner_x + global_inner_start.x; int global_y = inner_y + global_inner_start.y; int score = scoreMat(y, x); if (score == 0) continue; if (global_y >= img_.rows - 4 || global_x >= img_.cols - 4 || global_x < 4 || global_y < 4) { continue; } if (isMax(make_int2(x, y), scoreMat)) { auto local_index = atomicInc(&num_kps, (unsigned int)(-1)); if (local_index < max_kps_high) { auto global_index = atomicInc(counter_ptr, (unsigned int)(-1)); if (global_index < maxKeypoints) { short2 loc; loc.x = global_x; loc.y = global_y; kpLoc[global_index] = loc; kpScore[global_index] = static_cast<float>(score); } } } } __syncthreads(); if (num_kps > 0) return; // compute score for (int t = linear_local_tid; t < (TILE_SIZE_X + 2) * (TILE_SIZE_Y + 2); t += blockDim.x * blockDim.y) { int local_x = t % (TILE_SIZE_X + 2); int local_y = t / (TILE_SIZE_Y + 2); int x = local_x + 3; int y = local_y + 3; bool in_bounds = block_start_y + y < img_.rows - 3 & block_start_x + x < img_.cols - 3; scoreMat(y, x) = in_bounds * isKeyPoint2(img, y, x, lowThreshold); } __syncthreads(); for (int t = 0; t < 4; ++t) { int inner_x = threadIdx.x; int inner_y = threadIdx.y + t * 8; int x = inner_x + 4; int y = inner_y + 4; int global_x = inner_x + 
global_inner_start.x; int global_y = inner_y + global_inner_start.y; int score = scoreMat(y, x); if (score == 0) continue; if (global_y >= img_.rows - 4 || global_x >= img_.cols - 4 || global_x < 4 || global_y < 4) { continue; } if (isMax(make_int2(x, y), scoreMat)) { auto local_index = atomicInc(&num_kps, (unsigned int)(-1)); if (local_index < max_kps_low) { auto global_index = atomicInc(counter_ptr, (unsigned int)(-1)); if (global_index < maxKeypoints) { short2 loc; loc.x = global_x; loc.y = global_y; kpLoc[global_index] = loc; kpScore[global_index] = static_cast<float>(score); } } } } } __global__ void createKps(Saiga::ArrayView<Saiga::KeyPoint<float>> kps, short2* kpLoc, float* kpScore) { Saiga::CUDA::ThreadInfo<> ti; int i = ti.thread_id; if (i >= kps.size()) { return; } kps[i] = Saiga::KeyPoint<float>(kpLoc[i].x, kpLoc[i].y, 7, -1, kpScore[i]); } Fast::Fast(int highThreshold, int lowThreshold, int maxKeypoints) : highThreshold(highThreshold), lowThreshold(lowThreshold), maxKeypoints(maxKeypoints) { counter_keypoint_location.resize(maxKeypoints + 1); keypoint_score.resize(maxKeypoints); h_counter_keypoint_location.resize(maxKeypoints + 1); h_keypoint_score.resize(maxKeypoints); } Fast::~Fast() {} void Fast::Detect(Saiga::ImageView<unsigned char> d_image, hipStream_t stream) { auto h_counter = (unsigned int*)h_counter_keypoint_location.data(); auto d_counter = (unsigned int*)counter_keypoint_location.data().get(); auto keypoint_location = counter_keypoint_location.data().get() + 1; { CHECK_CUDA_ERROR(hipMemsetAsync(d_counter, 0, sizeof(unsigned int), stream)); dim3 dimBlock(32, 8); dim3 dimGrid(Saiga::iDivUp(d_image.cols, 32), Saiga::iDivUp(d_image.rows, 32)); hipLaunchKernelGGL(( tileCalcKeypoints_kernel<32, 32>), dim3(dimGrid), dim3(dimBlock), 0, stream, d_image, keypoint_location, keypoint_score.data().get(), maxKeypoints, highThreshold, lowThreshold, d_counter); CHECK_CUDA_ERROR(hipMemcpyAsync(h_counter_keypoint_location.data(), counter_keypoint_location.data().get(), sizeof(short2) * (actual_max_keypoints + 1), hipMemcpyDeviceToHost, stream)); CHECK_CUDA_ERROR(hipMemcpyAsync(h_keypoint_score.data(), keypoint_score.data().get(), sizeof(float) * actual_max_keypoints, hipMemcpyDeviceToHost, stream)); detection_finished.record(stream); } } int Fast::Download(Saiga::ArrayView<Saiga::KeyPoint<float>> keypoints, hipStream_t stream) { detection_finished.synchronize(); auto h_counter = (unsigned int*)h_counter_keypoint_location.data(); auto keypoint_location = counter_keypoint_location.data().get() + 1; auto count = h_counter[0]; if (count > actual_max_keypoints) { auto remaining_points = count - actual_max_keypoints; CHECK_CUDA_ERROR(hipMemcpyAsync(h_counter_keypoint_location.data() + actual_max_keypoints + 1, keypoint_location + actual_max_keypoints, sizeof(short2) * remaining_points, hipMemcpyDeviceToHost, stream)); CHECK_CUDA_ERROR(hipMemcpyAsync(h_keypoint_score.data() + actual_max_keypoints, keypoint_score.data().get() + actual_max_keypoints, sizeof(float) * remaining_points, hipMemcpyDeviceToHost, stream)); actual_max_keypoints = count * 1.05; CHECK_CUDA_ERROR(hipStreamSynchronize(stream)); } SAIGA_ASSERT(keypoints.size() >= count); for (int i = 0; i < count; ++i) { Saiga::KeyPoint<float> kp(h_counter_keypoint_location[i + 1].x, h_counter_keypoint_location[i + 1].y, 0, -1, h_keypoint_score[i]); keypoints[i] = kp; } return count; } } // namespace CUDA } // namespace Saiga
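For reference, the detector above packs the 16 pixels of the radius-3 Bresenham circle into two 16-bit masks (mask1 marks circle pixels with v > x + th, mask2 those with v < x - th) and classifies a corner via __popc plus the c_table lookup. The host-side sketch below is an added illustration, not code from either file: it spells out the predicate that the table lookup appears to encode, a wrap-around run of at least 9 consecutive set bits, and the helper name hasContiguousArc is introduced only for this sketch.

#include <cstdint>
#include <cstdio>

// Does a 16-bit circle mask contain a run of at least minRun consecutive set
// bits, counting wrap-around? (Sketch of the test precomputed in c_table.)
static bool hasContiguousArc(uint16_t mask, int minRun = 9)
{
    // Duplicate the 16 circle bits so wrap-around runs become linear runs.
    const uint32_t m = (uint32_t(mask) << 16) | mask;
    int run = 0;
    for (int i = 0; i < 32; ++i)
    {
        run = ((m >> i) & 1u) ? run + 1 : 0;
        if (run >= minRun) return true;
    }
    return false;
}

int main()
{
    printf("%d\n", hasContiguousArc(0x01FF)); // bits 0..8 set            -> 1
    printf("%d\n", hasContiguousArc(0x00FF)); // only 8 in a row          -> 0
    printf("%d\n", hasContiguousArc(0xF80F)); // 5 + 4 across the wrap    -> 1
    return 0;
}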
40455cb2b519e9d3f928c16e2674b80863153121.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ /** * Copyright (c) 2017 Darius Rückert * Licensed under the MIT License. * See LICENSE file for more information. 
*/ #include "saiga/cuda/device_helper.h" #include "Fast.h" namespace Saiga { namespace CUDA { __constant__ unsigned char c_table[] = { 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 
0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0xc0, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; // 1 -> v > x + th // 2 -> v < x - th // 0 -> x - th <= v <= x + th __device__ __forceinline__ int diffType(const int v, const int x, const int th) { const int diff = x - v; return static_cast<int>(diff < -th) + (static_cast<int>(diff > th) << 1); } __device__ void calcMask(const uint C[4], const int v, const int th, int& mask1, int& mask2) { mask1 = 0; mask2 = 0; int d1, d2; d1 = diffType(v, C[0] & 0xff, th); d2 = diffType(v, C[2] & 0xff, th); if ((d1 | d2) == 0) return; mask1 |= (d1 & 1) << 0; mask2 |= ((d1 & 2) >> 1) << 0; mask1 |= (d2 & 1) << 8; mask2 |= ((d2 & 2) >> 1) << 8; d1 = diffType(v, C[1] & 0xff, th); d2 = diffType(v, C[3] & 0xff, th); if ((d1 | d2) == 0) return; mask1 |= (d1 & 1) << 4; mask2 |= ((d1 & 2) >> 1) << 4; mask1 |= (d2 & 1) << 12; mask2 |= ((d2 & 2) >> 1) << 12; d1 = diffType(v, (C[0] >> (2 * 8)) & 0xff, th); d2 = diffType(v, (C[2] >> (2 * 8)) & 0xff, th); if ((d1 | d2) == 0) return; mask1 |= (d1 & 1) << 2; mask2 |= ((d1 & 2) >> 1) << 2; mask1 |= (d2 & 1) << 10; mask2 |= ((d2 & 2) >> 1) << 10; d1 = diffType(v, (C[1] >> (2 * 8)) & 0xff, th); d2 = diffType(v, (C[3] >> (2 * 8)) & 0xff, th); if ((d1 | d2) == 0) return; mask1 |= (d1 & 1) << 6; mask2 |= ((d1 & 2) >> 1) << 6; mask1 |= (d2 & 1) << 14; mask2 |= ((d2 & 2) >> 1) << 14; d1 = diffType(v, (C[0] >> (1 * 8)) & 0xff, th); d2 
= diffType(v, (C[2] >> (1 * 8)) & 0xff, th); /*if ((d1 | d2) == 0) return;*/ mask1 |= (d1 & 1) << 1; mask2 |= ((d1 & 2) >> 1) << 1; mask1 |= (d2 & 1) << 9; mask2 |= ((d2 & 2) >> 1) << 9; d1 = diffType(v, (C[0] >> (3 * 8)) & 0xff, th); d2 = diffType(v, (C[2] >> (3 * 8)) & 0xff, th); /*if ((d1 | d2) == 0) return;*/ mask1 |= (d1 & 1) << 3; mask2 |= ((d1 & 2) >> 1) << 3; mask1 |= (d2 & 1) << 11; mask2 |= ((d2 & 2) >> 1) << 11; d1 = diffType(v, (C[1] >> (1 * 8)) & 0xff, th); d2 = diffType(v, (C[3] >> (1 * 8)) & 0xff, th); /*if ((d1 | d2) == 0) return;*/ mask1 |= (d1 & 1) << 5; mask2 |= ((d1 & 2) >> 1) << 5; mask1 |= (d2 & 1) << 13; mask2 |= ((d2 & 2) >> 1) << 13; d1 = diffType(v, (C[1] >> (3 * 8)) & 0xff, th); d2 = diffType(v, (C[3] >> (3 * 8)) & 0xff, th); mask1 |= (d1 & 1) << 7; mask2 |= ((d1 & 2) >> 1) << 7; mask1 |= (d2 & 1) << 15; mask2 |= ((d2 & 2) >> 1) << 15; } // 1 -> v > x + th // 2 -> v < x - th // 0 -> not a keypoint __device__ __forceinline__ bool isKeyPoint(int mask1, int mask2) { return (__popc(mask1) > 8 && (c_table[(mask1 >> 3) - 63] & (1 << (mask1 & 7)))) || (__popc(mask2) > 8 && (c_table[(mask2 >> 3) - 63] & (1 << (mask2 & 7)))); } __device__ int cornerScore(const uint C[4], const int v, const int threshold) { // binary search in [threshold + 1, 255] int min = threshold + 1; int max = 255; while (min <= max) { const int mid = (min + max) >> 1; int mask1 = 0; int mask2 = 0; calcMask(C, v, mid, mask1, mask2); int isKp = static_cast<int>(isKeyPoint(mask1, mask2)); min = isKp * (mid + 1) + (isKp ^ 1) * min; max = (isKp ^ 1) * (mid - 1) + isKp * max; } return min - 1; } __device__ int isKeyPoint2(Saiga::ImageView<unsigned char> img, const int i, const int j, const int threshold) { int v; uint C[4] = {0, 0, 0, 0}; C[2] |= static_cast<uint>(img(i - 3, j - 1)) << 8; C[2] |= static_cast<uint>(img(i - 3, j)); C[1] |= static_cast<uint>(img(i - 3, j + 1)) << (3 * 8); C[2] |= static_cast<uint>(img(i - 2, j - 2)) << (2 * 8); C[1] |= static_cast<uint>(img(i - 2, j + 2)) << (2 * 8); C[2] |= static_cast<uint>(img(i - 1, j - 3)) << (3 * 8); C[1] |= static_cast<uint>(img(i - 1, j + 3)) << 8; C[3] |= static_cast<uint>(img(i, j - 3)); v = static_cast<int>(img(i, j)); C[1] |= static_cast<uint>(img(i, j + 3)); int d1 = diffType(v, C[1] & 0xff, threshold); int d2 = diffType(v, C[3] & 0xff, threshold); if ((d1 | d2) == 0) { return 0; } C[3] |= static_cast<uint>(img(i + 1, j - 3)) << 8; C[0] |= static_cast<uint>(img(i + 1, j + 3)) << (3 * 8); C[3] |= static_cast<uint>(img(i + 2, j - 2)) << (2 * 8); C[0] |= static_cast<uint>(img(i + 2, j + 2)) << (2 * 8); C[3] |= static_cast<uint>(img(i + 3, j - 1)) << (3 * 8); C[0] |= static_cast<uint>(img(i + 3, j)); C[0] |= static_cast<uint>(img(i + 3, j + 1)) << 8; int mask1 = 0; int mask2 = 0; calcMask(C, v, threshold, mask1, mask2); if (isKeyPoint(mask1, mask2)) { return cornerScore(C, v, threshold); } return 0; } __device__ bool isMax(int2 loc, Saiga::ImageView<int> scoreMat) { int score = scoreMat(loc.y, loc.x); bool ismax = score > scoreMat(loc.y - 1, loc.x - 1) && score > scoreMat(loc.y - 1, loc.x) && score > scoreMat(loc.y - 1, loc.x + 1) && score > scoreMat(loc.y, loc.x - 1) && score > scoreMat(loc.y, loc.x + 1) && score > scoreMat(loc.y + 1, loc.x - 1) && score > scoreMat(loc.y + 1, loc.x) && score > scoreMat(loc.y + 1, loc.x + 1); return ismax; } template <int TILE_SIZE_X, int TILE_SIZE_Y> __global__ void tileCalcKeypoints_kernel(Saiga::ImageView<unsigned char> img_, short2* kpLoc, float* kpScore, const unsigned int maxKeypoints, const int highThreshold, 
const int lowThreshold, unsigned int* counter_ptr) { int max_kps_high = 50; int max_kps_low = 50; const int required_border = 4; const int local_image_w = TILE_SIZE_X + 2 * required_border; const int local_image_h = TILE_SIZE_Y + 2 * required_border; static_assert(local_image_w % 4 == 0, "sdjf"); static_assert(local_image_h % 4 == 0, "sdjf"); CUDA_ASSERT(img_.pitchBytes % 4 == 0); __shared__ int local_image_i[local_image_h][local_image_w / 4]; __shared__ int local_score[local_image_h][local_image_w]; __shared__ unsigned int num_kps; const int2 global_inner_start = {int(blockIdx.x * blockDim.x), int((blockIdx.y * blockDim.y) * 4)}; const int2 global_outer_start = {global_inner_start.x - 4, global_inner_start.y - 4}; const int block_start_x = blockIdx.x * blockDim.x; const int block_start_y = (blockIdx.y * blockDim.y) * 4; const int linear_local_tid = threadIdx.y * blockDim.x + threadIdx.x; for (int t = linear_local_tid; t < (local_image_w / 4) * local_image_h; t += blockDim.x * blockDim.y) { int local_x = t % (local_image_w / 4); int local_y = t / (local_image_w / 4); int x = global_outer_start.x + local_x * 4; int y = global_outer_start.y + local_y; CUDA_ASSERT(x % 4 == 0); // clamp to border is better than conditional reads x = max(0, min(x, (int)img_.pitchBytes - 4)); y = max(0, min(y, img_.rows - 1)); CUDA_ASSERT(x % 4 == 0); reinterpret_cast<int*>(&local_image_i[local_y][local_x])[0] = reinterpret_cast<const int*>(&img_(y, x))[0]; } __syncthreads(); Saiga::ImageView<unsigned char> img; img.w = local_image_w; img.h = local_image_h; img.pitchBytes = local_image_w; img.dataT = reinterpret_cast<unsigned char*>(&local_image_i[0][0]); Saiga::ImageView<int> scoreMat; scoreMat.w = local_image_w; scoreMat.h = local_image_h; scoreMat.pitchBytes = local_image_w * 4; scoreMat.dataT = reinterpret_cast<int*>(&local_score[0][0]); if (linear_local_tid == 0) { num_kps = 0; } // compute score for (int t = linear_local_tid; t < (32 + 2) * (32 + 2); t += blockDim.x * blockDim.y) { int local_x = t % (32 + 2); int local_y = t / (32 + 2); int x = local_x + 3; int y = local_y + 3; scoreMat(y, x) = isKeyPoint2(img, y, x, highThreshold); } __syncthreads(); for (int t = 0; t < 4; ++t) { int inner_x = threadIdx.x; int inner_y = threadIdx.y + t * 8; int x = inner_x + 4; int y = inner_y + 4; int global_x = inner_x + global_inner_start.x; int global_y = inner_y + global_inner_start.y; int score = scoreMat(y, x); if (score == 0) continue; if (global_y >= img_.rows - 4 || global_x >= img_.cols - 4 || global_x < 4 || global_y < 4) { continue; } if (isMax(make_int2(x, y), scoreMat)) { auto local_index = atomicInc(&num_kps, (unsigned int)(-1)); if (local_index < max_kps_high) { auto global_index = atomicInc(counter_ptr, (unsigned int)(-1)); if (global_index < maxKeypoints) { short2 loc; loc.x = global_x; loc.y = global_y; kpLoc[global_index] = loc; kpScore[global_index] = static_cast<float>(score); } } } } __syncthreads(); if (num_kps > 0) return; // compute score for (int t = linear_local_tid; t < (TILE_SIZE_X + 2) * (TILE_SIZE_Y + 2); t += blockDim.x * blockDim.y) { int local_x = t % (TILE_SIZE_X + 2); int local_y = t / (TILE_SIZE_Y + 2); int x = local_x + 3; int y = local_y + 3; bool in_bounds = block_start_y + y < img_.rows - 3 & block_start_x + x < img_.cols - 3; scoreMat(y, x) = in_bounds * isKeyPoint2(img, y, x, lowThreshold); } __syncthreads(); for (int t = 0; t < 4; ++t) { int inner_x = threadIdx.x; int inner_y = threadIdx.y + t * 8; int x = inner_x + 4; int y = inner_y + 4; int global_x = inner_x + 
global_inner_start.x; int global_y = inner_y + global_inner_start.y; int score = scoreMat(y, x); if (score == 0) continue; if (global_y >= img_.rows - 4 || global_x >= img_.cols - 4 || global_x < 4 || global_y < 4) { continue; } if (isMax(make_int2(x, y), scoreMat)) { auto local_index = atomicInc(&num_kps, (unsigned int)(-1)); if (local_index < max_kps_low) { auto global_index = atomicInc(counter_ptr, (unsigned int)(-1)); if (global_index < maxKeypoints) { short2 loc; loc.x = global_x; loc.y = global_y; kpLoc[global_index] = loc; kpScore[global_index] = static_cast<float>(score); } } } } } __global__ void createKps(Saiga::ArrayView<Saiga::KeyPoint<float>> kps, short2* kpLoc, float* kpScore) { Saiga::CUDA::ThreadInfo<> ti; int i = ti.thread_id; if (i >= kps.size()) { return; } kps[i] = Saiga::KeyPoint<float>(kpLoc[i].x, kpLoc[i].y, 7, -1, kpScore[i]); } Fast::Fast(int highThreshold, int lowThreshold, int maxKeypoints) : highThreshold(highThreshold), lowThreshold(lowThreshold), maxKeypoints(maxKeypoints) { counter_keypoint_location.resize(maxKeypoints + 1); keypoint_score.resize(maxKeypoints); h_counter_keypoint_location.resize(maxKeypoints + 1); h_keypoint_score.resize(maxKeypoints); } Fast::~Fast() {} void Fast::Detect(Saiga::ImageView<unsigned char> d_image, cudaStream_t stream) { auto h_counter = (unsigned int*)h_counter_keypoint_location.data(); auto d_counter = (unsigned int*)counter_keypoint_location.data().get(); auto keypoint_location = counter_keypoint_location.data().get() + 1; { CHECK_CUDA_ERROR(cudaMemsetAsync(d_counter, 0, sizeof(unsigned int), stream)); dim3 dimBlock(32, 8); dim3 dimGrid(Saiga::iDivUp(d_image.cols, 32), Saiga::iDivUp(d_image.rows, 32)); tileCalcKeypoints_kernel<32, 32><<<dimGrid, dimBlock, 0, stream>>>(d_image, keypoint_location, keypoint_score.data().get(), maxKeypoints, highThreshold, lowThreshold, d_counter); CHECK_CUDA_ERROR(cudaMemcpyAsync(h_counter_keypoint_location.data(), counter_keypoint_location.data().get(), sizeof(short2) * (actual_max_keypoints + 1), cudaMemcpyDeviceToHost, stream)); CHECK_CUDA_ERROR(cudaMemcpyAsync(h_keypoint_score.data(), keypoint_score.data().get(), sizeof(float) * actual_max_keypoints, cudaMemcpyDeviceToHost, stream)); detection_finished.record(stream); } } int Fast::Download(Saiga::ArrayView<Saiga::KeyPoint<float>> keypoints, cudaStream_t stream) { detection_finished.synchronize(); auto h_counter = (unsigned int*)h_counter_keypoint_location.data(); auto keypoint_location = counter_keypoint_location.data().get() + 1; auto count = h_counter[0]; if (count > actual_max_keypoints) { auto remaining_points = count - actual_max_keypoints; CHECK_CUDA_ERROR(cudaMemcpyAsync(h_counter_keypoint_location.data() + actual_max_keypoints + 1, keypoint_location + actual_max_keypoints, sizeof(short2) * remaining_points, cudaMemcpyDeviceToHost, stream)); CHECK_CUDA_ERROR(cudaMemcpyAsync(h_keypoint_score.data() + actual_max_keypoints, keypoint_score.data().get() + actual_max_keypoints, sizeof(float) * remaining_points, cudaMemcpyDeviceToHost, stream)); actual_max_keypoints = count * 1.05; CHECK_CUDA_ERROR(cudaStreamSynchronize(stream)); } SAIGA_ASSERT(keypoints.size() >= count); for (int i = 0; i < count; ++i) { Saiga::KeyPoint<float> kp(h_counter_keypoint_location[i + 1].x, h_counter_keypoint_location[i + 1].y, 0, -1, h_keypoint_score[i]); keypoints[i] = kp; } return count; } } // namespace CUDA } // namespace Saiga
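The tiled detector above stages a padded (TILE + 2 * border) window of the image in shared memory and clamps out-of-range coordinates instead of taking conditional reads. Below is a hedged, self-contained sketch of just that cooperative load pattern; the kernel name, the 16x16 tile size and the 4-pixel apron are illustrative choices, not the Saiga implementation.

#include <cuda_runtime.h>

// Sketch: cooperatively load a 16x16 tile plus a 4-pixel apron into shared memory,
// clamping reads to the image border. Assumes 16x16 thread blocks.
__global__ void loadPaddedTile(const unsigned char* img, int rows, int cols, size_t pitch,
                               unsigned char* out, size_t outPitch)
{
    const int border = 4;
    const int tileW  = 16 + 2 * border;
    const int tileH  = 16 + 2 * border;
    __shared__ unsigned char tile[tileH][tileW];

    const int x0  = blockIdx.x * 16 - border;               // top-left of the padded window
    const int y0  = blockIdx.y * 16 - border;
    const int tid = threadIdx.y * blockDim.x + threadIdx.x;

    for (int t = tid; t < tileW * tileH; t += blockDim.x * blockDim.y) {
        int lx = t % tileW;
        int ly = t / tileW;
        int gx = min(max(x0 + lx, 0), cols - 1);             // clamp instead of branching
        int gy = min(max(y0 + ly, 0), rows - 1);
        tile[ly][lx] = img[gy * pitch + gx];
    }
    __syncthreads();

    // Copy the interior back out so the sketch has an observable effect.
    int gx = blockIdx.x * 16 + threadIdx.x;
    int gy = blockIdx.y * 16 + threadIdx.y;
    if (gx < cols && gy < rows)
        out[gy * outPitch + gx] = tile[threadIdx.y + border][threadIdx.x + border];
}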
edcded76681042f96bab49f9e60dd2e53fb7bd58.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "findID.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *a = NULL; hipMalloc(&a, XSIZE*YSIZE); int n = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( findID), dim3(gridBlock),dim3(threadBlock), 0, 0, a,n); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( findID), dim3(gridBlock),dim3(threadBlock), 0, 0, a,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( findID), dim3(gridBlock),dim3(threadBlock), 0, 0, a,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
edcded76681042f96bab49f9e60dd2e53fb7bd58.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "findID.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *a = NULL; cudaMalloc(&a, XSIZE*YSIZE); int n = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); findID<<<gridBlock,threadBlock>>>(a,n); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { findID<<<gridBlock,threadBlock>>>(a,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { findID<<<gridBlock,threadBlock>>>(a,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
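The auto-generated harness above takes its end timestamp right after enqueueing 1000 launches, without synchronizing, so some of the GPU work can still be in flight when the clock is read, and the allocation size omits sizeof(double). The following is a hedged sketch of event-based timing of a batch of launches; the kernel here is a stand-in, not the benchmarked findID.

#include <cstdio>
#include <cuda_runtime.h>

// Stand-in kernel so the sketch is self-contained.
__global__ void dummyKernel(double* a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) a[i] = (double)i;
}

int main() {
    const int n = 1 << 20;
    double* a = nullptr;
    cudaMalloc((void**)&a, n * sizeof(double));   // element size included in the byte count

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    dim3 block(256), grid((n + 255) / 256);
    dummyKernel<<<grid, block>>>(a, n);           // warm-up launch
    cudaDeviceSynchronize();

    cudaEventRecord(start);
    for (int i = 0; i < 1000; ++i) dummyKernel<<<grid, block>>>(a, n);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);                   // wait for the GPU before reading the timer

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("1000 launches took %f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(a);
    return 0;
}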
a9ca0c59aac13a7150b29d6da77f8558b719188a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __global__ void global_scan(float* d_out,float* d_in){ int idx = threadIdx.x; float out = 0.00f; d_out[idx] = d_in[idx]; __syncthreads(); for(int interpre=1;interpre<sizeof(d_in);interpre*=2){ if(idx-interpre>=0){ out = d_out[idx]+d_out[idx-interpre]; } __syncthreads(); if(idx-interpre>=0){ d_out[idx] = out; out = 0.00f; } } } int main(int argc,char** argv){ const int ARRAY_SIZE = 8; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float); // generate the input array on the host float h_in[ARRAY_SIZE]; for(int i=0;i<ARRAY_SIZE;i++){ h_in[i] = float(i); } float h_out[ARRAY_SIZE]; // declare GPU memory pointers float* d_in; float* d_out; // allocate GPU memory hipMalloc((void**) &d_in,ARRAY_BYTES); hipMalloc((void**) &d_out,ARRAY_BYTES); // transfer the array to GPU hipMemcpy(d_in,h_in,ARRAY_BYTES,hipMemcpyHostToDevice); // launch the kernel hipLaunchKernelGGL(( global_scan), dim3(1),dim3(ARRAY_SIZE), 0, 0, d_out,d_in); // copy back the result array to the GPU hipMemcpy(h_out,d_out,ARRAY_BYTES,hipMemcpyDeviceToHost); // print out the resulting array for(int i=0;i<ARRAY_SIZE;i++){ printf("%f",h_out[i]); printf(((i%4) != 3) ? "\t" : "\n"); } // free GPU memory allocation hipFree(d_in); hipFree(d_out); return 0; }
a9ca0c59aac13a7150b29d6da77f8558b719188a.cu
#include <stdio.h> __global__ void global_scan(float* d_out,float* d_in){ int idx = threadIdx.x; float out = 0.00f; d_out[idx] = d_in[idx]; __syncthreads(); for(int interpre=1;interpre<sizeof(d_in);interpre*=2){ if(idx-interpre>=0){ out = d_out[idx]+d_out[idx-interpre]; } __syncthreads(); if(idx-interpre>=0){ d_out[idx] = out; out = 0.00f; } } } int main(int argc,char** argv){ const int ARRAY_SIZE = 8; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float); // generate the input array on the host float h_in[ARRAY_SIZE]; for(int i=0;i<ARRAY_SIZE;i++){ h_in[i] = float(i); } float h_out[ARRAY_SIZE]; // declare GPU memory pointers float* d_in; float* d_out; // allocate GPU memory cudaMalloc((void**) &d_in,ARRAY_BYTES); cudaMalloc((void**) &d_out,ARRAY_BYTES); // transfer the array to GPU cudaMemcpy(d_in,h_in,ARRAY_BYTES,cudaMemcpyHostToDevice); // launch the kernel global_scan<<<1,ARRAY_SIZE>>>(d_out,d_in); // copy back the result array to the GPU cudaMemcpy(h_out,d_out,ARRAY_BYTES,cudaMemcpyDeviceToHost); // print out the resulting array for(int i=0;i<ARRAY_SIZE;i++){ printf("%f",h_out[i]); printf(((i%4) != 3) ? "\t" : "\n"); } // free GPU memory allocation cudaFree(d_in); cudaFree(d_out); return 0; }
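In both files above the scan loop is bounded by sizeof(d_in), which is the size of a pointer rather than the element count; it happens to equal 8 on a 64-bit build, so the ARRAY_SIZE == 8 case works only by coincidence. There is also no barrier between the write of one pass and the read of the next, and the comment about copying the result back says "to the GPU" where it means the host. Below is a hedged sketch of the same Hillis-Steele style in-block inclusive scan with the length passed explicitly (illustrative, not a drop-in patch for that file).

#include <cuda_runtime.h>

// Hillis-Steele inclusive scan over a single block; n is the element count,
// passed explicitly instead of taking sizeof() of a pointer.
// Intended launch: global_scan_fixed<<<1, n>>>(d_out, d_in, n);
__global__ void global_scan_fixed(float* d_out, const float* d_in, int n) {
    int idx = threadIdx.x;                 // one thread per element
    d_out[idx] = d_in[idx];
    __syncthreads();
    for (int stride = 1; stride < n; stride *= 2) {
        float addend = (idx >= stride) ? d_out[idx - stride] : 0.0f;
        __syncthreads();                   // everyone has read before anyone writes
        if (idx >= stride) d_out[idx] += addend;
        __syncthreads();                   // writes finished before the next pass reads
    }
}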
af468ad780357ee6c7f44745ade497deb6204f72.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <string.h> #include <math.h> #include <stdbool.h> #define BNUM 180 #define TNUM 1024 unsigned long long MakeNum(bool *number,unsigned long long size){ unsigned long long i,j,now=0; for(i=0;i<size;i++) number[i]=0; number[2]=1;number[3]=1; for(i=5,j=2;i<size;i+=j,j=6-j){ number[i]=1; now++; }//printf("%llu %llu\n",now,number[now-1]); return size; //number[0] = 2; } __global__ void running(bool *deviceArr,unsigned long long arrSize){ int BID=blockIdx.x; // block index int TID=threadIdx.x; // thread index //int n=blockDim.x; // number of threads in a block //int x=BID*n+TID; // position of this thread in the array //deviceArr[arrSize-1]++; unsigned long long i,j,k; for(i = BID * TNUM + TID; i < arrSize;i += BNUM * TNUM){ if(deviceArr[i]==1){ //for(j=2;i*j<arrSize;j++) for (j = 5,k=2; j * i < arrSize;j+=k,k=6-k) { deviceArr[i * j] = 0; } } } }; int main(){ unsigned long long MAXNUM=16000000000; while(1){ bool *arr; bool *hostArr; bool *deviceArr; unsigned long long i,arrSize,temp,biggest; float dTime; hipEvent_t start,end; arr = (bool *)malloc(MAXNUM*sizeof(bool)); hostArr = (bool *)malloc(MAXNUM*sizeof(bool)); hipEventCreate(&start); hipEventCreate(&end); arrSize=MakeNum(arr,MAXNUM); //printf("%llu %llu\n",arrSize,arr[arrSize-1]); /*for(i=0;i<arrSize;i++) if(arr[i]==1) printf("%llu ",i); printf("\n",arr[i]);*/ hipMalloc((void**) &deviceArr, MAXNUM*sizeof(bool)); hipMemcpy(deviceArr,arr,sizeof(bool)*MAXNUM,hipMemcpyHostToDevice); hipEventRecord(start, 0); hipLaunchKernelGGL(( running), dim3(BNUM),dim3(TNUM), 0, 0, deviceArr,arrSize); hipEventRecord(end, 0); hipEventSynchronize(end); hipMemcpy(hostArr, deviceArr, MAXNUM*sizeof(bool), hipMemcpyDeviceToHost); temp=0; for(i=0;i<arrSize;i++){ if(hostArr[i]==1){ temp++; biggest=i; //printf("%llu ",i); } }/*printf("\n");*/ hipEventElapsedTime(&dTime, start, end); printf("2~%llu num:%llu biggest:%llu time:%f ms.\n",MAXNUM,temp,biggest,dTime); hipFree(deviceArr); free(arr);free(hostArr); MAXNUM+=100000000; //if(MAXNUM>=16505000000)break; } //return 0; }
af468ad780357ee6c7f44745ade497deb6204f72.cu
#include <stdio.h> #include <cuda.h> #include <string.h> #include <math.h> #include <stdbool.h> #define BNUM 180 #define TNUM 1024 unsigned long long MakeNum(bool *number,unsigned long long size){ unsigned long long i,j,now=0; for(i=0;i<size;i++) number[i]=0; number[2]=1;number[3]=1; for(i=5,j=2;i<size;i+=j,j=6-j){ number[i]=1; now++; }//printf("%llu %llu\n",now,number[now-1]); return size; //number[0] = 2; } __global__ void running(bool *deviceArr,unsigned long long arrSize){ int BID=blockIdx.x; //block index int TID=threadIdx.x; //thread index //int n=blockDim.x; //number of threads in a block //int x=BID*n+TID; //position of this thread in the array //deviceArr[arrSize-1]++; unsigned long long i,j,k; for(i = BID * TNUM + TID; i < arrSize;i += BNUM * TNUM){ if(deviceArr[i]==1){ //for(j=2;i*j<arrSize;j++) for (j = 5,k=2; j * i < arrSize;j+=k,k=6-k) { deviceArr[i * j] = 0; } } } }; int main(){ unsigned long long MAXNUM=16000000000; while(1){ bool *arr; bool *hostArr; bool *deviceArr; unsigned long long i,arrSize,temp,biggest; float dTime; cudaEvent_t start,end; arr = (bool *)malloc(MAXNUM*sizeof(bool)); hostArr = (bool *)malloc(MAXNUM*sizeof(bool)); cudaEventCreate(&start); cudaEventCreate(&end); arrSize=MakeNum(arr,MAXNUM); //printf("%llu %llu\n",arrSize,arr[arrSize-1]); /*for(i=0;i<arrSize;i++) if(arr[i]==1) printf("%llu ",i); printf("\n",arr[i]);*/ cudaMalloc((void**) &deviceArr, MAXNUM*sizeof(bool)); cudaMemcpy(deviceArr,arr,sizeof(bool)*MAXNUM,cudaMemcpyHostToDevice); cudaEventRecord(start, 0); running<<<BNUM,TNUM>>>(deviceArr,arrSize); cudaEventRecord(end, 0); cudaEventSynchronize(end); cudaMemcpy(hostArr, deviceArr, MAXNUM*sizeof(bool), cudaMemcpyDeviceToHost); temp=0; for(i=0;i<arrSize;i++){ if(hostArr[i]==1){ temp++; biggest=i; //printf("%llu ",i); } }/*printf("\n");*/ cudaEventElapsedTime(&dTime, start, end); printf("2~%llu num:%llu biggest:%llu time:%f ms.\n",MAXNUM,temp,biggest,dTime); cudaFree(deviceArr); free(arr);free(hostArr); MAXNUM+=100000000; //if(MAXNUM>=16505000000)break; } //return 0; }
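The sieve above asks cudaMalloc for roughly 16 GB and never inspects the return code, so a failed allocation would only show up as wrong output. Here is a hedged sketch of the usual error-checking wrapper; the macro name is an illustrative choice.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Abort with file and line when a CUDA runtime call fails.
#define CUDA_CHECK(call)                                                    \
    do {                                                                    \
        cudaError_t err__ = (call);                                         \
        if (err__ != cudaSuccess) {                                         \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                     \
                    cudaGetErrorString(err__), __FILE__, __LINE__);         \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)

int main() {
    bool* deviceArr = nullptr;
    // A 16 GB request like the one above may well fail; the check makes that visible.
    CUDA_CHECK(cudaMalloc((void**)&deviceArr, 16000000000ull * sizeof(bool)));
    CUDA_CHECK(cudaFree(deviceArr));
    return 0;
}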
b51d59add32414e34283295f2192e149e69b36e1.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <ctime> #include <cstdlib> #include <cstdint> #include <cmath> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/functional.h> #include <thrust/sequence.h> #include <thrust/transform_reduce.h> #include <thrust/pair.h> #define rnd( x ) (x * std::rand() / RAND_MAX) #define SPHERES 20 #define INF 2e10f #define DIM 2048 struct Sphere { float r, g, b; float radius; float x, y, z; __host__ __device__ Sphere() : r(0), g(0), b(0), radius(0), x(0), y(0), z(0) {} __host__ __device__ Sphere(const Sphere& s) { this->r = s.r; this->g = s.g; this->b = s.b; this->radius = s.radius; this->x = s.x; this->y = s.y; this->z = s.z; } __device__ float hit(const float& ox, const float& oy, float& n) const { float dx = ox - x; float dy = oy - y; if (dx * dx + dy * dy < radius * radius) { float dz = std::sqrt(radius * radius - dx * dx - dy * dy); n = dz / radius; return dz + z; } return -INF; } }; __host__ Sphere generate_random_sphere() { Sphere s = Sphere(); s.r = rnd(1.0f); s.g = rnd(1.0f); s.b = rnd(1.0f); s.x = rnd(2000.0f) - 1000; s.y = rnd(2000.0f) - 1000; s.z = rnd(2000.0f) - 1000; s.radius = rnd(200.0f) + 40; return s; } struct Color { uint8_t r, g, b, a; __host__ __device__ Color() : r(0), g(0), b(0), a(255) {} __host__ __device__ Color(uint8_t _r, uint8_t _g, uint8_t _b, uint8_t _a = 255) : r(_r), g(_g), b(_b), a(_a) {} __host__ __device__ Color(const Color& c) { this->r = c.r; this->g = c.g; this->b = c.b; this->a = c.a; } }; struct color_from_sphere : public thrust::unary_function<const Sphere&, thrust::pair<Color, float>> { int ox; int oy; __host__ __device__ color_from_sphere(const int& _ox, const int& _oy) : ox(_ox), oy(_oy) {} __device__ thrust::pair<Color, float> operator() (const Sphere& s) { float fscale; float t = s.hit(ox, oy, fscale); Color color(0, 0, 0, 255); if (t > -INF) { color.r = (uint8_t)(s.r * fscale * 255); color.g = (uint8_t)(s.g * fscale * 255); color.b = (uint8_t)(s.b * fscale * 255); } return thrust::make_pair<Color, float>(color, t); } }; struct get_color_max_distance : public thrust::binary_function<thrust::pair<Color, float>, thrust::pair<Color, float>, thrust::pair<Color, float>> { __device__ thrust::pair<Color, float> operator() (thrust::pair<Color, float> const& lhs, thrust::pair<Color, float> const& rhs) { if (lhs.second > rhs.second) { return lhs; } else { return rhs; } } }; struct determine_color_functor : public thrust::unary_function<const int&, Color> { thrust::device_vector<Sphere>::iterator start; thrust::device_vector<Sphere>::iterator end; determine_color_functor(thrust::device_vector<Sphere>::iterator _start, thrust::device_vector<Sphere>::iterator _end) : start(_start), end(_end) {} __device__ Color operator() (const int& offset) const { int x = offset % DIM; int y = (offset - x) / DIM; float ox = x - DIM / 2.0; float oy = y - DIM / 2.0; thrust::pair<Color, float> ray_color; ray_color.first = Color(0, 0, 0, 255); ray_color.second = -INF; // Spheres -> transform to Color, distance(T) -> reduce to Color thrust::pair<Color, float> result = thrust::transform_reduce(thrust::device, start, end, color_from_sphere(ox, oy), ray_color, get_color_max_distance()); return result.first; } }; void write_ppm(thrust::host_vector<Color> const& bitmap, const int& xdim, const int& ydim, FILE* fp) { fprintf(fp, "P3\n"); fprintf(fp, "%d %d\n", xdim, ydim); fprintf(fp, 
"255\n"); for (int y = 0; y < ydim; ++y) { for (int x = 0; x < xdim; ++x) { int i = x + y * xdim; fprintf(fp, "%d %d %d ", bitmap[i].r, bitmap[i].g, bitmap[i].b); } fprintf(fp, "\n"); } } int main(int argc, char *argv[]) { if (argc != 2) { exit(-1); } FILE* fp = fopen(argv[1], "w"); std::srand((unsigned int)std::time(NULL)); thrust::host_vector<Color> image(DIM * DIM); thrust::host_vector<Sphere> spheres(SPHERES); thrust::generate(spheres.begin(), spheres.end(), generate_random_sphere); thrust::device_vector<Sphere> device_spheres = spheres; thrust::device_vector<int> offset_equence(DIM * DIM); thrust::device_vector<Color> bitmap(image.size()); thrust::sequence(offset_equence.begin(), offset_equence.end()); clock_t startTime = clock(); thrust::transform(offset_equence.cbegin(), offset_equence.cend(), bitmap.begin(), determine_color_functor(device_spheres.begin(), device_spheres.end())); hipDeviceSynchronize(); clock_t endTime = clock(); write_ppm(bitmap, DIM, DIM, fp); fclose(fp); double execute_time = (double)(endTime - startTime) / CLOCKS_PER_SEC; std::cout << "Thrust ray tracing: " << std::fixed << execute_time << " sec" << std::endl; return 0; }
b51d59add32414e34283295f2192e149e69b36e1.cu
#include <iostream> #include <ctime> #include <cstdlib> #include <cstdint> #include <cmath> #include <cuda.h> #include <cuda_runtime.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/functional.h> #include <thrust/sequence.h> #include <thrust/transform_reduce.h> #include <thrust/pair.h> #define rnd( x ) (x * std::rand() / RAND_MAX) #define SPHERES 20 #define INF 2e10f #define DIM 2048 struct Sphere { float r, g, b; float radius; float x, y, z; __host__ __device__ Sphere() : r(0), g(0), b(0), radius(0), x(0), y(0), z(0) {} __host__ __device__ Sphere(const Sphere& s) { this->r = s.r; this->g = s.g; this->b = s.b; this->radius = s.radius; this->x = s.x; this->y = s.y; this->z = s.z; } __device__ float hit(const float& ox, const float& oy, float& n) const { float dx = ox - x; float dy = oy - y; if (dx * dx + dy * dy < radius * radius) { float dz = std::sqrt(radius * radius - dx * dx - dy * dy); n = dz / radius; return dz + z; } return -INF; } }; __host__ Sphere generate_random_sphere() { Sphere s = Sphere(); s.r = rnd(1.0f); s.g = rnd(1.0f); s.b = rnd(1.0f); s.x = rnd(2000.0f) - 1000; s.y = rnd(2000.0f) - 1000; s.z = rnd(2000.0f) - 1000; s.radius = rnd(200.0f) + 40; return s; } struct Color { uint8_t r, g, b, a; __host__ __device__ Color() : r(0), g(0), b(0), a(255) {} __host__ __device__ Color(uint8_t _r, uint8_t _g, uint8_t _b, uint8_t _a = 255) : r(_r), g(_g), b(_b), a(_a) {} __host__ __device__ Color(const Color& c) { this->r = c.r; this->g = c.g; this->b = c.b; this->a = c.a; } }; struct color_from_sphere : public thrust::unary_function<const Sphere&, thrust::pair<Color, float>> { int ox; int oy; __host__ __device__ color_from_sphere(const int& _ox, const int& _oy) : ox(_ox), oy(_oy) {} __device__ thrust::pair<Color, float> operator() (const Sphere& s) { float fscale; float t = s.hit(ox, oy, fscale); Color color(0, 0, 0, 255); if (t > -INF) { color.r = (uint8_t)(s.r * fscale * 255); color.g = (uint8_t)(s.g * fscale * 255); color.b = (uint8_t)(s.b * fscale * 255); } return thrust::make_pair<Color, float>(color, t); } }; struct get_color_max_distance : public thrust::binary_function<thrust::pair<Color, float>, thrust::pair<Color, float>, thrust::pair<Color, float>> { __device__ thrust::pair<Color, float> operator() (thrust::pair<Color, float> const& lhs, thrust::pair<Color, float> const& rhs) { if (lhs.second > rhs.second) { return lhs; } else { return rhs; } } }; struct determine_color_functor : public thrust::unary_function<const int&, Color> { thrust::device_vector<Sphere>::iterator start; thrust::device_vector<Sphere>::iterator end; determine_color_functor(thrust::device_vector<Sphere>::iterator _start, thrust::device_vector<Sphere>::iterator _end) : start(_start), end(_end) {} __device__ Color operator() (const int& offset) const { int x = offset % DIM; int y = (offset - x) / DIM; float ox = x - DIM / 2.0; float oy = y - DIM / 2.0; thrust::pair<Color, float> ray_color; ray_color.first = Color(0, 0, 0, 255); ray_color.second = -INF; // Spheres -> transform to Color, distance(T) -> reduce to Color thrust::pair<Color, float> result = thrust::transform_reduce(thrust::device, start, end, color_from_sphere(ox, oy), ray_color, get_color_max_distance()); return result.first; } }; void write_ppm(thrust::host_vector<Color> const& bitmap, const int& xdim, const int& ydim, FILE* fp) { fprintf(fp, "P3\n"); fprintf(fp, "%d %d\n", xdim, ydim); fprintf(fp, "255\n"); for (int y = 0; y < ydim; ++y) { for (int x = 0; x < xdim; ++x) 
{ int i = x + y * xdim; fprintf(fp, "%d %d %d ", bitmap[i].r, bitmap[i].g, bitmap[i].b); } fprintf(fp, "\n"); } } int main(int argc, char *argv[]) { if (argc != 2) { exit(-1); } FILE* fp = fopen(argv[1], "w"); std::srand((unsigned int)std::time(NULL)); thrust::host_vector<Color> image(DIM * DIM); thrust::host_vector<Sphere> spheres(SPHERES); thrust::generate(spheres.begin(), spheres.end(), generate_random_sphere); thrust::device_vector<Sphere> device_spheres = spheres; thrust::device_vector<int> offset_equence(DIM * DIM); thrust::device_vector<Color> bitmap(image.size()); thrust::sequence(offset_equence.begin(), offset_equence.end()); clock_t startTime = clock(); thrust::transform(offset_equence.cbegin(), offset_equence.cend(), bitmap.begin(), determine_color_functor(device_spheres.begin(), device_spheres.end())); cudaThreadSynchronize(); clock_t endTime = clock(); write_ppm(bitmap, DIM, DIM, fp); fclose(fp); double execute_time = (double)(endTime - startTime) / CLOCKS_PER_SEC; std::cout << "Thrust ray tracing: " << std::fixed << execute_time << " sec" << std::endl; return 0; }
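determine_color_functor above folds all spheres into one (Color, depth) pair per pixel with a single thrust::transform_reduce call. The following is a hedged, stripped-down example of the same map-then-reduce pattern, computing a sum of squares instead of a ray hit.

#include <iostream>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/transform_reduce.h>

// Unary "map" step of the pattern.
struct square {
    __host__ __device__ float operator()(float x) const { return x * x; }
};

int main() {
    thrust::device_vector<float> v(4);
    v[0] = 1.0f; v[1] = 2.0f; v[2] = 3.0f; v[3] = 4.0f;
    // Map every element through square(), then fold with plus<float>() starting from 0.
    float ss = thrust::transform_reduce(v.begin(), v.end(), square(), 0.0f, thrust::plus<float>());
    std::cout << "sum of squares = " << ss << std::endl;   // prints 30
    return 0;
}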
2200ca782c1f843f5cbe4007d136ac41ffbd94a4.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "rinit.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *init = NULL; hipMalloc(&init, XSIZE*YSIZE); const unsigned int *fsum = NULL; hipMalloc(&fsum, XSIZE*YSIZE); const float *ncrs = NULL; hipMalloc(&ncrs, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( rinit), dim3(gridBlock),dim3(threadBlock), 0, 0, init,fsum,ncrs); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( rinit), dim3(gridBlock),dim3(threadBlock), 0, 0, init,fsum,ncrs); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( rinit), dim3(gridBlock),dim3(threadBlock), 0, 0, init,fsum,ncrs); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
2200ca782c1f843f5cbe4007d136ac41ffbd94a4.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "rinit.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *init = NULL; cudaMalloc(&init, XSIZE*YSIZE); const unsigned int *fsum = NULL; cudaMalloc(&fsum, XSIZE*YSIZE); const float *ncrs = NULL; cudaMalloc(&ncrs, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); rinit<<<gridBlock,threadBlock>>>(init,fsum,ncrs); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { rinit<<<gridBlock,threadBlock>>>(init,fsum,ncrs); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { rinit<<<gridBlock,threadBlock>>>(init,fsum,ncrs); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
55cff511af993cddbebf52fa675c4daadeed6211.hip
// !!! This is a file automatically generated by hipify!!! #ifdef SPIRIT_USE_CUDA #include <engine/FFT.hpp> #include <hipfft.h> namespace Engine { namespace FFT { // Dont need the single transforms because cuFFT can do real batch transforms void Four_3D( FFT_cfg cfg, FFT_real_type * in, FFT_cpx_type * out ) { std::cerr << "NOT IMPLEMENTED FOR cuFFT" << std::endl; } void iFour_3D( FFT_cfg cfg, FFT_cpx_type * in, FFT_real_type * out ) { std::cerr << "NOT IMPLEMENTED FOR cuFFT" << std::endl; } void batch_Four_3D( FFT_Plan & plan ) { auto res = hipfftExecR2C( plan.cfg, plan.real_ptr.data(), plan.cpx_ptr.data() ); if( res != HIPFFT_SUCCESS ) { Log( Utility::Log_Level::Error, Utility::Log_Sender::All, fmt::format( "hipfftExecR2C failed with error: {}", res ) ); } hipDeviceSynchronize(); } void batch_iFour_3D( FFT_Plan & plan ) { auto res = hipfftExecC2R( plan.cfg, plan.cpx_ptr.data(), plan.real_ptr.data() ); if( res != HIPFFT_SUCCESS ) { Log( Utility::Log_Level::Error, Utility::Log_Sender::All, fmt::format( "hipfftExecC2R failed with error: {}", res ) ); } hipDeviceSynchronize(); } void FFT_Plan::Create_Configuration() { int rank = this->dims.size(); int * n = this->dims.data(); int n_transforms = this->n_transforms; int istride = n_transforms, ostride = n_transforms; int *inembed = n, *onembed = n; int size = 1; for( auto k : dims ) { size *= k; } int idist = 1, odist = 1; if( this->inverse == false ) { auto res = hipfftPlanMany( &this->cfg, rank, n, inembed, istride, idist, onembed, ostride, odist, HIPFFT_R2C, n_transforms ); if( res != HIPFFT_SUCCESS ) { Log( Utility::Log_Level::Error, Utility::Log_Sender::All, fmt::format( "hipfftPlanMany failed with error: {}", res ) ); } } else { auto res = hipfftPlanMany( &this->cfg, rank, n, inembed, istride, idist, onembed, ostride, odist, HIPFFT_C2R, n_transforms ); if( res != HIPFFT_SUCCESS ) { Log( Utility::Log_Level::Error, Utility::Log_Sender::All, fmt::format( "hipfftPlanMany failed with error: {}", res ) ); } } } void FFT_Plan::Free_Configuration() { auto res = hipfftDestroy( this->cfg ); if( res != HIPFFT_SUCCESS ) { Log( Utility::Log_Level::Error, Utility::Log_Sender::All, fmt::format( "hipfftDestroy failed with error: {}", res ) ); } } } // namespace FFT } // namespace Engine #endif
55cff511af993cddbebf52fa675c4daadeed6211.cu
#ifdef SPIRIT_USE_CUDA #include <engine/FFT.hpp> #include <cufft.h> namespace Engine { namespace FFT { // Dont need the single transforms because cuFFT can do real batch transforms void Four_3D( FFT_cfg cfg, FFT_real_type * in, FFT_cpx_type * out ) { std::cerr << "NOT IMPLEMENTED FOR cuFFT" << std::endl; } void iFour_3D( FFT_cfg cfg, FFT_cpx_type * in, FFT_real_type * out ) { std::cerr << "NOT IMPLEMENTED FOR cuFFT" << std::endl; } void batch_Four_3D( FFT_Plan & plan ) { auto res = cufftExecR2C( plan.cfg, plan.real_ptr.data(), plan.cpx_ptr.data() ); if( res != CUFFT_SUCCESS ) { Log( Utility::Log_Level::Error, Utility::Log_Sender::All, fmt::format( "cufftExecR2C failed with error: {}", res ) ); } cudaDeviceSynchronize(); } void batch_iFour_3D( FFT_Plan & plan ) { auto res = cufftExecC2R( plan.cfg, plan.cpx_ptr.data(), plan.real_ptr.data() ); if( res != CUFFT_SUCCESS ) { Log( Utility::Log_Level::Error, Utility::Log_Sender::All, fmt::format( "cufftExecC2R failed with error: {}", res ) ); } cudaDeviceSynchronize(); } void FFT_Plan::Create_Configuration() { int rank = this->dims.size(); int * n = this->dims.data(); int n_transforms = this->n_transforms; int istride = n_transforms, ostride = n_transforms; int *inembed = n, *onembed = n; int size = 1; for( auto k : dims ) { size *= k; } int idist = 1, odist = 1; if( this->inverse == false ) { auto res = cufftPlanMany( &this->cfg, rank, n, inembed, istride, idist, onembed, ostride, odist, CUFFT_R2C, n_transforms ); if( res != CUFFT_SUCCESS ) { Log( Utility::Log_Level::Error, Utility::Log_Sender::All, fmt::format( "cufftPlanMany failed with error: {}", res ) ); } } else { auto res = cufftPlanMany( &this->cfg, rank, n, inembed, istride, idist, onembed, ostride, odist, CUFFT_C2R, n_transforms ); if( res != CUFFT_SUCCESS ) { Log( Utility::Log_Level::Error, Utility::Log_Sender::All, fmt::format( "cufftPlanMany failed with error: {}", res ) ); } } } void FFT_Plan::Free_Configuration() { auto res = cufftDestroy( this->cfg ); if( res != CUFFT_SUCCESS ) { Log( Utility::Log_Level::Error, Utility::Log_Sender::All, fmt::format( "cufftDestroy failed with error: {}", res ) ); } } } // namespace FFT } // namespace Engine #endif
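FFT_Plan::Create_Configuration above drives cufftPlanMany with an interleaved batch layout (istride equal to the number of transforms, idist of 1). As a hedged, stand-alone sketch, here is a batched 1-D real-to-complex plan with the simpler contiguous packing; the sizes are illustrative and the program has to be linked against cuFFT (-lcufft).

#include <cstdio>
#include <cuda_runtime.h>
#include <cufft.h>

int main() {
    const int n = 256;        // length of each transform
    const int batch = 8;      // number of independent transforms
    cufftReal*    in  = nullptr;
    cufftComplex* out = nullptr;
    cudaMalloc((void**)&in,  sizeof(cufftReal)    * n * batch);
    cudaMalloc((void**)&out, sizeof(cufftComplex) * (n / 2 + 1) * batch);  // R2C keeps n/2+1 bins

    cufftHandle plan;
    int dims[1] = { n };
    // NULL inembed/onembed selects the default contiguous packing of the batch.
    cufftResult res = cufftPlanMany(&plan, 1, dims,
                                    NULL, 1, n,
                                    NULL, 1, n / 2 + 1,
                                    CUFFT_R2C, batch);
    if (res == CUFFT_SUCCESS) {
        cufftExecR2C(plan, in, out);
        cudaDeviceSynchronize();
        cufftDestroy(plan);
    } else {
        fprintf(stderr, "cufftPlanMany failed: %d\n", (int)res);
    }
    cudaFree(in);
    cudaFree(out);
    return 0;
}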
4144ce0de4753010ed300fc48f21a855f5e4ed95.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * CovarianceMatrixKernels.cu * * Created on: 23/09/2014 * Author: vincentvillani */ #include "dsp/CovarianceMatrixKernels.h" __global__ void outerProductKernel(float* result, unsigned int resultLength, float* vec, unsigned int vecLength) { for(unsigned int absoluteThreadIdx = blockDim.x * blockIdx.x + threadIdx.x; absoluteThreadIdx < resultLength; absoluteThreadIdx += gridDim.x * blockDim.x) { unsigned int row = absoluteThreadIdx / vecLength; unsigned int col = absoluteThreadIdx % vecLength; if(row > col) { row = vecLength - row; col = row + col; } //compute the index int index = (row * vecLength + col) - ((row * (row + 1)) / 2); //do the outer product calculation and add it too the correct element result[index] += vec[row] * vec[col]; } } //(d_amps, ampsLength, d_hits, stokesLength) __global__ void meanStokesKernel(float* d_amps, unsigned int ampsLength, const unsigned int* d_hits, unsigned int stokesLength) { unsigned int absoluteThreadIdx = blockDim.x * blockIdx.x + threadIdx.x; if(absoluteThreadIdx >= ampsLength) return; unsigned int hitVal = d_hits[ absoluteThreadIdx / stokesLength ]; d_amps[absoluteThreadIdx] /= hitVal; } __global__ void applyScaleKernel(float* amps, unsigned int ampsLength, double scaleFactor) { unsigned int absoluteThreadIdx = blockDim.x * blockIdx.x + threadIdx.x; if(absoluteThreadIdx >= ampsLength) return; amps[absoluteThreadIdx] /= scaleFactor; } //----PHASE SERIES COMBINE STUFF---- //Kernel for generically adding things on the GPU __global__ void genericAddKernel(unsigned int n, float* original, const float* add) { for(unsigned int absIdx = blockDim.x * blockIdx.x + threadIdx.x; absIdx < n; absIdx += gridDim.x * blockDim.x) { original[absIdx] += add[absIdx]; } } //Kernel for generically adding things on the GPU __global__ void genericAddKernel(uint64_t n, float* original, const float* add) { for(unsigned int absIdx = blockDim.x * blockIdx.x + threadIdx.x; absIdx < n; absIdx += gridDim.x * blockDim.x) { original[absIdx] += add[absIdx]; } } //Kernel for generically adding things on the GPU __global__ void genericAddKernel(unsigned int n, unsigned int* original, const unsigned int* add) { for(unsigned int absIdx = blockDim.x * blockIdx.x + threadIdx.x; absIdx < n; absIdx += gridDim.x * blockDim.x) { original[absIdx] += add[absIdx]; //printf("AddVal: %u\n", add[absIdx]); } } __global__ void genericSubtractionKernel(unsigned int n, float* original, const float* sub) { for(unsigned int absIdx = blockDim.x * blockIdx.x + threadIdx.x; absIdx < n; absIdx += gridDim.x * blockDim.x) { original[absIdx] -= sub[absIdx]; } } __global__ void genericDivideKernel(unsigned int n, float* d_numerators, unsigned int denominator) { for(unsigned int absIdx = blockDim.x * blockIdx.x + threadIdx.x; absIdx < n; absIdx += gridDim.x * blockDim.x) { d_numerators[absIdx] /= denominator; } } __global__ void checkForZeroesKernel(const unsigned int* d_hits, unsigned int hitsLength, bool* d_zeroes) { for(unsigned int absIdx = blockDim.x * blockIdx.x + threadIdx.x; absIdx < hitsLength; absIdx += gridDim.x * blockDim.x) { if(d_hits[absIdx] == 0) { //printf("ZERO KERNEL VAL: %u\n", d_hits[absIdx]); *d_zeroes = true; } } }
4144ce0de4753010ed300fc48f21a855f5e4ed95.cu
/* * CovarianceMatrixKernels.cu * * Created on: 23/09/2014 * Author: vincentvillani */ #include "dsp/CovarianceMatrixKernels.h" __global__ void outerProductKernel(float* result, unsigned int resultLength, float* vec, unsigned int vecLength) { for(unsigned int absoluteThreadIdx = blockDim.x * blockIdx.x + threadIdx.x; absoluteThreadIdx < resultLength; absoluteThreadIdx += gridDim.x * blockDim.x) { unsigned int row = absoluteThreadIdx / vecLength; unsigned int col = absoluteThreadIdx % vecLength; if(row > col) { row = vecLength - row; col = row + col; } //compute the index int index = (row * vecLength + col) - ((row * (row + 1)) / 2); //do the outer product calculation and add it too the correct element result[index] += vec[row] * vec[col]; } } //(d_amps, ampsLength, d_hits, stokesLength) __global__ void meanStokesKernel(float* d_amps, unsigned int ampsLength, const unsigned int* d_hits, unsigned int stokesLength) { unsigned int absoluteThreadIdx = blockDim.x * blockIdx.x + threadIdx.x; if(absoluteThreadIdx >= ampsLength) return; unsigned int hitVal = d_hits[ absoluteThreadIdx / stokesLength ]; d_amps[absoluteThreadIdx] /= hitVal; } __global__ void applyScaleKernel(float* amps, unsigned int ampsLength, double scaleFactor) { unsigned int absoluteThreadIdx = blockDim.x * blockIdx.x + threadIdx.x; if(absoluteThreadIdx >= ampsLength) return; amps[absoluteThreadIdx] /= scaleFactor; } //----PHASE SERIES COMBINE STUFF---- //Kernel for generically adding things on the GPU __global__ void genericAddKernel(unsigned int n, float* original, const float* add) { for(unsigned int absIdx = blockDim.x * blockIdx.x + threadIdx.x; absIdx < n; absIdx += gridDim.x * blockDim.x) { original[absIdx] += add[absIdx]; } } //Kernel for generically adding things on the GPU __global__ void genericAddKernel(uint64_t n, float* original, const float* add) { for(unsigned int absIdx = blockDim.x * blockIdx.x + threadIdx.x; absIdx < n; absIdx += gridDim.x * blockDim.x) { original[absIdx] += add[absIdx]; } } //Kernel for generically adding things on the GPU __global__ void genericAddKernel(unsigned int n, unsigned int* original, const unsigned int* add) { for(unsigned int absIdx = blockDim.x * blockIdx.x + threadIdx.x; absIdx < n; absIdx += gridDim.x * blockDim.x) { original[absIdx] += add[absIdx]; //printf("AddVal: %u\n", add[absIdx]); } } __global__ void genericSubtractionKernel(unsigned int n, float* original, const float* sub) { for(unsigned int absIdx = blockDim.x * blockIdx.x + threadIdx.x; absIdx < n; absIdx += gridDim.x * blockDim.x) { original[absIdx] -= sub[absIdx]; } } __global__ void genericDivideKernel(unsigned int n, float* d_numerators, unsigned int denominator) { for(unsigned int absIdx = blockDim.x * blockIdx.x + threadIdx.x; absIdx < n; absIdx += gridDim.x * blockDim.x) { d_numerators[absIdx] /= denominator; } } __global__ void checkForZeroesKernel(const unsigned int* d_hits, unsigned int hitsLength, bool* d_zeroes) { for(unsigned int absIdx = blockDim.x * blockIdx.x + threadIdx.x; absIdx < hitsLength; absIdx += gridDim.x * blockDim.x) { if(d_hits[absIdx] == 0) { //printf("ZERO KERNEL VAL: %u\n", d_hits[absIdx]); *d_zeroes = true; } } }
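outerProductKernel above stores element (row, col) of the upper triangle (row <= col) of a symmetric matrix at index = row * N + col - row * (row + 1) / 2, after mirroring indices that land below the diagonal. Here is a hedged host-side check that this packing hits each of the N * (N + 1) / 2 slots exactly once.

#include <cassert>
#include <cstdio>
#include <vector>

int main() {
    const int N = 7;                                   // vector length, illustrative
    std::vector<int> hits(N * (N + 1) / 2, 0);
    for (int row = 0; row < N; ++row) {
        for (int col = row; col < N; ++col) {
            int index = (row * N + col) - ((row * (row + 1)) / 2);
            assert(index >= 0 && index < (int)hits.size());
            ++hits[index];
        }
    }
    for (int h : hits) assert(h == 1);                 // every packed slot hit exactly once
    printf("packed upper-triangular indexing is a bijection for N = %d\n", N);
    return 0;
}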
c8f0efb57897cadb85d35620f714935d0b00fa1e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_initialise_chunk_kernel_yy; int xdim0_initialise_chunk_kernel_yy_h = -1; __constant__ int ydim0_initialise_chunk_kernel_yy; int ydim0_initialise_chunk_kernel_yy_h = -1; #undef OPS_ACC0 #define OPS_ACC0(x, y, z) \ (x + xdim0_initialise_chunk_kernel_yy * (y) + \ xdim0_initialise_chunk_kernel_yy * ydim0_initialise_chunk_kernel_yy * (z)) // user function __device__ void initialise_chunk_kernel_yy_gpu(int *yy, int *idx) { yy[OPS_ACC0(0, 0, 0)] = idx[1] - 2; } #undef OPS_ACC0 __global__ void ops_initialise_chunk_kernel_yy(int *__restrict arg0, int arg_idx0, int arg_idx1, int arg_idx2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; int arg_idx[3]; arg_idx[0] = arg_idx0 + idx_x; arg_idx[1] = arg_idx1 + idx_y; arg_idx[2] = arg_idx2 + idx_z; arg0 += idx_x * 0 * 1 + idx_y * 1 * 1 * xdim0_initialise_chunk_kernel_yy + idx_z * 0 * 1 * xdim0_initialise_chunk_kernel_yy * ydim0_initialise_chunk_kernel_yy; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { initialise_chunk_kernel_yy_gpu(arg0, arg_idx); } } // host stub function void ops_par_loop_initialise_chunk_kernel_yy(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1) { // Timing double t1, t2, c1, c2; ops_arg args[2] = {arg0, arg1}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 2, range, 47)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(47, "initialise_chunk_kernel_yy"); OPS_kernels[47].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int arg_idx[3]; #ifdef OPS_MPI arg_idx[0] = sb->decomp_disp[0] + start[0]; arg_idx[1] = sb->decomp_disp[1] + start[1]; arg_idx[2] = sb->decomp_disp[2] + start[2]; #else arg_idx[0] = start[0]; arg_idx[1] = start[1]; arg_idx[2] = start[2]; #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; if (xdim0 != xdim0_initialise_chunk_kernel_yy_h || ydim0 != ydim0_initialise_chunk_kernel_yy_h) { hipMemcpyToSymbol(xdim0_initialise_chunk_kernel_yy, &xdim0, sizeof(int)); xdim0_initialise_chunk_kernel_yy_h = xdim0; hipMemcpyToSymbol(ydim0_initialise_chunk_kernel_yy, &ydim0, sizeof(int)); ydim0_initialise_chunk_kernel_yy_h = ydim0; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = args[0].dat->elem_size; char *p_a[2]; // set up initial 
pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; ops_H_D_exchanges_device(args, 2); ops_halo_exchanges(args, 2, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[47].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_initialise_chunk_kernel_yy), dim3(grid), dim3(tblock), 0, 0, (int *)p_a[0], arg_idx[0], arg_idx[1], arg_idx[2], x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[47].time += t1 - t2; } ops_set_dirtybit_device(args, 2); ops_set_halo_dirtybit3(&args[0], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[47].mpi_time += t2 - t1; OPS_kernels[47].transfer += ops_compute_transfer(dim, start, end, &arg0); } }
c8f0efb57897cadb85d35620f714935d0b00fa1e.cu
// // auto-generated by ops.py // __constant__ int xdim0_initialise_chunk_kernel_yy; int xdim0_initialise_chunk_kernel_yy_h = -1; __constant__ int ydim0_initialise_chunk_kernel_yy; int ydim0_initialise_chunk_kernel_yy_h = -1; #undef OPS_ACC0 #define OPS_ACC0(x, y, z) \ (x + xdim0_initialise_chunk_kernel_yy * (y) + \ xdim0_initialise_chunk_kernel_yy * ydim0_initialise_chunk_kernel_yy * (z)) // user function __device__ void initialise_chunk_kernel_yy_gpu(int *yy, int *idx) { yy[OPS_ACC0(0, 0, 0)] = idx[1] - 2; } #undef OPS_ACC0 __global__ void ops_initialise_chunk_kernel_yy(int *__restrict arg0, int arg_idx0, int arg_idx1, int arg_idx2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; int arg_idx[3]; arg_idx[0] = arg_idx0 + idx_x; arg_idx[1] = arg_idx1 + idx_y; arg_idx[2] = arg_idx2 + idx_z; arg0 += idx_x * 0 * 1 + idx_y * 1 * 1 * xdim0_initialise_chunk_kernel_yy + idx_z * 0 * 1 * xdim0_initialise_chunk_kernel_yy * ydim0_initialise_chunk_kernel_yy; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { initialise_chunk_kernel_yy_gpu(arg0, arg_idx); } } // host stub function void ops_par_loop_initialise_chunk_kernel_yy(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1) { // Timing double t1, t2, c1, c2; ops_arg args[2] = {arg0, arg1}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 2, range, 47)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(47, "initialise_chunk_kernel_yy"); OPS_kernels[47].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int arg_idx[3]; #ifdef OPS_MPI arg_idx[0] = sb->decomp_disp[0] + start[0]; arg_idx[1] = sb->decomp_disp[1] + start[1]; arg_idx[2] = sb->decomp_disp[2] + start[2]; #else arg_idx[0] = start[0]; arg_idx[1] = start[1]; arg_idx[2] = start[2]; #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; if (xdim0 != xdim0_initialise_chunk_kernel_yy_h || ydim0 != ydim0_initialise_chunk_kernel_yy_h) { cudaMemcpyToSymbol(xdim0_initialise_chunk_kernel_yy, &xdim0, sizeof(int)); xdim0_initialise_chunk_kernel_yy_h = xdim0; cudaMemcpyToSymbol(ydim0_initialise_chunk_kernel_yy, &ydim0, sizeof(int)); ydim0_initialise_chunk_kernel_yy_h = ydim0; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = args[0].dat->elem_size; char *p_a[2]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = 
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; ops_H_D_exchanges_device(args, 2); ops_halo_exchanges(args, 2, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[47].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_initialise_chunk_kernel_yy<<<grid, tblock>>>((int *)p_a[0], arg_idx[0], arg_idx[1], arg_idx[2], x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[47].time += t1 - t2; } ops_set_dirtybit_device(args, 2); ops_set_halo_dirtybit3(&args[0], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[47].mpi_time += t2 - t1; OPS_kernels[47].transfer += ops_compute_transfer(dim, start, end, &arg0); } }
240b601071d2370cd3facd3d98bbeb93bab8e081.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "SumaMatricesCU.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *A = NULL; hipMalloc(&A, XSIZE*YSIZE); int *B = NULL; hipMalloc(&B, XSIZE*YSIZE); int *C = NULL; hipMalloc(&C, XSIZE*YSIZE); int width = XSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( SumaMatricesCU), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,width); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( SumaMatricesCU), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,width); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( SumaMatricesCU), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,width); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
240b601071d2370cd3facd3d98bbeb93bab8e081.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "SumaMatricesCU.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *A = NULL; cudaMalloc(&A, XSIZE*YSIZE); int *B = NULL; cudaMalloc(&B, XSIZE*YSIZE); int *C = NULL; cudaMalloc(&C, XSIZE*YSIZE); int width = XSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); SumaMatricesCU<<<gridBlock,threadBlock>>>(A,B,C,width); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { SumaMatricesCU<<<gridBlock,threadBlock>>>(A,B,C,width); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { SumaMatricesCU<<<gridBlock,threadBlock>>>(A,B,C,width); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
e1a91b82fd60e16dac8020ccdef23ae78752930a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void vecadd( int * v0, int * v1, std::size_t size ) { auto tid = threadIdx.x; /* guard against launches with more threads than elements */ if ( tid < size ) { v0[ tid ] += v1[ tid ]; } }
e1a91b82fd60e16dac8020ccdef23ae78752930a.cu
#include "includes.h" __global__ void vecadd( int * v0, int * v1, std::size_t size ) { auto tid = threadIdx.x; /* guard against launches with more threads than elements */ if ( tid < size ) { v0[ tid ] += v1[ tid ]; } }
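The vecadd pair above ships only the kernel, whose indexing uses threadIdx.x alone and therefore supports a single block. Below is a hedged, minimal host driver; the kernel is restated with the bounds guard so the sketch is self-contained and does not depend on includes.h.

#include <cstddef>
#include <cstdio>
#include <cuda_runtime.h>

__global__ void vecadd(int* v0, int* v1, std::size_t size) {
    auto tid = threadIdx.x;
    if (tid < size) v0[tid] += v1[tid];
}

int main() {
    const std::size_t n = 256;
    int h0[n], h1[n];
    for (std::size_t i = 0; i < n; ++i) { h0[i] = (int)i; h1[i] = 2 * (int)i; }

    int *d0 = nullptr, *d1 = nullptr;
    cudaMalloc((void**)&d0, n * sizeof(int));
    cudaMalloc((void**)&d1, n * sizeof(int));
    cudaMemcpy(d0, h0, n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d1, h1, n * sizeof(int), cudaMemcpyHostToDevice);

    vecadd<<<1, (unsigned int)n>>>(d0, d1, n);   // one block, since the kernel only uses threadIdx.x
    cudaMemcpy(h0, d0, n * sizeof(int), cudaMemcpyDeviceToHost);

    bool ok = true;
    for (std::size_t i = 0; i < n; ++i) ok = ok && (h0[i] == 3 * (int)i);
    printf("vecadd %s\n", ok ? "ok" : "FAILED");

    cudaFree(d0);
    cudaFree(d1);
    return 0;
}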
88b17ab89fcbeb91a3b78666c6ca35223ded40a7.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2017-2020 ABBYY Production LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --------------------------------------------------------------------------------------------------------------*/ #include <NeoMathEngine/NeoMathEngineDefs.h> #ifdef NEOML_USE_CUDA #include <DllLoader.h> #include <CudaMathEngine.h> #include <CudaCommon.h> #include <CublasFunctions.h> #include <CusparseFunctions.h> #include <MathEngineCommon.h> #include <MemoryHandleInternal.h> #include <MathEngineDeviceStackAllocator.h> #include <MathEngineHostStackAllocator.h> #include <math.h> #include <float.h> #include <hip/hip_runtime.h> #include <CudaDevice.h> namespace NeoML { static __constant__ const float ZeroDev = 0; static __constant__ const float OneDev = 1; const float* CCudaConst::Zero; const float* CCudaConst::One; const int CudaMemoryAlignment = 4; //------------------------------------------------------------------------------------------------------------ CCudaMathEngine::CCudaMathEngine( const CCusparse* _cusparse, const CCublas* _cublas, std::unique_ptr<CCudaDevice>& _device ) : cusparse( _cusparse ), cublas( _cublas ), cudaStream( 0 ), cublasHandle( 0 ), cusparseHandle( 0 ) { device.swap( _device ); // CUDA ASSERT_EXPR( device != 0 ); ASSERT_ERROR_CODE( hipSetDevice( device->DeviceNumber ) ); // CUDA stream. ASSERT_ERROR_CODE( hipStreamCreate( &cudaStream ) ); // Cublas. ASSERT_ERROR_CODE( cublas->Create( &cublasHandle ) ); ASSERT_ERROR_CODE( cublas->SetAtomicsMode( cublasHandle, HIPBLAS_ATOMICS_ALLOWED ) ); ASSERT_ERROR_CODE( cublas->SetPointerMode( cublasHandle, HIPBLAS_POINTER_MODE_DEVICE ) ); ASSERT_ERROR_CODE( cublas->SetStream( cublasHandle, cudaStream ) ); // Cusparse. 
	ASSERT_ERROR_CODE( cusparse->Create( &cusparseHandle ) );
	ASSERT_ERROR_CODE( cusparse->SetStream( cusparseHandle, cudaStream ) );

	// Constants
	ASSERT_ERROR_CODE( hipGetSymbolAddress((void**)&CCudaConst::Zero, ZeroDev) );
	ASSERT_ERROR_CODE( hipGetSymbolAddress((void**)&CCudaConst::One, OneDev) );

	memoryPool = std::unique_ptr<CMemoryPool>( new CMemoryPool( device->MemoryLimit, this, true ) );
	deviceStackRunTime = std::unique_ptr<CDeviceStackAllocator>( new CDeviceStackAllocator( *memoryPool, CudaMemoryAlignment ) );
	hostStackRunTime = std::unique_ptr<CHostStackAllocator>( new CHostStackAllocator( CudaMemoryAlignment ) );

	CDllLoader::Load(CDllLoader::CUDA_DLL);
}

CCudaMathEngine::~CCudaMathEngine()
{
	hostStackRunTime.reset();
	deviceStackRunTime.reset();
	memoryPool.reset();

	hipStreamDestroy( cudaStream );

	cusparse->Destroy( cusparseHandle );
	cublas->Destroy( cublasHandle );

	CDllLoader::Free(CDllLoader::CUDA_DLL);
}

///////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////

static inline void CudaFixGeom(int& minVal, int maxVal, unsigned int& geom)
{
	if(minVal > maxVal) {
		minVal = maxVal;
	}

	if(minVal > (int)geom) {
		minVal = (int)geom;
	}

	if((int)geom > maxVal) {
		geom = maxVal;
	}
}

// The largest power of two smaller than the given value (returns 1 for input 1)
static inline int GetMax2ExpLess(int value)
{
	const int startExp = 16;
	int expStep = startExp >> 1;
	int candidate = 1 << startExp;
	while(expStep > 0) {
		if(candidate >= value) {
			candidate >>= expStep;
		} else {
			candidate <<= expStep;
		}
		expStep >>= 1;
	}
	if(candidate >= value) {
		candidate >>= 1;
	}
	return candidate;
}

static inline void CudaFixMinVals(int& minZ, int& minY, int& minX, int maxThreadCount)
{
	// Round-robin over Z, Y, X, shrinking each minimum to a smaller power of two
	// until their product fits into maxThreadCount
	int nextMin = 0;
	while(minX * minY * minZ > maxThreadCount) {
		int candidate = nextMin++ % 3;
		switch(candidate) {
			case 0:
				minZ = GetMax2ExpLess(minZ);
				break;
			case 1:
				minY = GetMax2ExpLess(minY);
				break;
			case 2:
				minX = GetMax2ExpLess(minX);
				break;
		}
	}
}

/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////

int CCudaMathEngine::alignXSizeForWarp(int xSize)
{
	// Align the size so that it is either a multiple of the warp size,
	// or is smaller than or equal to the warp and is a power of two
	// Required for reduction with warps
	int candidate = device->WarpSize;
	if( xSize >= candidate ) {
		return ( ( xSize + candidate - 1 ) / candidate ) * candidate;
	}

	int next = candidate;
	do {
		candidate = next;
		next = next >> 1;
	} while(xSize <= next);

	return candidate;
}

void CCudaMathEngine::getCudaTaskGrid(int& blockCount, int& threadCount, int taskCount, int combineCount)
{
	ASSERT_EXPR( taskCount > 0 );
	ASSERT_EXPR( combineCount > 0 );
	int runCount = (taskCount + combineCount - 1) / combineCount;
	threadCount = device->ThreadMaxCount;

	if(threadCount > runCount) {
		threadCount = runCount;
	}

	blockCount = (runCount + threadCount - 1) / threadCount;
}

void CCudaMathEngine::getCudaTaskGrid2D(dim3& blockCount, dim3& threadCount,
	int height, int width, int maxThreadCount)
{
	getCudaTaskGrid3DMinZYX(1, 1, 1, blockCount, threadCount, 1, height, width, maxThreadCount);
}

void CCudaMathEngine::getCudaTaskGrid3D(dim3& blockCount, dim3& threadCount,
	int batchSize, int height, int width, int maxThreadCount)
{
	getCudaTaskGrid3DMinZYX(1, 1, 1, blockCount, threadCount, batchSize, height, width, maxThreadCount);
}

void CCudaMathEngine::getCudaTaskGrid2DMinYX(int minY, int minX, dim3& blockCount, dim3& threadCount,
	int height, int width, int maxThreadCount)
{
	getCudaTaskGrid3DMinZYX(1, minY, minX, blockCount, threadCount, 1, height, width, maxThreadCount);
}

void CCudaMathEngine::getCudaTaskGrid3DMinZYX(int minZ, int minY, int minX, dim3& blockCount, dim3& threadCount,
	int batchSize, int height, int width, int _maxThreadCount)
{
	int maxThreadCount = min( device->ThreadMaxCount, static_cast<unsigned int>( _maxThreadCount ) );
	ASSERT_EXPR(maxThreadCount >= 1);
	ASSERT_EXPR(minZ > 0 && minY > 0 && minX > 0);
	ASSERT_EXPR(batchSize > 0 && height > 0 && width > 0);

	dim3 geom( device->ThreadMax3DCountX, device->ThreadMax3DCountY, device->ThreadMax3DCountZ );
	CudaFixGeom(minX, width, geom.x);
	CudaFixGeom(minY, height, geom.y);
	CudaFixGeom(minZ, batchSize, geom.z);

	CudaFixMinVals(minX, minY, minZ, maxThreadCount);

	unsigned int optimalGridSize = INT_MAX;

	threadCount = dim3(1, 1, 1);
	blockCount = dim3(width, height, batchSize);

	// Enumerate power-of-two block shapes (Z outer, Y inner, X derived from the remaining
	// thread budget) and keep the one that produces the smallest grid
	dim3 currentGeom;
	unsigned int zLimit = min(geom.z * 2, maxThreadCount + 1);
	for(currentGeom.z = minZ; currentGeom.z < zLimit; currentGeom.z *= 2) {
		unsigned int zBlock = min(currentGeom.z, geom.z);
		unsigned int zBlockCount = (batchSize + zBlock - 1) / zBlock;

		unsigned int xyMaxThreadCount = maxThreadCount / currentGeom.z;
		unsigned int yLimit = min(geom.y * 2, xyMaxThreadCount + 1);
		for(currentGeom.y = minY; currentGeom.y < yLimit; currentGeom.y *= 2) {
			currentGeom.x = xyMaxThreadCount / currentGeom.y;
			if((int)currentGeom.x < minX) {
				continue;
			}

			unsigned int yBlock = min(currentGeom.y, geom.y);
			unsigned int yBlockCount = (height + yBlock - 1) / yBlock;

			unsigned int xBlock = min(currentGeom.x, geom.x);
			unsigned int xBlockCount = (width + xBlock - 1) / xBlock;

			unsigned int gridSize = xBlockCount * yBlockCount * zBlockCount;

			if(gridSize < optimalGridSize) {
				optimalGridSize = gridSize;
				threadCount = dim3(xBlock, yBlock, zBlock);
				blockCount = dim3(xBlockCount, yBlockCount, zBlockCount);
			}
		}
	}
}

} // namespace NeoML

#endif // NEOML_USE_CUDA
88b17ab89fcbeb91a3b78666c6ca35223ded40a7.cu
/* Copyright © 2017-2020 ABBYY Production LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------------------------------------*/

#include <NeoMathEngine/NeoMathEngineDefs.h>

#ifdef NEOML_USE_CUDA

#include <DllLoader.h>
#include <CudaMathEngine.h>
#include <CudaCommon.h>
#include <CublasFunctions.h>
#include <CusparseFunctions.h>
#include <MathEngineCommon.h>
#include <MemoryHandleInternal.h>
#include <MathEngineDeviceStackAllocator.h>
#include <MathEngineHostStackAllocator.h>
#include <math.h>
#include <float.h>
#include <cuda_runtime.h>
#include <CudaDevice.h>

namespace NeoML {

static __constant__ const float ZeroDev = 0;
static __constant__ const float OneDev = 1;

const float* CCudaConst::Zero;
const float* CCudaConst::One;

const int CudaMemoryAlignment = 4;

//------------------------------------------------------------------------------------------------------------

CCudaMathEngine::CCudaMathEngine( const CCusparse* _cusparse, const CCublas* _cublas,
		std::unique_ptr<CCudaDevice>& _device ) :
	cusparse( _cusparse ),
	cublas( _cublas ),
	cudaStream( 0 ),
	cublasHandle( 0 ),
	cusparseHandle( 0 )
{
	device.swap( _device );

	// CUDA
	ASSERT_EXPR( device != 0 );
	ASSERT_ERROR_CODE( cudaSetDevice( device->DeviceNumber ) );

	// CUDA stream.
	ASSERT_ERROR_CODE( cudaStreamCreate( &cudaStream ) );

	// Cublas.
	ASSERT_ERROR_CODE( cublas->Create( &cublasHandle ) );
	ASSERT_ERROR_CODE( cublas->SetAtomicsMode( cublasHandle, CUBLAS_ATOMICS_ALLOWED ) );
	ASSERT_ERROR_CODE( cublas->SetPointerMode( cublasHandle, CUBLAS_POINTER_MODE_DEVICE ) );
	ASSERT_ERROR_CODE( cublas->SetStream( cublasHandle, cudaStream ) );

	// Cusparse.
	ASSERT_ERROR_CODE( cusparse->Create( &cusparseHandle ) );
	ASSERT_ERROR_CODE( cusparse->SetStream( cusparseHandle, cudaStream ) );

	// Constants
	ASSERT_ERROR_CODE( cudaGetSymbolAddress((void**)&CCudaConst::Zero, ZeroDev) );
	ASSERT_ERROR_CODE( cudaGetSymbolAddress((void**)&CCudaConst::One, OneDev) );

	memoryPool = std::unique_ptr<CMemoryPool>( new CMemoryPool( device->MemoryLimit, this, true ) );
	deviceStackRunTime = std::unique_ptr<CDeviceStackAllocator>( new CDeviceStackAllocator( *memoryPool, CudaMemoryAlignment ) );
	hostStackRunTime = std::unique_ptr<CHostStackAllocator>( new CHostStackAllocator( CudaMemoryAlignment ) );

	CDllLoader::Load(CDllLoader::CUDA_DLL);
}

CCudaMathEngine::~CCudaMathEngine()
{
	hostStackRunTime.reset();
	deviceStackRunTime.reset();
	memoryPool.reset();

	cudaStreamDestroy( cudaStream );

	cusparse->Destroy( cusparseHandle );
	cublas->Destroy( cublasHandle );

	CDllLoader::Free(CDllLoader::CUDA_DLL);
}

///////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////

static inline void CudaFixGeom(int& minVal, int maxVal, unsigned int& geom)
{
	if(minVal > maxVal) {
		minVal = maxVal;
	}

	if(minVal > (int)geom) {
		minVal = (int)geom;
	}

	if((int)geom > maxVal) {
		geom = maxVal;
	}
}

// The largest power of two smaller than the given value (returns 1 for input 1)
static inline int GetMax2ExpLess(int value)
{
	const int startExp = 16;
	int expStep = startExp >> 1;
	int candidate = 1 << startExp;
	while(expStep > 0) {
		if(candidate >= value) {
			candidate >>= expStep;
		} else {
			candidate <<= expStep;
		}
		expStep >>= 1;
	}
	if(candidate >= value) {
		candidate >>= 1;
	}
	return candidate;
}

static inline void CudaFixMinVals(int& minZ, int& minY, int& minX, int maxThreadCount)
{
	// Round-robin over Z, Y, X, shrinking each minimum to a smaller power of two
	// until their product fits into maxThreadCount
	int nextMin = 0;
	while(minX * minY * minZ > maxThreadCount) {
		int candidate = nextMin++ % 3;
		switch(candidate) {
			case 0:
				minZ = GetMax2ExpLess(minZ);
				break;
			case 1:
				minY = GetMax2ExpLess(minY);
				break;
			case 2:
				minX = GetMax2ExpLess(minX);
				break;
		}
	}
}

/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////

int CCudaMathEngine::alignXSizeForWarp(int xSize)
{
	// Align the size so that it is either a multiple of the warp size,
	// or is smaller than or equal to the warp and is a power of two
	// Required for reduction with warps
	int candidate = device->WarpSize;
	if( xSize >= candidate ) {
		return ( ( xSize + candidate - 1 ) / candidate ) * candidate;
	}

	int next = candidate;
	do {
		candidate = next;
		next = next >> 1;
	} while(xSize <= next);

	return candidate;
}

void CCudaMathEngine::getCudaTaskGrid(int& blockCount, int& threadCount, int taskCount, int combineCount)
{
	ASSERT_EXPR( taskCount > 0 );
	ASSERT_EXPR( combineCount > 0 );
	int runCount = (taskCount + combineCount - 1) / combineCount;
	threadCount = device->ThreadMaxCount;

	if(threadCount > runCount) {
		threadCount = runCount;
	}

	blockCount = (runCount + threadCount - 1) / threadCount;
}

void CCudaMathEngine::getCudaTaskGrid2D(dim3& blockCount, dim3& threadCount,
	int height, int width, int maxThreadCount)
{
	getCudaTaskGrid3DMinZYX(1, 1, 1, blockCount, threadCount, 1, height, width, maxThreadCount);
}

void CCudaMathEngine::getCudaTaskGrid3D(dim3& blockCount, dim3& threadCount,
	int batchSize, int height, int width, int maxThreadCount)
{
	getCudaTaskGrid3DMinZYX(1, 1, 1, blockCount, threadCount, batchSize, height, width, maxThreadCount);
}

void CCudaMathEngine::getCudaTaskGrid2DMinYX(int minY, int minX, dim3& blockCount, dim3& threadCount,
	int height, int width, int maxThreadCount)
{
	getCudaTaskGrid3DMinZYX(1, minY, minX, blockCount, threadCount, 1, height, width, maxThreadCount);
}

void CCudaMathEngine::getCudaTaskGrid3DMinZYX(int minZ, int minY, int minX, dim3& blockCount, dim3& threadCount,
	int batchSize, int height, int width, int _maxThreadCount)
{
	int maxThreadCount = min( device->ThreadMaxCount, static_cast<unsigned int>( _maxThreadCount ) );
	ASSERT_EXPR(maxThreadCount >= 1);
	ASSERT_EXPR(minZ > 0 && minY > 0 && minX > 0);
	ASSERT_EXPR(batchSize > 0 && height > 0 && width > 0);

	dim3 geom( device->ThreadMax3DCountX, device->ThreadMax3DCountY, device->ThreadMax3DCountZ );
	CudaFixGeom(minX, width, geom.x);
	CudaFixGeom(minY, height, geom.y);
	CudaFixGeom(minZ, batchSize, geom.z);

	CudaFixMinVals(minX, minY, minZ, maxThreadCount);

	unsigned int optimalGridSize = INT_MAX;

	threadCount = dim3(1, 1, 1);
	blockCount = dim3(width, height, batchSize);

	// Enumerate power-of-two block shapes (Z outer, Y inner, X derived from the remaining
	// thread budget) and keep the one that produces the smallest grid
	dim3 currentGeom;
	unsigned int zLimit = min(geom.z * 2, maxThreadCount + 1);
	for(currentGeom.z = minZ; currentGeom.z < zLimit; currentGeom.z *= 2) {
		unsigned int zBlock = min(currentGeom.z, geom.z);
		unsigned int zBlockCount = (batchSize + zBlock - 1) / zBlock;

		unsigned int xyMaxThreadCount = maxThreadCount / currentGeom.z;
		unsigned int yLimit = min(geom.y * 2, xyMaxThreadCount + 1);
		for(currentGeom.y = minY; currentGeom.y < yLimit; currentGeom.y *= 2) {
			currentGeom.x = xyMaxThreadCount / currentGeom.y;
			if((int)currentGeom.x < minX) {
				continue;
			}

			unsigned int yBlock = min(currentGeom.y, geom.y);
			unsigned int yBlockCount = (height + yBlock - 1) / yBlock;

			unsigned int xBlock = min(currentGeom.x, geom.x);
			unsigned int xBlockCount = (width + xBlock - 1) / xBlock;

			unsigned int gridSize = xBlockCount * yBlockCount * zBlockCount;

			if(gridSize < optimalGridSize) {
				optimalGridSize = gridSize;
				threadCount = dim3(xBlock, yBlock, zBlock);
				blockCount = dim3(xBlockCount, yBlockCount, zBlockCount);
			}
		}
	}
}

} // namespace NeoML

#endif // NEOML_USE_CUDA
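// A minimal, self-contained sketch (not part of the NeoML sources above) of how the
// 1-D CCudaMathEngine::getCudaTaskGrid arithmetic maps onto an actual kernel launch.
// The kernel name VectorFillKernel, the fill operation, and the 1024-thread limit are
// illustrative assumptions; NeoML itself takes the limit from device->ThreadMaxCount.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void VectorFillKernel( float* data, float value, int count, int combineCount )
{
	// Each thread handles up to combineCount consecutive elements,
	// mirroring the taskCount / combineCount split in getCudaTaskGrid
	int start = ( blockIdx.x * blockDim.x + threadIdx.x ) * combineCount;
	int end = min( start + combineCount, count );
	for( int i = start; i < end; ++i ) {
		data[i] = value;
	}
}

int main()
{
	const int taskCount = 1000000;
	const int combineCount = 4;
	const int threadMaxCount = 1024; // stand-in for device->ThreadMaxCount

	// Same arithmetic as CCudaMathEngine::getCudaTaskGrid
	int runCount = ( taskCount + combineCount - 1 ) / combineCount;
	int threadCount = threadMaxCount < runCount ? threadMaxCount : runCount;
	int blockCount = ( runCount + threadCount - 1 ) / threadCount;

	float* data = 0;
	cudaMalloc( (void**)&data, taskCount * sizeof( float ) );
	VectorFillKernel<<<blockCount, threadCount>>>( data, 1.f, taskCount, combineCount );
	cudaDeviceSynchronize();
	cudaFree( data );

	printf( "blockCount=%d threadCount=%d\n", blockCount, threadCount );
	return 0;
}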