hip_filename (string, 5–84 chars) | hip_content (string, 79–9.69M chars) | cuda_filename (string, 4–83 chars) | cuda_content (string, 19–9.69M chars)
---|---|---|---
e2272fe501a35c913e2422e45a4cb43f8f103e60.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* ******************************************************************************
* *
* *
* * This program and the accompanying materials are made available under the
* * terms of the Apache License, Version 2.0 which is available at
* * https://www.apache.org/licenses/LICENSE-2.0.
* *
* * See the NOTICE file distributed with this work for additional
* * information regarding copyright ownership.
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* * License for the specific language governing permissions and limitations
* * under the License.
* *
* * SPDX-License-Identifier: Apache-2.0
* *****************************************************************************
*/
//
// @author Yurii Shyrma ([email protected])
//
#include <ops/declarable/helpers/convolutions.h>
#include <helpers/PointersManager.h>
namespace sd {
namespace ops {
//////////////////////////////////////////////////////////////////////////
template <typename T>
__global__ static void upsampling3dCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int factorD, const int factorH, const int factorW, const bool isNCDHW) {
// x has shape [bS, iC, iD, iH, iW] (NCDHW) or [bS, iD, iH, iW, iC] (NDHWC)
// z has shape [bS, iC, factorD*iD, factorH*iH, factorW*iW ] (NCDHW) or [bS, factorD*iD, factorH*iH, factorW*iW, iC] (NDHWC)
const T* x = reinterpret_cast<const T*>(vx);
T* z = reinterpret_cast<T*>(vz);
__shared__ int rank, dimID;
__shared__ Nd4jLong zLen, *sharedMem;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
dimID = isNCDHW ? 2 : 1;
zLen = shape::length(zShapeInfo);
rank = 5;
}
__syncthreads();
const auto zInd = threadIdx.x + blockIdx.x * blockDim.x;
if(zInd >= zLen)
return;
auto coords = sharedMem + threadIdx.x * rank;
shape::index2coords(zInd, zShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
coords[dimID] /= factorD;
coords[dimID + 1] /= factorH;
coords[dimID + 2] /= factorW;
const auto xOffset = shape::getOffset(xShapeInfo, coords);
z[zOffset] = x[xOffset];
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void upsampling3dCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const int factorD, const int factorH, const int factorW, const bool isNCDHW) {
hipLaunchKernelGGL(( upsampling3dCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, factorD, factorH, factorW, isNCDHW);
}
//////////////////////////////////////////////////////////////////////////
ND4J_LOCAL void ConvolutionUtils::upsampling3d(sd::graph::Context& block, const NDArray& input, NDArray& output, const int factorD, const int factorH, const int factorW, const bool isNCDHW) {
PointersManager manager(block.launchContext(), "upsampling3d");
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = output.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128;
NDArray::prepareSpecialUse({&output}, {&input});
BUILD_SINGLE_SELECTOR(input.dataType(), upsampling3dCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), factorD, factorH, factorW, isNCDHW), FLOAT_TYPES);
NDArray::registerSpecialUse({&output}, {&input});
manager.synchronize();
}
}
}
| e2272fe501a35c913e2422e45a4cb43f8f103e60.cu | /*
* ******************************************************************************
* *
* *
* * This program and the accompanying materials are made available under the
* * terms of the Apache License, Version 2.0 which is available at
* * https://www.apache.org/licenses/LICENSE-2.0.
* *
* * See the NOTICE file distributed with this work for additional
* * information regarding copyright ownership.
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* * License for the specific language governing permissions and limitations
* * under the License.
* *
* * SPDX-License-Identifier: Apache-2.0
* *****************************************************************************
*/
//
// @author Yurii Shyrma ([email protected])
//
#include <ops/declarable/helpers/convolutions.h>
#include <helpers/PointersManager.h>
namespace sd {
namespace ops {
//////////////////////////////////////////////////////////////////////////
template <typename T>
__global__ static void upsampling3dCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int factorD, const int factorH, const int factorW, const bool isNCDHW) {
// x has shape [bS, iC, iD, iH, iW] (NCDHW) or [bS, iD, iH, iW, iC] (NDHWC)
// z has shape [bS, iC, factorD*iD, factorH*iH, factorW*iW ] (NCDHW) or [bS, factorD*iD, factorH*iH, factorW*iW, iC] (NDHWC)
const T* x = reinterpret_cast<const T*>(vx);
T* z = reinterpret_cast<T*>(vz);
__shared__ int rank, dimID;
__shared__ Nd4jLong zLen, *sharedMem;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
dimID = isNCDHW ? 2 : 1;
zLen = shape::length(zShapeInfo);
rank = 5;
}
__syncthreads();
const auto zInd = threadIdx.x + blockIdx.x * blockDim.x;
if(zInd >= zLen)
return;
auto coords = sharedMem + threadIdx.x * rank;
shape::index2coords(zInd, zShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
coords[dimID] /= factorD;
coords[dimID + 1] /= factorH;
coords[dimID + 2] /= factorW;
const auto xOffset = shape::getOffset(xShapeInfo, coords);
z[zOffset] = x[xOffset];
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void upsampling3dCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const int factorD, const int factorH, const int factorW, const bool isNCDHW) {
upsampling3dCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, factorD, factorH, factorW, isNCDHW);
}
//////////////////////////////////////////////////////////////////////////
ND4J_LOCAL void ConvolutionUtils::upsampling3d(sd::graph::Context& block, const NDArray& input, NDArray& output, const int factorD, const int factorH, const int factorW, const bool isNCDHW) {
PointersManager manager(block.launchContext(), "upsampling3d");
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = output.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128;
NDArray::prepareSpecialUse({&output}, {&input});
BUILD_SINGLE_SELECTOR(input.dataType(), upsampling3dCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), factorD, factorH, factorW, isNCDHW), FLOAT_TYPES);
NDArray::registerSpecialUse({&output}, {&input});
manager.synchronize();
}
}
}
|
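The upsampling3dCuda kernel in both files above maps each output element back to its source by integer-dividing the spatial output coordinates by the upsampling factors. Below is a minimal host-side sketch of that indexing scheme, assuming a contiguous NCDHW layout; the helper name is illustrative and not part of either file.
#include <cstddef>
#include <vector>
// Nearest-neighbour 3D upsampling on the host: output coordinate (d, h, w) reads
// input element (d / fD, h / fH, w / fW), mirroring the kernel's coords[dimID] /= factor lines.
template <typename T>
std::vector<T> upsample3dNearest(const std::vector<T>& x,
                                 std::size_t bS, std::size_t iC,
                                 std::size_t iD, std::size_t iH, std::size_t iW,
                                 std::size_t fD, std::size_t fH, std::size_t fW) {
  const std::size_t oD = iD * fD, oH = iH * fH, oW = iW * fW;
  std::vector<T> z(bS * iC * oD * oH * oW);
  for (std::size_t b = 0; b < bS; ++b)
    for (std::size_t c = 0; c < iC; ++c)
      for (std::size_t d = 0; d < oD; ++d)
        for (std::size_t h = 0; h < oH; ++h)
          for (std::size_t w = 0; w < oW; ++w) {
            const std::size_t zIdx = (((b * iC + c) * oD + d) * oH + h) * oW + w;
            const std::size_t xIdx = (((b * iC + c) * iD + d / fD) * iH + h / fH) * iW + w / fW;
            z[zIdx] = x[xIdx];
          }
  return z;
}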
760a85f606a2fdcdfd6f72c88303976bb7302a2d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
__global__ void gaussianBlurKernel(const Byte* const __restrict__ input,
Byte* const __restrict__ output,
const size_t width,
const size_t height,
const float* const __restrict__ gaussianKernel)
{
//x and y maxs are width and height
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
Byte inputs[9];
if((x > 0) && (x < (height - 1)) && (y > 0) && (y < (width - 1)))
{
inputs[0] = input[(y - 1) * width + (x - 1)];
inputs[1] = input[(y - 1) * width + x];
inputs[2] = input[(y - 1) * width + (x + 1)];
inputs[3] = input[y * width + (x - 1)];
inputs[4] = input[y * width + x];
inputs[5] = input[y * width + (x + 1)];
inputs[6] = input[(y + 1) * width + (x - 1)];
inputs[7] = input[(y + 1) * width + x];
inputs[8] = input[(y + 1) * width + (x + 1)];
unsigned int tempValue = 0;
for (unsigned int it = 0; it < 9; ++it)
tempValue += inputs[it] * gaussianKernel[it];
output[y * width + x] = (tempValue > 255)?255:tempValue;
}
else
output[y * width + x] = 255;
};
void gaussianBlurCuda(const Byte* const input,
Byte* const output,
const size_t width,
const size_t height,
const float* const gaussianKernel)
{
Byte* cudaInput;
Byte* cudaOutput;
float* cudaKernel;
hipMalloc(reinterpret_cast<void**>(&cudaInput), width * height * sizeof(Byte));
hipMalloc(reinterpret_cast<void**>(&cudaOutput), width * height * sizeof(Byte));
hipMalloc(reinterpret_cast<void**>(&cudaKernel), 9 * sizeof(float));
hipMemcpy(cudaInput, input, width * height * sizeof(Byte), hipMemcpyHostToDevice);
hipMemcpy(cudaKernel, gaussianKernel, 9 * sizeof(float), hipMemcpyHostToDevice);
dim3 blockSize(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 gridSize(width / BLOCK_SIZE_X, height / BLOCK_SIZE_Y);
hipLaunchKernelGGL(( gaussianBlurKernel), dim3(gridSize), dim3(blockSize), 0, 0, cudaInput,
cudaOutput,
width,
height,
cudaKernel);
hipDeviceSynchronize();
hipMemcpy(output, cudaOutput, width * height * sizeof(Byte), hipMemcpyDeviceToHost);
hipFree(cudaInput);
hipFree(cudaOutput);
hipFree(cudaKernel);
}
| 760a85f606a2fdcdfd6f72c88303976bb7302a2d.cu |
#include "kernel.h"
__global__ void gaussianBlurKernel(const Byte* const __restrict__ input,
Byte* const __restrict__ output,
const size_t width,
const size_t height,
const float* const __restrict__ gaussianKernel)
{
//x and y maxs are width and height
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
Byte inputs[9];
if((x > 0) && (x < (height - 1)) && (y > 0) && (y < (width - 1)))
{
inputs[0] = input[(y - 1) * width + (x - 1)];
inputs[1] = input[(y - 1) * width + x];
inputs[2] = input[(y - 1) * width + (x + 1)];
inputs[3] = input[y * width + (x - 1)];
inputs[4] = input[y * width + x];
inputs[5] = input[y * width + (x + 1)];
inputs[6] = input[(y + 1) * width + (x - 1)];
inputs[7] = input[(y + 1) * width + x];
inputs[8] = input[(y + 1) * width + (x + 1)];
unsigned int tempValue = 0;
for (unsigned int it = 0; it < 9; ++it)
tempValue += inputs[it] * gaussianKernel[it];
output[y * width + x] = (tempValue > 255)?255:tempValue;
}
else
output[y * width + x] = 255;
};
void gaussianBlurCuda(const Byte* const input,
Byte* const output,
const size_t width,
const size_t height,
const float* const gaussianKernel)
{
Byte* cudaInput;
Byte* cudaOutput;
float* cudaKernel;
cudaMalloc(reinterpret_cast<void**>(&cudaInput), width * height * sizeof(Byte));
cudaMalloc(reinterpret_cast<void**>(&cudaOutput), width * height * sizeof(Byte));
cudaMalloc(reinterpret_cast<void**>(&cudaKernel), 9 * sizeof(float));
cudaMemcpy(cudaInput, input, width * height * sizeof(Byte), cudaMemcpyHostToDevice);
cudaMemcpy(cudaKernel, gaussianKernel, 9 * sizeof(float), cudaMemcpyHostToDevice);
dim3 blockSize(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 gridSize(width / BLOCK_SIZE_X, height / BLOCK_SIZE_Y);
gaussianBlurKernel<<<gridSize, blockSize>>>(cudaInput,
cudaOutput,
width,
height,
cudaKernel);
cudaDeviceSynchronize();
cudaMemcpy(output, cudaOutput, width * height * sizeof(Byte), cudaMemcpyDeviceToHost);
cudaFree(cudaInput);
cudaFree(cudaOutput);
cudaFree(cudaKernel);
}
|
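gaussianBlurCuda in both files follows the usual allocate / copy / launch / copy-back pattern without error checking. A hedged usage sketch follows, assuming kernel.h declares the function, defines Byte as unsigned char, and defines BLOCK_SIZE_X / BLOCK_SIZE_Y; width and height are chosen as multiples of the block sizes because the grid is computed with integer division, so ragged edges would otherwise be skipped.
#include <cstddef>
#include <vector>
#include "kernel.h"  // assumed to declare Byte, BLOCK_SIZE_X/Y and gaussianBlurCuda
int main() {
  const std::size_t width = 512;   // assumed multiple of BLOCK_SIZE_X
  const std::size_t height = 512;  // assumed multiple of BLOCK_SIZE_Y
  std::vector<Byte> input(width * height, 128);  // flat grayscale image
  std::vector<Byte> output(width * height, 0);
  // 3x3 Gaussian kernel whose weights sum to 1
  const float gaussianKernel[9] = {
      1.f / 16, 2.f / 16, 1.f / 16,
      2.f / 16, 4.f / 16, 2.f / 16,
      1.f / 16, 2.f / 16, 1.f / 16};
  gaussianBlurCuda(input.data(), output.data(), width, height, gaussianKernel);
  return 0;
}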
e54db1dca0e064a65965831a24752a62a968ea3a.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Context.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/PinnedMemoryAllocator.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/hip/MiscUtils.h>
#include <ATen/native/hip/BatchLinearAlgebraLib.h>
#include <ATen/native/cpu/zmath.h>
#include <THH/THH.h> // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma.h>
#include <magma_types.h>
const bool use_magma_ = true;
#else
const bool use_magma_ = false;
#endif
namespace at {
namespace native {
#ifdef USE_MAGMA
template<class scalar_t>
void magmaSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLu(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info);
template<class scalar_t>
void magmaLuBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLuNoPiv(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* info);
template<class scalar_t>
void magmaLuNoPivBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n);
template<class scalar_t>
void magmaGetri(
magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork,
magma_int_t lwork, magma_int_t* info);
template<class scalar_t>
void magmaGetriBatched(
magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholeskySolve(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaCholeskySolveBatched(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholesky(
magma_uplo_t uplo, magma_int_t n, scalar_t* dA,
magma_int_t ldda, magma_int_t* info);
template<class scalar_t>
void magmaCholeskyBatched(
magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaTriangularSolve(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb);
template<class scalar_t>
void magmaTriangularSolveBatched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n);
template<class scalar_t>
void magmaGeqrf(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2);
template<class scalar_t>
void magmaOrgqr(
magma_int_t m, magma_int_t n, magma_int_t k, scalar_t* dA,
magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaSymeig(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda,
value_t* w, scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork, value_t* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info);
template<class scalar_t>
void magmaSvd(
magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A,
magma_int_t lda, scalar_t* s, scalar_t* U, magma_int_t ldu,
scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork,
magma_int_t* iwork, magma_int_t* info);
template<class scalar_t>
void magmaLuSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaLuSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<>
void magmaSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) {
return magma_get_dgetri_nb(n);
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) {
return magma_get_sgetri_nb(n);
}
template<>
void magmaGetri<double>(
magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGetri<float>(
magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGetriBatched<double>(
magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGetriBatched<float>(
magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<double>(
magma_uplo_t uplo, magma_int_t n, double* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<float>(
magma_uplo_t uplo, magma_int_t n, float* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zpotrf_gpu(uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cpotrf_gpu(uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<double>(
magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<float>(
magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zpotrf_batched(uplo, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cpotrf_batched(uplo, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolve<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double* dA, magma_int_t ldda, double* dB, magma_int_t lddb) {
MagmaStreamSyncGuard guard;
magma_dtrsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolve<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float* dA, magma_int_t ldda, float* dB, magma_int_t lddb) {
MagmaStreamSyncGuard guard;
magma_strsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) {
return magma_get_dgeqrf_nb(m, n);
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) {
return magma_get_sgeqrf_nb(m, n);
}
template<>
void magmaGeqrf<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGeqrf<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaOrgqr<double>(
magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaOrgqr<float>(
magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSymeig<double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda,
double* w, double* wA, magma_int_t ldwa, double* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSymeig<float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda,
float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSymeig<c10::complex<double>, double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
double* w, c10::complex<double>* wA, magma_int_t ldwa, c10::complex<double>* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, w, reinterpret_cast<magmaDoubleComplex*>(wA),
ldwa, reinterpret_cast<magmaDoubleComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSymeig<c10::complex<float>, float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
float* w, c10::complex<float>* wA, magma_int_t ldwa, c10::complex<float>* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, w, reinterpret_cast<magmaFloatComplex*>(wA),
ldwa, reinterpret_cast<magmaFloatComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, double* A,
magma_int_t lda, double* s, double* U, magma_int_t ldu,
double* VT, magma_int_t ldvt, double* work, magma_int_t lwork,
magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A,
magma_int_t lda, float* s, float* U, magma_int_t ldu,
float* VT, magma_int_t ldvt, float* work, magma_int_t lwork,
magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
double** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
float** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_sgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
#endif
#define ALLOCATE_ARRAY(name, type, size) \
auto storage_##name = pin_memory<type>(size); \
name = static_cast<type*>(storage_##name.data());
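// Example expansion: ALLOCATE_ARRAY(info_array, magma_int_t, batch_size) becomes
//   auto storage_info_array = pin_memory<magma_int_t>(batch_size);
//   info_array = static_cast<magma_int_t*>(storage_info_array.data());
// i.e. each array allocated through the macro lives in pinned host memory whose lifetime
// is tied to the enclosing scope via the storage_ local.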
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_solve(Tensor& b, Tensor& A, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
if (b.dim() == 2) {
auto ipiv = at::empty({n}, at::kInt);
magma_int_t info = 0;
magmaSolve<scalar_t>(n, nrhs, A_data, n, ipiv.data_ptr<magma_int_t>(),
b_data, n, &info);
infos[0] = info;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
magma_int_t* info_array;
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
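// Example: with batch_size = 150000 and batch_limit = 65535, mini_batches = 2, so the loop
// below issues two batched calls covering 131070 solves and the call after the loop handles
// the remaining 150000 % 65535 = 18930 solves.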
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaSolveBatched<scalar_t>(
n, nrhs, A_array_cur, n, ipiv_array_cur, b_array_cur, n,
info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaSolveBatched<scalar_t>(
n, nrhs, &A_array[mini_idx], n, &ipiv_array[mini_idx], &b_array[mini_idx], n,
&info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
}
#endif
}
std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
std::vector<int64_t> infos(batchCount(self), 0);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "solve_cuda", [&]{
apply_solve<scalar_t>(self_working_copy, A_working_copy, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "solve_cuda");
} else {
singleCheckErrors(infos[0], "solve_cuda");
}
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_batched_inverse(Tensor& self, Tensor& self_inv, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
auto self_mat_stride = matrixStride(self);
auto self_inv_data = self_inv.data_ptr<scalar_t>();
auto self_inv_mat_stride = matrixStride(self_inv);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t* info_array;
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** self_array;
scalar_t** self_inv_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(self.get_device());
magmaLuBatched<scalar_t>(
n, n, self_array, n, ipiv_array, info_array,
batch_size, magma_queue);
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
scalar_t** self_inv_array_cur = &self_inv_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaGetriBatched<scalar_t>(
n, self_array_cur, n, ipiv_array_cur, self_inv_array_cur,
n, info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaGetriBatched<scalar_t>(
n, &self_array[mini_idx], n, &ipiv_array[mini_idx], &self_inv_array[mini_idx],
n, &info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
#endif
}
template <typename scalar_t>
static void apply_single_inverse(Tensor& self, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n);
magma_int_t info_tmp = 0;
Tensor ipiv = at::empty({n}, at::kInt);
Tensor dwork = at::empty({lwork}, self.options());
magmaLu<scalar_t>(n, n, self_data, n, ipiv.data_ptr<magma_int_t>(), &info_tmp);
if (info_tmp != 0) {
info = info_tmp;
return;
}
magmaGetri<scalar_t>(
n, self_data, n, ipiv.data_ptr<magma_int_t>(), dwork.data_ptr<scalar_t>(), lwork, &info_tmp);
info = info_tmp;
#endif
}
Tensor _inverse_helper_cuda_legacy(const Tensor& self) {
auto self_inv_working_copy = cloneBatchedColumnMajor(self);
if (self.dim() > 2) {
std::vector<int64_t> infos(batchCount(self), 0);
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_batched_inverse<scalar_t>(
self_working_copy, self_inv_working_copy, infos);
});
batchCheckErrors(infos, "inverse_cuda");
} else {
int64_t info = 0;
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_single_inverse<scalar_t>(self_inv_working_copy, info);
});
singleCheckErrors(info, "inverse_cuda");
}
return self_inv_working_copy;
}
Tensor _inverse_helper_cuda(const Tensor& self) {
#ifdef USE_CUSOLVER
if ((self.dim() == 2) || (/* self.dim() > 2 && */ batchCount(self) <= 2) || !use_magma_) {
return _inverse_helper_cuda_lib(self); // cusolver or cublas
} else {
return _inverse_helper_cuda_legacy(self); // magma-cuda
}
#else
return _inverse_helper_cuda_legacy(self); // magma-cuda
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("cholesky_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
int info_tmp = 0;
if (b.dim() == 2) {
magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, n,
b_data, n, &info_tmp);
info = info_tmp;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, A_array_cur, n, b_array_cur, n,
info_tmp, batch_limit, magma_queue);
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, &A_array[mini_idx], n, &b_array[mini_idx], n,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
int64_t info = 0;
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{
apply_cholesky_solve<scalar_t>(self_working_copy, A_working_copy, upper, info);
});
TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info);
return self_working_copy;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky(Tensor& self, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("cholesky: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
if (self.dim() == 2) {
magma_int_t info = 0;
magmaCholesky<scalar_t>(uplo, n, self_data, n, &info);
infos[0] = info;
} else {
auto self_mat_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
magma_int_t* info_array;
scalar_t** self_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
}
MAGMAQueue magma_queue(self.get_device());
constexpr int64_t batch_limit = 262140;
// Compute as many batches of 262140 as possible
// 262140 is the size of the largest batch of matrices that can be run without
// violating the maximum kernel configuration
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit cholesky calls
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaCholeskyBatched<scalar_t>(
uplo, n, self_array_cur, n, info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaCholeskyBatched<scalar_t>(
uplo, n, &self_array[mini_idx], n, &info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
}
#endif
}
Tensor _cholesky_helper_cuda(const Tensor& self, bool upper) {
std::vector<int64_t> infos(batchCount(self), 0);
Tensor self_working_copy;
if (upper) {
self_working_copy = cloneBatchedColumnMajor(self.transpose(-1, -2));
} else {
self_working_copy = cloneBatchedColumnMajor(self);
}
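// When upper is requested, the transposed copy is factored as lower-triangular
// (apply_cholesky is always invoked with upper=false below) and the result is
// transposed back before returning.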
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_cuda", [&]{
apply_cholesky<scalar_t>(self_working_copy, false, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "cholesky_cuda");
} else {
singleCheckErrors(infos[0], "cholesky_cuda");
}
if (upper) {
return self_working_copy.transpose(-1, -2);
} else {
return self_working_copy;
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_lu(Tensor& self, Tensor& pivots, Tensor& infos, bool get_pivots) {
#ifndef USE_MAGMA
AT_ERROR("lu: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
magma_int_t k = ::min(m, n);
if (self.dim() == 2) {
// If `pivots` is defined, then we have to compute them.
// magmaLu and magmaLuNoPiv use a hybrid CPU-GPU algorithm to compute
// the partially-pivoted LU decomposition with / without pivots.
// The driver routines magma_(d/s)getrf_(nopiv_)gpu accept a tensor on the CPU for pivots.
// The data is later copied back to the appropriate output tensor.
Tensor info_tmp = at::zeros({}, at::kInt);
if (get_pivots) {
Tensor piv_tmp = at::empty({k}, at::kInt);
magmaLu<scalar_t>(
m, n, self_data, m, piv_tmp.data_ptr<magma_int_t>(), info_tmp.data_ptr<magma_int_t>());
pivots.copy_(piv_tmp);
} else {
magmaLuNoPiv<scalar_t>(m, n, self_data, m, info_tmp.data_ptr<magma_int_t>());
}
infos.copy_(info_tmp);
} else {
auto self_matrix_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
scalar_t** self_array;
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_matrix_stride];
}
MAGMAQueue magma_queue(self.get_device());
// Same comment as in the case of single matrix above.
if (get_pivots) {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto pivots_matrix_stride = pivots.size(-1);
magma_int_t** pivots_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_matrix_stride];
}
magmaLuBatched<scalar_t>(
m, n, self_array, m, pivots_array,
infos.data_ptr<magma_int_t>(), batch_size, magma_queue);
} else {
magmaLuNoPivBatched<scalar_t>(
m, n, self_array, m, infos.data_ptr<magma_int_t>(),
batch_size, magma_queue);
}
}
#endif
}
std::tuple<Tensor, Tensor, Tensor> _lu_with_info_cuda(const Tensor& self, bool pivot, bool check_errors) {
TORCH_CHECK(self.dim() >= 2,
"expected tensor with 2 or more dimensions, got size: ", self.sizes(),
" instead");
auto m = self.size(-2);
auto n = self.size(-1);
auto k = ::min(m, n);
auto req_size = self.sizes().vec();
req_size.pop_back();
req_size.back() = k;
Tensor pivots_tensor = at::arange(1, k + 1, self.options().dtype(at::kInt)).expand(req_size).contiguous();
req_size.pop_back();
auto infos_tensor = at::zeros(req_size, self.options().dtype(at::kInt));
Tensor self_working_copy;
if (self.numel() == 0) {
self_working_copy = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
} else {
self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "lu_cuda", [&]{
apply_lu<scalar_t>(self_working_copy, pivots_tensor, infos_tensor, pivot);
});
}
if (check_errors) {
if (self.dim() == 2) {
singleCheckErrors(infos_tensor.item<int64_t>(), "lu", /*allow_singular=*/true);
} else {
batchCheckErrors(infos_tensor, "lu", /*allow_singular=*/true);
}
}
return std::make_tuple(self_working_copy, pivots_tensor, infos_tensor);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_triangular_solve(Tensor& b, Tensor& A, bool upper, bool transpose, bool unitriangular) {
#ifndef USE_MAGMA
AT_ERROR("triangular_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans;
magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
// batch_size == 1 implies that:
// 1. the RHS and LHS tensors have 2 dimensions, or
// 2. the RHS and LHS tensors have more than 2 dimensions but all batch dimensions are 1
if (batch_size == 1) {
magmaTriangularSolve<scalar_t>(uplo, trans, diag, n, nrhs, A_data, n, b_data, n);
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, A_array_cur,
n, b_array_cur, n, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, &A_array[mini_idx],
n, &b_array[mini_idx], n, batch_size % batch_limit, magma_queue);
}
}
#endif
}
std::tuple<Tensor, Tensor> _triangular_solve_helper_cuda(const Tensor& self, const Tensor& A,
bool upper, bool transpose, bool unitriangular) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "triangular_solve_cuda", [&]{
apply_triangular_solve<scalar_t>(self_working_copy, A_working_copy, upper, transpose, unitriangular);
});
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_qr(Tensor& Q, Tensor& R, int64_t n_columns, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("qr: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto q_data = Q.data_ptr<scalar_t>();
auto r_data = R.data_ptr<scalar_t>();
auto q_matrix_stride = matrixStride(Q);
auto r_matrix_stride = matrixStride(R);
magma_int_t m = magma_int_cast(Q.size(-2), "Q.size(-2)");
magma_int_t n = magma_int_cast(R.size(-1), "R.size(-1)");
magma_int_t k = m < n ? m : n;
magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
int64_t batch_size = batchCount(R);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau = at::empty({k}, Q.options().device(at::kCPU));
Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options());
scalar_t* tau_data = tau.data_ptr<scalar_t>();
scalar_t* work_data = work.data_ptr<scalar_t>();
// This phase computes R (the raw version)
// This uses MAGMA's ?geqrf2_gpu function
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* r_working_ptr = &r_data[i * r_matrix_stride];
magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true);
infos[i] = info;
if (info != 0) {
return;
}
}
// This phase computes Q (the raw version)
// We need to perform ?geqrf_gpu again due to the following bug in MAGMA:
// - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly.
// - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu
// Refer to the below link for more details:
// http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800
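// In short: the loop below runs ?geqrf_gpu (is_v2=false) on the copy held in Q so that
// ?orgqr_gpu can expand its elementary reflectors into the first n_columns columns,
// while R was already produced above by the ?geqrf2_gpu pass (is_v2=true).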
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* q_working_ptr = &q_data[i * q_matrix_stride];
magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false);
infos[i] = info;
if (info != 0) {
return;
}
magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor,Tensor> _qr_helper_cuda(const Tensor& self, bool some) {
std::vector<int64_t> infos(batchCount(self), 0);
// Setup input geometry and inputs for apply_qr
std::vector<int64_t> q_sizes, q_strides;
int64_t n_columns_q;
std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, some);
Tensor q_working_copy, r_working_copy;
// If there are no elements, then we simply return a pair of tensors of required dimensions
if (self.numel() == 0) {
// Fix the number of columns of q_working_copy appropriately
q_sizes[self.dim() - 1] = n_columns_q;
q_working_copy = at::eye(q_sizes[self.dim() - 2], q_sizes[self.dim() - 1], self.options());
q_working_copy = q_working_copy.expand_as(q_working_copy);
// We repurpose the same q_sizes for r_working_copy
// Fix the number of rows and columns of q_working_copy appropriately
q_sizes[self.dim() - 1] = self.size(-1);
q_sizes[self.dim() - 2] = n_columns_q;
r_working_copy = at::empty(q_sizes, self.options());
return std::make_tuple(q_working_copy, r_working_copy);
}
q_working_copy = at::empty_strided(q_sizes, q_strides, self.options());
q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self);
r_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "qr_cuda", [&]{
apply_qr<scalar_t>(q_working_copy, r_working_copy, n_columns_q, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "qr_cuda");
} else {
singleCheckErrors(infos[0], "qr_cuda");
}
return std::make_tuple(q_working_copy.narrow(-1, 0, n_columns_q),
r_working_copy.narrow(-2, 0, n_columns_q).triu());
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_symeig(Tensor& self, Tensor& eigvals, bool eigenvectors, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("symeig: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto eigvals_data = eigvals.data_ptr<value_t>();
auto self_matrix_stride = matrixStride(self);
auto eigvals_stride = eigvals.size(-1);
int64_t batch_size = batchCount(self);
magma_int_t n = magma_int_cast(self.size(-1), "n");
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_vec_t jobz = eigenvectors ? MagmaVec : MagmaNoVec;
scalar_t* wA;
ALLOCATE_ARRAY(wA, scalar_t, n * n);
magma_int_t info;
// Run once, first to get the optimum work sizes.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt;
magma_int_t liwork = -1;
magma_int_t iwkopt;
magma_int_t lrwork = -1;
value_t rwkopt;
magmaSymeig<scalar_t, value_t>(jobz, uplo, n, self_data, n, eigvals_data, wA, n, &wkopt, lwork, &rwkopt, lrwork, &iwkopt, liwork, &info);
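// The call above with lwork == liwork == lrwork == -1 is a LAPACK-style workspace query:
// no factorization is performed, the routine only writes the optimal workspace sizes into
// wkopt / iwkopt / rwkopt, which are read back right below.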
scalar_t* work;
magma_int_t* iwork;
lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
liwork = magma_int_cast(iwkopt, "iwork_size");
ALLOCATE_ARRAY(work, scalar_t, lwork);
ALLOCATE_ARRAY(iwork, magma_int_t, liwork);
value_t* rwork = nullptr;
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
lrwork = magma_int_cast(rwkopt, "rwork_size");
ALLOCATE_ARRAY(rwork, value_t, lrwork);
}
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* self_working_ptr = &self_data[i * self_matrix_stride];
value_t* eigvals_working_ptr = &eigvals_data[i * eigvals_stride];
magmaSymeig<scalar_t, value_t>(jobz, uplo, n, self_working_ptr, n, eigvals_working_ptr,
wA, n, work, lwork, rwork, lrwork, iwork, liwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) {
std::vector<int64_t> infos(batchCount(self), 0);
auto self_sizes = self.sizes().vec();
self_sizes.pop_back();
ScalarType dtype = toValueType(typeMetaToScalarType(self.dtype()));
// magmaSymeig uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors.
// The driver routine magma_(d/s)syevd_gpu accepts a tensor on the CPU for the eigenvalues.
// The data is later moved to the appropriate device.
// In the case where self.numel() == 0, we just return an empty tensor of the
// appropriate dimensions on the CUDA device (to avoid an unnecessary "to(at::kCUDA)")
auto eigvals_working_copy = self.numel() == 0
? at::empty(self_sizes, self.options().dtype(dtype))
: at::empty(self_sizes, self.options().dtype(dtype).device(at::kCPU));
if (self.numel() == 0) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT));
}
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "symeig_cuda", [&]{
apply_symeig<scalar_t>(self_working_copy, eigvals_working_copy, eigenvectors, upper, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "symeig_cuda");
} else {
singleCheckErrors(infos[0], "symeig_cuda");
}
if (eigenvectors) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy);
} else {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), at::empty({0}, self.options()));
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template<typename scalar_t>
static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT,
char jobchar, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("svd: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
auto U_data = U.data_ptr<scalar_t>();
auto S_data = S.data_ptr<scalar_t>();
auto VT_data = VT.data_ptr<scalar_t>();
auto self_stride = matrixStride(self);
auto U_stride = matrixStride(U);
auto S_stride = S.size(-1);
auto VT_stride = matrixStride(VT);
auto batchsize = batchCount(self);
magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec);
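// jobchar comes from _svd_helper_cuda: 'A' requests full U/VT, 'S' the reduced ("some")
// singular vectors, and 'N' skips computing singular vectors altogether.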
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto k = ::min(m, n);
magma_int_t info = 0;
// Run once, first to get the optimum work size.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt;
magma_int_t* iwork;
ALLOCATE_ARRAY(iwork, magma_int_t, 8 * k);
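// ?gesdd requires an integer workspace of at least 8*min(m, n) entries (LAPACK's LIWORK
// requirement), hence the fixed 8 * k allocation; it is reused for every matrix in the batch.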
magmaSvd<scalar_t>(jobz, m, n, self_data, m, S_data, U_data, m, VT_data, n, &wkopt, lwork, iwork, &info);
lwork = magma_int_cast(wkopt, "work_size");
scalar_t* work;
ALLOCATE_ARRAY(work, scalar_t, lwork);
for (int64_t i = 0; i < batchsize; i++) {
scalar_t* self_working_ptr = &self_data[i * self_stride];
scalar_t* S_working_ptr = &S_data[i * S_stride];
scalar_t* U_working_ptr = &U_data[i * U_stride];
scalar_t* VT_working_ptr = &VT_data[i * VT_stride];
// Compute S, U (optionally), VT (optionally)
magmaSvd<scalar_t>(jobz, m, n, self_working_ptr, m,
S_working_ptr, U_working_ptr, m, VT_working_ptr, n, work, lwork, iwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) {
std::vector<int64_t> infos(batchCount(self), 0);
int64_t m = self.size(-2), n = self.size(-1);
int64_t k = ::min(m, n);
char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N';
Tensor U_working_copy, S_working_copy, VT_working_copy;
std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv);
if (self.numel() > 0) {
// The input matrix, U, S and VT have to reside in pinned memory.
// Additionally, the input and U have to be in column major format.
// _create_U_S_VT takes care of a part of these requirements (for U, S and VT)
// For the input matrix, these requirements are taken care of below.
// Specify strides
auto self_col_major_strides = at::detail::defaultStrides(self.sizes());
self_col_major_strides[self.dim() - 2] = 1;
self_col_major_strides[self.dim() - 1] = m;
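// Worked example (hypothetical sizes): for sizes [3, 4, 5], defaultStrides gives [20, 5, 1];
// overriding the last two entries yields strides [20, 1, 4], i.e. each 4x5 matrix is stored
// column-major while consecutive matrices stay contiguous in the batch dimension.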
// Create strided tensor in pinned memory
auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides,
at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
self_working_copy.copy_(self);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "svd_cuda", [&]{
apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy, VT_working_copy, jobchar, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "svd_cuda");
} else {
singleCheckErrors(infos[0], "svd_cuda");
}
U_working_copy = same_stride_to(U_working_copy, self.options());
S_working_copy = same_stride_to(S_working_copy, self.options());
VT_working_copy = same_stride_to(VT_working_copy, self.options());
if (compute_uv) {
if (some) {
VT_working_copy = VT_working_copy.narrow(-1, 0, k);
}
} else {
VT_working_copy.zero_();
U_working_copy.zero_();
}
} else {
U_working_copy = same_stride_to(U_working_copy, self.options()).zero_();
S_working_copy = same_stride_to(S_working_copy, self.options());
VT_working_copy = same_stride_to(VT_working_copy, self.options()).zero_();
}
return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_lu_solve(Tensor& b, const Tensor& lu, const Tensor& pivots, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("lu_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto b_data = b.data_ptr<scalar_t>();
auto lu_data = lu.data_ptr<scalar_t>();
auto n = lu.size(-2);
auto nrhs = b.size(-1);
int info_tmp = 0;
if (b.dim() == 2) {
Tensor pivots_tmp = pivots.cpu();
magmaLuSolve<scalar_t>(n, nrhs, lu_data, n, pivots_tmp.data_ptr<magma_int_t>(), b_data, n, &info_tmp);
info = info_tmp;
} else {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto b_stride = matrixStride(b);
auto lu_stride = matrixStride(lu);
auto pivots_stride = pivots.size(-1);
magma_int_t batch_size = magma_int_cast(batchCount(b), "batchCount");
magma_int_t** pivots_array;
scalar_t** lu_array;
scalar_t** b_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(lu_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_stride];
b_array[i] = &b_data[i * b_stride];
lu_array[i] = &lu_data[i * lu_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
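// For example (hypothetical numbers): with batch_size = 70000 this loop runs one full
// mini-batch of 65535 solves, then the remainder call below handles 70000 % 65535 = 4465.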
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** lu_array_cur = &lu_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** pivots_array_cur = &pivots_array[mini_idx];
magmaLuSolveBatched<scalar_t>(
n, nrhs, lu_array_cur, n, pivots_array_cur, b_array_cur, n,
info_tmp, batch_limit, magma_queue);
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaLuSolveBatched<scalar_t>(
n, nrhs, &lu_array[mini_idx], n, &pivots_array[mini_idx], &b_array[mini_idx], n,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
Tensor _lu_solve_helper_cuda(const Tensor& self, const Tensor& LU_data, const Tensor& LU_pivots) {
int64_t info = 0;
auto self_working_copy = cloneBatchedColumnMajor(self);
auto LU_data_working_copy = cloneBatchedColumnMajor(LU_data);
auto LU_pivots_working_copy = LU_pivots.is_contiguous() ? LU_pivots : LU_pivots.contiguous();
if (self.numel() == 0 || LU_data.numel() == 0) {
return at::zeros_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
}
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "lu_solve_cuda", [&]{
apply_lu_solve<scalar_t>(self_working_copy, LU_data_working_copy, LU_pivots_working_copy, info);
});
TORCH_CHECK(info == 0, "MAGMA lu_solve : invalid argument: ", -info);
return self_working_copy;
}
}} // namespace at::native
#undef ALLOCATE_ARRAY
| e54db1dca0e064a65965831a24752a62a968ea3a.cu | #include <ATen/Context.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/PinnedMemoryAllocator.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/cuda/MiscUtils.h>
#include <ATen/native/cuda/BatchLinearAlgebraLib.h>
#include <ATen/native/cpu/zmath.h>
#include <THC/THC.h> // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma.h>
#include <magma_types.h>
const bool use_magma_ = true;
#else
const bool use_magma_ = false;
#endif
namespace at {
namespace native {
#ifdef USE_MAGMA
template<class scalar_t>
void magmaSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLu(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info);
template<class scalar_t>
void magmaLuBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLuNoPiv(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* info);
template<class scalar_t>
void magmaLuNoPivBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n);
template<class scalar_t>
void magmaGetri(
magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork,
magma_int_t lwork, magma_int_t* info);
template<class scalar_t>
void magmaGetriBatched(
magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholeskySolve(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaCholeskySolveBatched(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholesky(
magma_uplo_t uplo, magma_int_t n, scalar_t* dA,
magma_int_t ldda, magma_int_t* info);
template<class scalar_t>
void magmaCholeskyBatched(
magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaTriangularSolve(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb);
template<class scalar_t>
void magmaTriangularSolveBatched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n);
template<class scalar_t>
void magmaGeqrf(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2);
template<class scalar_t>
void magmaOrgqr(
magma_int_t m, magma_int_t n, magma_int_t k, scalar_t* dA,
magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaSymeig(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda,
value_t* w, scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork, value_t* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info);
template<class scalar_t>
void magmaSvd(
magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A,
magma_int_t lda, scalar_t* s, scalar_t* U, magma_int_t ldu,
scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork,
magma_int_t* iwork, magma_int_t* info);
template<class scalar_t>
void magmaLuSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaLuSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<>
void magmaSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) {
return magma_get_dgetri_nb(n);
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) {
return magma_get_sgetri_nb(n);
}
template<>
void magmaGetri<double>(
magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGetri<float>(
magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGetriBatched<double>(
magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGetriBatched<float>(
magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolve<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolve<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolveBatched<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolveBatched<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<double>(
magma_uplo_t uplo, magma_int_t n, double* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<float>(
magma_uplo_t uplo, magma_int_t n, float* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zpotrf_gpu(uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cpotrf_gpu(uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<double>(
magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<float>(
magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zpotrf_batched(uplo, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cpotrf_batched(uplo, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolve<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double* dA, magma_int_t ldda, double* dB, magma_int_t lddb) {
MagmaStreamSyncGuard guard;
magma_dtrsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolve<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float* dA, magma_int_t ldda, float* dB, magma_int_t lddb) {
MagmaStreamSyncGuard guard;
magma_strsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolveBatched<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolveBatched<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) {
return magma_get_dgeqrf_nb(m, n);
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) {
return magma_get_sgeqrf_nb(m, n);
}
template<>
void magmaGeqrf<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGeqrf<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaOrgqr<double>(
magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaOrgqr<float>(
magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSymeig<double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda,
double* w, double* wA, magma_int_t ldwa, double* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSymeig<float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda,
float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSymeig<c10::complex<double>, double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
double* w, c10::complex<double>* wA, magma_int_t ldwa, c10::complex<double>* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, w, reinterpret_cast<magmaDoubleComplex*>(wA),
ldwa, reinterpret_cast<magmaDoubleComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSymeig<c10::complex<float>, float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
float* w, c10::complex<float>* wA, magma_int_t ldwa, c10::complex<float>* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, w, reinterpret_cast<magmaFloatComplex*>(wA),
ldwa, reinterpret_cast<magmaFloatComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, double* A,
magma_int_t lda, double* s, double* U, magma_int_t ldu,
double* VT, magma_int_t ldvt, double* work, magma_int_t lwork,
magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A,
magma_int_t lda, float* s, float* U, magma_int_t ldu,
float* VT, magma_int_t ldvt, float* work, magma_int_t lwork,
magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
double** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
float** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_sgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
#endif
#define ALLOCATE_ARRAY(name, type, size) \
auto storage_##name = pin_memory<type>(size); \
name = static_cast<type*>(storage_##name.data());
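// ALLOCATE_ARRAY backs `name` with pinned (page-locked) host memory via pin_memory<type>; the
// storage is held in a local `storage_<name>` variable so the raw pointer stays valid until the
// enclosing scope ends. Usage sketch (illustrative only):
// scalar_t** A_array;
// ALLOCATE_ARRAY(A_array, scalar_t*, batch_size); // released when storage_A_array goes out of scope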
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_solve(Tensor& b, Tensor& A, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
if (b.dim() == 2) {
auto ipiv = at::empty({n}, at::kInt);
magma_int_t info = 0;
magmaSolve<scalar_t>(n, nrhs, A_data, n, ipiv.data_ptr<magma_int_t>(),
b_data, n, &info);
infos[0] = info;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
magma_int_t* info_array;
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
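// The batched MAGMA interface takes arrays of per-matrix pointers rather than a single base
// pointer, so A_array / b_array / ipiv_array above index into the batched storage at
// matrix-stride offsets, one entry per matrix.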
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaSolveBatched<scalar_t>(
n, nrhs, A_array_cur, n, ipiv_array_cur, b_array_cur, n,
info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaSolveBatched<scalar_t>(
n, nrhs, &A_array[mini_idx], n, &ipiv_array[mini_idx], &b_array[mini_idx], n,
&info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
}
#endif
}
std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
std::vector<int64_t> infos(batchCount(self), 0);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "solve_cuda", [&]{
apply_solve<scalar_t>(self_working_copy, A_working_copy, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "solve_cuda");
} else {
singleCheckErrors(infos[0], "solve_cuda");
}
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_batched_inverse(Tensor& self, Tensor& self_inv, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
auto self_mat_stride = matrixStride(self);
auto self_inv_data = self_inv.data_ptr<scalar_t>();
auto self_inv_mat_stride = matrixStride(self_inv);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t* info_array;
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** self_array;
scalar_t** self_inv_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(self.get_device());
magmaLuBatched<scalar_t>(
n, n, self_array, n, ipiv_array, info_array,
batch_size, magma_queue);
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix inversions
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
scalar_t** self_inv_array_cur = &self_inv_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaGetriBatched<scalar_t>(
n, self_array_cur, n, ipiv_array_cur, self_inv_array_cur,
n, info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaGetriBatched<scalar_t>(
n, &self_array[mini_idx], n, &ipiv_array[mini_idx], &self_inv_array[mini_idx],
n, &info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
#endif
}
template <typename scalar_t>
static void apply_single_inverse(Tensor& self, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n);
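// ?getri_gpu needs a device workspace of n * nb elements, where nb is the blocksize returned by
// magma_get_?getri_nb; `dwork` below is sized accordingly.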
magma_int_t info_tmp = 0;
Tensor ipiv = at::empty({n}, at::kInt);
Tensor dwork = at::empty({lwork}, self.options());
magmaLu<scalar_t>(n, n, self_data, n, ipiv.data_ptr<magma_int_t>(), &info_tmp);
if (info_tmp != 0) {
info = info_tmp;
return;
}
magmaGetri<scalar_t>(
n, self_data, n, ipiv.data_ptr<magma_int_t>(), dwork.data_ptr<scalar_t>(), lwork, &info_tmp);
info = info_tmp;
#endif
}
Tensor _inverse_helper_cuda_legacy(const Tensor& self) {
auto self_inv_working_copy = cloneBatchedColumnMajor(self);
if (self.dim() > 2) {
std::vector<int64_t> infos(batchCount(self), 0);
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_batched_inverse<scalar_t>(
self_working_copy, self_inv_working_copy, infos);
});
batchCheckErrors(infos, "inverse_cuda");
} else {
int64_t info = 0;
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_single_inverse<scalar_t>(self_inv_working_copy, info);
});
singleCheckErrors(info, "inverse_cuda");
}
return self_inv_working_copy;
}
Tensor _inverse_helper_cuda(const Tensor& self) {
#ifdef USE_CUSOLVER
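// Dispatch heuristic: single matrices, batches of at most 2, and builds without MAGMA take the
// cuSOLVER/cuBLAS path; otherwise the MAGMA-based _inverse_helper_cuda_legacy is used.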
if ((self.dim() == 2) || (/* self.dim() > 2 && */ batchCount(self) <= 2) || !use_magma_) {
return _inverse_helper_cuda_lib(self); // cusolver or cublas
} else {
return _inverse_helper_cuda_legacy(self); // magma-cuda
}
#else
return _inverse_helper_cuda_legacy(self); // magma-cuda
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("cholesky_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
int info_tmp = 0;
if (b.dim() == 2) {
magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, n,
b_data, n, &info_tmp);
info = info_tmp;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, A_array_cur, n, b_array_cur, n,
info_tmp, batch_limit, magma_queue);
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, &A_array[mini_idx], n, &b_array[mini_idx], n,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
int64_t info = 0;
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{
apply_cholesky_solve<scalar_t>(self_working_copy, A_working_copy, upper, info);
});
TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info);
return self_working_copy;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky(Tensor& self, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("cholesky: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
if (self.dim() == 2) {
magma_int_t info = 0;
magmaCholesky<scalar_t>(uplo, n, self_data, n, &info);
infos[0] = info;
} else {
auto self_mat_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
magma_int_t* info_array;
scalar_t** self_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
}
MAGMAQueue magma_queue(self.get_device());
constexpr int64_t batch_limit = 262140;
// Compute as many batches of 262140 as possible
// 262140 is the size of the largest batch of matrices that can be run without
// violating the maximum kernel configuration
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit cholesky calls
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaCholeskyBatched<scalar_t>(
uplo, n, self_array_cur, n, info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaCholeskyBatched<scalar_t>(
uplo, n, &self_array[mini_idx], n, &info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
}
#endif
}
Tensor _cholesky_helper_cuda(const Tensor& self, bool upper) {
std::vector<int64_t> infos(batchCount(self), 0);
Tensor self_working_copy;
if (upper) {
self_working_copy = cloneBatchedColumnMajor(self.transpose(-1, -2));
} else {
self_working_copy = cloneBatchedColumnMajor(self);
}
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_cuda", [&]{
apply_cholesky<scalar_t>(self_working_copy, false, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "cholesky_cuda");
} else {
singleCheckErrors(infos[0], "cholesky_cuda");
}
if (upper) {
return self_working_copy.transpose(-1, -2);
} else {
return self_working_copy;
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_lu(Tensor& self, Tensor& pivots, Tensor& infos, bool get_pivots) {
#ifndef USE_MAGMA
AT_ERROR("lu: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
magma_int_t k = std::min(m, n);
if (self.dim() == 2) {
// If `get_pivots` is true, then we have to compute the pivots as well.
// magmaLu and magmaLuNoPiv use a hybrid CPU-GPU algorithm to compute
// the partially-pivoted LU decomposition with / without pivots.
// The driver routines magma_(d/s)getrf_(nopiv_)gpu accept a tensor on the CPU for pivots.
// The data is later copied back to the appropriate output tensor.
Tensor info_tmp = at::zeros({}, at::kInt);
if (get_pivots) {
Tensor piv_tmp = at::empty({k}, at::kInt);
magmaLu<scalar_t>(
m, n, self_data, m, piv_tmp.data_ptr<magma_int_t>(), info_tmp.data_ptr<magma_int_t>());
pivots.copy_(piv_tmp);
} else {
magmaLuNoPiv<scalar_t>(m, n, self_data, m, info_tmp.data_ptr<magma_int_t>());
}
infos.copy_(info_tmp);
} else {
auto self_matrix_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
scalar_t** self_array;
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_matrix_stride];
}
MAGMAQueue magma_queue(self.get_device());
// Same comment as in the case of single matrix above.
if (get_pivots) {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto pivots_matrix_stride = pivots.size(-1);
magma_int_t** pivots_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_matrix_stride];
}
magmaLuBatched<scalar_t>(
m, n, self_array, m, pivots_array,
infos.data_ptr<magma_int_t>(), batch_size, magma_queue);
} else {
magmaLuNoPivBatched<scalar_t>(
m, n, self_array, m, infos.data_ptr<magma_int_t>(),
batch_size, magma_queue);
}
}
#endif
}
std::tuple<Tensor, Tensor, Tensor> _lu_with_info_cuda(const Tensor& self, bool pivot, bool check_errors) {
TORCH_CHECK(self.dim() >= 2,
"expected tensor with 2 or more dimensions, got size: ", self.sizes(),
" instead");
auto m = self.size(-2);
auto n = self.size(-1);
auto k = std::min(m, n);
auto req_size = self.sizes().vec();
req_size.pop_back();
req_size.back() = k;
Tensor pivots_tensor = at::arange(1, k + 1, self.options().dtype(at::kInt)).expand(req_size).contiguous();
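// Pivots default to the identity permutation 1..k (MAGMA/LAPACK pivot indices are 1-based),
// so the returned pivots are well-formed even when pivot == false or the input is empty.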
req_size.pop_back();
auto infos_tensor = at::zeros(req_size, self.options().dtype(at::kInt));
Tensor self_working_copy;
if (self.numel() == 0) {
self_working_copy = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
} else {
self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "lu_cuda", [&]{
apply_lu<scalar_t>(self_working_copy, pivots_tensor, infos_tensor, pivot);
});
}
if (check_errors) {
if (self.dim() == 2) {
singleCheckErrors(infos_tensor.item<int64_t>(), "lu", /*allow_singular=*/true);
} else {
batchCheckErrors(infos_tensor, "lu", /*allow_singular=*/true);
}
}
return std::make_tuple(self_working_copy, pivots_tensor, infos_tensor);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_triangular_solve(Tensor& b, Tensor& A, bool upper, bool transpose, bool unitriangular) {
#ifndef USE_MAGMA
AT_ERROR("triangular_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans;
magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
// batch_size == 1 implies that:
// 1. the RHS and LHS tensors have 2 dimensions, or
// 2. the RHS and LHS tensors have more than 2 dimensions but all batch dimensions are 1
if (batch_size == 1) {
magmaTriangularSolve<scalar_t>(uplo, trans, diag, n, nrhs, A_data, n, b_data, n);
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, A_array_cur,
n, b_array_cur, n, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, &A_array[mini_idx],
n, &b_array[mini_idx], n, batch_size % batch_limit, magma_queue);
}
}
#endif
}
std::tuple<Tensor, Tensor> _triangular_solve_helper_cuda(const Tensor& self, const Tensor& A,
bool upper, bool transpose, bool unitriangular) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "triangular_solve_cuda", [&]{
apply_triangular_solve<scalar_t>(self_working_copy, A_working_copy, upper, transpose, unitriangular);
});
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_qr(Tensor& Q, Tensor& R, int64_t n_columns, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("qr: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto q_data = Q.data_ptr<scalar_t>();
auto r_data = R.data_ptr<scalar_t>();
auto q_matrix_stride = matrixStride(Q);
auto r_matrix_stride = matrixStride(R);
magma_int_t m = magma_int_cast(Q.size(-2), "Q.size(-2)");
magma_int_t n = magma_int_cast(R.size(-1), "R.size(-1)");
magma_int_t k = m < n ? m : n;
magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
int64_t batch_size = batchCount(R);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau = at::empty({k}, Q.options().device(at::kCPU));
Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options());
scalar_t* tau_data = tau.data_ptr<scalar_t>();
scalar_t* work_data = work.data_ptr<scalar_t>();
// This phase computes R (the raw version)
// This uses MAGMA's ?geqrf2_gpu function
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* r_working_ptr = &r_data[i * r_matrix_stride];
magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true);
infos[i] = info;
if (info != 0) {
return;
}
}
// This phase computes Q (the raw version)
// We have to run ?geqrf_gpu again due to this bug in MAGMA:
// - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly.
// - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu
// Refer to the below link for more details:
// http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* q_working_ptr = &q_data[i * q_matrix_stride];
magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false);
infos[i] = info;
if (info != 0) {
return;
}
magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor,Tensor> _qr_helper_cuda(const Tensor& self, bool some) {
std::vector<int64_t> infos(batchCount(self), 0);
// Setup input geometry and inputs for apply_qr
std::vector<int64_t> q_sizes, q_strides;
int64_t n_columns_q;
std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, some);
Tensor q_working_copy, r_working_copy;
// If there are no elements, then we simply return a pair of tensors of required dimensions
if (self.numel() == 0) {
// Fix the number of columns of q_working_copy appropriately
q_sizes[self.dim() - 1] = n_columns_q;
q_working_copy = at::eye(q_sizes[self.dim() - 2], q_sizes[self.dim() - 1], self.options());
q_working_copy = q_working_copy.expand_as(q_working_copy);
// We repurpose the same q_sizes for r_working_copy
// Fix the number of rows and columns so q_sizes matches the shape of r_working_copy
q_sizes[self.dim() - 1] = self.size(-1);
q_sizes[self.dim() - 2] = n_columns_q;
r_working_copy = at::empty(q_sizes, self.options());
return std::make_tuple(q_working_copy, r_working_copy);
}
q_working_copy = at::empty_strided(q_sizes, q_strides, self.options());
q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self);
r_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "qr_cuda", [&]{
apply_qr<scalar_t>(q_working_copy, r_working_copy, n_columns_q, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "qr_cuda");
} else {
singleCheckErrors(infos[0], "qr_cuda");
}
return std::make_tuple(q_working_copy.narrow(-1, 0, n_columns_q),
r_working_copy.narrow(-2, 0, n_columns_q).triu());
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_symeig(Tensor& self, Tensor& eigvals, bool eigenvectors, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("symeig: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto eigvals_data = eigvals.data_ptr<value_t>();
auto self_matrix_stride = matrixStride(self);
auto eigvals_stride = eigvals.size(-1);
int64_t batch_size = batchCount(self);
magma_int_t n = magma_int_cast(self.size(-1), "n");
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_vec_t jobz = eigenvectors ? MagmaVec : MagmaNoVec;
scalar_t* wA;
ALLOCATE_ARRAY(wA, scalar_t, n * n);
magma_int_t info;
// Run once, first to get the optimum work sizes.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
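  // (The lwork == -1 / liwork == -1 call below is the standard LAPACK/MAGMA workspace
  //  query: it only writes the optimal sizes into wkopt/iwkopt/rwkopt, after which the
  //  buffers are allocated once and reused for every matrix in the batch.)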
magma_int_t lwork = -1;
scalar_t wkopt;
magma_int_t liwork = -1;
magma_int_t iwkopt;
magma_int_t lrwork = -1;
value_t rwkopt;
magmaSymeig<scalar_t, value_t>(jobz, uplo, n, self_data, n, eigvals_data, wA, n, &wkopt, lwork, &rwkopt, lrwork, &iwkopt, liwork, &info);
scalar_t* work;
magma_int_t* iwork;
lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
liwork = magma_int_cast(iwkopt, "iwork_size");
ALLOCATE_ARRAY(work, scalar_t, lwork);
ALLOCATE_ARRAY(iwork, magma_int_t, liwork);
value_t* rwork = nullptr;
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
lrwork = magma_int_cast(rwkopt, "rwork_size");
ALLOCATE_ARRAY(rwork, value_t, lrwork);
}
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* self_working_ptr = &self_data[i * self_matrix_stride];
value_t* eigvals_working_ptr = &eigvals_data[i * eigvals_stride];
magmaSymeig<scalar_t, value_t>(jobz, uplo, n, self_working_ptr, n, eigvals_working_ptr,
wA, n, work, lwork, rwork, lrwork, iwork, liwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) {
std::vector<int64_t> infos(batchCount(self), 0);
auto self_sizes = self.sizes().vec();
self_sizes.pop_back();
ScalarType dtype = toValueType(typeMetaToScalarType(self.dtype()));
// magmaSymeig uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors.
  // The driver routine magma_(d/s)syev_gpu accepts a tensor on the CPU for eigenvalues.
// The data is later moved to the appropriate device.
  // In the case where self.numel() == 0, we just return an empty tensor of the
  // required dimensions on the CUDA device (to avoid the unnecessary "to(at::kCUDA)")
auto eigvals_working_copy = self.numel() == 0
? at::empty(self_sizes, self.options().dtype(dtype))
: at::empty(self_sizes, self.options().dtype(dtype).device(at::kCPU));
if (self.numel() == 0) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT));
}
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "symeig_cuda", [&]{
apply_symeig<scalar_t>(self_working_copy, eigvals_working_copy, eigenvectors, upper, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "symeig_cuda");
} else {
singleCheckErrors(infos[0], "symeig_cuda");
}
if (eigenvectors) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy);
} else {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), at::empty({0}, self.options()));
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template<typename scalar_t>
static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT,
char jobchar, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("svd: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
auto U_data = U.data_ptr<scalar_t>();
auto S_data = S.data_ptr<scalar_t>();
auto VT_data = VT.data_ptr<scalar_t>();
auto self_stride = matrixStride(self);
auto U_stride = matrixStride(U);
auto S_stride = S.size(-1);
auto VT_stride = matrixStride(VT);
auto batchsize = batchCount(self);
magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec);
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto k = std::min(m, n);
magma_int_t info = 0;
// Run once, first to get the optimum work size.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
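  // (As above, lwork == -1 makes this first call a workspace query that only fills wkopt.)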
magma_int_t lwork = -1;
scalar_t wkopt;
magma_int_t* iwork;
ALLOCATE_ARRAY(iwork, magma_int_t, 8 * k);
magmaSvd<scalar_t>(jobz, m, n, self_data, m, S_data, U_data, m, VT_data, n, &wkopt, lwork, iwork, &info);
lwork = magma_int_cast(wkopt, "work_size");
scalar_t* work;
ALLOCATE_ARRAY(work, scalar_t, lwork);
for (int64_t i = 0; i < batchsize; i++) {
scalar_t* self_working_ptr = &self_data[i * self_stride];
scalar_t* S_working_ptr = &S_data[i * S_stride];
scalar_t* U_working_ptr = &U_data[i * U_stride];
scalar_t* VT_working_ptr = &VT_data[i * VT_stride];
// Compute S, U (optionally), VT (optionally)
magmaSvd<scalar_t>(jobz, m, n, self_working_ptr, m,
S_working_ptr, U_working_ptr, m, VT_working_ptr, n, work, lwork, iwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) {
std::vector<int64_t> infos(batchCount(self), 0);
int64_t m = self.size(-2), n = self.size(-1);
int64_t k = std::min(m, n);
char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N';
Tensor U_working_copy, S_working_copy, VT_working_copy;
std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv);
if (self.numel() > 0) {
// The input matrix, U, S and VT have to reside in pinned memory.
// Additionally, the input and U have to be in column major format.
// _create_U_S_VT takes care of a part of these requirements (for U, S and VT)
    // For the input matrix, these requirements are taken care of below.
// Specify strides
auto self_col_major_strides = at::detail::defaultStrides(self.sizes());
self_col_major_strides[self.dim() - 2] = 1;
self_col_major_strides[self.dim() - 1] = m;
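    // e.g. for a single m x n = 3 x 2 matrix this yields strides (1, 3), i.e. the
    // FORTRAN/column-major layout that MAGMA expects.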
// Create strided tensor in pinned memory
auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides,
at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
self_working_copy.copy_(self);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "svd_cuda", [&]{
apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy, VT_working_copy, jobchar, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "svd_cuda");
} else {
singleCheckErrors(infos[0], "svd_cuda");
}
U_working_copy = same_stride_to(U_working_copy, self.options());
S_working_copy = same_stride_to(S_working_copy, self.options());
VT_working_copy = same_stride_to(VT_working_copy, self.options());
if (compute_uv) {
if (some) {
VT_working_copy = VT_working_copy.narrow(-1, 0, k);
}
} else {
VT_working_copy.zero_();
U_working_copy.zero_();
}
} else {
U_working_copy = same_stride_to(U_working_copy, self.options()).zero_();
S_working_copy = same_stride_to(S_working_copy, self.options());
VT_working_copy = same_stride_to(VT_working_copy, self.options()).zero_();
}
return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_lu_solve(Tensor& b, const Tensor& lu, const Tensor& pivots, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("lu_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto b_data = b.data_ptr<scalar_t>();
auto lu_data = lu.data_ptr<scalar_t>();
auto n = lu.size(-2);
auto nrhs = b.size(-1);
int info_tmp = 0;
if (b.dim() == 2) {
Tensor pivots_tmp = pivots.cpu();
magmaLuSolve<scalar_t>(n, nrhs, lu_data, n, pivots_tmp.data_ptr<magma_int_t>(), b_data, n, &info_tmp);
info = info_tmp;
} else {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto b_stride = matrixStride(b);
auto lu_stride = matrixStride(lu);
auto pivots_stride = pivots.size(-1);
magma_int_t batch_size = magma_int_cast(batchCount(b), "batchCount");
magma_int_t** pivots_array;
scalar_t** lu_array;
scalar_t** b_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(lu_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_stride];
b_array[i] = &b_data[i * b_stride];
lu_array[i] = &lu_data[i * lu_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 as possible
    // The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
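    // e.g. (illustrative numbers only) batch_size = 150000 with batch_limit = 65535 gives
    // mini_batches = 2 covering 131070 solves, leaving 150000 % 65535 = 18930 solves for
    // the tail call below.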
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** lu_array_cur = &lu_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** pivots_array_cur = &pivots_array[mini_idx];
magmaLuSolveBatched<scalar_t>(
n, nrhs, lu_array_cur, n, pivots_array_cur, b_array_cur, n,
info_tmp, batch_limit, magma_queue);
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaLuSolveBatched<scalar_t>(
n, nrhs, &lu_array[mini_idx], n, &pivots_array[mini_idx], &b_array[mini_idx], n,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
Tensor _lu_solve_helper_cuda(const Tensor& self, const Tensor& LU_data, const Tensor& LU_pivots) {
int64_t info = 0;
auto self_working_copy = cloneBatchedColumnMajor(self);
auto LU_data_working_copy = cloneBatchedColumnMajor(LU_data);
auto LU_pivots_working_copy = LU_pivots.is_contiguous() ? LU_pivots : LU_pivots.contiguous();
if (self.numel() == 0 || LU_data.numel() == 0) {
return at::zeros_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
}
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "lu_solve_cuda", [&]{
apply_lu_solve<scalar_t>(self_working_copy, LU_data_working_copy, LU_pivots_working_copy, info);
});
TORCH_CHECK(info == 0, "MAGMA lu_solve : invalid argument: ", -info);
return self_working_copy;
}
}} // namespace at::native
#undef ALLOCATE_ARRAY
|
d8523be47289836d1d3b97f1e601ff5201367b76.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "computePointHessian2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
float *y = NULL;
hipMalloc(&y, XSIZE*YSIZE);
float *z = NULL;
hipMalloc(&z, XSIZE*YSIZE);
int points_num = 1;
int *valid_points = NULL;
hipMalloc(&valid_points, XSIZE*YSIZE);
int valid_points_num = 1;
double *dh_ang = NULL;
hipMalloc(&dh_ang, XSIZE*YSIZE);
double *ph155 = NULL;
hipMalloc(&ph155, XSIZE*YSIZE);
double *ph165 = NULL;
hipMalloc(&ph165, XSIZE*YSIZE);
double *ph175 = NULL;
hipMalloc(&ph175, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
computePointHessian2), dim3(gridBlock),dim3(threadBlock), 0, 0, x,y,z,points_num,valid_points,valid_points_num,dh_ang,ph155,ph165,ph175);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
computePointHessian2), dim3(gridBlock),dim3(threadBlock), 0, 0, x,y,z,points_num,valid_points,valid_points_num,dh_ang,ph155,ph165,ph175);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
computePointHessian2), dim3(gridBlock),dim3(threadBlock), 0, 0, x,y,z,points_num,valid_points,valid_points_num,dh_ang,ph155,ph165,ph175);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | d8523be47289836d1d3b97f1e601ff5201367b76.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "computePointHessian2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
float *y = NULL;
cudaMalloc(&y, XSIZE*YSIZE);
float *z = NULL;
cudaMalloc(&z, XSIZE*YSIZE);
int points_num = 1;
int *valid_points = NULL;
cudaMalloc(&valid_points, XSIZE*YSIZE);
int valid_points_num = 1;
double *dh_ang = NULL;
cudaMalloc(&dh_ang, XSIZE*YSIZE);
double *ph155 = NULL;
cudaMalloc(&ph155, XSIZE*YSIZE);
double *ph165 = NULL;
cudaMalloc(&ph165, XSIZE*YSIZE);
double *ph175 = NULL;
cudaMalloc(&ph175, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
computePointHessian2<<<gridBlock,threadBlock>>>(x,y,z,points_num,valid_points,valid_points_num,dh_ang,ph155,ph165,ph175);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
computePointHessian2<<<gridBlock,threadBlock>>>(x,y,z,points_num,valid_points,valid_points_num,dh_ang,ph155,ph165,ph175);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
computePointHessian2<<<gridBlock,threadBlock>>>(x,y,z,points_num,valid_points,valid_points_num,dh_ang,ph155,ph165,ph175);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
9292da85394fdeb69375bbb4503fc3c3778fab5c.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright 2014 BVLC and contributors.
#include <hip/hip_runtime.h>
#include <stdint.h>
#include <leveldb/db.h>
#include <pthread.h>
#include <string>
#include <vector>
#include <iostream> // NOLINT(readability/streams)
#include <fstream> // NOLINT(readability/streams)
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/io.hpp"
#include "caffe/vision_layers.hpp"
using std::string;
using std::pair;
namespace caffe {
template <typename Dtype>
Dtype ImageDataLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
// First, join the thread
JoinPrefetchThread();
// Copy the data
caffe_copy(prefetch_data_->count(), prefetch_data_->cpu_data(),
(*top)[0]->mutable_gpu_data());
caffe_copy(prefetch_label_->count(), prefetch_label_->cpu_data(),
(*top)[1]->mutable_gpu_data());
// Start a new prefetch thread
CreatePrefetchThread();
return Dtype(0.);
}
INSTANTIATE_CLASS(ImageDataLayer);
} // namespace caffe
| 9292da85394fdeb69375bbb4503fc3c3778fab5c.cu | // Copyright 2014 BVLC and contributors.
#include <cuda_runtime.h>
#include <stdint.h>
#include <leveldb/db.h>
#include <pthread.h>
#include <string>
#include <vector>
#include <iostream> // NOLINT(readability/streams)
#include <fstream> // NOLINT(readability/streams)
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/io.hpp"
#include "caffe/vision_layers.hpp"
using std::string;
using std::pair;
namespace caffe {
template <typename Dtype>
Dtype ImageDataLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
// First, join the thread
JoinPrefetchThread();
// Copy the data
caffe_copy(prefetch_data_->count(), prefetch_data_->cpu_data(),
(*top)[0]->mutable_gpu_data());
caffe_copy(prefetch_label_->count(), prefetch_label_->cpu_data(),
(*top)[1]->mutable_gpu_data());
// Start a new prefetch thread
CreatePrefetchThread();
return Dtype(0.);
}
INSTANTIATE_CLASS(ImageDataLayer);
} // namespace caffe
|
2ff1ca4165bfdfbac475ad657369fa110a65cf3c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <hip/hip_runtime.h>
#include "cuda_vector_routines.h"
#include "atom_class.h"
#include "config_class.h"
#include "leapfrog_cuda.h"
//#include "constants_cuda.cuh"
#include "constants.h"
//Fast integer multiplication
#define MUL(a, b) __umul24(a, b)
// CUDA Kernels
/*__device__ void thermo_kernel( float v*, float T, float mass, int atom, int block) {
float rang;
hiprandState_t state;
hiprand_init(0,blockIdx.x,atom,&state);
rang = hiprand_normal(&state);
v[0] = rang*sqrtf(T/mass);
rang = hiprand_normal(&state);
v[1] = rang*sqrtf(T/mass);
rang = hiprand_normal(&state);
v[2] = rang*sqrtf(T/mass);
}*/
__global__ void leapfrog_kernel(float4 *xyz, float4 *v, float4 *f, float T, float dt, float pnu, int nAtoms, float lbox, hiprandState_t *state) {
unsigned int index = threadIdx.x + blockIdx.x*blockDim.x;
float attempt;
float4 force;
float4 tempPos;
float4 tempVel;
//hiprandState_t state;
if (index < nAtoms)
{
// generate random number
attempt = hiprand_uniform(&state[index]);
// load values
force = __ldg(f+index);
tempVel = __ldg(v+index);
tempPos = __ldg(xyz+index);
//printf("index: %d mass: %f\n",index,tempVel.w);
// anderson thermostat
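		// With probability pnu per step the particle "collides" with the heat bath and its
		// velocity is redrawn from a Maxwell-Boltzmann distribution (each component drawn
		// from a Gaussian with standard deviation sqrt(T/m), in the reduced units used here;
		// tempVel.w holds the mass). Otherwise a plain leapfrog update is applied.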
if (attempt < pnu) {
tempVel.x = hiprand_normal(&state[index]) * sqrtf( T / tempVel.w );
tempVel.y = hiprand_normal(&state[index]) * sqrtf( T / tempVel.w );
tempVel.z = hiprand_normal(&state[index]) * sqrtf( T / tempVel.w );
tempVel += force *__fdividef(dt,tempVel.w*2.0f);
tempPos += tempVel*dt;
} else {
tempVel += force * __fdividef(dt,tempVel.w);
tempPos += tempVel*dt;
}
// save new velocities and positions to global memory
v[index] = tempVel;
//xyz[index] = wrap(tempPos,lbox);
xyz[index] = tempPos;
}
}
/* C wrappers for kernels */
//extern "C" void leapfrog_cuda(float *xyz_d, float *v_d, float *f_d, float *mass_d, float T, float dt, float pnu, int nAtoms, float lbox, hiprandState_t *randStates_d) {
float leapfrog_cuda(atom& atoms, config& configs)
{
float milliseconds;
// timing
hipEventRecord(atoms.leapFrogStart);
	// run leapfrog cuda kernel
hipLaunchKernelGGL(( leapfrog_kernel), dim3(atoms.gridSize), dim3(atoms.blockSize), 0, 0, atoms.pos_d, atoms.vel_d, atoms.for_d, configs.T, configs.dt, configs.pnu, atoms.nAtoms, configs.lbox, atoms.randStates_d);
// finalize timing
hipEventRecord(atoms.leapFrogStop);
hipEventSynchronize(atoms.leapFrogStop);
hipEventElapsedTime(&milliseconds, atoms.leapFrogStart, atoms.leapFrogStop);
return milliseconds;
}
extern "C" void leapfrog_cuda_grid_block(int nAtoms, int *gridSize, int *blockSize, int *minGridSize)
{
// determine gridSize and blockSize
hipOccupancyMaxPotentialBlockSize(minGridSize, blockSize, leapfrog_kernel, 0, nAtoms);
// Round up according to array size
*gridSize = (nAtoms + *blockSize - 1) / *blockSize;
}
| 2ff1ca4165bfdfbac475ad657369fa110a65cf3c.cu |
#include <stdio.h>
#include <stdlib.h>
#include <curand.h>
#include <curand_kernel.h>
#include <cuda.h>
#include "cuda_vector_routines.h"
#include "atom_class.h"
#include "config_class.h"
#include "leapfrog_cuda.h"
//#include "constants_cuda.cuh"
#include "constants.h"
//Fast integer multiplication
#define MUL(a, b) __umul24(a, b)
// CUDA Kernels
/*__device__ void thermo_kernel( float v*, float T, float mass, int atom, int block) {
float rang;
curandState_t state;
curand_init(0,blockIdx.x,atom,&state);
rang = curand_normal(&state);
v[0] = rang*sqrtf(T/mass);
rang = curand_normal(&state);
v[1] = rang*sqrtf(T/mass);
rang = curand_normal(&state);
v[2] = rang*sqrtf(T/mass);
}*/
__global__ void leapfrog_kernel(float4 *xyz, float4 *v, float4 *f, float T, float dt, float pnu, int nAtoms, float lbox, curandState *state) {
unsigned int index = threadIdx.x + blockIdx.x*blockDim.x;
float attempt;
float4 force;
float4 tempPos;
float4 tempVel;
//curandState_t state;
if (index < nAtoms)
{
// generate random number
attempt = curand_uniform(&state[index]);
// load values
force = __ldg(f+index);
tempVel = __ldg(v+index);
tempPos = __ldg(xyz+index);
//printf("index: %d mass: %f\n",index,tempVel.w);
// anderson thermostat
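		// With probability pnu per step the particle "collides" with the heat bath and its
		// velocity is redrawn from a Maxwell-Boltzmann distribution (each component drawn
		// from a Gaussian with standard deviation sqrt(T/m), in the reduced units used here;
		// tempVel.w holds the mass). Otherwise a plain leapfrog update is applied.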
if (attempt < pnu) {
tempVel.x = curand_normal(&state[index]) * sqrtf( T / tempVel.w );
tempVel.y = curand_normal(&state[index]) * sqrtf( T / tempVel.w );
tempVel.z = curand_normal(&state[index]) * sqrtf( T / tempVel.w );
tempVel += force *__fdividef(dt,tempVel.w*2.0f);
tempPos += tempVel*dt;
} else {
tempVel += force * __fdividef(dt,tempVel.w);
tempPos += tempVel*dt;
}
// save new velocities and positions to global memory
v[index] = tempVel;
//xyz[index] = wrap(tempPos,lbox);
xyz[index] = tempPos;
}
}
/* C wrappers for kernels */
//extern "C" void leapfrog_cuda(float *xyz_d, float *v_d, float *f_d, float *mass_d, float T, float dt, float pnu, int nAtoms, float lbox, curandState *randStates_d) {
float leapfrog_cuda(atom& atoms, config& configs)
{
float milliseconds;
// timing
cudaEventRecord(atoms.leapFrogStart);
	// run leapfrog cuda kernel
leapfrog_kernel<<<atoms.gridSize, atoms.blockSize>>>(atoms.pos_d, atoms.vel_d, atoms.for_d, configs.T, configs.dt, configs.pnu, atoms.nAtoms, configs.lbox, atoms.randStates_d);
// finalize timing
cudaEventRecord(atoms.leapFrogStop);
cudaEventSynchronize(atoms.leapFrogStop);
cudaEventElapsedTime(&milliseconds, atoms.leapFrogStart, atoms.leapFrogStop);
return milliseconds;
}
extern "C" void leapfrog_cuda_grid_block(int nAtoms, int *gridSize, int *blockSize, int *minGridSize)
{
// determine gridSize and blockSize
cudaOccupancyMaxPotentialBlockSize(minGridSize, blockSize, leapfrog_kernel, 0, nAtoms);
// Round up according to array size
*gridSize = (nAtoms + *blockSize - 1) / *blockSize;
}
|
dc75ef37689d5833442a4778cdcb1b84fc5d2e3c.hip | // !!! This is a file automatically generated by hipify!!!
// -----------------------------------------------------------------
// Simple cuda ray tracing tutorial
// Written by Peter Trier
// Alexandra Institute august 2009
//
//
// -----------------------------------------------------------------
// includes --------------------------------------
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#pragma warning(disable:4996)
#endif
// includes, GL
#include <GL/glew.h>
#include <GL/glut.h>
// cuda includes
#include <hip/hip_runtime_api.h>
#include <cutil_inline.h>
#include <cutil_gl_inline.h>
#include <cutil_gl_error.h>
#include <cuda_gl_interop.h>
#include <hip/hip_vector_types.h>
#include <vector_functions.h>
#include <cutil_math.h>
// std
#include <AntTweakBar.h>
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
using namespace std;
// the interface between C++ and CUDA -----------
// the implementation of RayTraceImage is in the
// "raytracer.cu" file
extern "C" void RayTraceImage(unsigned int *pbo_out, int w, int h, int number_of_triangles,
float3 a, float3 b, float3 c,
float3 campos,
float3 light_pos,
float3 light_color,
float3 scene_aabbox_min , float3 scene_aabbox_max);
// a method for binding the loaded triangles to a Cuda texture
// the implementation of bindTriangles is in the
// "raytracer.cu" file
extern "C" void bindTriangles(float *dev_triangle_p, unsigned int number_of_triangles);
// Obj loader ------------------------------------
struct TriangleFace
{
int v[3]; // vertex indices
};
struct TriangleMesh
{
vector<float3> verts;
vector<TriangleFace> faces;
float3 bounding_box[2];
};
// Globals ---------------------------------------
unsigned int window_width = 800;
unsigned int window_height = 600;
unsigned int image_width = 800;
unsigned int image_height = 600;
float delta_t = 0;
GLuint pbo; // this pbo is used to connect CUDA and openGL
GLuint result_texture; // the ray-tracing result is copied to this openGL texture
TriangleMesh mesh;
TriangleMesh ground;
TriangleMesh sphere;
TriangleMesh object;
int total_number_of_triangles = 0;
float *dev_triangle_p; // the cuda device pointer that points to the uploaded triangles
// Camera parameters -----------------------------
float3 a; float3 b; float3 c;
float3 campos;
float camera_rotation = 0;
float camera_distance = 75;
float camera_height = 25;
bool animate = true;
// Scene bounding box ----------------------------
float3 scene_aabbox_min;
float3 scene_aabbox_max;
float light_x = -23;
float light_y = 25;
float light_z = 3;
float light_color[3] = {1,1,1};
// mouse controls --------------------------------
int mouse_old_x, mouse_old_y;
int mouse_buttons = 0;
bool left_down = false;
bool right_down = false;
// function declaration --------------------------
bool initGL();
bool initCUDA( int argc, char **argv);
void initCUDAmemory();
void loadObj(const std::string filename, TriangleMesh &mesh);
void Terminate(void);
void initTweakMenus();
void display();
void reshape(int width, int height);
void keyboard(unsigned char key, int x, int y);
void KeyboardUpCallback(unsigned char key, int x, int y);
void SpecialKey(int key, int x, int y);
void mouse(int button, int state, int x, int y);
void motion(int x, int y);
void rayTrace();
TwBar *bar; // Pointer to the tweak bar
void initTweakMenus()
{
if( !TwInit(TW_OPENGL, NULL) )
{
// A fatal error occurred
fprintf(stderr, "AntTweakBar initialization failed: %s\n", TwGetLastError());
exit(0);
}
bar = TwNewBar("Parameters");
TwAddVarRW(bar, "camera rotation", TW_TYPE_FLOAT, &camera_rotation,
" min=-5.0 max=5.0 step=0.01 group='Camera'");
TwAddVarRW(bar, "camera distance", TW_TYPE_FLOAT, &camera_distance,
" min= 1.0 max=125.0 step=0.1 group='Camera'");
TwAddVarRW(bar, "camera height", TW_TYPE_FLOAT, &camera_height,
" min= -35.0 max= 100.0 step=0.1 group='Camera'");
TwAddVarRW(bar, "light_pos_x", TW_TYPE_FLOAT, &light_x,
" min= -100.0 max= 100.0 step=0.1 group='Light_source'");
TwAddVarRW(bar, "light_pos_y", TW_TYPE_FLOAT, &light_y,
" min= -100.0 max= 100.0 step=0.1 group='Light_source'");
TwAddVarRW(bar, "light_pos_z", TW_TYPE_FLOAT, &light_z,
" min= -100.0 max= 100.0 step=0.1 group='Light_source'");
TwAddVarRW(bar,"light_color",TW_TYPE_COLOR3F, &light_color, " group='Light_source' ");
}
// Function called at exit
void Terminate(void)
{
TwTerminate();
}
bool initGL()
{
glewInit();
if (! glewIsSupported
(
"GL_VERSION_2_0 "
"GL_ARB_pixel_buffer_object "
"GL_EXT_framebuffer_object "
))
{
fprintf(stderr, "ERROR: Support for necessary OpenGL extensions missing.");
fflush(stderr);
return CUTFalse;
}
// init openGL state
glClearColor(0, 0, 0, 1.0);
glDisable(GL_DEPTH_TEST);
// view-port
glViewport(0, 0, window_width, window_height);
initTweakMenus();
return true;
}
bool initCUDA( int argc, char **argv)
{
if ( cutCheckCmdLineFlag(argc, (const char **)argv, "device"))
{
cutilGLDeviceInit(argc, argv);
}
else
{
hipGLSetGLDevice (cutGetMaxGflopsDeviceId() );
}
return true;
}
void initCUDAmemory()
{
// initialize the PBO for transferring data from CUDA to openGL
unsigned int num_texels = image_width * image_height;
unsigned int size_tex_data = sizeof(GLubyte) * num_texels * 4;
void *data = malloc(size_tex_data);
// create buffer object
glGenBuffers(1, &pbo);
glBindBuffer(GL_ARRAY_BUFFER, pbo);
glBufferData(GL_ARRAY_BUFFER, size_tex_data, data, GL_DYNAMIC_DRAW);
free(data);
glBindBuffer(GL_ARRAY_BUFFER, 0);
// register this buffer object with CUDA
cutilSafeCall(hipGLRegisterBufferObject(pbo));
CUT_CHECK_ERROR_GL();
// create the texture that we use to visualize the ray-tracing result
glGenTextures(1, &result_texture);
glBindTexture(GL_TEXTURE_2D, result_texture);
// set basic parameters
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
// buffer data
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, image_width, image_height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
CUT_CHECK_ERROR_GL();
	// next we load a simple obj file and upload the triangles to a 1D texture.
loadObj("data/cube.obj",mesh);
loadObj("data/sphere.obj",sphere);
vector<float4> triangles;
for(unsigned int i = 0; i < mesh.faces.size(); i++)
{
float3 v0 = mesh.verts[mesh.faces[i].v[0]-1];
float3 v1 = mesh.verts[mesh.faces[i].v[1]-1];
float3 v2 = mesh.verts[mesh.faces[i].v[2]-1];
triangles.push_back(make_float4(v0.x,v0.y,v0.z,0));
triangles.push_back(make_float4(v1.x-v0.x, v1.y-v0.y, v1.z-v0.z,0)); // notice we store the edges instead of vertex points, to save some calculations in the
triangles.push_back(make_float4(v2.x-v0.x, v2.y-v0.y, v2.z-v0.z,0)); // ray triangle intersection test.
}
for(unsigned int i = 0; i < sphere.faces.size(); i++)
{
float3 v0 = sphere.verts[sphere.faces[i].v[0]-1];
float3 v1 = sphere.verts[sphere.faces[i].v[1]-1];
float3 v2 = sphere.verts[sphere.faces[i].v[2]-1];
triangles.push_back(make_float4(v0.x,v0.y,v0.z,0));
triangles.push_back(make_float4(v1.x-v0.x, v1.y-v0.y, v1.z-v0.z,1)); // notice we store the edges instead of vertex points, to save some calculations in the
triangles.push_back(make_float4(v2.x-v0.x, v2.y-v0.y, v2.z-v0.z,0)); // ray triangle intersection test.
}
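	// Rough sketch (illustrative only -- the real test lives in raytracer.cu, not shown here)
	// of how the stored edges e1 = v1 - v0, e2 = v2 - v0 feed a Moller-Trumbore style
	// ray/triangle intersection:
	//   pvec = cross(dir, e2);   det = dot(e1, pvec);
	//   tvec = orig - v0;        u   = dot(tvec, pvec) / det;
	//   qvec = cross(tvec, e1);  v   = dot(dir, qvec) / det;
	//   hit if det != 0, u >= 0, v >= 0 and u + v <= 1, with t = dot(e2, qvec) / det.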
cout << "total number of triangles check:" << mesh.faces.size() + sphere.faces.size() << " == " << triangles.size()/3 << endl;
size_t triangle_size = triangles.size() * sizeof(float4);
total_number_of_triangles = triangles.size()/3;
if(triangle_size > 0)
{
cutilSafeCall( hipMalloc((void **)&dev_triangle_p, triangle_size));
hipMemcpy(dev_triangle_p,&triangles[0],triangle_size,hipMemcpyHostToDevice);
bindTriangles(dev_triangle_p, total_number_of_triangles);
}
scene_aabbox_min = mesh.bounding_box[0];
scene_aabbox_max = mesh.bounding_box[1];
scene_aabbox_min.x = min(scene_aabbox_min.x,sphere.bounding_box[0].x);
scene_aabbox_min.y = min(scene_aabbox_min.y,sphere.bounding_box[0].y);
scene_aabbox_min.z = min(scene_aabbox_min.z,sphere.bounding_box[0].z);
scene_aabbox_max.x = max(scene_aabbox_max.x,sphere.bounding_box[1].x);
scene_aabbox_max.y = max(scene_aabbox_max.y,sphere.bounding_box[1].y);
scene_aabbox_max.z = max(scene_aabbox_max.z,sphere.bounding_box[1].z);
}
// Callback function called by GLUT when window size changes
void reshape(int width, int height)
{
// Set OpenGL view port and camera
glViewport(0, 0, width, height);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(60.0f, (double)width/height, 0.1, 100);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
// Send the new window size to AntTweakBar
TwWindowSize(width, height);
}
void SpecialKey(int key, int x, int y)
{
switch(key)
{
case GLUT_KEY_F1:
break;
};
}
void updateCamera()
{
campos = make_float3(cos(camera_rotation)*camera_distance,camera_height,-sin(camera_rotation)*camera_distance);
float3 cam_dir = -campos;
cam_dir = normalize(cam_dir);
float3 cam_up = make_float3(0,1,0);
float3 cam_right = cross(cam_dir,cam_up);
cam_right = normalize(cam_right);
cam_up = -cross(cam_dir,cam_right);
cam_up = normalize(cam_up);
float FOV = 60.0f;
float theta = (FOV*3.1415*0.5) / 180.0f;
float half_width = tanf(theta);
float aspect = (float)image_width / (float)image_height;
float u0 = -half_width * aspect;
float v0 = -half_width;
float u1 = half_width * aspect;
float v1 = half_width;
float dist_to_image = 1;
a = (u1-u0)*cam_right;
b = (v1-v0)*cam_up;
c = campos + u0*cam_right + v0*cam_up + dist_to_image*cam_dir;
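	// a and b span the image plane horizontally and vertically, and c is its lower-left
	// corner in world space; presumably the kernel in raytracer.cu builds a primary ray for
	// normalized pixel coordinates (u, v) roughly as dir = (c + u*a + v*b) - campos.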
if(animate)
camera_rotation += 0.25 * delta_t;
}
void rayTrace()
{
unsigned int* out_data;
cutilSafeCall(hipGLMapBufferObject__( (void**)&out_data, pbo));
RayTraceImage(out_data, image_width, image_height,total_number_of_triangles,
a, b, c,
campos,
make_float3(light_x,light_y,light_z),
make_float3(light_color[0],light_color[1],light_color[2]),
scene_aabbox_min , scene_aabbox_max);
cutilSafeCall(hipGLUnmapBufferObject( pbo));
// download texture from destination PBO
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbo);
glBindTexture(GL_TEXTURE_2D, result_texture);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, image_width, image_height, GL_BGRA, GL_UNSIGNED_BYTE, NULL);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
CUT_CHECK_ERROR_GL();
}
// display image to the screen as textured quad
void displayTexture()
{
// render a screen sized quad
glDisable(GL_DEPTH_TEST);
glDisable(GL_LIGHTING);
glEnable(GL_TEXTURE_2D);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
glMatrixMode(GL_PROJECTION);
glPushMatrix();
glLoadIdentity();
glOrtho(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0);
glMatrixMode( GL_MODELVIEW);
glLoadIdentity();
glViewport(0, 0, window_width, window_height);
glBegin(GL_QUADS);
glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, 0.5);
glTexCoord2f(1.0, 0.0); glVertex3f(1.0, -1.0, 0.5);
glTexCoord2f(1.0, 1.0); glVertex3f(1.0, 1.0, 0.5);
glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, 0.5);
glEnd();
glMatrixMode(GL_PROJECTION);
glPopMatrix();
glDisable(GL_TEXTURE_2D);
CUT_CHECK_ERROR_GL();
}
void display()
{
//update the delta time for animation
static int lastFrameTime = 0;
if (lastFrameTime == 0)
{
lastFrameTime = glutGet(GLUT_ELAPSED_TIME);
}
int now = glutGet(GLUT_ELAPSED_TIME);
int elapsedMilliseconds = now - lastFrameTime;
delta_t = elapsedMilliseconds / 1000.0f;
lastFrameTime = now;
updateCamera();
glClearColor(0,0,0,0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
rayTrace();
displayTexture();
TwDraw();
glutSwapBuffers();
glutPostRedisplay();
}
void keyboard(unsigned char key, int /*x*/, int /*y*/)
{
switch(key)
{
case ' ':
animate = !animate;
break;
case(27) :
Terminate();
exit(0);
}
}
void KeyboardUpCallback(unsigned char key, int x, int y)
{
if(TwEventKeyboardGLUT(key,x, y))
{
return;
}
}
int main(int argc, char** argv)
{
// Create GL context
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
glutInitWindowSize(window_width,window_height);
glutCreateWindow("Alexandra Institute ray-tracing tutorial");
// initialize GL
if(CUTFalse == initGL())
{
return 0;
}
// initialize CUDA
if(CUTFalse == initCUDA(argc,argv))
{
return 0;
}
initCUDAmemory();
// register callbacks
glutDisplayFunc(display);
glutKeyboardFunc(keyboard);
glutKeyboardUpFunc(KeyboardUpCallback);
glutSpecialUpFunc(SpecialKey);
glutReshapeFunc(reshape);
// - Directly redirect GLUT mouse button events to AntTweakBar
glutMouseFunc((GLUTmousebuttonfun)TwEventMouseButtonGLUT);
// - Directly redirect GLUT mouse motion events to AntTweakBar
glutMotionFunc((GLUTmousemotionfun)TwEventMouseMotionGLUT);
// - Directly redirect GLUT mouse "passive" motion events to AntTweakBar (same as MouseMotion)
glutPassiveMotionFunc((GLUTmousemotionfun)TwEventMouseMotionGLUT);
// start rendering main-loop
glutMainLoop();
hipDeviceReset();
cutilExit(argc, argv);
return 0;
}
// load a simple obj file without normals or tex-coords
void loadObj( const std::string filename, TriangleMesh &mesh )
{
std::ifstream in(filename.c_str());
if(!in.good())
{
cout << "ERROR: loading obj:(" << filename << ") file is not good" << "\n";
exit(0);
}
char buffer[256], str[255];
float f1,f2,f3;
while(!in.getline(buffer,255).eof())
{
buffer[255]='\0';
sscanf_s(buffer,"%s",str,255);
// reading a vertex
if (buffer[0]=='v' && (buffer[1]==' ' || buffer[1]==32) )
{
if ( sscanf(buffer,"v %f %f %f",&f1,&f2,&f3)==3)
{
mesh.verts.push_back(make_float3(f1,f2,f3));
}
else
{
cout << "ERROR: vertex not in wanted format in OBJLoader" << "\n";
exit(-1);
}
}
// reading FaceMtls
else if (buffer[0]=='f' && (buffer[1]==' ' || buffer[1]==32) )
{
TriangleFace f;
int nt = sscanf(buffer,"f %d %d %d",&f.v[0],&f.v[1],&f.v[2]);
if( nt!=3 )
{
cout << "ERROR: I don't know the format of that FaceMtl" << "\n";
exit(-1);
}
mesh.faces.push_back(f);
}
}
// calculate the bounding box
mesh.bounding_box[0] = make_float3(1000000,1000000,1000000);
mesh.bounding_box[1] = make_float3(-1000000,-1000000,-1000000);
for(unsigned int i = 0; i < mesh.verts.size(); i++)
{
//update min value
mesh.bounding_box[0].x = min(mesh.verts[i].x,mesh.bounding_box[0].x);
mesh.bounding_box[0].y = min(mesh.verts[i].y,mesh.bounding_box[0].y);
mesh.bounding_box[0].z = min(mesh.verts[i].z,mesh.bounding_box[0].z);
//update max value
mesh.bounding_box[1].x = max(mesh.verts[i].x,mesh.bounding_box[1].x);
mesh.bounding_box[1].y = max(mesh.verts[i].y,mesh.bounding_box[1].y);
mesh.bounding_box[1].z = max(mesh.verts[i].z,mesh.bounding_box[1].z);
}
cout << "obj file loaded: number of faces:" << mesh.faces.size() << " number of vertices:" << mesh.verts.size() << endl;
	cout << "obj bounding box: min:(" << mesh.bounding_box[0].x << "," << mesh.bounding_box[0].y << "," << mesh.bounding_box[0].z <<") max:("
<< mesh.bounding_box[1].x << "," << mesh.bounding_box[1].y << "," << mesh.bounding_box[1].z <<")" << endl;
}
| dc75ef37689d5833442a4778cdcb1b84fc5d2e3c.cu |
// -----------------------------------------------------------------
// Simple cuda ray tracing tutorial
// Written by Peter Trier
// Alexandra Institute august 2009
//
//
// -----------------------------------------------------------------
// includes --------------------------------------
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#pragma warning(disable:4996)
#endif
// includes, GL
#include <GL/glew.h>
#include <GL/glut.h>
// cuda includes
#include <cuda_runtime_api.h>
#include <cutil_inline.h>
#include <cutil_gl_inline.h>
#include <cutil_gl_error.h>
#include <cuda_gl_interop.h>
#include <vector_types.h>
#include <vector_functions.h>
#include <cutil_math.h>
// std
#include <AntTweakBar.h>
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
using namespace std;
// the interface between C++ and CUDA -----------
// the implementation of RayTraceImage is in the
// "raytracer.cu" file
extern "C" void RayTraceImage(unsigned int *pbo_out, int w, int h, int number_of_triangles,
float3 a, float3 b, float3 c,
float3 campos,
float3 light_pos,
float3 light_color,
float3 scene_aabbox_min , float3 scene_aabbox_max);
// a method for binding the loaded triangles to a Cuda texture
// the implementation of bindTriangles is in the
// "raytracer.cu" file
extern "C" void bindTriangles(float *dev_triangle_p, unsigned int number_of_triangles);
// Obj loader ------------------------------------
struct TriangleFace
{
int v[3]; // vertex indices
};
struct TriangleMesh
{
vector<float3> verts;
vector<TriangleFace> faces;
float3 bounding_box[2];
};
// Globals ---------------------------------------
unsigned int window_width = 800;
unsigned int window_height = 600;
unsigned int image_width = 800;
unsigned int image_height = 600;
float delta_t = 0;
GLuint pbo; // this pbo is used to connect CUDA and openGL
GLuint result_texture; // the ray-tracing result is copied to this openGL texture
TriangleMesh mesh;
TriangleMesh ground;
TriangleMesh sphere;
TriangleMesh object;
int total_number_of_triangles = 0;
float *dev_triangle_p; // the cuda device pointer that points to the uploaded triangles
// Camera parameters -----------------------------
float3 a; float3 b; float3 c;
float3 campos;
float camera_rotation = 0;
float camera_distance = 75;
float camera_height = 25;
bool animate = true;
// Scene bounding box ----------------------------
float3 scene_aabbox_min;
float3 scene_aabbox_max;
float light_x = -23;
float light_y = 25;
float light_z = 3;
float light_color[3] = {1,1,1};
// mouse controls --------------------------------
int mouse_old_x, mouse_old_y;
int mouse_buttons = 0;
bool left_down = false;
bool right_down = false;
// function declaration --------------------------
bool initGL();
bool initCUDA( int argc, char **argv);
void initCUDAmemory();
void loadObj(const std::string filename, TriangleMesh &mesh);
void Terminate(void);
void initTweakMenus();
void display();
void reshape(int width, int height);
void keyboard(unsigned char key, int x, int y);
void KeyboardUpCallback(unsigned char key, int x, int y);
void SpecialKey(int key, int x, int y);
void mouse(int button, int state, int x, int y);
void motion(int x, int y);
void rayTrace();
TwBar *bar; // Pointer to the tweak bar
void initTweakMenus()
{
if( !TwInit(TW_OPENGL, NULL) )
{
// A fatal error occurred
fprintf(stderr, "AntTweakBar initialization failed: %s\n", TwGetLastError());
exit(0);
}
bar = TwNewBar("Parameters");
TwAddVarRW(bar, "camera rotation", TW_TYPE_FLOAT, &camera_rotation,
" min=-5.0 max=5.0 step=0.01 group='Camera'");
TwAddVarRW(bar, "camera distance", TW_TYPE_FLOAT, &camera_distance,
" min= 1.0 max=125.0 step=0.1 group='Camera'");
TwAddVarRW(bar, "camera height", TW_TYPE_FLOAT, &camera_height,
" min= -35.0 max= 100.0 step=0.1 group='Camera'");
TwAddVarRW(bar, "light_pos_x", TW_TYPE_FLOAT, &light_x,
" min= -100.0 max= 100.0 step=0.1 group='Light_source'");
TwAddVarRW(bar, "light_pos_y", TW_TYPE_FLOAT, &light_y,
" min= -100.0 max= 100.0 step=0.1 group='Light_source'");
TwAddVarRW(bar, "light_pos_z", TW_TYPE_FLOAT, &light_z,
" min= -100.0 max= 100.0 step=0.1 group='Light_source'");
TwAddVarRW(bar,"light_color",TW_TYPE_COLOR3F, &light_color, " group='Light_source' ");
}
// Function called at exit
void Terminate(void)
{
TwTerminate();
}
bool initGL()
{
glewInit();
if (! glewIsSupported
(
"GL_VERSION_2_0 "
"GL_ARB_pixel_buffer_object "
"GL_EXT_framebuffer_object "
))
{
fprintf(stderr, "ERROR: Support for necessary OpenGL extensions missing.");
fflush(stderr);
return CUTFalse;
}
// init openGL state
glClearColor(0, 0, 0, 1.0);
glDisable(GL_DEPTH_TEST);
// view-port
glViewport(0, 0, window_width, window_height);
initTweakMenus();
return true;
}
bool initCUDA( int argc, char **argv)
{
if ( cutCheckCmdLineFlag(argc, (const char **)argv, "device"))
{
cutilGLDeviceInit(argc, argv);
}
else
{
cudaGLSetGLDevice (cutGetMaxGflopsDeviceId() );
}
return true;
}
void initCUDAmemory()
{
// initialize the PBO for transferring data from CUDA to openGL
unsigned int num_texels = image_width * image_height;
unsigned int size_tex_data = sizeof(GLubyte) * num_texels * 4;
void *data = malloc(size_tex_data);
// create buffer object
glGenBuffers(1, &pbo);
glBindBuffer(GL_ARRAY_BUFFER, pbo);
glBufferData(GL_ARRAY_BUFFER, size_tex_data, data, GL_DYNAMIC_DRAW);
free(data);
glBindBuffer(GL_ARRAY_BUFFER, 0);
// register this buffer object with CUDA
cutilSafeCall(cudaGLRegisterBufferObject(pbo));
CUT_CHECK_ERROR_GL();
// create the texture that we use to visualize the ray-tracing result
glGenTextures(1, &result_texture);
glBindTexture(GL_TEXTURE_2D, result_texture);
// set basic parameters
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
// buffer data
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, image_width, image_height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
CUT_CHECK_ERROR_GL();
	// next we load a simple obj file and upload the triangles to a 1D texture.
loadObj("data/cube.obj",mesh);
loadObj("data/sphere.obj",sphere);
vector<float4> triangles;
for(unsigned int i = 0; i < mesh.faces.size(); i++)
{
float3 v0 = mesh.verts[mesh.faces[i].v[0]-1];
float3 v1 = mesh.verts[mesh.faces[i].v[1]-1];
float3 v2 = mesh.verts[mesh.faces[i].v[2]-1];
triangles.push_back(make_float4(v0.x,v0.y,v0.z,0));
triangles.push_back(make_float4(v1.x-v0.x, v1.y-v0.y, v1.z-v0.z,0)); // notice we store the edges instead of vertex points, to save some calculations in the
triangles.push_back(make_float4(v2.x-v0.x, v2.y-v0.y, v2.z-v0.z,0)); // ray triangle intersection test.
}
for(unsigned int i = 0; i < sphere.faces.size(); i++)
{
float3 v0 = sphere.verts[sphere.faces[i].v[0]-1];
float3 v1 = sphere.verts[sphere.faces[i].v[1]-1];
float3 v2 = sphere.verts[sphere.faces[i].v[2]-1];
triangles.push_back(make_float4(v0.x,v0.y,v0.z,0));
triangles.push_back(make_float4(v1.x-v0.x, v1.y-v0.y, v1.z-v0.z,1)); // notice we store the edges instead of vertex points, to save some calculations in the
triangles.push_back(make_float4(v2.x-v0.x, v2.y-v0.y, v2.z-v0.z,0)); // ray triangle intersection test.
}
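	// Rough sketch (illustrative only -- the real test lives in raytracer.cu, not shown here)
	// of how the stored edges e1 = v1 - v0, e2 = v2 - v0 feed a Moller-Trumbore style
	// ray/triangle intersection:
	//   pvec = cross(dir, e2);   det = dot(e1, pvec);
	//   tvec = orig - v0;        u   = dot(tvec, pvec) / det;
	//   qvec = cross(tvec, e1);  v   = dot(dir, qvec) / det;
	//   hit if det != 0, u >= 0, v >= 0 and u + v <= 1, with t = dot(e2, qvec) / det.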
cout << "total number of triangles check:" << mesh.faces.size() + sphere.faces.size() << " == " << triangles.size()/3 << endl;
size_t triangle_size = triangles.size() * sizeof(float4);
total_number_of_triangles = triangles.size()/3;
if(triangle_size > 0)
{
cutilSafeCall( cudaMalloc((void **)&dev_triangle_p, triangle_size));
cudaMemcpy(dev_triangle_p,&triangles[0],triangle_size,cudaMemcpyHostToDevice);
bindTriangles(dev_triangle_p, total_number_of_triangles);
}
scene_aabbox_min = mesh.bounding_box[0];
scene_aabbox_max = mesh.bounding_box[1];
scene_aabbox_min.x = min(scene_aabbox_min.x,sphere.bounding_box[0].x);
scene_aabbox_min.y = min(scene_aabbox_min.y,sphere.bounding_box[0].y);
scene_aabbox_min.z = min(scene_aabbox_min.z,sphere.bounding_box[0].z);
scene_aabbox_max.x = max(scene_aabbox_max.x,sphere.bounding_box[1].x);
scene_aabbox_max.y = max(scene_aabbox_max.y,sphere.bounding_box[1].y);
scene_aabbox_max.z = max(scene_aabbox_max.z,sphere.bounding_box[1].z);
}
// Callback function called by GLUT when window size changes
void reshape(int width, int height)
{
// Set OpenGL view port and camera
glViewport(0, 0, width, height);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(60.0f, (double)width/height, 0.1, 100);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
// Send the new window size to AntTweakBar
TwWindowSize(width, height);
}
void SpecialKey(int key, int x, int y)
{
switch(key)
{
case GLUT_KEY_F1:
break;
};
}
void updateCamera()
{
campos = make_float3(cos(camera_rotation)*camera_distance,camera_height,-sin(camera_rotation)*camera_distance);
float3 cam_dir = -campos;
cam_dir = normalize(cam_dir);
float3 cam_up = make_float3(0,1,0);
float3 cam_right = cross(cam_dir,cam_up);
cam_right = normalize(cam_right);
cam_up = -cross(cam_dir,cam_right);
cam_up = normalize(cam_up);
float FOV = 60.0f;
float theta = (FOV*3.1415*0.5) / 180.0f;
float half_width = tanf(theta);
float aspect = (float)image_width / (float)image_height;
float u0 = -half_width * aspect;
float v0 = -half_width;
float u1 = half_width * aspect;
float v1 = half_width;
float dist_to_image = 1;
a = (u1-u0)*cam_right;
b = (v1-v0)*cam_up;
c = campos + u0*cam_right + v0*cam_up + dist_to_image*cam_dir;
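	// a and b span the image plane horizontally and vertically, and c is its lower-left
	// corner in world space; presumably the kernel in raytracer.cu builds a primary ray for
	// normalized pixel coordinates (u, v) roughly as dir = (c + u*a + v*b) - campos.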
if(animate)
camera_rotation += 0.25 * delta_t;
}
void rayTrace()
{
unsigned int* out_data;
cutilSafeCall(cudaGLMapBufferObject( (void**)&out_data, pbo));
RayTraceImage(out_data, image_width, image_height,total_number_of_triangles,
a, b, c,
campos,
make_float3(light_x,light_y,light_z),
make_float3(light_color[0],light_color[1],light_color[2]),
scene_aabbox_min , scene_aabbox_max);
cutilSafeCall(cudaGLUnmapBufferObject( pbo));
// download texture from destination PBO
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbo);
glBindTexture(GL_TEXTURE_2D, result_texture);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, image_width, image_height, GL_BGRA, GL_UNSIGNED_BYTE, NULL);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
CUT_CHECK_ERROR_GL();
}
// display image to the screen as textured quad
void displayTexture()
{
// render a screen sized quad
glDisable(GL_DEPTH_TEST);
glDisable(GL_LIGHTING);
glEnable(GL_TEXTURE_2D);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
glMatrixMode(GL_PROJECTION);
glPushMatrix();
glLoadIdentity();
glOrtho(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0);
glMatrixMode( GL_MODELVIEW);
glLoadIdentity();
glViewport(0, 0, window_width, window_height);
glBegin(GL_QUADS);
glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, 0.5);
glTexCoord2f(1.0, 0.0); glVertex3f(1.0, -1.0, 0.5);
glTexCoord2f(1.0, 1.0); glVertex3f(1.0, 1.0, 0.5);
glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, 0.5);
glEnd();
glMatrixMode(GL_PROJECTION);
glPopMatrix();
glDisable(GL_TEXTURE_2D);
CUT_CHECK_ERROR_GL();
}
void display()
{
//update the delta time for animation
static int lastFrameTime = 0;
if (lastFrameTime == 0)
{
lastFrameTime = glutGet(GLUT_ELAPSED_TIME);
}
int now = glutGet(GLUT_ELAPSED_TIME);
int elapsedMilliseconds = now - lastFrameTime;
delta_t = elapsedMilliseconds / 1000.0f;
lastFrameTime = now;
updateCamera();
glClearColor(0,0,0,0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
rayTrace();
displayTexture();
TwDraw();
glutSwapBuffers();
glutPostRedisplay();
}
void keyboard(unsigned char key, int /*x*/, int /*y*/)
{
switch(key)
{
case ' ':
animate = !animate;
break;
case(27) :
Terminate();
exit(0);
}
}
void KeyboardUpCallback(unsigned char key, int x, int y)
{
if(TwEventKeyboardGLUT(key,x, y))
{
return;
}
}
int main(int argc, char** argv)
{
// Create GL context
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
glutInitWindowSize(window_width,window_height);
glutCreateWindow("Alexandra Institute ray-tracing tutorial");
// initialize GL
if(CUTFalse == initGL())
{
return 0;
}
// initialize CUDA
if(CUTFalse == initCUDA(argc,argv))
{
return 0;
}
initCUDAmemory();
// register callbacks
glutDisplayFunc(display);
glutKeyboardFunc(keyboard);
glutKeyboardUpFunc(KeyboardUpCallback);
glutSpecialUpFunc(SpecialKey);
glutReshapeFunc(reshape);
// - Directly redirect GLUT mouse button events to AntTweakBar
glutMouseFunc((GLUTmousebuttonfun)TwEventMouseButtonGLUT);
// - Directly redirect GLUT mouse motion events to AntTweakBar
glutMotionFunc((GLUTmousemotionfun)TwEventMouseMotionGLUT);
// - Directly redirect GLUT mouse "passive" motion events to AntTweakBar (same as MouseMotion)
glutPassiveMotionFunc((GLUTmousemotionfun)TwEventMouseMotionGLUT);
// start rendering main-loop
glutMainLoop();
cudaThreadExit();
cutilExit(argc, argv);
return 0;
}
// load a simple obj file without normals or tex-coords
void loadObj( const std::string filename, TriangleMesh &mesh )
{
std::ifstream in(filename.c_str());
if(!in.good())
{
cout << "ERROR: loading obj:(" << filename << ") file is not good" << "\n";
exit(0);
}
char buffer[256], str[255];
float f1,f2,f3;
while(!in.getline(buffer,255).eof())
{
buffer[255]='\0';
sscanf_s(buffer,"%s",str,255);
// reading a vertex
if (buffer[0]=='v' && (buffer[1]==' ' || buffer[1]==32) )
{
if ( sscanf(buffer,"v %f %f %f",&f1,&f2,&f3)==3)
{
mesh.verts.push_back(make_float3(f1,f2,f3));
}
else
{
cout << "ERROR: vertex not in wanted format in OBJLoader" << "\n";
exit(-1);
}
}
// reading FaceMtls
else if (buffer[0]=='f' && (buffer[1]==' ' || buffer[1]==32) )
{
TriangleFace f;
int nt = sscanf(buffer,"f %d %d %d",&f.v[0],&f.v[1],&f.v[2]);
if( nt!=3 )
{
cout << "ERROR: I don't know the format of that FaceMtl" << "\n";
exit(-1);
}
mesh.faces.push_back(f);
}
}
// calculate the bounding box
mesh.bounding_box[0] = make_float3(1000000,1000000,1000000);
mesh.bounding_box[1] = make_float3(-1000000,-1000000,-1000000);
for(unsigned int i = 0; i < mesh.verts.size(); i++)
{
//update min value
mesh.bounding_box[0].x = min(mesh.verts[i].x,mesh.bounding_box[0].x);
mesh.bounding_box[0].y = min(mesh.verts[i].y,mesh.bounding_box[0].y);
mesh.bounding_box[0].z = min(mesh.verts[i].z,mesh.bounding_box[0].z);
//update max value
mesh.bounding_box[1].x = max(mesh.verts[i].x,mesh.bounding_box[1].x);
mesh.bounding_box[1].y = max(mesh.verts[i].y,mesh.bounding_box[1].y);
mesh.bounding_box[1].z = max(mesh.verts[i].z,mesh.bounding_box[1].z);
}
cout << "obj file loaded: number of faces:" << mesh.faces.size() << " number of vertices:" << mesh.verts.size() << endl;
	cout << "obj bounding box: min:(" << mesh.bounding_box[0].x << "," << mesh.bounding_box[0].y << "," << mesh.bounding_box[0].z <<") max:("
<< mesh.bounding_box[1].x << "," << mesh.bounding_box[1].y << "," << mesh.bounding_box[1].z <<")" << endl;
}
|
6c0190e9349547651b3984995ffa34a2d8d597d5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2016-present Jean-Noel Braun.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifdef BCNN_USE_CUDA
#include "bcnn_activation_layer.h"
#include <bh/bh_macros.h>
#include "bcnn_tensor.h"
#include "bcnn_utils.h"
__global__ void bcnn_forward_activation_layer_kernel(float *x, int sz,
bcnn_activation a) {
int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (i < sz) {
switch (a) {
case BCNN_ACT_TANH:
x[i] = (exp(2 * x[i]) - 1) / (exp(2 * x[i]) + 1);
break;
case BCNN_ACT_RELU:
x[i] = x[i] * (x[i] > 0);
break;
case BCNN_ACT_LRELU:
x[i] = (x[i] > 0 ? x[i] : 0.1f * x[i]);
break;
case BCNN_ACT_RAMP:
x[i] = x[i] * (x[i] > 0) + 0.1 * x[i];
break;
case BCNN_ACT_CLAMP:
x[i] = bh_clamp(x[i], 0, 1);
break;
case BCNN_ACT_LOGISTIC:
x[i] = 1.0f / (1.0f + (float)exp(-x[i]));
break;
case BCNN_ACT_NONE:
break;
default:
break;
}
}
return;
}
void bcnn_forward_activation_gpu(float *x, int sz, bcnn_activation a) {
hipLaunchKernelGGL(( bcnn_forward_activation_layer_kernel), dim3(bcnn_cuda_blocks(sz)),
dim3(BCNN_CUDA_THREADS), 0, 0, x, sz, a);
return;
}
void bcnn_forward_activation_layer_gpu(bcnn_net *net, bcnn_node *node) {
bcnn_tensor *src_tensor = &net->tensors[node->src[0]];
bcnn_tensor *dst_tensor = &net->tensors[node->dst[0]];
bcnn_activation_param *param = (bcnn_activation_param *)node->param;
int sz = bcnn_tensor_size(dst_tensor);
dst_tensor->data_gpu = src_tensor->data_gpu;
bcnn_forward_activation_gpu(dst_tensor->data_gpu, sz, param->activation);
bcnn_cuda_check(hipPeekAtLastError());
return;
}
__global__ void bcnn_backward_activation_layer_kernel(float *x, float *dx,
int sz,
bcnn_activation a) {
int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (i < sz) {
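        // Note: x holds the *forward output* of the activation, so the derivatives below are
        // written in terms of the activated value, e.g. d/dz tanh(z) = 1 - tanh(z)^2 and
        // d/dz sigmoid(z) = sigmoid(z) * (1 - sigmoid(z)).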
switch (a) {
case BCNN_ACT_TANH:
dx[i] *= (1 - x[i] * x[i]);
break;
case BCNN_ACT_RELU:
dx[i] *= ((float)(x[i] > 0));
break;
case BCNN_ACT_LRELU:
dx[i] *= (x[i] > 0 ? 1.0f : 0.1f);
break;
case BCNN_ACT_RAMP:
dx[i] *= ((float)(x[i] > 0) + 0.1f);
break;
case BCNN_ACT_CLAMP:
dx[i] *= (float)(x[i] > 0.0f && (x[i] < 1.0f));
break;
case BCNN_ACT_LOGISTIC:
dx[i] *= (1 - x[i]) * x[i];
break;
case BCNN_ACT_NONE:
break;
default:
break;
}
}
}
void bcnn_backward_activation_gpu(float *x, float *dx, int sz,
bcnn_activation a) {
hipLaunchKernelGGL(( bcnn_backward_activation_layer_kernel), dim3(bcnn_cuda_blocks(sz)),
dim3(BCNN_CUDA_THREADS), 0, 0, x, dx, sz, a);
return;
}
void bcnn_backward_activation_layer_gpu(bcnn_net *net, bcnn_node *node) {
bcnn_tensor *src_tensor = &net->tensors[node->src[0]];
bcnn_tensor *dst_tensor = &net->tensors[node->dst[0]];
bcnn_activation_param *param = (bcnn_activation_param *)node->param;
int sz = bcnn_tensor_size(dst_tensor);
bcnn_backward_activation_gpu(
dst_tensor->data_gpu, dst_tensor->grad_data_gpu, sz, param->activation);
bcnn_cuda_check(hipPeekAtLastError());
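// Gradients are propagated in place as well: the source gradient pointer is
// aliased to the destination gradient buffer.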
src_tensor->grad_data_gpu = dst_tensor->grad_data_gpu;
return;
}
#endif | 6c0190e9349547651b3984995ffa34a2d8d597d5.cu | /*
* Copyright (c) 2016-present Jean-Noel Braun.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifdef BCNN_USE_CUDA
#include "bcnn_activation_layer.h"
#include <bh/bh_macros.h>
#include "bcnn_tensor.h"
#include "bcnn_utils.h"
__global__ void bcnn_forward_activation_layer_kernel(float *x, int sz,
bcnn_activation a) {
int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (i < sz) {
switch (a) {
case BCNN_ACT_TANH:
x[i] = (exp(2 * x[i]) - 1) / (exp(2 * x[i]) + 1);
break;
case BCNN_ACT_RELU:
x[i] = x[i] * (x[i] > 0);
break;
case BCNN_ACT_LRELU:
x[i] = (x[i] > 0 ? x[i] : 0.1f * x[i]);
break;
case BCNN_ACT_RAMP:
x[i] = x[i] * (x[i] > 0) + 0.1 * x[i];
break;
case BCNN_ACT_CLAMP:
x[i] = bh_clamp(x[i], 0, 1);
break;
case BCNN_ACT_LOGISTIC:
x[i] = 1.0f / (1.0f + (float)exp(-x[i]));
break;
case BCNN_ACT_NONE:
break;
default:
break;
}
}
return;
}
void bcnn_forward_activation_gpu(float *x, int sz, bcnn_activation a) {
bcnn_forward_activation_layer_kernel<<<bcnn_cuda_blocks(sz),
BCNN_CUDA_THREADS>>>(x, sz, a);
return;
}
void bcnn_forward_activation_layer_gpu(bcnn_net *net, bcnn_node *node) {
bcnn_tensor *src_tensor = &net->tensors[node->src[0]];
bcnn_tensor *dst_tensor = &net->tensors[node->dst[0]];
bcnn_activation_param *param = (bcnn_activation_param *)node->param;
int sz = bcnn_tensor_size(dst_tensor);
dst_tensor->data_gpu = src_tensor->data_gpu;
bcnn_forward_activation_gpu(dst_tensor->data_gpu, sz, param->activation);
bcnn_cuda_check(cudaPeekAtLastError());
return;
}
__global__ void bcnn_backward_activation_layer_kernel(float *x, float *dx,
int sz,
bcnn_activation a) {
int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (i < sz) {
switch (a) {
case BCNN_ACT_TANH:
dx[i] *= (1 - x[i] * x[i]);
break;
case BCNN_ACT_RELU:
dx[i] *= ((float)(x[i] > 0));
break;
case BCNN_ACT_LRELU:
dx[i] *= (x[i] > 0 ? 1.0f : 0.1f);
break;
case BCNN_ACT_RAMP:
dx[i] *= ((float)(x[i] > 0) + 0.1f);
break;
case BCNN_ACT_CLAMP:
dx[i] *= (float)(x[i] > 0.0f && (x[i] < 1.0f));
break;
case BCNN_ACT_LOGISTIC:
dx[i] *= (1 - x[i]) * x[i];
break;
case BCNN_ACT_NONE:
break;
default:
break;
}
}
}
void bcnn_backward_activation_gpu(float *x, float *dx, int sz,
bcnn_activation a) {
bcnn_backward_activation_layer_kernel<<<bcnn_cuda_blocks(sz),
BCNN_CUDA_THREADS>>>(x, dx, sz, a);
return;
}
void bcnn_backward_activation_layer_gpu(bcnn_net *net, bcnn_node *node) {
bcnn_tensor *src_tensor = &net->tensors[node->src[0]];
bcnn_tensor *dst_tensor = &net->tensors[node->dst[0]];
bcnn_activation_param *param = (bcnn_activation_param *)node->param;
int sz = bcnn_tensor_size(dst_tensor);
bcnn_backward_activation_gpu(
dst_tensor->data_gpu, dst_tensor->grad_data_gpu, sz, param->activation);
bcnn_cuda_check(cudaPeekAtLastError());
src_tensor->grad_data_gpu = dst_tensor->grad_data_gpu;
return;
}
#endif |
22033a836a0bc68ceab8754a4aac2bfeed3c4c23.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdlib.h>
#include <cmath>
void add (float*, float*, float*, float);
int main (int argc, char* argv[]){
//variables
int matDim;
// get inputs
if (argc < 2){
std::cout << "Not enough arguments. <<matrix dimension>>" << std::endl;
return 1;
}
else{
matDim = atoi (argv [1]);
}
//create arrays
float *MatA = new float[(int)pow(matDim, 2)];
float *MatB = new float[(int)pow(matDim, 2)];
float *MatC = new float[(int)pow(matDim, 2)];
//load
for (int i=0; i < (float)pow(matDim, 2); i++) {
MatA[i] = i;
MatB[i] = i;
}
// begin timing
hipEvent_t start, end;
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord( start, 0 );
//add
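// Note: add() runs entirely on the host; no kernel is launched here, so the
// event timing only measures this CPU loop.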
add (MatA, MatB, MatC, (int)pow(matDim, 2));
//output results
/*for (int i = 0; i < matDim; i++){
for (int j = 0; j < matDim; j++){
std::cout << MatC[(i*matDim)+j] << " ";
}
std::cout << std::endl;
}*/
//end time
hipEventRecord( end, 0 );
hipEventSynchronize( end );
float elapsedTime;
hipEventElapsedTime( &elapsedTime, start, end );
std::cout << "Time: " << elapsedTime << " ms." << std::endl;
//dealloc memory
delete[] MatA;
MatA = NULL;
delete[] MatB;
MatB = NULL;
delete[] MatC;
MatC = NULL;
return 0;
}
void add (float* a, float* b, float* c, float size){
for (int i=0; i < size; i++) {
c [i] = a[i] + b[i];
}
} | 22033a836a0bc68ceab8754a4aac2bfeed3c4c23.cu | #include <iostream>
#include <stdlib.h>
#include <cmath>
void add (float*, float*, float*, float);
int main (int argc, char* argv[]){
//variables
int matDim;
// get inputs
if (argc < 2){
std::cout << "Not enough arguments. <<matrix dimension>>" << std::endl;
return 1;
}
else{
matDim = atoi (argv [1]);
}
//create arrays
float *MatA = new float[(int)pow(matDim, 2)];
float *MatB = new float[(int)pow(matDim, 2)];
float *MatC = new float[(int)pow(matDim, 2)];
//load
for (int i=0; i < (float)pow(matDim, 2); i++) {
MatA[i] = i;
MatB[i] = i;
}
// begin timing
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord( start, 0 );
//add
add (MatA, MatB, MatC, (int)pow(matDim, 2));
//output results
/*for (int i = 0; i < matDim; i++){
for (int j = 0; j < matDim; j++){
std::cout << MatC[(i*matDim)+j] << " ";
}
std::cout << std::endl;
}*/
//end time
cudaEventRecord( end, 0 );
cudaEventSynchronize( end );
float elapsedTime;
cudaEventElapsedTime( &elapsedTime, start, end );
std::cout << "Time: " << elapsedTime << " ms." << std::endl;
//dealloc memory
delete[] MatA;
MatA = NULL;
delete[] MatB;
MatB = NULL;
delete[] MatC;
MatC = NULL;
return 0;
}
void add (float* a, float* b, float* c, float size){
for (int i=0; i < size; i++) {
c [i] = a[i] + b[i];
}
} |
b797389a778113c7f816d34e4eb065997b535ad2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__device__ __forceinline__ int is_between(float a, float b, float c) {
#if between_method == 0
return (b > a) != (c > a);
#elif between_method == 1
return ((b <= a) && (c > a)) || ((b > a) && (c <= a));
#elif between_method == 2
return ((a - b) == 0.0f) || ((a - b) * (a - c) < 0.0f);
#elif between_method == 3
//Interestingly enough, method 3 is exactly the same as method 2, only in a different order.
//The performance difference between method 2 and 3 can be huge, depending on all the other optimization parameters.
return ((a - b) * (a - c) < 0.0f) || (a - b == 0.0f);
#endif
}
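// Crossing-number point-in-polygon test: for each polygon edge (vj, vk), count a
// crossing when the point's y lies between the edge endpoints and its x is to the
// left of the edge at that y; an odd number of crossings means the point is inside.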
__global__ void cn_pnpoly(int* bitmap, float2* points, int n) {
int i = blockIdx.x * block_size_x * tile_size + threadIdx.x;
if (i < n) {
int c[tile_size];
float2 lpoints[tile_size];
#pragma unroll
for (int ti=0; ti<tile_size; ti++) {
c[ti] = 0;
if (i+block_size_x*ti < n) {
lpoints[ti] = points[i+block_size_x*ti];
}
}
int k = VERTICES-1;
for (int j=0; j<VERTICES; k = j++) { // edge from vj to vk
float2 vj = d_vertices[j];
float2 vk = d_vertices[k];
#if use_precomputed_slopes == 0
float slope = (vk.x-vj.x) / (vk.y-vj.y);
#elif use_precomputed_slopes == 1
float slope = d_slopes[j];
#endif
#pragma unroll
for (int ti=0; ti<tile_size; ti++) {
float2 p = lpoints[ti];
#if use_method == 0
if ( is_between(p.y, vj.y, vk.y) && //if p is between vj and vk vertically
(p.x < slope * (p.y-vj.y) + vj.x)
) { //if p.x crosses the line vj-vk when moved in positive x-direction
c[ti] = !c[ti];
}
#elif use_method == 1
//Same as method 0, but attempts to reduce divergence by avoiding the use of an if-statement.
//Whether this is more efficient is data dependent because there will be no divergence using method 0, when none
//of the threads within a warp evaluate is_between as true
int b = is_between(p.y, vj.y, vk.y);
c[ti] += b && (p.x < vj.x + slope * (p.y - vj.y));
#endif
}
}
#pragma unroll
for (int ti=0; ti<tile_size; ti++) {
//could do an if statement here if 1s are expected to be rare
if (i+block_size_x*ti < n) {
#if use_method == 0
bitmap[i+block_size_x*ti] = c[ti];
#elif use_method == 1
bitmap[i+block_size_x*ti] = c[ti] & 1;
#endif
}
}
}
} | b797389a778113c7f816d34e4eb065997b535ad2.cu | #include "includes.h"
__device__ __forceinline__ int is_between(float a, float b, float c) {
#if between_method == 0
return (b > a) != (c > a);
#elif between_method == 1
return ((b <= a) && (c > a)) || ((b > a) && (c <= a));
#elif between_method == 2
return ((a - b) == 0.0f) || ((a - b) * (a - c) < 0.0f);
#elif between_method == 3
//Interestingly enough, method 3 is exactly the same as method 2, only in a different order.
//The performance difference between method 2 and 3 can be huge, depending on all the other optimization parameters.
return ((a - b) * (a - c) < 0.0f) || (a - b == 0.0f);
#endif
}
__global__ void cn_pnpoly(int* bitmap, float2* points, int n) {
int i = blockIdx.x * block_size_x * tile_size + threadIdx.x;
if (i < n) {
int c[tile_size];
float2 lpoints[tile_size];
#pragma unroll
for (int ti=0; ti<tile_size; ti++) {
c[ti] = 0;
if (i+block_size_x*ti < n) {
lpoints[ti] = points[i+block_size_x*ti];
}
}
int k = VERTICES-1;
for (int j=0; j<VERTICES; k = j++) { // edge from vj to vk
float2 vj = d_vertices[j];
float2 vk = d_vertices[k];
#if use_precomputed_slopes == 0
float slope = (vk.x-vj.x) / (vk.y-vj.y);
#elif use_precomputed_slopes == 1
float slope = d_slopes[j];
#endif
#pragma unroll
for (int ti=0; ti<tile_size; ti++) {
float2 p = lpoints[ti];
#if use_method == 0
if ( is_between(p.y, vj.y, vk.y) && //if p is between vj and vk vertically
(p.x < slope * (p.y-vj.y) + vj.x)
) { //if p.x crosses the line vj-vk when moved in positive x-direction
c[ti] = !c[ti];
}
#elif use_method == 1
//Same as method 0, but attempts to reduce divergence by avoiding the use of an if-statement.
//Whether this is more efficient is data dependent because there will be no divergence using method 0, when none
//of the threads within a warp evaluate is_between as true
int b = is_between(p.y, vj.y, vk.y);
c[ti] += b && (p.x < vj.x + slope * (p.y - vj.y));
#endif
}
}
#pragma unroll
for (int ti=0; ti<tile_size; ti++) {
//could do an if statement here if 1s are expected to be rare
if (i+block_size_x*ti < n) {
#if use_method == 0
bitmap[i+block_size_x*ti] = c[ti];
#elif use_method == 1
bitmap[i+block_size_x*ti] = c[ti] & 1;
#endif
}
}
}
} |
3009c63943b9cee1140df8733455120614bda586.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//15co154 Yeshwanth R
//15co118 Goutham M
#include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
__global__ void func(float *da_in,float *db_in,float *d_out)
{
int idx = blockIdx.x*100 + threadIdx.x;
d_out[idx] = da_in[idx] + db_in[idx];
}
int main()
{
float t1,t2;
const int array_size = 16000;
const int array_bytes = array_size* sizeof(float);
float a_in[array_size],b_in[array_size];
for(int i=0;i<array_size;i++)
{
a_in[i] = float(i);
}
for(int i=0;i<array_size;i++)
{
b_in[i]=rand()%16000;
}
float h_out[array_size];
float *da_in;
float *db_in;
float *d_out;
hipMalloc((void **)&da_in,array_bytes);
hipMalloc((void **)&db_in,array_bytes);
hipMalloc((void **)&d_out,array_bytes);
hipMemcpy(da_in,a_in,array_bytes,hipMemcpyHostToDevice);
hipMemcpy(db_in,b_in,array_bytes,hipMemcpyHostToDevice);
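// 16000 elements launched as 160 blocks of 100 threads; the kernel's index math
// (blockIdx.x*100 + threadIdx.x) assumes exactly 100 threads per block.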
//kernel
hipLaunchKernelGGL(func, dim3(160, 1, 1), dim3(100, 1, 1), 0, 0, da_in, db_in, d_out);
float time;
//copying back
hipMemcpy(h_out,d_out,array_bytes,hipMemcpyDeviceToHost);
for(int i=0;i<array_size;i++)
{
printf("%f",h_out[i]);
printf(((i%12)!=3)? "\t":"\n");
}
hipFree(da_in);
hipFree(d_out);
hipFree(db_in);
printf("\n\n\n\n");
}
| 3009c63943b9cee1140df8733455120614bda586.cu | //15co154 Yeshwanth R
//15co118 Goutham M
#include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
__global__ void func(float *da_in,float *db_in,float *d_out)
{
int idx = blockIdx.x*100 + threadIdx.x;
d_out[idx] = da_in[idx] + db_in[idx];
}
int main()
{
float t1,t2;
const int array_size = 16000;
const int array_bytes = array_size* sizeof(float);
float a_in[array_size],b_in[array_size];
for(int i=0;i<array_size;i++)
{
a_in[i] = float(i);
}
for(int i=0;i<array_size;i++)
{
b_in[i]=rand()%16000;
}
float h_out[array_size];
float *da_in;
float *db_in;
float *d_out;
cudaMalloc((void **)&da_in,array_bytes);
cudaMalloc((void **)&db_in,array_bytes);
cudaMalloc((void **)&d_out,array_bytes);
cudaMemcpy(da_in,a_in,array_bytes,cudaMemcpyHostToDevice);
cudaMemcpy(db_in,b_in,array_bytes,cudaMemcpyHostToDevice);
//kernel
func<<<dim3(160,1,1),dim3(100,1,1)>>>(da_in,db_in,d_out);
float time;
//copying back
cudaMemcpy(h_out,d_out,array_bytes,cudaMemcpyDeviceToHost);
for(int i=0;i<array_size;i++)
{
printf("%f",h_out[i]);
printf(((i%12)!=3)? "\t":"\n");
}
cudaFree(da_in);
cudaFree(d_out);
cudaFree(db_in);
printf("\n\n\n\n");
}
|
13db205cdc821e8fcf72d11db43f6dd170f7844c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <hip/hip_runtime_api.h>
//#include <cutil.h>
#include <hip/hip_runtime.h>
#include <string>
#define GPUJOULE_DIR ""
#define SHARED_MEM_ELEMENTS 1024
#define GLOBAL_MEM_ELEMENTS 196608
int num_blocks;
int num_threads_per_block;
int num_iterations;
int divergence;
float* h_A;
float* h_B;
float* h_C;
float* h_res;
float* d_A;
float* d_B;
float* d_C;
float* d_res;
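// init_memory builds a pointer-chasing chain in global memory: each warp gets its
// own region of GLOBAL_MEM_ELEMENTS / num_warps_per_block entries, and every slot
// stores the address of the slot 48 elements ahead (modulo the region size).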
__global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) {
int block_id;
int warp_id;
int i;
int index;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
void **ptr_array = (void **)my_ptr_array;
unsigned long long *array = (unsigned long long *)my_array;
if (tid == 0) {
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
//int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
// for (block_id = 0; block_id < num_blocks_k; block_id++) {
for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) {
for (i = 0; i < elements_per_warp; i++) {
//index = (block_id * elements_per_block) + (warp_id * elements_per_warp);
index = (warp_id * elements_per_warp);
ptr_array[index + i] = (void*)&array[(index + ((i + 48) % elements_per_warp))];
}
}
/* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS];
}
*/
for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
//array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS];
array[i] = (unsigned long long)ptr_array[i];
}
}
__syncthreads();
}
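// shared_latency walks the chain built above: the first `divergence` lanes of each
// warp repeatedly load through dependent pointers, and the final store into
// duration[] keeps the compiler from eliminating the loads.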
__global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) {
// unsigned long long int start_time, end_time;
unsigned long long int sum_time = 0;
int i, k;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int block_id = blockIdx.x;
int warp_id = threadIdx.x / 32;
int warp_thread_id = threadIdx.x % 32;
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
// int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
//int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id;
int index1 = (warp_id * elements_per_warp) + warp_thread_id;
void **ptr_array = (void **)my_ptr_array;
unsigned long long int *array = (unsigned long long int *)my_array;
void **tmp_ptr;
//tmp_ptr = (void *)sdata;
//tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[index1]));
tmp_ptr = (void **)(&(array[index1]));
double f1, f2, f3;
f1 = 1.1;
f2 = 2.5;
if (warp_thread_id < divergence) {
/* __asm volatile (
".reg .f32 %r14;\n\t"
"mov.f32 %r14, 2.2;\n\t"
);
*/
for (int l = 0; l < iterations; l++) {
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
}
}
// __syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid);
// __syncthreads();
}
void usage() {
std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl;
}
void parametric_measure_shared(int N, int iterations, int stride) {
hipProfilerStop();
int i;
unsigned long long int * h_a;
unsigned long long int * d_a;
unsigned long long ** h_ptr_a;
unsigned long long ** d_ptr_a;
unsigned long long * duration;
unsigned long long * latency;
hipError_t error_id;
/* allocate array on CPU */
h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N);
h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N);
latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks);
/* initialize array elements on CPU */
for (i = 0; i < N; i++) {
h_ptr_a[i] = (unsigned long long *)&h_a[i];
}
for (i = 0; i < N; i++) {
h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N];
}
/* allocate arrays on GPU */
hipMalloc ((void **) &d_a, sizeof(unsigned long long int) * N );
hipMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N );
hipMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks);
hipDeviceSynchronize ();
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 1 is %s\n", hipGetErrorString(error_id));
}
/* copy array elements from CPU to GPU */
hipMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, hipMemcpyHostToDevice);
hipMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, hipMemcpyHostToDevice);
hipMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyHostToDevice);
hipDeviceSynchronize ();
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 2 is %s\n", hipGetErrorString(error_id));
}
hipLaunchKernelGGL(( init_memory) , dim3(1), dim3(1), 0, 0, d_ptr_a, d_a, stride, num_blocks, num_threads_per_block);
hipDeviceSynchronize();
/* launch kernel*/
//dim3 Db = dim3(13);
//dim3 Dg = dim3(768,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
// int sharedMemSize = sizeof(unsigned long long int) * N ;
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
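// Note: GPUJOULE_DIR is not macro-expanded inside the string literal below (and the
// macro is defined as an empty string anyway), so the command path literally starts
// with "GPUJOULE_DIR/".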
std::string cmd = "GPUJOULE_DIR/nvml/example/power_monitor 5 > GPUJOULE_DIR/energy_model_ubench/energy_model_data/data_movement_energy/l2_cache/fadd_l2d_99_1_64p_asm_power.txt &";
std::system(cmd.c_str());
std::system("sleep 5");
hipEventRecord(start, 0);
hipProfilerStart();
hipFuncSetCacheConfig(shared_latency, hipFuncCachePreferL1);
//shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration);
//shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence);
hipLaunchKernelGGL(( shared_latency) , dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block);
hipDeviceSynchronize();
///hipDeviceSynchronize ();
hipProfilerStop();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
std::system("killall power_monitor");
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 3 is %s\n", hipGetErrorString(error_id));
}
/* copy results from GPU to CPU */
hipMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, hipMemcpyDeviceToHost);
hipMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyDeviceToHost);
hipDeviceSynchronize ();
/* print results*/
unsigned long long max_dur = latency[0];
unsigned long long min_dur = latency[0];
unsigned long long avg_lat = latency[0];
for (int i = 1; i < num_threads_per_block * num_blocks; i++) {
avg_lat += latency[i];
if (latency[i] > max_dur) {
max_dur = latency[i];
} else if (latency[i] < min_dur) {
min_dur = latency[i];
}
}
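// Only the elapsed kernel time is printed below; the min/max/avg latency figures are
// computed above but are only used by the commented-out printf.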
// printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time);
printf("%f\n", time);
/* free memory on GPU */
hipFree(d_a);
hipFree(d_ptr_a);
hipFree(duration);
hipDeviceSynchronize ();
/*free memory on CPU */
free(h_a);
free(h_ptr_a);
free(latency);
}
int main(int argc, char **argv)
{
int N;
if (argc != 6) {
usage();
exit(1);
}
num_blocks = atoi(argv[1]);
num_threads_per_block = atoi(argv[2]);
num_iterations = atoi(argv[3]);
divergence = atoi(argv[4]);
int stride = atoi(argv[5]);
N = GLOBAL_MEM_ELEMENTS;
parametric_measure_shared(N, 10, stride);
return 0;
}
| 13db205cdc821e8fcf72d11db43f6dd170f7844c.cu | #include <stdio.h>
#include <iostream>
#include <cuda_profiler_api.h>
//#include <cutil.h>
#include <cuda_runtime.h>
#include <string>
#define GPUJOULE_DIR ""
#define SHARED_MEM_ELEMENTS 1024
#define GLOBAL_MEM_ELEMENTS 196608
int num_blocks;
int num_threads_per_block;
int num_iterations;
int divergence;
float* h_A;
float* h_B;
float* h_C;
float* h_res;
float* d_A;
float* d_B;
float* d_C;
float* d_res;
__global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) {
int block_id;
int warp_id;
int i;
int index;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
void **ptr_array = (void **)my_ptr_array;
unsigned long long *array = (unsigned long long *)my_array;
if (tid == 0) {
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
//int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
// for (block_id = 0; block_id < num_blocks_k; block_id++) {
for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) {
for (i = 0; i < elements_per_warp; i++) {
//index = (block_id * elements_per_block) + (warp_id * elements_per_warp);
index = (warp_id * elements_per_warp);
ptr_array[index + i] = (void*)&array[(index + ((i + 48) % elements_per_warp))];
}
}
/* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS];
}
*/
for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
//array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS];
array[i] = (unsigned long long)ptr_array[i];
}
}
__syncthreads();
}
__global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) {
// unsigned long long int start_time, end_time;
unsigned long long int sum_time = 0;
int i, k;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int block_id = blockIdx.x;
int warp_id = threadIdx.x / 32;
int warp_thread_id = threadIdx.x % 32;
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
// int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
//int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id;
int index1 = (warp_id * elements_per_warp) + warp_thread_id;
void **ptr_array = (void **)my_ptr_array;
unsigned long long int *array = (unsigned long long int *)my_array;
void **tmp_ptr;
//tmp_ptr = (void *)sdata;
//tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[index1]));
tmp_ptr = (void **)(&(array[index1]));
double f1, f2, f3;
f1 = 1.1;
f2 = 2.5;
if (warp_thread_id < divergence) {
/* __asm volatile (
".reg .f32 %r14;\n\t"
"mov.f32 %r14, 2.2;\n\t"
);
*/
for (int l = 0; l < iterations; l++) {
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
}
}
// __syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid);
// __syncthreads();
}
void usage() {
std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl;
}
void parametric_measure_shared(int N, int iterations, int stride) {
cudaProfilerStop();
int i;
unsigned long long int * h_a;
unsigned long long int * d_a;
unsigned long long ** h_ptr_a;
unsigned long long ** d_ptr_a;
unsigned long long * duration;
unsigned long long * latency;
cudaError_t error_id;
/* allocate array on CPU */
h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N);
h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N);
latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks);
/* initialize array elements on CPU */
for (i = 0; i < N; i++) {
h_ptr_a[i] = (unsigned long long *)&h_a[i];
}
for (i = 0; i < N; i++) {
h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N];
}
/* allocate arrays on GPU */
cudaMalloc ((void **) &d_a, sizeof(unsigned long long int) * N );
cudaMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N );
cudaMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks);
cudaThreadSynchronize ();
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 1 is %s\n", cudaGetErrorString(error_id));
}
/* copy array elements from CPU to GPU */
cudaMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, cudaMemcpyHostToDevice);
cudaMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, cudaMemcpyHostToDevice);
cudaMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyHostToDevice);
cudaThreadSynchronize ();
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 2 is %s\n", cudaGetErrorString(error_id));
}
init_memory <<<1, 1>>>(d_ptr_a, d_a, stride, num_blocks, num_threads_per_block);
cudaDeviceSynchronize();
/* launch kernel*/
//dim3 Db = dim3(13);
//dim3 Dg = dim3(768,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
// int sharedMemSize = sizeof(unsigned long long int) * N ;
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
std::string cmd = "GPUJOULE_DIR/nvml/example/power_monitor 5 > GPUJOULE_DIR/energy_model_ubench/energy_model_data/data_movement_energy/l2_cache/fadd_l2d_99_1_64p_asm_power.txt &";
std::system(cmd.c_str());
std::system("sleep 5");
cudaEventRecord(start, 0);
cudaProfilerStart();
cudaFuncSetCacheConfig(shared_latency, cudaFuncCachePreferL1);
//shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration);
//shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence);
shared_latency <<<num_blocks, num_threads_per_block>>>(d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block);
cudaDeviceSynchronize();
///cudaThreadSynchronize ();
cudaProfilerStop();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
std::system("killall power_monitor");
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 3 is %s\n", cudaGetErrorString(error_id));
}
/* copy results from GPU to CPU */
cudaMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, cudaMemcpyDeviceToHost);
cudaMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyDeviceToHost);
cudaThreadSynchronize ();
/* print results*/
unsigned long long max_dur = latency[0];
unsigned long long min_dur = latency[0];
unsigned long long avg_lat = latency[0];
for (int i = 1; i < num_threads_per_block * num_blocks; i++) {
avg_lat += latency[i];
if (latency[i] > max_dur) {
max_dur = latency[i];
} else if (latency[i] < min_dur) {
min_dur = latency[i];
}
}
// printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time);
printf("%f\n", time);
/* free memory on GPU */
cudaFree(d_a);
cudaFree(d_ptr_a);
cudaFree(duration);
cudaThreadSynchronize ();
/*free memory on CPU */
free(h_a);
free(h_ptr_a);
free(latency);
}
int main(int argc, char **argv)
{
int N;
if (argc != 6) {
usage();
exit(1);
}
num_blocks = atoi(argv[1]);
num_threads_per_block = atoi(argv[2]);
num_iterations = atoi(argv[3]);
divergence = atoi(argv[4]);
int stride = atoi(argv[5]);
N = GLOBAL_MEM_ELEMENTS;
parametric_measure_shared(N, 10, stride);
return 0;
}
|
ae9fa8b8f357cfb26210de7621e4d2acd006f2fd.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <thrust/extrema.h>
#include <thrust/device_vector.h>
// Task: find the maximum element in an array of objects by key
struct type { // Array element type: a struct with two fields
int key;
int value;
};
struct comparator {
__host__ __device__ bool operator()(type a, type b) { // Function that compares objects with "<"
return a.key < b.key; // operator() overloads the "()" operator for instances of this struct
}
};
int main() {
srand(time(NULL)); // Seed the random number generator based on the current time
comparator comp;
int i, i_max = -1, n = 100000;
type *arr = (type *)malloc(sizeof(type) * n);
for(i = 0; i < n; i++) { // Initialize the array and, along the way, find the maximum on the CPU
arr[i].key = rand();
arr[i].value = rand();
if (i_max == -1 || comp(arr[i_max], arr[i]))
i_max = i;
}
type *dev_arr;
hipMalloc(&dev_arr, sizeof(type) * n);
hipMemcpy(dev_arr, arr, sizeof(type) * n, hipMemcpyHostToDevice); // Copy the array to the GPU
thrust::device_ptr<type> p_arr = thrust::device_pointer_cast(dev_arr); // Thrust functions take their own pointer type, so cast the raw pointer.
thrust::device_ptr<type> res = thrust::max_element(p_arr, p_arr + n, comp); // Find the maximum element of the array on the GPU
printf("cpu: %d\ngpu: %d\n", i_max, (int)(res - p_arr)); // Print the index of the maximum element found on the CPU and on the GPU
hipFree(dev_arr);
free(arr);
return 0;
}
| ae9fa8b8f357cfb26210de7621e4d2acd006f2fd.cu | #include <stdio.h>
#include <thrust/extrema.h>
#include <thrust/device_vector.h>
// Task: find the maximum element in an array of objects by key
struct type { // Array element type: a struct with two fields
int key;
int value;
};
struct comparator {
__host__ __device__ bool operator()(type a, type b) { // Function that compares objects with "<"
return a.key < b.key; // operator() overloads the "()" operator for instances of this struct
}
};
int main() {
srand(time(NULL)); // Seed the random number generator based on the current time
comparator comp;
int i, i_max = -1, n = 100000;
type *arr = (type *)malloc(sizeof(type) * n);
for(i = 0; i < n; i++) { // Initialize the array and, along the way, find the maximum on the CPU
arr[i].key = rand();
arr[i].value = rand();
if (i_max == -1 || comp(arr[i_max], arr[i]))
i_max = i;
}
type *dev_arr;
cudaMalloc(&dev_arr, sizeof(type) * n);
cudaMemcpy(dev_arr, arr, sizeof(type) * n, cudaMemcpyHostToDevice); // Copy the array to the GPU
thrust::device_ptr<type> p_arr = thrust::device_pointer_cast(dev_arr); // Thrust functions take their own pointer type, so cast the raw pointer.
thrust::device_ptr<type> res = thrust::max_element(p_arr, p_arr + n, comp); // Find the maximum element of the array on the GPU
printf("cpu: %d\ngpu: %d\n", i_max, (int)(res - p_arr)); // Print the index of the maximum element found on the CPU and on the GPU
cudaFree(dev_arr);
free(arr);
return 0;
}
|
b24e3aab8256f2f798a4fa7a5484d34a64a03a3b.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
#include <sstream>
#include <iomanip> // std::setfill, std::setw
#include <string>
#include <stdlib.h> // For malloc 2d
// #include <opencv2/opencv.hpp>
#include <omp.h>
#include <mpi.h>
#include <hip/hip_runtime.h>
#include <helper_math.h> //For clamp
#include <assert.h>
#include <hetero_cmdparser.hpp>
#include "median_3d.hpp"
float ReverseFloat( const float inFloat )
{
float retVal;
char *floatToConvert = ( char* ) & inFloat;
char *returnFloat = ( char* ) & retVal;
// swap the bytes into a temporary buffer
returnFloat[0] = floatToConvert[3];
returnFloat[1] = floatToConvert[2];
returnFloat[2] = floatToConvert[1];
returnFloat[3] = floatToConvert[0];
return retVal;
}
using namespace std;
// using namespace cv;
////////////////////////////////////////////////////////////////////////////////////////////////////
#define cudaCheckLastError() { \
hipError_t error = hipGetLastError(); \
int id; hipGetDevice(&id); \
if(error != hipSuccess) { \
printf("Cuda failure error in file '%s' in line %i: '%s' at device %d \n", \
__FILE__,__LINE__, hipGetErrorString(error), id); \
exit(EXIT_FAILURE); \
} \
}
////////////////////////////////////////////////////////////////////////////////////////////////////
#define checkReadFile(filename, pData, size) { \
fstream *fs = new fstream; \
fs->open(filename.c_str(), ios::in|ios::binary); \
if (!fs->is_open()) \
{ \
printf("Cannot open file '%s' in file '%s' at line %i\n", \
filename, __FILE__, __LINE__); \
return 1; \
} \
fs->read(reinterpret_cast<char*>(pData), size); \
fs->close(); \
delete fs; \
}
////////////////////////////////////////////////////////////////////////////////////////////////////
#define checkWriteFile(filename, pData, size) { \
fstream *fs = new fstream; \
fs->open(filename, ios::out|ios::binary); \
if (!fs->is_open()) \
{ \
fprintf(stderr, "Cannot open file '%s' in file '%s' at line %i\n", \
filename, __FILE__, __LINE__); \
return 1; \
} \
fs->write(reinterpret_cast<char*>(pData), size); \
fs->close(); \
delete fs; \
}
////////////////////////////////////////////////////////////////////////////////////////////////////
static void handle_error(int errcode, const char *str)
{
char msg[MPI_MAX_ERROR_STRING];
int resultlen;
MPI_Error_string(errcode, msg, &resultlen);
fprintf(stderr, "%s: %s\n", str, msg);
MPI_Abort(MPI_COMM_WORLD, 1);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
const char* key =
"{ h |help | | print help message }"
"{ vx |virtualDimx | | virtualDimx }"
"{ vy |virtualDimy | | virtualDimy }"
"{ vz |virtualDimz | | virtualDimz }"
"{ px |processDimx | | processDimx }"
"{ py |processDimy | | processDimy }"
"{ pz |processDimz | | processDimz }"
"{ hx |haloDimx | | haloDimx }"
"{ hy |haloDimy | | haloDimy }"
"{ hz |haloDimz | | haloDimz }"
"{ dimx|dimx | | dimensionx }"
"{ dimy|dimy | | dimensiony }"
"{ dimz|dimz | | dimensionz }"
"{ mp |maxProcs | 1 | maxProcs }"
"{ id |execId | | indicate the ith times launch mpi, act like a queue}"
"{ i |srcFile | | source of the file }"
"{ o |dstFile | | destination of the file }"
;
////////////////////////////////////////////////////////////////////////////////////////////////////
#define at(x, y, dimx, dimy) ( clamp((int)y, 0, dimy-1)*dimx + \
clamp((int)x, 0, dimx-1) )
int main(int argc, char *argv[])
{
//================================================================================
// Initialize MPI
int rank, size;
char name[MPI_MAX_PROCESSOR_NAME];
int length;
int errCode;
MPI_File fh;
MPI_Init(&argc, &argv);
//================================================================================
// Retrieve the number of execId
// Parsing the arguments
CommandLineParser cmd(argc, argv, key);
const int execId = cmd.get<int>("execId", false);
const int maxProcs = cmd.get<int>("maxProcs", false);
const int virtualDimx = cmd.get<int>("virtualDimx", false);
const int virtualDimy = cmd.get<int>("virtualDimy", false);
const int virtualDimz = cmd.get<int>("virtualDimz", false);
const int processDimx = cmd.get<int>("processDimx", false);
const int processDimy = cmd.get<int>("processDimy", false);
const int processDimz = cmd.get<int>("processDimz", false);
const int haloDimx = cmd.get<int>("haloDimx", false);
const int haloDimy = cmd.get<int>("haloDimy", false);
const int haloDimz = cmd.get<int>("haloDimz", false);
// const int virtualSize = cmd.get<int>("virtualSize", false);
const int dimx = cmd.get<int>("dimx", false);
const int dimy = cmd.get<int>("dimy", false);
const int dimz = cmd.get<int>("dimz", false);
const string srcFile = cmd.get<string>("srcFile", false);
const string dstFile = cmd.get<string>("dstFile", false);
// printf("execId=%d, setNumProcs=%d, rank=%d\n", execId, maxProcs, rank);
// printf("virtualDimx=%02d, virtualDimy=%02d\n",
// virtualDimx, virtualDimy);
//================================================================================
// Set up Cartesian grid of processors.
// MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periodic, reorder, &comm2d );
// MPI_Cart_get(comm2d, 2, dims, periodic, coords );
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Get_processor_name(name, &length);
MPI_Barrier(MPI_COMM_WORLD);
//================================================================================
// Manually determine rank in 3d, comm3d will work in this case
int virtualRank = execId * maxProcs + rank;
int3 virtualIdx = make_int3(virtualRank % (virtualDimx*virtualDimy) % virtualDimx,
virtualRank % (virtualDimx*virtualDimy) / virtualDimx,
virtualRank / (virtualDimx*virtualDimy));
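// Decompose the linear virtual rank into 3D grid coordinates:
// x = r % (Dx*Dy) % Dx, y = r % (Dx*Dy) / Dx, z = r / (Dx*Dy).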
printf("execId(%d), maxProcs(%d), rank(%d)\n", execId, maxProcs, rank);
MPI_Barrier(MPI_COMM_WORLD);
printf("virtualIdx.x=%02d, virtualIdx.y=%02d, virtualIdx.z=%02d, virtualRank=%02d, at %s\n",
virtualIdx.x, virtualIdx.y, virtualIdx.z, virtualRank, name);
MPI_Barrier(MPI_COMM_WORLD);
// MPI_Finalize(); return 0;
//================================================================================
/// Data type primitives
int starts[3];
int subsizes[3];
int bigsizes[3];
MPI_Request request;
//================================================================================
///!!!Do not need to pack, calculate per-process directly
/// !! First step: Determine size of buffer
int3 featureIdx { 0, 0, 0};
int3 processDim {1, 1, 1};
int3 index_3d;
int3 closedChunkDim {0, 0, 0};
int3 halo {0, 0, 0};
//================================================================================
processDim.x = processDimx;
processDim.y = processDimy;
processDim.z = processDimz;
halo.x = haloDimx;
halo.y = haloDimy;
halo.z = haloDimz;
//================================================================================
for(featureIdx.z=0; featureIdx.z<processDim.z; featureIdx.z++)
{
int3 index_3d;
for(featureIdx.y=0; featureIdx.y<processDim.y; featureIdx.y++)
{
for(featureIdx.x=0; featureIdx.x<processDim.x; featureIdx.x++)
{
//3D global index
index_3d = make_int3(
virtualIdx.x*processDim.x+featureIdx.x,
virtualIdx.y*processDim.y+featureIdx.y,
virtualIdx.z*processDim.z+featureIdx.z);
if(index_3d.x==dimx) break;
}
if(index_3d.y==dimy) break;
}
if(index_3d.z==dimz) break;
}
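// The loop nest above clips this process's nominal chunk (processDim) against the
// global volume bounds; featureIdx now holds the extent of the halo-free ("closed") chunk.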
closedChunkDim = make_int3(featureIdx.x, featureIdx.y, featureIdx.z);
printf("Sub closed chunk size: closedChunkDim.x=%05d, closedChunkDim.y=%05d, closedChunkDim.z=%05d at virtualIdx.x=%02d, virtualIdx.y=%02d, virtualIdx.z=%02d (virtualRank=%02d)\n",
closedChunkDim.x, closedChunkDim.y, closedChunkDim.z,
virtualIdx.x, virtualIdx.y, virtualIdx.z, virtualRank, name);
MPI_Barrier(MPI_COMM_WORLD);
//================================================================================
// Read the file
char *srcChar = strdup(srcFile.c_str());
cout << srcChar << endl;
MPI_Datatype etype;
etype = MPI_FLOAT;
//================================================================================
index_3d = make_int3(
(virtualRank%(virtualDimx*virtualDimy)%virtualDimx)*processDim.x+0,
(virtualRank%(virtualDimx*virtualDimy)/virtualDimx)*processDim.y+0,
(virtualRank/(virtualDimx*virtualDimy) )*processDim.z+0);
///!Order is very important
bigsizes[0] = dimz; //0 2
bigsizes[1] = dimy; //1 0
bigsizes[2] = dimx; //2 1
subsizes[0] = closedChunkDim.z;
subsizes[1] = closedChunkDim.y;
subsizes[2] = closedChunkDim.x;
starts[0] = index_3d.z;
starts[1] = index_3d.y;
starts[2] = index_3d.x;
MPI_Datatype closedChunkArray; ///!!! Declare the data type
MPI_Type_create_subarray(3, bigsizes, subsizes, starts, MPI_ORDER_C, MPI_FLOAT, &closedChunkArray);
MPI_Type_commit(&closedChunkArray); ///!!! Commit the data type
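	// Note: with MPI_ORDER_C the first index varies slowest, so bigsizes,
	// subsizes and starts are listed as {z, y, x}; the committed type therefore
	// describes this rank's closedChunkDim block at offset index_3d inside the
	// full dimx*dimy*dimz volume.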
// Check correctness here
/*
errCode = MPI_File_open(MPI_COMM_WORLD, srcChar, MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
if (errCode != MPI_SUCCESS) handle_error(errCode, "MPI_File_open");
MPI_File_set_view(fh, 0, etype, closedChunkArray, "native", MPI_INFO_NULL);
float *p_closedChunk;
p_closedChunk = (float*)malloc(closedChunkDim.x*closedChunkDim.y*closedChunkDim.z*sizeof(float));
MPI_File_read(fh, p_closedChunk, closedChunkDim.x*closedChunkDim.y*closedChunkDim.z, MPI_FLOAT, MPI_STATUS_IGNORE);
MPI_File_close(&fh);
//================================================================================
///!!! Write globally
errCode = MPI_File_open(MPI_COMM_WORLD, "closedSubArray.raw", MPI_MODE_RDWR|MPI_MODE_CREATE, MPI_INFO_NULL, &fh);
if (errCode != MPI_SUCCESS) handle_error(errCode, "MPI_File_open");
MPI_File_set_view(fh, 0, etype, closedChunkArray, "native", MPI_INFO_NULL);
MPI_File_write_all(fh, p_closedChunk, closedChunkDim.x*closedChunkDim.y*closedChunkDim.z, MPI_FLOAT, MPI_STATUS_IGNORE);
MPI_File_close(&fh);
*/
// MPI_Finalize(); return 0;
//================================================================================
//================================================================================
//================================================================================
//================================================================================
int leftRank, rightRank;
int topRank, bottomRank;
int frontRank, backRank;
leftRank = virtualIdx.x-1;
rightRank = virtualIdx.x+1;
topRank = virtualIdx.y-1;
bottomRank = virtualIdx.y+1;
frontRank = virtualIdx.z-1;
backRank = virtualIdx.z+1;
///!!! Handle the boundary case
bool atBoundariesLeftRight = (leftRank<0)|(rightRank>(virtualDimx-1));
bool atBoundariesTopBottom = (topRank<0)|(bottomRank>(virtualDimy-1));
bool atBoundariesFrontBack = (frontRank<0)|(backRank>(virtualDimz-1));
int3 openedChunkDim {0, 0, 0};
openedChunkDim = make_int3(closedChunkDim.x + ((atBoundariesLeftRight)?((virtualDimx==1)?0*halo.x:1*halo.x):(2*halo.x)),
closedChunkDim.y + ((atBoundariesTopBottom)?((virtualDimy==1)?0*halo.y:1*halo.y):(2*halo.y)),
closedChunkDim.z + ((atBoundariesFrontBack)?((virtualDimz==1)?0*halo.z:1*halo.z):(2*halo.z)));
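	// Illustrative note: with halo.x=3 an interior rank reads closedChunkDim.x+6
	// columns, a rank touching one x-boundary reads closedChunkDim.x+3, and a
	// single rank in x (virtualDimx==1) reads closedChunkDim.x with no halo at
	// all; y and z behave the same way.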
printf("Sub opened chunk size: openedChunkDim.x=%05d, openedChunkDim.y=%05d, openedChunkDim.z=%05d at virtualIdx.x=%02d, virtualIdx.y=%02d, virtualIdx.z=%02d (virtualRank=%02d)\n",
openedChunkDim.x, openedChunkDim.y, openedChunkDim.z,
virtualIdx.x, virtualIdx.y, virtualIdx.z, virtualRank, name);
MPI_Barrier(MPI_COMM_WORLD);
// Redefine the data type
bigsizes[0] = dimz;
bigsizes[1] = dimy;
bigsizes[2] = dimx;
subsizes[0] = openedChunkDim.z;
subsizes[1] = openedChunkDim.y;
subsizes[2] = openedChunkDim.x;
starts[0] = (frontRank<0)?0:(index_3d.z-halo.z); ///!!! Handle the boundary start indices
starts[1] = (topRank<0)?0:(index_3d.y-halo.y); ///!!! Handle the boundary start indices
starts[2] = (leftRank<0)?0:(index_3d.x-halo.x); ///!!! Handle the boundary start indices
MPI_Datatype openedChunkArray; ///!!! Declare the data type
MPI_Type_create_subarray(3, bigsizes, subsizes, starts, MPI_ORDER_C, MPI_FLOAT, &openedChunkArray);
MPI_Type_commit(&openedChunkArray); ///!!! Commit the data type
errCode = MPI_File_open(MPI_COMM_WORLD, srcChar, MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
if (errCode != MPI_SUCCESS) handle_error(errCode, "MPI_File_open");
MPI_File_set_view(fh, 0, etype, openedChunkArray, "native", MPI_INFO_NULL);
float *p_openedChunk;
p_openedChunk = (float*)malloc(openedChunkDim.x*openedChunkDim.y*openedChunkDim.z*sizeof(float));
MPI_File_read(fh, p_openedChunk, openedChunkDim.x*openedChunkDim.y*openedChunkDim.z, MPI_FLOAT, MPI_STATUS_IGNORE);
MPI_File_close(&fh);
//================================================================================
///!!! Processing with CUDA here
/// Allocate d_src, d_dst, copy back to p_openedChunk
int numDevices;
hipGetDeviceCount(&numDevices);
hipSetDevice(rank%numDevices);
hipDeviceReset();
cudaCheckLastError();
float *d_src, *d_dst;
int local_halo, local_radius;
int openedChunkDimxyz = openedChunkDim.x*openedChunkDim.y*openedChunkDim.z;
hipMalloc((void**)&d_src, (openedChunkDimxyz)*sizeof(float));
hipMalloc((void**)&d_dst, (openedChunkDimxyz)*sizeof(float));
local_radius = 3;
local_halo = 3;
hipMemcpy(d_src, p_openedChunk, (openedChunkDimxyz)*sizeof(float), hipMemcpyHostToDevice);
// hipMemcpy(d_dst, d_src, (openedChunkDimxyz)*sizeof(float), hipMemcpyDeviceToDevice); // Debug purpose
median_3d(d_src, d_dst, openedChunkDim.x, openedChunkDim.y, openedChunkDim.z, local_radius, local_halo);
hipDeviceSynchronize();
hipMemcpy(p_openedChunk, d_dst, (openedChunkDimxyz)*sizeof(float), hipMemcpyDeviceToHost);
// float t;
// for(int k=0; k<openedChunkDim.x*openedChunkDim.y*openedChunkDim.z; k++)
// {
// // t = ReverseFloat(p_openedChunk[k]);
// // if(virtualRank==0)
// // p_openedChunk[k] = ReverseFloat(t/2);
// // else
// // p_openedChunk[k] = ReverseFloat(t);
// t = p_openedChunk[k];
// if(virtualRank==0)
// p_openedChunk[k] = t/2;
// else
// p_openedChunk[k] = t;
// }
//================================================================================
// int3 closedChunkDim {0, 0, 0};
// closedChunkDim = make_int3(closedChunkDim.x,
// closedChunkDim.y,
// 1);
// printf("Sub closed chunk size: closedChunkDim.x=%05d, closedChunkDim.y=%05d at virtualIdx.x=%02d, virtualIdx.y=%02d (virtualRank=%02d)\n",
// closedChunkDim.x, closedChunkDim.y,
// virtualIdx.x, virtualIdx.y, virtualRank, name);
float *p_closedChunk;
p_closedChunk = (float*)malloc(closedChunkDim.x*closedChunkDim.y*closedChunkDim.z*sizeof(float));
///!!! Act like shared memory, copy from read
bigsizes[0] = openedChunkDim.z;
bigsizes[1] = openedChunkDim.y;
bigsizes[2] = openedChunkDim.x;
subsizes[0] = closedChunkDim.z;
subsizes[1] = closedChunkDim.y;
subsizes[2] = closedChunkDim.x;
starts[0] = (frontRank<0)?0:halo.z;
starts[1] = (topRank<0)?0:halo.y;
starts[2] = (leftRank<0)?0:halo.x;
MPI_Datatype subarray;
MPI_Type_create_subarray(3, bigsizes, subsizes, starts, MPI_ORDER_C, MPI_FLOAT, &subarray);
MPI_Type_commit(&subarray); ///!!! Commit the data type
//Self copy
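	// Note: the send side uses the derived `subarray` type to strip the halo out
	// of p_openedChunk, while the matching receive treats p_closedChunk as a
	// contiguous run of closedChunkDim.x*closedChunkDim.y*closedChunkDim.z floats,
	// so the halo removal is done as a message to self.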
	MPI_Isend(p_openedChunk, 1, subarray, rank, 0, MPI_COMM_WORLD, &request);
	MPI_Recv(p_closedChunk, closedChunkDim.x*closedChunkDim.y*closedChunkDim.z , MPI_FLOAT, rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
	MPI_Wait(&request, MPI_STATUS_IGNORE); // complete the self-send so the request is not leaked
///!Order is very important
// bigsizes[0] = dimy;
// bigsizes[1] = dimx;
// subsizes[0] = closedChunkDim.y;
// subsizes[1] = closedChunkDim.x;
// starts[0] = index_2d.y;
// starts[1] = index_2d.x;
// MPI_Datatype closedChunkArray; ///!!! Declare the data type
// MPI_Type_create_subarray(2, bigsizes, subsizes, starts,
// MPI_ORDER_C, MPI_FLOAT, &closedChunkArray);
// MPI_Type_commit(&closedChunkArray); ///!!! Commit the data type
char *dstChar = strdup(dstFile.c_str());
cout << dstChar << endl;
///!!! Write globally
errCode = MPI_File_open(MPI_COMM_WORLD, dstChar, MPI_MODE_RDWR|MPI_MODE_CREATE, MPI_INFO_NULL, &fh);
if (errCode != MPI_SUCCESS) handle_error(errCode, "MPI_File_open");
MPI_File_set_view(fh, 0, etype, closedChunkArray, "native", MPI_INFO_NULL);
MPI_File_write_all(fh, p_closedChunk, closedChunkDim.x*closedChunkDim.y*closedChunkDim.z, MPI_FLOAT, MPI_STATUS_IGNORE);
MPI_File_close(&fh);
//================================================================================
// Close MPI
free(p_openedChunk);
free(p_closedChunk);
MPI_Finalize();
return 0;
} | b24e3aab8256f2f798a4fa7a5484d34a64a03a3b.cu | #include <iostream>
#include <fstream>
#include <sstream>
#include <iomanip> // std::setfill, std::setw
#include <string>
#include <stdlib.h> // For malloc 2d
// #include <opencv2/opencv.hpp>
#include <omp.h>
#include <mpi.h>
#include <cuda.h>
#include <helper_math.h> //For clamp
#include <assert.h>
#include <hetero_cmdparser.hpp>
#include "median_3d.hpp"
float ReverseFloat( const float inFloat )
{
float retVal;
char *floatToConvert = ( char* ) & inFloat;
char *returnFloat = ( char* ) & retVal;
// swap the bytes into a temporary buffer
returnFloat[0] = floatToConvert[3];
returnFloat[1] = floatToConvert[2];
returnFloat[2] = floatToConvert[1];
returnFloat[3] = floatToConvert[0];
return retVal;
}
using namespace std;
// using namespace cv;
////////////////////////////////////////////////////////////////////////////////////////////////////
#define cudaCheckLastError() { \
cudaError_t error = cudaGetLastError(); \
int id; cudaGetDevice(&id); \
if(error != cudaSuccess) { \
printf("Cuda failure error in file '%s' in line %i: '%s' at device %d \n", \
__FILE__,__LINE__, cudaGetErrorString(error), id); \
exit(EXIT_FAILURE); \
} \
}
////////////////////////////////////////////////////////////////////////////////////////////////////
#define checkReadFile(filename, pData, size) { \
fstream *fs = new fstream; \
fs->open(filename.c_str(), ios::in|ios::binary); \
if (!fs->is_open()) \
{ \
printf("Cannot open file '%s' in file '%s' at line %i\n", \
filename, __FILE__, __LINE__); \
return 1; \
} \
fs->read(reinterpret_cast<char*>(pData), size); \
fs->close(); \
delete fs; \
}
////////////////////////////////////////////////////////////////////////////////////////////////////
#define checkWriteFile(filename, pData, size) { \
fstream *fs = new fstream; \
fs->open(filename, ios::out|ios::binary); \
if (!fs->is_open()) \
{ \
fprintf(stderr, "Cannot open file '%s' in file '%s' at line %i\n", \
filename, __FILE__, __LINE__); \
return 1; \
} \
fs->write(reinterpret_cast<char*>(pData), size); \
fs->close(); \
delete fs; \
}
////////////////////////////////////////////////////////////////////////////////////////////////////
static void handle_error(int errcode, const char *str)
{
char msg[MPI_MAX_ERROR_STRING];
int resultlen;
MPI_Error_string(errcode, msg, &resultlen);
fprintf(stderr, "%s: %s\n", str, msg);
MPI_Abort(MPI_COMM_WORLD, 1);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
const char* key =
"{ h |help | | print help message }"
"{ vx |virtualDimx | | virtualDimx }"
"{ vy |virtualDimy | | virtualDimy }"
"{ vz |virtualDimz | | virtualDimz }"
"{ px |processDimx | | processDimx }"
"{ py |processDimy | | processDimy }"
"{ pz |processDimz | | processDimz }"
"{ hx |haloDimx | | haloDimx }"
"{ hy |haloDimy | | haloDimy }"
"{ hz |haloDimz | | haloDimz }"
"{ dimx|dimx | | dimensionx }"
"{ dimy|dimy | | dimensiony }"
"{ dimz|dimz | | dimensionz }"
"{ mp |maxProcs | 1 | maxProcs }"
"{ id |execId | | indicate the ith times launch mpi, act like a queue}"
"{ i |srcFile | | source of the file }"
"{ o |dstFile | | destination of the file }"
;
////////////////////////////////////////////////////////////////////////////////////////////////////
#define at(x, y, dimx, dimy) ( clamp((int)y, 0, dimy-1)*dimx + \
clamp((int)x, 0, dimx-1) )
int main(int argc, char *argv[])
{
//================================================================================
// Initialize MPI
int rank, size;
char name[MPI_MAX_PROCESSOR_NAME];
int length;
int errCode;
MPI_File fh;
MPI_Init(&argc, &argv);
//================================================================================
// Retrieve the number of execId
// Parsing the arguments
CommandLineParser cmd(argc, argv, key);
const int execId = cmd.get<int>("execId", false);
const int maxProcs = cmd.get<int>("maxProcs", false);
const int virtualDimx = cmd.get<int>("virtualDimx", false);
const int virtualDimy = cmd.get<int>("virtualDimy", false);
const int virtualDimz = cmd.get<int>("virtualDimz", false);
const int processDimx = cmd.get<int>("processDimx", false);
const int processDimy = cmd.get<int>("processDimy", false);
const int processDimz = cmd.get<int>("processDimz", false);
const int haloDimx = cmd.get<int>("haloDimx", false);
const int haloDimy = cmd.get<int>("haloDimy", false);
const int haloDimz = cmd.get<int>("haloDimz", false);
// const int virtualSize = cmd.get<int>("virtualSize", false);
const int dimx = cmd.get<int>("dimx", false);
const int dimy = cmd.get<int>("dimy", false);
const int dimz = cmd.get<int>("dimz", false);
const string srcFile = cmd.get<string>("srcFile", false);
const string dstFile = cmd.get<string>("dstFile", false);
// printf("execId=%d, setNumProcs=%d, rank=%d\n", execId, maxProcs, rank);
// printf("virtualDimx=%02d, virtualDimy=%02d\n",
// virtualDimx, virtualDimy);
//================================================================================
// Set up Cartesian grid of processors.
// MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periodic, reorder, &comm2d );
// MPI_Cart_get(comm2d, 2, dims, periodic, coords );
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Get_processor_name(name, &length);
MPI_Barrier(MPI_COMM_WORLD);
//================================================================================
// Manually determine rank in 3d, comm3d will work in this case
int virtualRank = execId * maxProcs + rank;
int3 virtualIdx = make_int3(virtualRank % (virtualDimx*virtualDimy) % virtualDimx,
virtualRank % (virtualDimx*virtualDimy) / virtualDimx,
virtualRank / (virtualDimx*virtualDimy));
printf("execId(%d), maxProcs(%d), rank(%d)\n", execId, maxProcs, rank);
MPI_Barrier(MPI_COMM_WORLD);
printf("virtualIdx.x=%02d, virtualIdx.y=%02d, virtualIdx.z=%02d, virtualRank=%02d, at %s\n",
virtualIdx.x, virtualIdx.y, virtualIdx.z, virtualRank, name);
MPI_Barrier(MPI_COMM_WORLD);
// MPI_Finalize(); return 0;
//================================================================================
/// Data type primitives
int starts[3];
int subsizes[3];
int bigsizes[3];
MPI_Request request;
//================================================================================
///!!!Do not need to pack, calculate per-process directly
/// !! First step: Determine size of buffer
int3 featureIdx { 0, 0, 0};
int3 processDim {1, 1, 1};
int3 index_3d;
int3 closedChunkDim {0, 0, 0};
int3 halo {0, 0, 0};
//================================================================================
processDim.x = processDimx;
processDim.y = processDimy;
processDim.z = processDimz;
halo.x = haloDimx;
halo.y = haloDimy;
halo.z = haloDimz;
//================================================================================
for(featureIdx.z=0; featureIdx.z<processDim.z; featureIdx.z++)
{
int3 index_3d;
for(featureIdx.y=0; featureIdx.y<processDim.y; featureIdx.y++)
{
for(featureIdx.x=0; featureIdx.x<processDim.x; featureIdx.x++)
{
//3D global index
index_3d = make_int3(
virtualIdx.x*processDim.x+featureIdx.x,
virtualIdx.y*processDim.y+featureIdx.y,
virtualIdx.z*processDim.z+featureIdx.z);
if(index_3d.x==dimx) break;
}
if(index_3d.y==dimy) break;
}
if(index_3d.z==dimz) break;
}
closedChunkDim = make_int3(featureIdx.x, featureIdx.y, featureIdx.z);
printf("Sub closed chunk size: closedChunkDim.x=%05d, closedChunkDim.y=%05d, closedChunkDim.z=%05d at virtualIdx.x=%02d, virtualIdx.y=%02d, virtualIdx.z=%02d (virtualRank=%02d)\n",
closedChunkDim.x, closedChunkDim.y, closedChunkDim.z,
virtualIdx.x, virtualIdx.y, virtualIdx.z, virtualRank, name);
MPI_Barrier(MPI_COMM_WORLD);
//================================================================================
// Read the file
char *srcChar = strdup(srcFile.c_str());
cout << srcChar << endl;
MPI_Datatype etype;
etype = MPI_FLOAT;
//================================================================================
index_3d = make_int3(
(virtualRank%(virtualDimx*virtualDimy)%virtualDimx)*processDim.x+0,
(virtualRank%(virtualDimx*virtualDimy)/virtualDimx)*processDim.y+0,
(virtualRank/(virtualDimx*virtualDimy) )*processDim.z+0);
///!Order is very important
bigsizes[0] = dimz; //0 2
bigsizes[1] = dimy; //1 0
bigsizes[2] = dimx; //2 1
subsizes[0] = closedChunkDim.z;
subsizes[1] = closedChunkDim.y;
subsizes[2] = closedChunkDim.x;
starts[0] = index_3d.z;
starts[1] = index_3d.y;
starts[2] = index_3d.x;
MPI_Datatype closedChunkArray; ///!!! Declare the data type
MPI_Type_create_subarray(3, bigsizes, subsizes, starts, MPI_ORDER_C, MPI_FLOAT, &closedChunkArray);
MPI_Type_commit(&closedChunkArray); ///!!! Commit the data type
// Check correctness here
/*
errCode = MPI_File_open(MPI_COMM_WORLD, srcChar, MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
if (errCode != MPI_SUCCESS) handle_error(errCode, "MPI_File_open");
MPI_File_set_view(fh, 0, etype, closedChunkArray, "native", MPI_INFO_NULL);
float *p_closedChunk;
p_closedChunk = (float*)malloc(closedChunkDim.x*closedChunkDim.y*closedChunkDim.z*sizeof(float));
MPI_File_read(fh, p_closedChunk, closedChunkDim.x*closedChunkDim.y*closedChunkDim.z, MPI_FLOAT, MPI_STATUS_IGNORE);
MPI_File_close(&fh);
//================================================================================
///!!! Write globally
errCode = MPI_File_open(MPI_COMM_WORLD, "closedSubArray.raw", MPI_MODE_RDWR|MPI_MODE_CREATE, MPI_INFO_NULL, &fh);
if (errCode != MPI_SUCCESS) handle_error(errCode, "MPI_File_open");
MPI_File_set_view(fh, 0, etype, closedChunkArray, "native", MPI_INFO_NULL);
MPI_File_write_all(fh, p_closedChunk, closedChunkDim.x*closedChunkDim.y*closedChunkDim.z, MPI_FLOAT, MPI_STATUS_IGNORE);
MPI_File_close(&fh);
*/
// MPI_Finalize(); return 0;
//================================================================================
//================================================================================
//================================================================================
//================================================================================
int leftRank, rightRank;
int topRank, bottomRank;
int frontRank, backRank;
leftRank = virtualIdx.x-1;
rightRank = virtualIdx.x+1;
topRank = virtualIdx.y-1;
bottomRank = virtualIdx.y+1;
frontRank = virtualIdx.z-1;
backRank = virtualIdx.z+1;
///!!! Handle the boundary case
bool atBoundariesLeftRight = (leftRank<0)|(rightRank>(virtualDimx-1));
bool atBoundariesTopBottom = (topRank<0)|(bottomRank>(virtualDimy-1));
bool atBoundariesFrontBack = (frontRank<0)|(backRank>(virtualDimz-1));
int3 openedChunkDim {0, 0, 0};
openedChunkDim = make_int3(closedChunkDim.x + ((atBoundariesLeftRight)?((virtualDimx==1)?0*halo.x:1*halo.x):(2*halo.x)),
closedChunkDim.y + ((atBoundariesTopBottom)?((virtualDimy==1)?0*halo.y:1*halo.y):(2*halo.y)),
closedChunkDim.z + ((atBoundariesFrontBack)?((virtualDimz==1)?0*halo.z:1*halo.z):(2*halo.z)));
printf("Sub opened chunk size: openedChunkDim.x=%05d, openedChunkDim.y=%05d, openedChunkDim.z=%05d at virtualIdx.x=%02d, virtualIdx.y=%02d, virtualIdx.z=%02d (virtualRank=%02d)\n",
openedChunkDim.x, openedChunkDim.y, openedChunkDim.z,
virtualIdx.x, virtualIdx.y, virtualIdx.z, virtualRank, name);
MPI_Barrier(MPI_COMM_WORLD);
// Redefine the data type
bigsizes[0] = dimz;
bigsizes[1] = dimy;
bigsizes[2] = dimx;
subsizes[0] = openedChunkDim.z;
subsizes[1] = openedChunkDim.y;
subsizes[2] = openedChunkDim.x;
starts[0] = (frontRank<0)?0:(index_3d.z-halo.z); ///!!! Handle the boundary start indices
starts[1] = (topRank<0)?0:(index_3d.y-halo.y); ///!!! Handle the boundary start indices
starts[2] = (leftRank<0)?0:(index_3d.x-halo.x); ///!!! Handle the boundary start indices
MPI_Datatype openedChunkArray; ///!!! Declare the data type
MPI_Type_create_subarray(3, bigsizes, subsizes, starts, MPI_ORDER_C, MPI_FLOAT, &openedChunkArray);
MPI_Type_commit(&openedChunkArray); ///!!! Commit the data type
errCode = MPI_File_open(MPI_COMM_WORLD, srcChar, MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
if (errCode != MPI_SUCCESS) handle_error(errCode, "MPI_File_open");
MPI_File_set_view(fh, 0, etype, openedChunkArray, "native", MPI_INFO_NULL);
float *p_openedChunk;
p_openedChunk = (float*)malloc(openedChunkDim.x*openedChunkDim.y*openedChunkDim.z*sizeof(float));
MPI_File_read(fh, p_openedChunk, openedChunkDim.x*openedChunkDim.y*openedChunkDim.z, MPI_FLOAT, MPI_STATUS_IGNORE);
MPI_File_close(&fh);
//================================================================================
///!!! Processing with CUDA here
/// Allocate d_src, d_dst, copy back to p_openedChunk
int numDevices;
cudaGetDeviceCount(&numDevices);
cudaSetDevice(rank%numDevices);
cudaDeviceReset();
cudaCheckLastError();
float *d_src, *d_dst;
int local_halo, local_radius;
int openedChunkDimxyz = openedChunkDim.x*openedChunkDim.y*openedChunkDim.z;
cudaMalloc((void**)&d_src, (openedChunkDimxyz)*sizeof(float));
cudaMalloc((void**)&d_dst, (openedChunkDimxyz)*sizeof(float));
local_radius = 3;
local_halo = 3;
cudaMemcpy(d_src, p_openedChunk, (openedChunkDimxyz)*sizeof(float), cudaMemcpyHostToDevice);
// cudaMemcpy(d_dst, d_src, (openedChunkDimxyz)*sizeof(float), cudaMemcpyDeviceToDevice); // Debug purpose
median_3d(d_src, d_dst, openedChunkDim.x, openedChunkDim.y, openedChunkDim.z, local_radius, local_halo);
cudaDeviceSynchronize();
cudaMemcpy(p_openedChunk, d_dst, (openedChunkDimxyz)*sizeof(float), cudaMemcpyDeviceToHost);
// float t;
// for(int k=0; k<openedChunkDim.x*openedChunkDim.y*openedChunkDim.z; k++)
// {
// // t = ReverseFloat(p_openedChunk[k]);
// // if(virtualRank==0)
// // p_openedChunk[k] = ReverseFloat(t/2);
// // else
// // p_openedChunk[k] = ReverseFloat(t);
// t = p_openedChunk[k];
// if(virtualRank==0)
// p_openedChunk[k] = t/2;
// else
// p_openedChunk[k] = t;
// }
//================================================================================
// int3 closedChunkDim {0, 0, 0};
// closedChunkDim = make_int3(closedChunkDim.x,
// closedChunkDim.y,
// 1);
// printf("Sub closed chunk size: closedChunkDim.x=%05d, closedChunkDim.y=%05d at virtualIdx.x=%02d, virtualIdx.y=%02d (virtualRank=%02d)\n",
// closedChunkDim.x, closedChunkDim.y,
// virtualIdx.x, virtualIdx.y, virtualRank, name);
float *p_closedChunk;
p_closedChunk = (float*)malloc(closedChunkDim.x*closedChunkDim.y*closedChunkDim.z*sizeof(float));
///!!! Act like shared memory, copy from read
bigsizes[0] = openedChunkDim.z;
bigsizes[1] = openedChunkDim.y;
bigsizes[2] = openedChunkDim.x;
subsizes[0] = closedChunkDim.z;
subsizes[1] = closedChunkDim.y;
subsizes[2] = closedChunkDim.x;
starts[0] = (frontRank<0)?0:halo.z;
starts[1] = (topRank<0)?0:halo.y;
starts[2] = (leftRank<0)?0:halo.x;
MPI_Datatype subarray;
MPI_Type_create_subarray(3, bigsizes, subsizes, starts, MPI_ORDER_C, MPI_FLOAT, &subarray);
MPI_Type_commit(&subarray); ///!!! Commit the data type
//Self copy
	MPI_Isend(p_openedChunk, 1, subarray, rank, 0, MPI_COMM_WORLD, &request);
	MPI_Recv(p_closedChunk, closedChunkDim.x*closedChunkDim.y*closedChunkDim.z , MPI_FLOAT, rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
	MPI_Wait(&request, MPI_STATUS_IGNORE); // complete the self-send so the request is not leaked
///!Order is very important
// bigsizes[0] = dimy;
// bigsizes[1] = dimx;
// subsizes[0] = closedChunkDim.y;
// subsizes[1] = closedChunkDim.x;
// starts[0] = index_2d.y;
// starts[1] = index_2d.x;
// MPI_Datatype closedChunkArray; ///!!! Declare the data type
// MPI_Type_create_subarray(2, bigsizes, subsizes, starts,
// MPI_ORDER_C, MPI_FLOAT, &closedChunkArray);
// MPI_Type_commit(&closedChunkArray); ///!!! Commit the data type
char *dstChar = strdup(dstFile.c_str());
cout << dstChar << endl;
///!!! Write globally
errCode = MPI_File_open(MPI_COMM_WORLD, dstChar, MPI_MODE_RDWR|MPI_MODE_CREATE, MPI_INFO_NULL, &fh);
if (errCode != MPI_SUCCESS) handle_error(errCode, "MPI_File_open");
MPI_File_set_view(fh, 0, etype, closedChunkArray, "native", MPI_INFO_NULL);
MPI_File_write_all(fh, p_closedChunk, closedChunkDim.x*closedChunkDim.y*closedChunkDim.z, MPI_FLOAT, MPI_STATUS_IGNORE);
MPI_File_close(&fh);
//================================================================================
// Close MPI
free(p_openedChunk);
free(p_closedChunk);
MPI_Finalize();
return 0;
} |
da7093bfcc5c7827a0bf40fbaf0fd1402c47b3cd.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <ATen/ATen.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
// for the older gpus atomicAdd with double arguments does not exist
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
static __inline__ __device__ double atomicAdd(double *address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
if (val==0.0)
return __longlong_as_double(old);
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val +__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
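// Descriptive note: the four kernels below appear to implement a simple
// surface/visibility voxelizer. voxelize_sub1 rasterises each face over the
// (x,y) grid and marks the voxel at the interpolated z; voxelize_sub2 also marks
// the voxels containing the face vertices; voxelize_sub3 seeds `visible` on
// empty voxels at the volume boundary; voxelize_sub4 propagates visibility to
// empty 6-neighbours and is presumably iterated from the host until it converges.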
namespace{
template <typename scalar_t>
__global__ void voxelize_sub1_kernel(
const scalar_t* __restrict__ faces,
int32_t* voxels,
int batch_size,
int num_faces,
int voxel_size) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size * voxel_size * voxel_size) {
return;
}
const int bs = batch_size;
const int nf = num_faces;
const int vs = voxel_size;
int y = i % vs;
int x = (i / vs) % vs;
int bn = i / (vs * vs);
//
for (int fn = 0; fn < nf; fn++) {
const scalar_t* face = &faces[(bn * nf + fn) * 9];
scalar_t y1d = face[3] - face[0];
scalar_t x1d = face[4] - face[1];
scalar_t z1d = face[5] - face[2];
scalar_t y2d = face[6] - face[0];
scalar_t x2d = face[7] - face[1];
scalar_t z2d = face[8] - face[2];
scalar_t ypd = y - face[0];
scalar_t xpd = x - face[1];
scalar_t det = x1d * y2d - x2d * y1d;
if (det == 0) continue;
scalar_t t1 = (y2d * xpd - x2d * ypd) / det;
scalar_t t2 = (-y1d * xpd + x1d * ypd) / det;
if (t1 < 0) continue;
if (t2 < 0) continue;
if (1 < t1 + t2) continue;
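        // Note: t1 and t2 are barycentric-style coordinates of the current cell
        // with respect to the two projected edge vectors; the three tests above
        // keep only cells inside the projected triangle
        // (t1 >= 0, t2 >= 0, t1 + t2 <= 1).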
int zi = floor(t1 * z1d + t2 * z2d + face[2]);
int yi, xi;
yi = y;
xi = x;
if ((0 <= yi) && (yi < vs) && (0 <= xi) && (xi < vs) && (0 <= zi) && (zi < vs))
voxels[bn * vs * vs * vs + yi * vs * vs + xi * vs + zi] = 1;
yi = y - 1;
xi = x;
if ((0 <= yi) && (yi < vs) && (0 <= xi) && (xi < vs) && (0 <= zi) && (zi < vs))
voxels[bn * vs * vs * vs + yi * vs * vs + xi * vs + zi] = 1;
yi = y;
xi = x - 1;
if ((0 <= yi) && (yi < vs) && (0 <= xi) && (xi < vs) && (0 <= zi) && (zi < vs))
voxels[bn * vs * vs * vs + yi * vs * vs + xi * vs + zi] = 1;
yi = y - 1;
xi = x - 1;
if ((0 <= yi) && (yi < vs) && (0 <= xi) && (xi < vs) && (0 <= zi) && (zi < vs))
voxels[bn * vs * vs * vs + yi * vs * vs + xi * vs + zi] = 1;
}
}
template <typename scalar_t>
__global__ void voxelize_sub2_kernel(
const scalar_t* __restrict__ faces,
int32_t* voxels,
int batch_size,
int num_faces,
int voxel_size) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size * num_faces) {
return;
}
const int bs = batch_size;
const int nf = num_faces;
const int vs = voxel_size;
int fn = i % nf;
int bn = i / nf;
const scalar_t* face = &faces[(bn * nf + fn) * 9];
for (int k = 0; k < 3; k++) {
int yi = floor(face[3 * k + 0]);
int xi = floor(face[3 * k + 1]);
int zi = floor(face[3 * k + 2]);
if ((0 <= yi) && (yi < vs) && (0 <= xi) && (xi < vs) && (0 <= zi) && (zi < vs)) {
voxels[bn * vs * vs * vs + yi * vs * vs + xi * vs + zi] = 1;
}
}
}
template <typename scalar_t>
__global__ void voxelize_sub3_kernel(
int32_t* voxels,
int32_t* visible,
int batch_size,
int voxel_size) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size * voxel_size * voxel_size * voxel_size) {
return;
}
const int bs = batch_size;
const int vs = voxel_size;
int z = i % vs;
int x = (i / vs) % vs;
int y = (i / (vs * vs)) % vs;
int bn = i / (vs * vs * vs);
int pn = i;
if ((y == 0) || (y == vs - 1) || (x == 0) || (x == vs - 1) || (z == 0) || (z == vs - 1)) {
if (voxels[pn] == 0) visible[pn] = 1;
}
}
template <typename scalar_t>
__global__ void voxelize_sub4_kernel(
int32_t* voxels,
int32_t* visible,
int batch_size,
int voxel_size) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size * voxel_size * voxel_size * voxel_size) {
return;
}
const int bs = batch_size;
const int vs = voxel_size;
int z = i % vs;
int x = (i / vs) % vs;
int y = (i / (vs * vs)) % vs;
int bn = i / (vs * vs * vs);
int pn = i;
if ((y == 0) || (y == vs - 1) || (x == 0) || (x == vs - 1) || (z == 0) || (z == vs - 1)) return;
if (voxels[pn] == 0 && visible[pn] == 0) {
int yi, xi, zi;
yi = y - 1;
xi = x;
zi = z;
if (visible[bn * vs * vs * vs + yi * vs * vs + xi * vs + zi] != 0) visible[pn] = 1;
yi = y + 1;
xi = x;
zi = z;
if (visible[bn * vs * vs * vs + yi * vs * vs + xi * vs + zi] != 0) visible[pn] = 1;
yi = y;
xi = x - 1;
zi = z;
if (visible[bn * vs * vs * vs + yi * vs * vs + xi * vs + zi] != 0) visible[pn] = 1;
yi = y;
xi = x + 1;
zi = z;
if (visible[bn * vs * vs * vs + yi * vs * vs + xi * vs + zi] != 0) visible[pn] = 1;
yi = y;
xi = x;
zi = z - 1;
if (visible[bn * vs * vs * vs + yi * vs * vs + xi * vs + zi] != 0) visible[pn] = 1;
yi = y;
xi = x;
zi = z + 1;
if (visible[bn * vs * vs * vs + yi * vs * vs + xi * vs + zi] != 0) visible[pn] = 1;
}
}
}
std::vector<at::Tensor> voxelize_sub1_cuda(
at::Tensor faces,
at::Tensor voxels) {
const auto batch_size = faces.size(0);
const auto num_faces = faces.size(1);
const auto voxel_size = voxels.size(1);
const int threads = 512;
const dim3 blocks ((batch_size * voxel_size * voxel_size - 1) / threads +1);
AT_DISPATCH_FLOATING_TYPES(faces.type(), "voxelize_sub1_cuda", ([&] {
hipLaunchKernelGGL(( voxelize_sub1_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
faces.data<scalar_t>(),
voxels.data<int32_t>(),
batch_size,
num_faces,
voxel_size);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess)
printf("Error in voxelize_sub1_kernel: %s\n", hipGetErrorString(err));
return {voxels};
}
std::vector<at::Tensor> voxelize_sub2_cuda(
at::Tensor faces,
at::Tensor voxels) {
const auto batch_size = faces.size(0);
const auto num_faces = faces.size(1);
const auto voxel_size = voxels.size(1);
const int threads = 512;
const dim3 blocks ((batch_size * num_faces - 1) / threads +1);
AT_DISPATCH_FLOATING_TYPES(faces.type(), "voxelize_sub2_cuda", ([&] {
hipLaunchKernelGGL(( voxelize_sub2_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
faces.data<scalar_t>(),
voxels.data<int32_t>(),
batch_size,
num_faces,
voxel_size);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess)
printf("Error in voxelize_sub2_kernel: %s\n", hipGetErrorString(err));
return {voxels};
}
std::vector<at::Tensor> voxelize_sub3_cuda(
at::Tensor faces,
at::Tensor voxels,
at::Tensor visible) {
const auto batch_size = voxels.size(0);
const auto voxel_size = voxels.size(1);
const int threads = 512;
const dim3 blocks ((batch_size * voxel_size * voxel_size * voxel_size - 1) / threads +1);
AT_DISPATCH_FLOATING_TYPES(faces.type(), "voxelize_sub3_cuda", ([&] {
hipLaunchKernelGGL(( voxelize_sub3_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
voxels.data<int32_t>(),
visible.data<int32_t>(),
batch_size,
voxel_size);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess)
printf("Error in voxelize_sub3_kernel: %s\n", hipGetErrorString(err));
return {voxels, visible};
}
std::vector<at::Tensor> voxelize_sub4_cuda(
at::Tensor faces,
at::Tensor voxels,
at::Tensor visible) {
const auto batch_size = voxels.size(0);
const auto voxel_size = voxels.size(1);
const int threads = 512;
const dim3 blocks ((batch_size * voxel_size * voxel_size * voxel_size - 1) / threads +1);
AT_DISPATCH_FLOATING_TYPES(faces.type(), "voxelize_sub4_cuda", ([&] {
hipLaunchKernelGGL(( voxelize_sub4_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
voxels.data<int32_t>(),
visible.data<int32_t>(),
batch_size,
voxel_size);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess)
printf("Error in voxelize_sub4_kernel: %s\n", hipGetErrorString(err));
return {voxels, visible};
} | da7093bfcc5c7827a0bf40fbaf0fd1402c47b3cd.cu | #include <iostream>
#include <ATen/ATen.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
// for the older gpus atomicAdd with double arguments does not exist
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
static __inline__ __device__ double atomicAdd(double *address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
if (val==0.0)
return __longlong_as_double(old);
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val +__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
namespace{
template <typename scalar_t>
__global__ void voxelize_sub1_kernel(
const scalar_t* __restrict__ faces,
int32_t* voxels,
int batch_size,
int num_faces,
int voxel_size) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size * voxel_size * voxel_size) {
return;
}
const int bs = batch_size;
const int nf = num_faces;
const int vs = voxel_size;
int y = i % vs;
int x = (i / vs) % vs;
int bn = i / (vs * vs);
//
for (int fn = 0; fn < nf; fn++) {
const scalar_t* face = &faces[(bn * nf + fn) * 9];
scalar_t y1d = face[3] - face[0];
scalar_t x1d = face[4] - face[1];
scalar_t z1d = face[5] - face[2];
scalar_t y2d = face[6] - face[0];
scalar_t x2d = face[7] - face[1];
scalar_t z2d = face[8] - face[2];
scalar_t ypd = y - face[0];
scalar_t xpd = x - face[1];
scalar_t det = x1d * y2d - x2d * y1d;
if (det == 0) continue;
scalar_t t1 = (y2d * xpd - x2d * ypd) / det;
scalar_t t2 = (-y1d * xpd + x1d * ypd) / det;
if (t1 < 0) continue;
if (t2 < 0) continue;
if (1 < t1 + t2) continue;
int zi = floor(t1 * z1d + t2 * z2d + face[2]);
int yi, xi;
yi = y;
xi = x;
if ((0 <= yi) && (yi < vs) && (0 <= xi) && (xi < vs) && (0 <= zi) && (zi < vs))
voxels[bn * vs * vs * vs + yi * vs * vs + xi * vs + zi] = 1;
yi = y - 1;
xi = x;
if ((0 <= yi) && (yi < vs) && (0 <= xi) && (xi < vs) && (0 <= zi) && (zi < vs))
voxels[bn * vs * vs * vs + yi * vs * vs + xi * vs + zi] = 1;
yi = y;
xi = x - 1;
if ((0 <= yi) && (yi < vs) && (0 <= xi) && (xi < vs) && (0 <= zi) && (zi < vs))
voxels[bn * vs * vs * vs + yi * vs * vs + xi * vs + zi] = 1;
yi = y - 1;
xi = x - 1;
if ((0 <= yi) && (yi < vs) && (0 <= xi) && (xi < vs) && (0 <= zi) && (zi < vs))
voxels[bn * vs * vs * vs + yi * vs * vs + xi * vs + zi] = 1;
}
}
template <typename scalar_t>
__global__ void voxelize_sub2_kernel(
const scalar_t* __restrict__ faces,
int32_t* voxels,
int batch_size,
int num_faces,
int voxel_size) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size * num_faces) {
return;
}
const int bs = batch_size;
const int nf = num_faces;
const int vs = voxel_size;
int fn = i % nf;
int bn = i / nf;
const scalar_t* face = &faces[(bn * nf + fn) * 9];
for (int k = 0; k < 3; k++) {
int yi = floor(face[3 * k + 0]);
int xi = floor(face[3 * k + 1]);
int zi = floor(face[3 * k + 2]);
if ((0 <= yi) && (yi < vs) && (0 <= xi) && (xi < vs) && (0 <= zi) && (zi < vs)) {
voxels[bn * vs * vs * vs + yi * vs * vs + xi * vs + zi] = 1;
}
}
}
template <typename scalar_t>
__global__ void voxelize_sub3_kernel(
int32_t* voxels,
int32_t* visible,
int batch_size,
int voxel_size) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size * voxel_size * voxel_size * voxel_size) {
return;
}
const int bs = batch_size;
const int vs = voxel_size;
int z = i % vs;
int x = (i / vs) % vs;
int y = (i / (vs * vs)) % vs;
int bn = i / (vs * vs * vs);
int pn = i;
if ((y == 0) || (y == vs - 1) || (x == 0) || (x == vs - 1) || (z == 0) || (z == vs - 1)) {
if (voxels[pn] == 0) visible[pn] = 1;
}
}
template <typename scalar_t>
__global__ void voxelize_sub4_kernel(
int32_t* voxels,
int32_t* visible,
int batch_size,
int voxel_size) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size * voxel_size * voxel_size * voxel_size) {
return;
}
const int bs = batch_size;
const int vs = voxel_size;
int z = i % vs;
int x = (i / vs) % vs;
int y = (i / (vs * vs)) % vs;
int bn = i / (vs * vs * vs);
int pn = i;
if ((y == 0) || (y == vs - 1) || (x == 0) || (x == vs - 1) || (z == 0) || (z == vs - 1)) return;
if (voxels[pn] == 0 && visible[pn] == 0) {
int yi, xi, zi;
yi = y - 1;
xi = x;
zi = z;
if (visible[bn * vs * vs * vs + yi * vs * vs + xi * vs + zi] != 0) visible[pn] = 1;
yi = y + 1;
xi = x;
zi = z;
if (visible[bn * vs * vs * vs + yi * vs * vs + xi * vs + zi] != 0) visible[pn] = 1;
yi = y;
xi = x - 1;
zi = z;
if (visible[bn * vs * vs * vs + yi * vs * vs + xi * vs + zi] != 0) visible[pn] = 1;
yi = y;
xi = x + 1;
zi = z;
if (visible[bn * vs * vs * vs + yi * vs * vs + xi * vs + zi] != 0) visible[pn] = 1;
yi = y;
xi = x;
zi = z - 1;
if (visible[bn * vs * vs * vs + yi * vs * vs + xi * vs + zi] != 0) visible[pn] = 1;
yi = y;
xi = x;
zi = z + 1;
if (visible[bn * vs * vs * vs + yi * vs * vs + xi * vs + zi] != 0) visible[pn] = 1;
}
}
}
std::vector<at::Tensor> voxelize_sub1_cuda(
at::Tensor faces,
at::Tensor voxels) {
const auto batch_size = faces.size(0);
const auto num_faces = faces.size(1);
const auto voxel_size = voxels.size(1);
const int threads = 512;
const dim3 blocks ((batch_size * voxel_size * voxel_size - 1) / threads +1);
AT_DISPATCH_FLOATING_TYPES(faces.type(), "voxelize_sub1_cuda", ([&] {
voxelize_sub1_kernel<scalar_t><<<blocks, threads>>>(
faces.data<scalar_t>(),
voxels.data<int32_t>(),
batch_size,
num_faces,
voxel_size);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error in voxelize_sub1_kernel: %s\n", cudaGetErrorString(err));
return {voxels};
}
std::vector<at::Tensor> voxelize_sub2_cuda(
at::Tensor faces,
at::Tensor voxels) {
const auto batch_size = faces.size(0);
const auto num_faces = faces.size(1);
const auto voxel_size = voxels.size(1);
const int threads = 512;
const dim3 blocks ((batch_size * num_faces - 1) / threads +1);
AT_DISPATCH_FLOATING_TYPES(faces.type(), "voxelize_sub2_cuda", ([&] {
voxelize_sub2_kernel<scalar_t><<<blocks, threads>>>(
faces.data<scalar_t>(),
voxels.data<int32_t>(),
batch_size,
num_faces,
voxel_size);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error in voxelize_sub2_kernel: %s\n", cudaGetErrorString(err));
return {voxels};
}
std::vector<at::Tensor> voxelize_sub3_cuda(
at::Tensor faces,
at::Tensor voxels,
at::Tensor visible) {
const auto batch_size = voxels.size(0);
const auto voxel_size = voxels.size(1);
const int threads = 512;
const dim3 blocks ((batch_size * voxel_size * voxel_size * voxel_size - 1) / threads +1);
AT_DISPATCH_FLOATING_TYPES(faces.type(), "voxelize_sub3_cuda", ([&] {
voxelize_sub3_kernel<scalar_t><<<blocks, threads>>>(
voxels.data<int32_t>(),
visible.data<int32_t>(),
batch_size,
voxel_size);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error in voxelize_sub3_kernel: %s\n", cudaGetErrorString(err));
return {voxels, visible};
}
std::vector<at::Tensor> voxelize_sub4_cuda(
at::Tensor faces,
at::Tensor voxels,
at::Tensor visible) {
const auto batch_size = voxels.size(0);
const auto voxel_size = voxels.size(1);
const int threads = 512;
const dim3 blocks ((batch_size * voxel_size * voxel_size * voxel_size - 1) / threads +1);
AT_DISPATCH_FLOATING_TYPES(faces.type(), "voxelize_sub4_cuda", ([&] {
voxelize_sub4_kernel<scalar_t><<<blocks, threads>>>(
voxels.data<int32_t>(),
visible.data<int32_t>(),
batch_size,
voxel_size);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error in voxelize_sub4_kernel: %s\n", cudaGetErrorString(err));
return {voxels, visible};
} |
f6a3931f18ef47aec01065886228cf23d1248de4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
*
* maketable_v4.cu
* calculate and sort a rainbow table
*
* nvcc maketable_v4.cu ./obj/fname_gen.o ./obj/md5.o -o ./bin/mktab
* From the parameters in rainbow.h, maketable produces an unsorted
* table (new) and a table sorted on final hash (sorted). This is
* used for merging into the main table.
*
* The kernel will time out if more than 34*1024 threads are launched.
* Defining 1 workunit as 32*1024 threads.
*
* Introduce use of a unique table_ident (header->f1). Use this
* ident for naming/selecting files.
* Examples: merge_03ca59e3.rbt or sort_03ca59e3.rbt
*
*/
#ifndef __CUDA__
#define __CUDA__
#endif
//===========================Include code======================================
// main header files
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <endian.h>
#include <time.h>
#include <unistd.h>
// local header files
#include "../common/rainbow.h"
#include "md5.h"
// nvcc does not support external calls
//#include "utils_2.h"
#include "utils_2.cu"
//======================================================================
__host__
void table_setup(TableHeader*,TableEntry*, uint32_t);
__host__
int sort_table(TableHeader*,TableEntry*);
__host__
uint32_t get_table_id(char *tid);
//======================================================================
__host__
static void HandleError( hipError_t err,
const char *file,
int line ) {
if (err != hipSuccess) {
printf( "%s in %s at line %d\n", hipGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
//======================================================================
__host__
void table_setup(TableHeader *header, TableEntry *entry, uint32_t table_id) {
int i,di;
//unsigned int t_size=sizeof(TableHeader)+(sizeof(TableEntry)*T_ENTRIES);
srand(time(NULL));
//printf("Threads: %d Table_Size: %d\n",THREADS,t_size);
for(i=0; i<THREADS*DIMGRIDX; i++) {
// Random password type 'UUnnllU'
(entry+i)->initial_password[0]= (rand() % 26) + 'A';
(entry+i)->initial_password[1]= (rand() % 26) + 'A';
(entry+i)->initial_password[2]= (rand() % 10) + '0';
(entry+i)->initial_password[3]= (rand() % 10) + '0';
(entry+i)->initial_password[4]= (rand() % 26) + 'a';
(entry+i)->initial_password[5]= (rand() % 26) + 'a';
(entry+i)->initial_password[6]= (rand() % 26) + 'A';
(entry+i)->initial_password[7]= '\0';
// DEBUG Remove either or both for operational use
(entry+i)->final_hash[0] = 0x776f6272;
// END DEBUG
}
header->hdr_size = sizeof(TableHeader);
header->entries = T_ENTRIES;
header->links = LINKS;
header->table_id = table_id; // Table Ident
header->f2 = 0x3e3e3e3e; // '>>>>'
// Calculate the md5sum of the table entries
md5_state_t state;
md5_byte_t digest[16];
md5_init(&state);
for(i=0; i<T_ENTRIES; i++)
md5_append(&state, (const md5_byte_t *)&(entry[i]), sizeof(TableEntry));
md5_finish(&state, digest);
// print md5sum for test purposes
//for (di = 0; di < 16; ++di)
//printf("%02x", digest[di]);
//printf("\n");
// Save the md5sum in check_sum slot
for (di = 0; di < 16; ++di)
sprintf(header->check_sum + di * 2, "%02x", digest[di]);
*(header->check_sum + di * 2) = '\0';
}
//======================================================================
__host__
int sort_table(TableHeader *header,TableEntry *entry) {
// Revised code to read directly from memory
TableEntry *target, *found;
int i; //loop variable
printf("Sorting %u Table Entries:-", header->entries);
qsort(entry, header->entries, sizeof(TableEntry), hash_compare_32bit);
// select a hash at random to act as test target
srand(time(NULL));
target = (entry + rand()%header->entries);
printf("\nRandom target: %s Hash: ", target->initial_password);
for(i=0;i<8;i++) printf("%08x ", target->final_hash[i]);
found = (TableEntry*)bsearch(target, entry, header->entries, sizeof(TableEntry), hash_compare_32bit);
if(found != NULL) {
printf("\nLocated target %s Hash: ", found->initial_password);
for(i=0;i<8;i++) printf("%08x ", found->final_hash[i]);
printf("\n");
} else {
printf("\nTarget hash not found?\n");
}
// end test target
return(0);
}
//======================================================================
//=========================Device Code==================================
//======================================================================
//----------------------------------------------------------------------
__global__
void table_calculate(TableHeader *header, TableEntry *entry) {
// revised 29Dec2011
// The parameter is the base address of a large table of TableEntry(s)
uint8_t M[64]; // Initial string - zero padded and length in bits appended
uint32_t W[64]; // Expanded Key Schedule
uint32_t H[8]; // Hash
int i = 0; // working index
uint64_t l = 0; // length of message
uint8_t B[64]; // store initial and working passwords here to protect original data
uint32_t chain_idx, link_idx;
// set up a pointer to initial_password & final_hash
TableEntry *data = entry + blockIdx.x*blockDim.x + threadIdx.x;
// set up to read in the trial string into the B buffer
uint8_t *in = (uint8_t*)data->initial_password;
uint8_t *out = B;
// copy zero terminated string
i=0;
while(in[i] != 0x00) {
out[i] = in[i];
i++;
}
out[i] = 0x00;
// ---> main loop buffer B contains the zero term string
for(chain_idx=0; chain_idx<LINKS; chain_idx++) {
// copy zero terminated string from B to M and note length
in = B;
out = M;
i=0; l=0;
while(in[i] != 0x00) {
out[i] = in[i];
i++;
l++;
}
out[i++] = 0x80;
// zero fill
while(i < 56) out[i++]=0x00;
/*The hash algorithm uses 32 bit (4 byte words).
* On little endian machines (Intel) the constants
* are stored lsb->msb internally. To match this the WORDS
* of the input message are subject to endian swap.
*/
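		// Worked example: the four bytes 'A','B','1','2' stored as 41 42 31 32
		// are reversed to 32 31 42 41, so the little-endian load into W[0] below
		// yields the big-endian word value 0x41423132 expected by SHA-256.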
uint8_t *x = M;
int y;
for(y=0; y<14; y++) {
// long swap
*(x+3) ^= *x;
*x ^= *(x+3);
*(x+3) ^= *x;
// short swap
*(x+2) ^= *(x+1);
*(x+1) ^= *(x+2);
*(x+2) ^= *(x+1);
// move pointer up
x += 4;
}
// need a 32 bit pointer to store length as 2 words
l*=8; //length in bits
uint32_t *p = (uint32_t*)&l;
uint32_t *q = (uint32_t*)&out[i];
*q = *(p+1);
*(q+1) = *p;
// The 64 bytes in the message block can now be used
// to initialise the 64 4-byte words in the message schedule W[64]
// REUSE i
uint8_t *r = (uint8_t*)M;
uint8_t *s = (uint8_t*)W;
for(i=0;i<64;i++) s[i] = r[i];
for(i=16;i<64;i++) W[i] = SIG1(W[i-2]) + W[i-7] + SIG0(W[i-15]) + W[i-16];
// set initial hash values
initHash(H);
// Now calc the hash
sha256_transform(W,H);
// For testing use 0 as table index
// link_idx = chain_idx + 0;
// Reduce the Hash using the table_ident
//???????????????????????????????????????????????
link_idx = chain_idx + header->table_id;
//???????????????????????????????????????????????
// call reduce function
reduce_hash(H,B,link_idx);
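			// Note: salting the reduction with chain_idx + table_id gives every
			// link position (and every table) its own reduction function, which is
			// the property that limits chain merges in a rainbow table.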
// TODO: remove???? clear internal index
i=0;
} // --> end main loop
// copy comp_hash to final hash
for(i=0;i<8;i++) data->final_hash[i] = H[i];
// Ensure padding is zero
data->pad=0x0000;
__syncthreads();
}
void Check_CUDA_Error(const char *message,FILE *table, char *fname) {
hipError_t error = hipGetLastError();
if(error!=hipSuccess) {
fprintf(stderr,"ERROR: %s: %s\n", message, hipGetErrorString(error) );
fprintf(stderr,"Removing invalid file in rbt.\n");
fclose(table);
remove(fname);
exit(1);
}
}
uint32_t get_table_id(char *tid) {
// Revised version of test code
// Expects a pointer to a hex string
// Returns equivalent uint32_t or zero on failure
if(tid==NULL) {return(0);}
uint32_t table_id = strtol(tid,NULL,16);
return(table_id);
}
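// Usage note (illustrative): get_table_id("03ca59e3") and
// get_table_id("0x03ca59e3") both return 0x03ca59e3; a NULL pointer or a string
// with no leading hex digits yields 0, which main() rejects.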
//=================================Main Code============================
int main(int argc, char **argv) {
TableHeader *header, *dev_header;
TableEntry *entry, *dev_entry;
char table_file[81];
char sort_file[81];
FILE *table, *sort;
int i,di,work_unit;
uint32_t offset,table_id;
hipEvent_t start;
hipEvent_t end;
float ms;
size_t count;
printf("========= Maketable_v4 =========\n");
// Required parameter is the table identifier
// Supplied as hex string of the form 0x12ab34cd
// Stored internally as uint32_t
// String form used to generate the table name
// of the form "sort_0x12ab34cd.rbt
if(argc != 2) {
printf("Table Identifier missing.\nUsage: mktab 0x1234abcd\n");
exit(1);
}
if((table_id=get_table_id(argv[1]))==0) {
printf("Table index zero not permitted.\n");
exit(1);
}
fname_gen(sort_file,"sort",table_id); // determine the filenames
fname_gen(table_file,"new",table_id); // at the same time so tmerge
table=fopen(table_file,"w"); // can delete the unrequired files.
if(table==NULL) {
printf("Error - maketable_v4: Unable to open 'new' file.\n");
return(1);
}
header = (TableHeader*)malloc(sizeof(TableHeader));
entry = (TableEntry*)malloc(sizeof(TableEntry)*T_ENTRIES);
if((header != NULL)&&(entry != NULL)) {
printf("Preparing the table header and initial passwords.\n");
table_setup(header,entry,table_id); // initialise header and set initial passwords
// hipMalloc space for table header
HANDLE_ERROR(hipMalloc((void**)&dev_header,sizeof(TableHeader)));
// copy header to device
HANDLE_ERROR(hipMemcpy(dev_header, header, sizeof(TableHeader), hipMemcpyHostToDevice));
// hipMalloc space for 1 work unit in table body
HANDLE_ERROR(hipMalloc((void**)&dev_entry,sizeof(TableEntry)*DIMGRIDX*THREADS));
hipEventCreate(&start);
hipEventCreate(&end);
printf("Starting the first of %d work units....\n",WORKUNITS);
// .....workunit...loop start.....
for(work_unit=0; work_unit<WORKUNITS; work_unit++) {
// track position in table of entries
offset = work_unit*DIMGRIDX*THREADS;
// Copy entries to device
HANDLE_ERROR(hipMemcpy(dev_entry, entry+offset, sizeof(TableEntry)*DIMGRIDX*THREADS, hipMemcpyHostToDevice));
// =====Launch Kernel=====
hipEventRecord(start,0);
hipGetLastError(); // Clear cuda error flag
hipLaunchKernelGGL(( table_calculate), dim3(DIMGRIDX),dim3(THREADS), 0, 0, dev_header,dev_entry);
hipEventRecord(end,0);
hipEventSynchronize(end);
hipEventElapsedTime(&ms,start,end);
printf("Work unit %d completed in %4.2f ms.\n", work_unit,ms);
Check_CUDA_Error("Error thrown after kernel launch",table,table_file);
// copy back entries to host
HANDLE_ERROR( hipMemcpy(entry+offset, dev_entry, sizeof(TableEntry)*DIMGRIDX*THREADS, hipMemcpyDeviceToHost) );
// copy back header to host
HANDLE_ERROR( hipMemcpy(header, dev_header, sizeof(TableHeader), hipMemcpyDeviceToHost) );
} // .....workunit...loop end.....
fwrite(header,sizeof(TableHeader),1,table);
fwrite(entry,sizeof(TableEntry),T_ENTRIES,table);
fclose(table);
// sort on hash value
sort_table(header,entry);
// Calculate the md5sum of the table entries
md5_state_t state;
md5_byte_t digest[16];
md5_init(&state);
for(i=0; i<T_ENTRIES; i++)
md5_append(&state, (const md5_byte_t *)&(entry[i]), sizeof(TableEntry));
md5_finish(&state, digest);
// print md5sum for test purposes
printf("Checksum for TableEntries: ");
for (di = 0; di < 16; ++di)
printf("%02x", digest[di]);
printf("\n");
// Save the md5sum in check_sum slot
for (di = 0; di < 16; ++di)
sprintf(header->check_sum + di * 2, "%02x", digest[di]);
// Open the sort file for writing
sort=fopen(sort_file,"w");
if(sort==NULL) {
printf("Error - maketable_v3: Unable to open 'sort' file.\n");
return(1);
}
// save sorted table to file 'sorted table'
count = fwrite(header,sizeof(TableHeader),1,sort);
count += fwrite(entry,sizeof(TableEntry),header->entries,sort);
if((count == header->entries + 1)&&(fclose(sort)==0)) {
// ok to remove 'new' file
printf("Sorted file successfully writen - deleting original.\n");
if( remove( table_file ) != 0 )
perror( "Error deleting file\n" );
else
printf( "original file successfully deleted\n" );
}
}
printf("table_id: %u sort_file %s\n",table_id,sort_file);
// Clean up memory
free(header);
free(entry);
hipFree(dev_entry);
hipFree(dev_header);
return(0);
}
| f6a3931f18ef47aec01065886228cf23d1248de4.cu | /*
*
* maketable_v4.cu
* calculate and sort a rainbow table
*
* nvcc maketable_v4.cu ./obj/fname_gen.o ./obj/md5.o -o ./bin/mktab
* From the parameters in rainbow.h, maketable produces an unsorted
* table (new) and a table sorted on final hash (sorted). This is
* used for merging into the main table.
*
* The kernel will time out if more than 34*1024 threads are launched.
* Defining 1 workunit as 32*1024 threads.
*
* Introduce use of a unique table_ident (header->f1). Use this
* ident for naming/selecting files.
* Examples: merge_03ca59e3.rbt or sort_03ca59e3.rbt
*
*/
#ifndef __CUDA__
#define __CUDA__
#endif
//===========================Include code======================================
// main header files
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <endian.h>
#include <time.h>
#include <unistd.h>
// local header files
#include "../common/rainbow.h"
#include "md5.h"
// nvcc does not support external calls
//#include "utils_2.h"
#include "utils_2.cu"
//======================================================================
__host__
void table_setup(TableHeader*,TableEntry*, uint32_t);
__host__
int sort_table(TableHeader*,TableEntry*);
__host__
uint32_t get_table_id(char *tid);
//======================================================================
__host__
static void HandleError( cudaError_t err,
const char *file,
int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
//======================================================================
__host__
void table_setup(TableHeader *header, TableEntry *entry, uint32_t table_id) {
int i,di;
//unsigned int t_size=sizeof(TableHeader)+(sizeof(TableEntry)*T_ENTRIES);
srand(time(NULL));
//printf("Threads: %d Table_Size: %d\n",THREADS,t_size);
for(i=0; i<THREADS*DIMGRIDX; i++) {
// Random password type 'UUnnllU'
(entry+i)->initial_password[0]= (rand() % 26) + 'A';
(entry+i)->initial_password[1]= (rand() % 26) + 'A';
(entry+i)->initial_password[2]= (rand() % 10) + '0';
(entry+i)->initial_password[3]= (rand() % 10) + '0';
(entry+i)->initial_password[4]= (rand() % 26) + 'a';
(entry+i)->initial_password[5]= (rand() % 26) + 'a';
(entry+i)->initial_password[6]= (rand() % 26) + 'A';
(entry+i)->initial_password[7]= '\0';
// DEBUG Remove either or both for operational use
(entry+i)->final_hash[0] = 0x776f6272;
// END DEBUG
}
header->hdr_size = sizeof(TableHeader);
header->entries = T_ENTRIES;
header->links = LINKS;
header->table_id = table_id; // Table Ident
header->f2 = 0x3e3e3e3e; // '>>>>'
// Calculate the md5sum of the table entries
md5_state_t state;
md5_byte_t digest[16];
md5_init(&state);
for(i=0; i<T_ENTRIES; i++)
md5_append(&state, (const md5_byte_t *)&(entry[i]), sizeof(TableEntry));
md5_finish(&state, digest);
// print md5sum for test purposes
//for (di = 0; di < 16; ++di)
//printf("%02x", digest[di]);
//printf("\n");
// Save the md5sum in check_sum slot
for (di = 0; di < 16; ++di)
sprintf(header->check_sum + di * 2, "%02x", digest[di]);
*(header->check_sum + di * 2) = '\0';
}
//======================================================================
__host__
int sort_table(TableHeader *header,TableEntry *entry) {
// Revised code to read directly from memory
TableEntry *target, *found;
int i; //loop variable
printf("Sorting %u Table Entries:-", header->entries);
qsort(entry, header->entries, sizeof(TableEntry), hash_compare_32bit);
// select a hash at random to act as test target
srand(time(NULL));
target = (entry + rand()%header->entries);
printf("\nRandom target: %s Hash: ", target->initial_password);
for(i=0;i<8;i++) printf("%08x ", target->final_hash[i]);
found = (TableEntry*)bsearch(target, entry, header->entries, sizeof(TableEntry), hash_compare_32bit);
if(found != NULL) {
printf("\nLocated target %s Hash: ", found->initial_password);
for(i=0;i<8;i++) printf("%08x ", found->final_hash[i]);
printf("\n");
} else {
printf("\nTarget hash not found?\n");
}
// end test target
return(0);
}
//======================================================================
//=========================Device Code==================================
//======================================================================
//----------------------------------------------------------------------
__global__
void table_calculate(TableHeader *header, TableEntry *entry) {
// revised 29Dec2011
// The parameter is the base address of a large table of TableEntry(s)
uint8_t M[64]; // Initial string - zero padded and length in bits appended
uint32_t W[64]; // Expanded Key Schedule
uint32_t H[8]; // Hash
int i = 0; // working index
uint64_t l = 0; // length of message
uint8_t B[64]; // store initial and working passwords here to protect original data
uint32_t chain_idx, link_idx;
// set up a pointer to initial_password & final_hash
TableEntry *data = entry + blockIdx.x*blockDim.x + threadIdx.x;
// set up to read in the trial string into the B buffer
uint8_t *in = (uint8_t*)data->initial_password;
uint8_t *out = B;
// copy zero terminated string
i=0;
while(in[i] != 0x00) {
out[i] = in[i];
i++;
}
out[i] = 0x00;
// ---> main loop buffer B contains the zero term string
for(chain_idx=0; chain_idx<LINKS; chain_idx++) {
// copy zero terminated string from B to M and note length
in = B;
out = M;
i=0; l=0;
while(in[i] != 0x00) {
out[i] = in[i];
i++;
l++;
}
out[i++] = 0x80;
// zero fill
while(i < 56) out[i++]=0x00;
/* The hash algorithm uses 32-bit (4-byte) words.
* On little-endian machines (Intel) the constants
* are stored lsb->msb internally. To match this, the WORDS
* of the input message are byte-swapped (endian swap) below.
*/
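/* Illustrative example (editorial, not in the original source): if a word of M
* holds the bytes 'A','B','1','2' (0x41 0x42 0x31 0x32), the XOR swaps in the
* loop below reverse them to 0x32 0x31 0x42 0x41, so a little-endian 32-bit
* load yields 0x41423132, the big-endian word value SHA-256 expects. */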
uint8_t *x = M;
int y;
for(y=0; y<14; y++) {
// long swap
*(x+3) ^= *x;
*x ^= *(x+3);
*(x+3) ^= *x;
// short swap
*(x+2) ^= *(x+1);
*(x+1) ^= *(x+2);
*(x+2) ^= *(x+1);
// move pointer up
x += 4;
}
// need a 32 bit pointer to store length as 2 words
l*=8; //length in bits
uint32_t *p = (uint32_t*)&l;
uint32_t *q = (uint32_t*)&out[i];
*q = *(p+1);
*(q+1) = *p;
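/* Worked example (editorial, not in the original source): for a 7-byte
* password such as "AB12cdE", l = 7*8 = 56 = 0x38 bits. The two stores above
* put the high word of l into M[56..59] and the low word into M[60..63], so
* after the byte copy into W below the schedule sees W[14] = 0x00000000 and
* W[15] = 0x00000038, the 64-bit message length SHA-256 requires. */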
// The 64 bytes in the message block can now be used
// to initialise the 64 4-byte words in the message schedule W[64]
// REUSE i
uint8_t *r = (uint8_t*)M;
uint8_t *s = (uint8_t*)W;
for(i=0;i<64;i++) s[i] = r[i];
for(i=16;i<64;i++) W[i] = SIG1(W[i-2]) + W[i-7] + SIG0(W[i-15]) + W[i-16];
// set initial hash values
initHash(H);
// Now calc the hash
sha256_transform(W,H);
// For testing use 0 as table index
// link_idx = chain_idx + 0;
// Reduce the Hash using the table_ident
//???????????????????????????????????????????????
link_idx = chain_idx + header->table_id;
//???????????????????????????????????????????????
// call reduce function
reduce_hash(H,B,link_idx);
// TODO: remove???? clear internal index
i=0;
} // --> end main loop
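/* Editorial summary (not in the original source): each pass of the loop above
* hashes the current password in B with SHA-256 and then reduces that hash
* back into a new password, so after LINKS iterations only the chain's start
* point (initial_password) and its final hash need to be stored, the usual
* rainbow-table space/time trade-off. */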
// copy comp_hash to final hash
for(i=0;i<8;i++) data->final_hash[i] = H[i];
// Ensure padding is zero
data->pad=0x0000;
__syncthreads();
}
void Check_CUDA_Error(const char *message,FILE *table, char *fname) {
cudaError_t error = cudaGetLastError();
if(error!=cudaSuccess) {
fprintf(stderr,"ERROR: %s: %s\n", message, cudaGetErrorString(error) );
fprintf(stderr,"Removing invalid file in rbt.\n");
fclose(table);
remove(fname);
exit(1);
}
}
uint32_t get_table_id(char *tid) {
// Revised version of test code
// Expects a pointer to a hex string
// Returns equivalent uint32_t or zero on failure
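// e.g. get_table_id("0x12ab34cd") returns 0x12ab34cd, since strtol() with
// base 16 accepts an optional leading "0x" (editorial example, not in the
// original source).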
if(tid==NULL) {return(0);}
uint32_t table_id = strtol(tid,NULL,16);
return(table_id);
}
//=================================Main Code============================
int main(int argc, char **argv) {
TableHeader *header, *dev_header;
TableEntry *entry, *dev_entry;
char table_file[81];
char sort_file[81];
FILE *table, *sort;
int i,di,work_unit;
uint32_t offset,table_id;
cudaEvent_t start;
cudaEvent_t end;
float ms;
size_t count;
printf("========= Maketable_v4 =========\n");
// Required parameter is the table identifier
// Supplied as hex string of the form 0x12ab34cd
// Stored internally as uint32_t
// String form used to generate the table name
// of the form "sort_0x12ab34cd.rbt
if(argc != 2) {
printf("Table Identifier missing.\nUsage: mktab 0x1234abcd\n");
exit(1);
}
if((table_id=get_table_id(argv[1]))==0) {
printf("Table index zero not permitted.\n");
exit(1);
}
fname_gen(sort_file,"sort",table_id); // determine the filenames
fname_gen(table_file,"new",table_id); // at the same time so tmerge
table=fopen(table_file,"w"); // can delete the unrequired files.
if(table==NULL) {
printf("Error - maketable_v4: Unable to open 'new' file.\n");
return(1);
}
header = (TableHeader*)malloc(sizeof(TableHeader));
entry = (TableEntry*)malloc(sizeof(TableEntry)*T_ENTRIES);
if((header != NULL)&&(entry != NULL)) {
printf("Preparing the table header and initial passwords.\n");
table_setup(header,entry,table_id); // initialise header and set initial passwords
// cudaMalloc space for table header
HANDLE_ERROR(cudaMalloc((void**)&dev_header,sizeof(TableHeader)));
// copy header to device
HANDLE_ERROR(cudaMemcpy(dev_header, header, sizeof(TableHeader), cudaMemcpyHostToDevice));
// cudaMalloc space for 1 work unit in table body
HANDLE_ERROR(cudaMalloc((void**)&dev_entry,sizeof(TableEntry)*DIMGRIDX*THREADS));
cudaEventCreate(&start);
cudaEventCreate(&end);
printf("Starting the first of %d work units....\n",WORKUNITS);
// .....workunit...loop start.....
for(work_unit=0; work_unit<WORKUNITS; work_unit++) {
// track position in table of entries
offset = work_unit*DIMGRIDX*THREADS;
// Copy entries to device
HANDLE_ERROR(cudaMemcpy(dev_entry, entry+offset, sizeof(TableEntry)*DIMGRIDX*THREADS, cudaMemcpyHostToDevice));
// =====Launch Kernel=====
cudaEventRecord(start,0);
cudaGetLastError(); // Clear cuda error flag
table_calculate<<<DIMGRIDX,THREADS>>>(dev_header,dev_entry);
cudaEventRecord(end,0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&ms,start,end);
printf("Work unit %d completed in %4.2f ms.\n", work_unit,ms);
Check_CUDA_Error("Error thrown after kernel launch",table,table_file);
// copy back entries to host
HANDLE_ERROR( cudaMemcpy(entry+offset, dev_entry, sizeof(TableEntry)*DIMGRIDX*THREADS, cudaMemcpyDeviceToHost) );
// copy back header to host
HANDLE_ERROR( cudaMemcpy(header, dev_header, sizeof(TableHeader), cudaMemcpyDeviceToHost) );
} // .....workunit...loop end.....
fwrite(header,sizeof(TableHeader),1,table);
fwrite(entry,sizeof(TableEntry),T_ENTRIES,table);
fclose(table);
// sort on hash value
sort_table(header,entry);
// Calculate the md5sum of the table entries
md5_state_t state;
md5_byte_t digest[16];
md5_init(&state);
for(i=0; i<T_ENTRIES; i++)
md5_append(&state, (const md5_byte_t *)&(entry[i]), sizeof(TableEntry));
md5_finish(&state, digest);
// print md5sum for test purposes
printf("Checksum for TableEntries: ");
for (di = 0; di < 16; ++di)
printf("%02x", digest[di]);
printf("\n");
// Save the md5sum in check_sum slot
for (di = 0; di < 16; ++di)
sprintf(header->check_sum + di * 2, "%02x", digest[di]);
// Open the sort file for writing
sort=fopen(sort_file,"w");
if(sort==NULL) {
printf("Error - maketable_v3: Unable to open 'sort' file.\n");
return(1);
}
// save sorted table to file 'sorted table'
count = fwrite(header,sizeof(TableHeader),1,sort);
count += fwrite(entry,sizeof(TableEntry),header->entries,sort);
if((count == header->entries + 1)&&(fclose(sort)==0)) {
// ok to remove 'new' file
printf("Sorted file successfully writen - deleting original.\n");
if( remove( table_file ) != 0 )
perror( "Error deleting file\n" );
else
printf( "original file successfully deleted\n" );
}
}
printf("table_id: %u sort_file %s\n",table_id,sort_file);
// Clean up memory
free(header);
free(entry);
cudaFree(dev_entry);
cudaFree(dev_header);
return(0);
}
|
cd748e42a1e68b94aef9d5802da93551027755e9.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2015-16 Tom Deakin, Simon McIntosh-Smith,
// University of Bristol HPC
//
// For full license terms please see the LICENSE file distributed with this
// source code
#include <iostream>
#include <vector>
#include <numeric>
#include <cmath>
#include <limits>
#include <chrono>
#include <algorithm>
#include <iomanip>
#include <cstring>
#define VERSION_STRING "3.3"
#include "Stream.h"
#if defined(CUDA)
#include "HIPStreamMasqueradingAsCUDA.h"
#elif defined(HIP)
#include "HIPStream.h"
#elif defined(HC)
#include "HCStream.h"
#elif defined(OCL)
#include "OCLStream.h"
#elif defined(USE_RAJA)
#include "RAJAStream.hpp"
#elif defined(KOKKOS)
#include "KokkosStream.hpp"
#elif defined(ACC)
#include "ACCStream.h"
#elif defined(SYCL)
#include "SYCLStream.h"
#elif defined(OMP)
#include "OMPStream.h"
#endif
// Default size of 2^25 (33554432 elements)
unsigned int ARRAY_SIZE = 33554432;
unsigned int num_times = 100;
unsigned int deviceIndex = 0;
bool use_float = false;
bool triad_only = false;
bool output_as_csv = false;
std::string csv_separator = ",";
template <typename T>
void check_solution(const unsigned int ntimes, std::vector<T>& a, std::vector<T>& b, std::vector<T>& c, T& sum);
template <typename T>
void run();
template <typename T>
void run_triad();
void parseArguments(int argc, char *argv[]);
int main(int argc, char *argv[])
{
parseArguments(argc, argv);
if (!output_as_csv)
{
std::cout
<< "BabelStream" << std::endl
<< "Version: " << VERSION_STRING << std::endl
<< "Implementation: " << IMPLEMENTATION_STRING << std::endl;
}
// TODO: Fix Kokkos to allow multiple template specializations
if (triad_only)
{
if (use_float)
run_triad<float>();
else
run_triad<double>();
}
else
{
if (use_float)
run<float>();
else
run<double>();
}
}
template <typename T>
void run()
{
std::streamsize ss = std::cout.precision();
if (!output_as_csv)
{
std::cout << "Running kernels " << num_times << " times" << std::endl;
if (sizeof(T) == sizeof(float))
std::cout << "Precision: float" << std::endl;
else
std::cout << "Precision: double" << std::endl;
std::cout << std::setprecision(1) << std::fixed
<< "Array size: " << ARRAY_SIZE*sizeof(T)*1.0E-6 << " MB"
<< " (=" << ARRAY_SIZE*sizeof(T)*1.0E-9 << " GB)" << std::endl;
std::cout << "Total size: " << 3.0*ARRAY_SIZE*sizeof(T)*1.0E-6 << " MB"
<< " (=" << 3.0*ARRAY_SIZE*sizeof(T)*1.0E-9 << " GB)" << std::endl;
std::cout.precision(ss);
}
// Create host vectors
std::vector<T> a(ARRAY_SIZE);
std::vector<T> b(ARRAY_SIZE);
std::vector<T> c(ARRAY_SIZE);
// Result of the Dot kernel
T sum;
Stream<T> *stream;
#if defined(CUDA)
// Use the CUDA implementation
stream = new HIPStreamMasqueradingAsCUDA<T>(ARRAY_SIZE, deviceIndex);
#elif defined(HIP)
// Use the HIP implementation
stream = new HIPStream<T>(ARRAY_SIZE, deviceIndex);
#elif defined(HC)
// Use the HC implementation
stream = new HCStream<T>(ARRAY_SIZE, deviceIndex);
#elif defined(OCL)
// Use the OpenCL implementation
stream = new OCLStream<T>(ARRAY_SIZE, deviceIndex);
#elif defined(USE_RAJA)
// Use the RAJA implementation
stream = new RAJAStream<T>(ARRAY_SIZE, deviceIndex);
#elif defined(KOKKOS)
// Use the Kokkos implementation
stream = new KokkosStream<T>(ARRAY_SIZE, deviceIndex);
#elif defined(ACC)
// Use the OpenACC implementation
stream = new ACCStream<T>(ARRAY_SIZE, a.data(), b.data(), c.data(), deviceIndex);
#elif defined(SYCL)
// Use the SYCL implementation
stream = new SYCLStream<T>(ARRAY_SIZE, deviceIndex);
#elif defined(OMP)
// Use the OpenMP implementation
stream = new OMPStream<T>(ARRAY_SIZE, a.data(), b.data(), c.data(), deviceIndex);
#endif
stream->init_arrays(startA, startB, startC);
// List of times
std::vector<std::vector<double>> timings(5);
// Declare timers
std::chrono::high_resolution_clock::time_point t1, t2;
// Main loop
for (unsigned int k = 0; k < num_times; k++)
{
// Execute Copy
t1 = std::chrono::high_resolution_clock::now();
stream->copy();
t2 = std::chrono::high_resolution_clock::now();
timings[0].push_back(std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1).count());
// Execute Mul
t1 = std::chrono::high_resolution_clock::now();
stream->mul();
t2 = std::chrono::high_resolution_clock::now();
timings[1].push_back(std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1).count());
// Execute Add
t1 = std::chrono::high_resolution_clock::now();
stream->add();
t2 = std::chrono::high_resolution_clock::now();
timings[2].push_back(std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1).count());
// Execute Triad
t1 = std::chrono::high_resolution_clock::now();
stream->triad();
t2 = std::chrono::high_resolution_clock::now();
timings[3].push_back(std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1).count());
// Execute Dot
t1 = std::chrono::high_resolution_clock::now();
sum = stream->dot();
t2 = std::chrono::high_resolution_clock::now();
timings[4].push_back(std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1).count());
}
// Check solutions
stream->read_arrays(a, b, c);
check_solution<T>(num_times, a, b, c, sum);
// Display timing results
if (output_as_csv)
{
std::cout
<< "function" << csv_separator
<< "num_times" << csv_separator
<< "n_elements" << csv_separator
<< "sizeof" << csv_separator
<< "max_mbytes_per_sec" << csv_separator
<< "min_runtime" << csv_separator
<< "max_runtime" << csv_separator
<< "avg_runtime" << std::endl;
}
else
{
std::cout
<< std::left << std::setw(12) << "Function"
<< std::left << std::setw(12) << "MBytes/sec"
<< std::left << std::setw(12) << "Min (sec)"
<< std::left << std::setw(12) << "Max"
<< std::left << std::setw(12) << "Average"
<< std::endl
<< std::fixed;
}
std::string labels[5] = {"Copy", "Mul", "Add", "Triad", "Dot"};
size_t sizes[5] = {
2 * sizeof(T) * ARRAY_SIZE,
2 * sizeof(T) * ARRAY_SIZE,
3 * sizeof(T) * ARRAY_SIZE,
3 * sizeof(T) * ARRAY_SIZE,
2 * sizeof(T) * ARRAY_SIZE
};
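// Editorial note (not in the original source): sizes[] is the bytes moved by
// each kernel (two arrays for Copy, Mul and Dot, three for Add and Triad).
// With the default 33554432 doubles, Copy moves 2*8*33554432 = 536870912 B,
// so a best time of 0.05 s is reported as 1.0E-6*536870912/0.05, roughly
// 10737 MBytes/sec, in the table printed below.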
for (int i = 0; i < 5; i++)
{
// Get min/max; ignore the first result
auto minmax = std::minmax_element(timings[i].begin()+1, timings[i].end());
// Calculate average; ignore the first result
double average = std::accumulate(timings[i].begin()+1, timings[i].end(), 0.0) / (double)(num_times - 1);
// Display results
if (output_as_csv)
{
std::cout
<< labels[i] << csv_separator
<< num_times << csv_separator
<< ARRAY_SIZE << csv_separator
<< sizeof(T) << csv_separator
<< 1.0E-6 * sizes[i] / (*minmax.first) << csv_separator
<< *minmax.first << csv_separator
<< *minmax.second << csv_separator
<< average << csv_separator
<< std::endl;
}
else
{
std::cout
<< std::left << std::setw(12) << labels[i]
<< std::left << std::setw(12) << std::setprecision(3) << 1.0E-6 * sizes[i] / (*minmax.first)
<< std::left << std::setw(12) << std::setprecision(5) << *minmax.first
<< std::left << std::setw(12) << std::setprecision(5) << *minmax.second
<< std::left << std::setw(12) << std::setprecision(5) << average
<< std::endl;
}
}
delete stream;
}
template <typename T>
void run_triad()
{
std::cout << "Running triad " << num_times << " times" << std::endl;
std::cout << "Number of elements: " << ARRAY_SIZE << std::endl;
if (sizeof(T) == sizeof(float))
std::cout << "Precision: float" << std::endl;
else
std::cout << "Precision: double" << std::endl;
// Create host vectors
std::vector<T> a(ARRAY_SIZE);
std::vector<T> b(ARRAY_SIZE);
std::vector<T> c(ARRAY_SIZE);
std::streamsize ss = std::cout.precision();
std::cout << std::setprecision(1) << std::fixed
<< "Array size: " << ARRAY_SIZE*sizeof(T)*1.0E-3 << " KB"
<< " (=" << ARRAY_SIZE*sizeof(T)*1.0E-6 << " MB)" << std::endl;
std::cout << "Total size: " << 3.0*ARRAY_SIZE*sizeof(T)*1.0E-3 << " KB"
<< " (=" << 3.0*ARRAY_SIZE*sizeof(T)*1.0E-6 << " MB)" << std::endl;
std::cout.precision(ss);
Stream<T> *stream;
#if defined(CUDA)
// Use the CUDA implementation
stream = new HIPStreamMasqueradingAsCUDA<T>(ARRAY_SIZE, deviceIndex);
#elif defined(HIP)
// Use the HIP implementation
stream = new HIPStream<T>(ARRAY_SIZE, deviceIndex);
#elif defined(OCL)
// Use the OpenCL implementation
stream = new OCLStream<T>(ARRAY_SIZE, deviceIndex);
#elif defined(USE_RAJA)
// Use the RAJA implementation
stream = new RAJAStream<T>(ARRAY_SIZE, deviceIndex);
#elif defined(KOKKOS)
// Use the Kokkos implementation
stream = new KokkosStream<T>(ARRAY_SIZE, deviceIndex);
#elif defined(ACC)
// Use the OpenACC implementation
stream = new ACCStream<T>(ARRAY_SIZE, a.data(), b.data(), c.data(), deviceIndex);
#elif defined(SYCL)
// Use the SYCL implementation
stream = new SYCLStream<T>(ARRAY_SIZE, deviceIndex);
#elif defined(OMP)
// Use the OpenMP implementation
stream = new OMPStream<T>(ARRAY_SIZE, a.data(), b.data(), c.data(), deviceIndex);
#endif
stream->init_arrays(startA, startB, startC);
// Declare timers
std::chrono::high_resolution_clock::time_point t1, t2;
// Run triad in loop
t1 = std::chrono::high_resolution_clock::now();
for (unsigned int k = 0; k < num_times; k++)
{
stream->triad();
}
t2 = std::chrono::high_resolution_clock::now();
double runtime = std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1).count();
// Check solutions
T sum = 0.0;
stream->read_arrays(a, b, c);
check_solution<T>(num_times, a, b, c, sum);
// Display timing results
double total_bytes = 3 * sizeof(T) * ARRAY_SIZE * num_times;
double bandwidth = 1.0E-9 * (total_bytes / runtime);
std::cout
<< "--------------------------------"
<< std::endl << std::fixed
<< "Runtime (seconds): " << std::left << std::setprecision(5)
<< runtime << std::endl
<< "Bandwidth (GB/s): " << std::left << std::setprecision(3)
<< bandwidth << std::endl;
delete stream;
}
template <typename T>
void check_solution(const unsigned int ntimes, std::vector<T>& a, std::vector<T>& b, std::vector<T>& c, T& sum)
{
// Generate correct solution
T goldA = startA;
T goldB = startB;
T goldC = startC;
T goldSum = 0.0;
const T scalar = startScalar;
for (unsigned int i = 0; i < ntimes; i++)
{
// Do STREAM!
if (!triad_only)
{
goldC = goldA;
goldB = scalar * goldC;
goldC = goldA + goldB;
}
goldA = goldB + scalar * goldC;
}
// Do the reduction
goldSum = goldA * goldB * ARRAY_SIZE;
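// Editorial note (not in the original source): the Dot kernel reduces
// sum_i a[i]*b[i]; since the arrays start uniform and every kernel applies
// the same update to each element, a[i] = goldA and b[i] = goldB for all i,
// so the expected reduction is goldA * goldB * ARRAY_SIZE.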
// Calculate the average error
double errA = std::accumulate(a.begin(), a.end(), 0.0, [&](double sum, const T val){ return sum + fabs(val - goldA); });
errA /= a.size();
double errB = std::accumulate(b.begin(), b.end(), 0.0, [&](double sum, const T val){ return sum + fabs(val - goldB); });
errB /= b.size();
double errC = std::accumulate(c.begin(), c.end(), 0.0, [&](double sum, const T val){ return sum + fabs(val - goldC); });
errC /= c.size();
double errSum = fabs(sum - goldSum);
double epsi = std::numeric_limits<T>::epsilon() * 100.0;
if (errA > epsi)
std::cerr
<< "Validation failed on a[]. Average error " << errA
<< std::endl;
if (errB > epsi)
std::cerr
<< "Validation failed on b[]. Average error " << errB
<< std::endl;
if (errC > epsi)
std::cerr
<< "Validation failed on c[]. Average error " << errC
<< std::endl;
// Check sum to 8 decimal places
if (!triad_only && errSum > 1.0E-8)
std::cerr
<< "Validation failed on sum. Error " << errSum
<< std::endl << std::setprecision(15)
<< "Sum was " << sum << " but should be " << goldSum
<< std::endl;
}
int parseUInt(const char *str, unsigned int *output)
{
char *next;
*output = strtoul(str, &next, 10);
return !strlen(next);
}
void parseArguments(int argc, char *argv[])
{
for (int i = 1; i < argc; i++)
{
if (!std::string("--list").compare(argv[i]))
{
listDevices();
exit(EXIT_SUCCESS);
}
else if (!std::string("--device").compare(argv[i]))
{
if (++i >= argc || !parseUInt(argv[i], &deviceIndex))
{
std::cerr << "Invalid device index." << std::endl;
exit(EXIT_FAILURE);
}
}
else if (!std::string("--arraysize").compare(argv[i]) ||
!std::string("-s").compare(argv[i]))
{
if (++i >= argc || !parseUInt(argv[i], &ARRAY_SIZE))
{
std::cerr << "Invalid array size." << std::endl;
exit(EXIT_FAILURE);
}
}
else if (!std::string("--numtimes").compare(argv[i]) ||
!std::string("-n").compare(argv[i]))
{
if (++i >= argc || !parseUInt(argv[i], &num_times))
{
std::cerr << "Invalid number of times." << std::endl;
exit(EXIT_FAILURE);
}
if (num_times < 2)
{
std::cerr << "Number of times must be 2 or more" << std::endl;
exit(EXIT_FAILURE);
}
}
else if (!std::string("--float").compare(argv[i]))
{
use_float = true;
}
else if (!std::string("--triad-only").compare(argv[i]))
{
triad_only = true;
}
else if (!std::string("--csv").compare(argv[i]))
{
output_as_csv = true;
}
else if (!std::string("--help").compare(argv[i]) ||
!std::string("-h").compare(argv[i]))
{
std::cout << std::endl;
std::cout << "Usage: " << argv[0] << " [OPTIONS]" << std::endl << std::endl;
std::cout << "Options:" << std::endl;
std::cout << " -h --help Print the message" << std::endl;
std::cout << " --list List available devices" << std::endl;
std::cout << " --device INDEX Select device at INDEX" << std::endl;
std::cout << " -s --arraysize SIZE Use SIZE elements in the array" << std::endl;
std::cout << " -n --numtimes NUM Run the test NUM times (NUM >= 2)" << std::endl;
std::cout << " --float Use floats (rather than doubles)" << std::endl;
std::cout << " --triad-only Only run triad" << std::endl;
std::cout << " --csv Output as csv table" << std::endl;
std::cout << std::endl;
exit(EXIT_SUCCESS);
}
else
{
std::cerr << "Unrecognized argument '" << argv[i] << "' (try '--help')"
<< std::endl;
exit(EXIT_FAILURE);
}
}
}
| cd748e42a1e68b94aef9d5802da93551027755e9.cu |
// Copyright (c) 2015-16 Tom Deakin, Simon McIntosh-Smith,
// University of Bristol HPC
//
// For full license terms please see the LICENSE file distributed with this
// source code
#include <iostream>
#include <vector>
#include <numeric>
#include <cmath>
#include <limits>
#include <chrono>
#include <algorithm>
#include <iomanip>
#include <cstring>
#define VERSION_STRING "3.3"
#include "Stream.h"
#if defined(CUDA)
#include "CUDAStream.h"
#elif defined(HIP)
#include "HIPStream.h"
#elif defined(HC)
#include "HCStream.h"
#elif defined(OCL)
#include "OCLStream.h"
#elif defined(USE_RAJA)
#include "RAJAStream.hpp"
#elif defined(KOKKOS)
#include "KokkosStream.hpp"
#elif defined(ACC)
#include "ACCStream.h"
#elif defined(SYCL)
#include "SYCLStream.h"
#elif defined(OMP)
#include "OMPStream.h"
#endif
// Default size of 2^25 (33554432 elements)
unsigned int ARRAY_SIZE = 33554432;
unsigned int num_times = 100;
unsigned int deviceIndex = 0;
bool use_float = false;
bool triad_only = false;
bool output_as_csv = false;
std::string csv_separator = ",";
template <typename T>
void check_solution(const unsigned int ntimes, std::vector<T>& a, std::vector<T>& b, std::vector<T>& c, T& sum);
template <typename T>
void run();
template <typename T>
void run_triad();
void parseArguments(int argc, char *argv[]);
int main(int argc, char *argv[])
{
parseArguments(argc, argv);
if (!output_as_csv)
{
std::cout
<< "BabelStream" << std::endl
<< "Version: " << VERSION_STRING << std::endl
<< "Implementation: " << IMPLEMENTATION_STRING << std::endl;
}
// TODO: Fix Kokkos to allow multiple template specializations
if (triad_only)
{
if (use_float)
run_triad<float>();
else
run_triad<double>();
}
else
{
if (use_float)
run<float>();
else
run<double>();
}
}
template <typename T>
void run()
{
std::streamsize ss = std::cout.precision();
if (!output_as_csv)
{
std::cout << "Running kernels " << num_times << " times" << std::endl;
if (sizeof(T) == sizeof(float))
std::cout << "Precision: float" << std::endl;
else
std::cout << "Precision: double" << std::endl;
std::cout << std::setprecision(1) << std::fixed
<< "Array size: " << ARRAY_SIZE*sizeof(T)*1.0E-6 << " MB"
<< " (=" << ARRAY_SIZE*sizeof(T)*1.0E-9 << " GB)" << std::endl;
std::cout << "Total size: " << 3.0*ARRAY_SIZE*sizeof(T)*1.0E-6 << " MB"
<< " (=" << 3.0*ARRAY_SIZE*sizeof(T)*1.0E-9 << " GB)" << std::endl;
std::cout.precision(ss);
}
// Create host vectors
std::vector<T> a(ARRAY_SIZE);
std::vector<T> b(ARRAY_SIZE);
std::vector<T> c(ARRAY_SIZE);
// Result of the Dot kernel
T sum;
Stream<T> *stream;
#if defined(CUDA)
// Use the CUDA implementation
stream = new CUDAStream<T>(ARRAY_SIZE, deviceIndex);
#elif defined(HIP)
// Use the HIP implementation
stream = new HIPStream<T>(ARRAY_SIZE, deviceIndex);
#elif defined(HC)
// Use the HC implementation
stream = new HCStream<T>(ARRAY_SIZE, deviceIndex);
#elif defined(OCL)
// Use the OpenCL implementation
stream = new OCLStream<T>(ARRAY_SIZE, deviceIndex);
#elif defined(USE_RAJA)
// Use the RAJA implementation
stream = new RAJAStream<T>(ARRAY_SIZE, deviceIndex);
#elif defined(KOKKOS)
// Use the Kokkos implementation
stream = new KokkosStream<T>(ARRAY_SIZE, deviceIndex);
#elif defined(ACC)
// Use the OpenACC implementation
stream = new ACCStream<T>(ARRAY_SIZE, a.data(), b.data(), c.data(), deviceIndex);
#elif defined(SYCL)
// Use the SYCL implementation
stream = new SYCLStream<T>(ARRAY_SIZE, deviceIndex);
#elif defined(OMP)
// Use the OpenMP implementation
stream = new OMPStream<T>(ARRAY_SIZE, a.data(), b.data(), c.data(), deviceIndex);
#endif
stream->init_arrays(startA, startB, startC);
// List of times
std::vector<std::vector<double>> timings(5);
// Declare timers
std::chrono::high_resolution_clock::time_point t1, t2;
// Main loop
for (unsigned int k = 0; k < num_times; k++)
{
// Execute Copy
t1 = std::chrono::high_resolution_clock::now();
stream->copy();
t2 = std::chrono::high_resolution_clock::now();
timings[0].push_back(std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1).count());
// Execute Mul
t1 = std::chrono::high_resolution_clock::now();
stream->mul();
t2 = std::chrono::high_resolution_clock::now();
timings[1].push_back(std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1).count());
// Execute Add
t1 = std::chrono::high_resolution_clock::now();
stream->add();
t2 = std::chrono::high_resolution_clock::now();
timings[2].push_back(std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1).count());
// Execute Triad
t1 = std::chrono::high_resolution_clock::now();
stream->triad();
t2 = std::chrono::high_resolution_clock::now();
timings[3].push_back(std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1).count());
// Execute Dot
t1 = std::chrono::high_resolution_clock::now();
sum = stream->dot();
t2 = std::chrono::high_resolution_clock::now();
timings[4].push_back(std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1).count());
}
// Check solutions
stream->read_arrays(a, b, c);
check_solution<T>(num_times, a, b, c, sum);
// Display timing results
if (output_as_csv)
{
std::cout
<< "function" << csv_separator
<< "num_times" << csv_separator
<< "n_elements" << csv_separator
<< "sizeof" << csv_separator
<< "max_mbytes_per_sec" << csv_separator
<< "min_runtime" << csv_separator
<< "max_runtime" << csv_separator
<< "avg_runtime" << std::endl;
}
else
{
std::cout
<< std::left << std::setw(12) << "Function"
<< std::left << std::setw(12) << "MBytes/sec"
<< std::left << std::setw(12) << "Min (sec)"
<< std::left << std::setw(12) << "Max"
<< std::left << std::setw(12) << "Average"
<< std::endl
<< std::fixed;
}
std::string labels[5] = {"Copy", "Mul", "Add", "Triad", "Dot"};
size_t sizes[5] = {
2 * sizeof(T) * ARRAY_SIZE,
2 * sizeof(T) * ARRAY_SIZE,
3 * sizeof(T) * ARRAY_SIZE,
3 * sizeof(T) * ARRAY_SIZE,
2 * sizeof(T) * ARRAY_SIZE
};
for (int i = 0; i < 5; i++)
{
// Get min/max; ignore the first result
auto minmax = std::minmax_element(timings[i].begin()+1, timings[i].end());
// Calculate average; ignore the first result
double average = std::accumulate(timings[i].begin()+1, timings[i].end(), 0.0) / (double)(num_times - 1);
// Display results
if (output_as_csv)
{
std::cout
<< labels[i] << csv_separator
<< num_times << csv_separator
<< ARRAY_SIZE << csv_separator
<< sizeof(T) << csv_separator
<< 1.0E-6 * sizes[i] / (*minmax.first) << csv_separator
<< *minmax.first << csv_separator
<< *minmax.second << csv_separator
<< average << csv_separator
<< std::endl;
}
else
{
std::cout
<< std::left << std::setw(12) << labels[i]
<< std::left << std::setw(12) << std::setprecision(3) << 1.0E-6 * sizes[i] / (*minmax.first)
<< std::left << std::setw(12) << std::setprecision(5) << *minmax.first
<< std::left << std::setw(12) << std::setprecision(5) << *minmax.second
<< std::left << std::setw(12) << std::setprecision(5) << average
<< std::endl;
}
}
delete stream;
}
template <typename T>
void run_triad()
{
std::cout << "Running triad " << num_times << " times" << std::endl;
std::cout << "Number of elements: " << ARRAY_SIZE << std::endl;
if (sizeof(T) == sizeof(float))
std::cout << "Precision: float" << std::endl;
else
std::cout << "Precision: double" << std::endl;
// Create host vectors
std::vector<T> a(ARRAY_SIZE);
std::vector<T> b(ARRAY_SIZE);
std::vector<T> c(ARRAY_SIZE);
std::streamsize ss = std::cout.precision();
std::cout << std::setprecision(1) << std::fixed
<< "Array size: " << ARRAY_SIZE*sizeof(T)*1.0E-3 << " KB"
<< " (=" << ARRAY_SIZE*sizeof(T)*1.0E-6 << " MB)" << std::endl;
std::cout << "Total size: " << 3.0*ARRAY_SIZE*sizeof(T)*1.0E-3 << " KB"
<< " (=" << 3.0*ARRAY_SIZE*sizeof(T)*1.0E-6 << " MB)" << std::endl;
std::cout.precision(ss);
Stream<T> *stream;
#if defined(CUDA)
// Use the CUDA implementation
stream = new CUDAStream<T>(ARRAY_SIZE, deviceIndex);
#elif defined(HIP)
// Use the HIP implementation
stream = new HIPStream<T>(ARRAY_SIZE, deviceIndex);
#elif defined(OCL)
// Use the OpenCL implementation
stream = new OCLStream<T>(ARRAY_SIZE, deviceIndex);
#elif defined(USE_RAJA)
// Use the RAJA implementation
stream = new RAJAStream<T>(ARRAY_SIZE, deviceIndex);
#elif defined(KOKKOS)
// Use the Kokkos implementation
stream = new KokkosStream<T>(ARRAY_SIZE, deviceIndex);
#elif defined(ACC)
// Use the OpenACC implementation
stream = new ACCStream<T>(ARRAY_SIZE, a.data(), b.data(), c.data(), deviceIndex);
#elif defined(SYCL)
// Use the SYCL implementation
stream = new SYCLStream<T>(ARRAY_SIZE, deviceIndex);
#elif defined(OMP)
// Use the OpenMP implementation
stream = new OMPStream<T>(ARRAY_SIZE, a.data(), b.data(), c.data(), deviceIndex);
#endif
stream->init_arrays(startA, startB, startC);
// Declare timers
std::chrono::high_resolution_clock::time_point t1, t2;
// Run triad in loop
t1 = std::chrono::high_resolution_clock::now();
for (unsigned int k = 0; k < num_times; k++)
{
stream->triad();
}
t2 = std::chrono::high_resolution_clock::now();
double runtime = std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1).count();
// Check solutions
T sum = 0.0;
stream->read_arrays(a, b, c);
check_solution<T>(num_times, a, b, c, sum);
// Display timing results
double total_bytes = 3 * sizeof(T) * ARRAY_SIZE * num_times;
double bandwidth = 1.0E-9 * (total_bytes / runtime);
std::cout
<< "--------------------------------"
<< std::endl << std::fixed
<< "Runtime (seconds): " << std::left << std::setprecision(5)
<< runtime << std::endl
<< "Bandwidth (GB/s): " << std::left << std::setprecision(3)
<< bandwidth << std::endl;
delete stream;
}
template <typename T>
void check_solution(const unsigned int ntimes, std::vector<T>& a, std::vector<T>& b, std::vector<T>& c, T& sum)
{
// Generate correct solution
T goldA = startA;
T goldB = startB;
T goldC = startC;
T goldSum = 0.0;
const T scalar = startScalar;
for (unsigned int i = 0; i < ntimes; i++)
{
// Do STREAM!
if (!triad_only)
{
goldC = goldA;
goldB = scalar * goldC;
goldC = goldA + goldB;
}
goldA = goldB + scalar * goldC;
}
// Do the reduction
goldSum = goldA * goldB * ARRAY_SIZE;
// Calculate the average error
double errA = std::accumulate(a.begin(), a.end(), 0.0, [&](double sum, const T val){ return sum + fabs(val - goldA); });
errA /= a.size();
double errB = std::accumulate(b.begin(), b.end(), 0.0, [&](double sum, const T val){ return sum + fabs(val - goldB); });
errB /= b.size();
double errC = std::accumulate(c.begin(), c.end(), 0.0, [&](double sum, const T val){ return sum + fabs(val - goldC); });
errC /= c.size();
double errSum = fabs(sum - goldSum);
double epsi = std::numeric_limits<T>::epsilon() * 100.0;
if (errA > epsi)
std::cerr
<< "Validation failed on a[]. Average error " << errA
<< std::endl;
if (errB > epsi)
std::cerr
<< "Validation failed on b[]. Average error " << errB
<< std::endl;
if (errC > epsi)
std::cerr
<< "Validation failed on c[]. Average error " << errC
<< std::endl;
// Check sum to 8 decimal places
if (!triad_only && errSum > 1.0E-8)
std::cerr
<< "Validation failed on sum. Error " << errSum
<< std::endl << std::setprecision(15)
<< "Sum was " << sum << " but should be " << goldSum
<< std::endl;
}
int parseUInt(const char *str, unsigned int *output)
{
char *next;
*output = strtoul(str, &next, 10);
return !strlen(next);
}
void parseArguments(int argc, char *argv[])
{
for (int i = 1; i < argc; i++)
{
if (!std::string("--list").compare(argv[i]))
{
listDevices();
exit(EXIT_SUCCESS);
}
else if (!std::string("--device").compare(argv[i]))
{
if (++i >= argc || !parseUInt(argv[i], &deviceIndex))
{
std::cerr << "Invalid device index." << std::endl;
exit(EXIT_FAILURE);
}
}
else if (!std::string("--arraysize").compare(argv[i]) ||
!std::string("-s").compare(argv[i]))
{
if (++i >= argc || !parseUInt(argv[i], &ARRAY_SIZE))
{
std::cerr << "Invalid array size." << std::endl;
exit(EXIT_FAILURE);
}
}
else if (!std::string("--numtimes").compare(argv[i]) ||
!std::string("-n").compare(argv[i]))
{
if (++i >= argc || !parseUInt(argv[i], &num_times))
{
std::cerr << "Invalid number of times." << std::endl;
exit(EXIT_FAILURE);
}
if (num_times < 2)
{
std::cerr << "Number of times must be 2 or more" << std::endl;
exit(EXIT_FAILURE);
}
}
else if (!std::string("--float").compare(argv[i]))
{
use_float = true;
}
else if (!std::string("--triad-only").compare(argv[i]))
{
triad_only = true;
}
else if (!std::string("--csv").compare(argv[i]))
{
output_as_csv = true;
}
else if (!std::string("--help").compare(argv[i]) ||
!std::string("-h").compare(argv[i]))
{
std::cout << std::endl;
std::cout << "Usage: " << argv[0] << " [OPTIONS]" << std::endl << std::endl;
std::cout << "Options:" << std::endl;
std::cout << " -h --help Print the message" << std::endl;
std::cout << " --list List available devices" << std::endl;
std::cout << " --device INDEX Select device at INDEX" << std::endl;
std::cout << " -s --arraysize SIZE Use SIZE elements in the array" << std::endl;
std::cout << " -n --numtimes NUM Run the test NUM times (NUM >= 2)" << std::endl;
std::cout << " --float Use floats (rather than doubles)" << std::endl;
std::cout << " --triad-only Only run triad" << std::endl;
std::cout << " --csv Output as csv table" << std::endl;
std::cout << std::endl;
exit(EXIT_SUCCESS);
}
else
{
std::cerr << "Unrecognized argument '" << argv[i] << "' (try '--help')"
<< std::endl;
exit(EXIT_FAILURE);
}
}
}
|
77346a373409c00b2e558db656331dd93b1e1ab2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "../common/book.h"
#define N 10
__global__ void add( int *a, int *b, int *c ) {
int tid = blockIdx.x; // this thread handles the data at its thread id
if (tid < N)
c[tid] = a[tid] + b[tid];
}
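// Worked example (editorial, not in the original source): launched below with
// N blocks of one thread, block b computes c[b] = a[b] + b[b]; with
// a[i] = -i and b[i] = i*i the host loop prints lines such as "-3 + 9 = 6".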
int main( void ) {
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
// allocate the memory on the GPU
HANDLE_ERROR( hipMalloc( (void**)&dev_a, N * sizeof(int) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_b, N * sizeof(int) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_c, N * sizeof(int) ) );
// fill the arrays 'a' and 'b' on the CPU
for (int i=0; i<N; i++) {
a[i] = -i;
b[i] = i * i;
}
// copy the arrays 'a' and 'b' to the GPU
HANDLE_ERROR( hipMemcpy( dev_a, a, N * sizeof(int),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_b, b, N * sizeof(int),
hipMemcpyHostToDevice ) );
hipLaunchKernelGGL(( add), dim3(N),dim3(1), 0, 0, dev_a, dev_b, dev_c );
// copy the array 'c' back from the GPU to the CPU
HANDLE_ERROR( hipMemcpy( c, dev_c, N * sizeof(int),
hipMemcpyDeviceToHost ) );
// display the results
for (int i=0; i<N; i++) {
printf( "%d + %d = %d\n", a[i], b[i], c[i] );
}
// free the memory allocated on the GPU
HANDLE_ERROR( hipFree( dev_a ) );
HANDLE_ERROR( hipFree( dev_b ) );
HANDLE_ERROR( hipFree( dev_c ) );
return 0;
}
| 77346a373409c00b2e558db656331dd93b1e1ab2.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "../common/book.h"
#define N 10
__global__ void add( int *a, int *b, int *c ) {
int tid = blockIdx.x; // this thread handles the data at its thread id
if (tid < N)
c[tid] = a[tid] + b[tid];
}
int main( void ) {
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
// allocate the memory on the GPU
HANDLE_ERROR( cudaMalloc( (void**)&dev_a, N * sizeof(int) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_b, N * sizeof(int) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_c, N * sizeof(int) ) );
// fill the arrays 'a' and 'b' on the CPU
for (int i=0; i<N; i++) {
a[i] = -i;
b[i] = i * i;
}
// copy the arrays 'a' and 'b' to the GPU
HANDLE_ERROR( cudaMemcpy( dev_a, a, N * sizeof(int),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_b, b, N * sizeof(int),
cudaMemcpyHostToDevice ) );
add<<<N,1>>>( dev_a, dev_b, dev_c );
// copy the array 'c' back from the GPU to the CPU
HANDLE_ERROR( cudaMemcpy( c, dev_c, N * sizeof(int),
cudaMemcpyDeviceToHost ) );
// display the results
for (int i=0; i<N; i++) {
printf( "%d + %d = %d\n", a[i], b[i], c[i] );
}
// free the memory allocated on the GPU
HANDLE_ERROR( cudaFree( dev_a ) );
HANDLE_ERROR( cudaFree( dev_b ) );
HANDLE_ERROR( cudaFree( dev_c ) );
return 0;
}
|
ff1a7994a0ebf0b6dadde9f6760c6c9d09948a52.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from magmablas/zlange.cu, normal z -> d, Thu Oct 8 23:05:33 2020
@author Mark Gates
*/
#include "magma_internal.h"
#include "magma_templates.h"
#define REAL
#define NB_X 64
/* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:m-1, for || A ||_inf,
* where m and n are any size.
* Has ceil( m/NB_X ) blocks of NB_X threads. Each thread does one row.
* See also dlange_max_kernel code, below. */
extern "C" __global__ void
dlange_inf_kernel(
int m, int n,
const double * __restrict__ A, int lda,
double * __restrict__ dwork )
{
int i = blockIdx.x*NB_X + threadIdx.x;
double rsum[4] = {0, 0, 0, 0};
int n_mod_4 = n % 4;
n -= n_mod_4;
// if beyond last row, skip row
if ( i < m ) {
A += i;
if ( n >= 4 ) {
const double *Aend = A + lda*n;
double rA[4] = { A[0], A[lda], A[2*lda], A[3*lda] };
A += 4*lda;
while( A < Aend ) {
rsum[0] += MAGMA_D_ABS( rA[0] ); rA[0] = A[0];
rsum[1] += MAGMA_D_ABS( rA[1] ); rA[1] = A[lda];
rsum[2] += MAGMA_D_ABS( rA[2] ); rA[2] = A[2*lda];
rsum[3] += MAGMA_D_ABS( rA[3] ); rA[3] = A[3*lda];
A += 4*lda;
}
rsum[0] += MAGMA_D_ABS( rA[0] );
rsum[1] += MAGMA_D_ABS( rA[1] );
rsum[2] += MAGMA_D_ABS( rA[2] );
rsum[3] += MAGMA_D_ABS( rA[3] );
}
/* clean up code */
switch( n_mod_4 ) {
case 0:
break;
case 1:
rsum[0] += MAGMA_D_ABS( A[0] );
break;
case 2:
rsum[0] += MAGMA_D_ABS( A[0] );
rsum[1] += MAGMA_D_ABS( A[lda] );
break;
case 3:
rsum[0] += MAGMA_D_ABS( A[0] );
rsum[1] += MAGMA_D_ABS( A[lda] );
rsum[2] += MAGMA_D_ABS( A[2*lda] );
break;
}
/* compute final result */
dwork[i] = rsum[0] + rsum[1] + rsum[2] + rsum[3];
}
}
/* Computes max of row dwork[i] = max( abs( A(i,:) )), i=0:m-1, for || A ||_max,
* where m and n are any size.
* Has ceil( m/NB_X ) blocks of NB_X threads. Each thread does one row.
* Based on dlange_inf_kernel code, above. */
extern "C" __global__ void
dlange_max_kernel(
int m, int n,
const double * __restrict__ A, int lda,
double * __restrict__ dwork )
{
int i = blockIdx.x*NB_X + threadIdx.x;
double rmax[4] = {0, 0, 0, 0};
int n_mod_4 = n % 4;
n -= n_mod_4;
// if beyond last row, skip row
if ( i < m ) {
A += i;
if ( n >= 4 ) {
const double *Aend = A + lda*n;
double rA[4] = { A[0], A[lda], A[2*lda], A[3*lda] };
A += 4*lda;
while( A < Aend ) {
rmax[0] = max_nan( rmax[0], MAGMA_D_ABS( rA[0] )); rA[0] = A[0];
rmax[1] = max_nan( rmax[1], MAGMA_D_ABS( rA[1] )); rA[1] = A[lda];
rmax[2] = max_nan( rmax[2], MAGMA_D_ABS( rA[2] )); rA[2] = A[2*lda];
rmax[3] = max_nan( rmax[3], MAGMA_D_ABS( rA[3] )); rA[3] = A[3*lda];
A += 4*lda;
}
rmax[0] = max_nan( rmax[0], MAGMA_D_ABS( rA[0] ));
rmax[1] = max_nan( rmax[1], MAGMA_D_ABS( rA[1] ));
rmax[2] = max_nan( rmax[2], MAGMA_D_ABS( rA[2] ));
rmax[3] = max_nan( rmax[3], MAGMA_D_ABS( rA[3] ));
}
/* clean up code */
switch( n_mod_4 ) {
case 0:
break;
case 1:
rmax[0] = max_nan( rmax[0], MAGMA_D_ABS( A[0] ));
break;
case 2:
rmax[0] = max_nan( rmax[0], MAGMA_D_ABS( A[ 0] ));
rmax[1] = max_nan( rmax[1], MAGMA_D_ABS( A[lda] ));
break;
case 3:
rmax[0] = max_nan( rmax[0], MAGMA_D_ABS( A[ 0] ));
rmax[1] = max_nan( rmax[1], MAGMA_D_ABS( A[ lda] ));
rmax[2] = max_nan( rmax[2], MAGMA_D_ABS( A[2*lda] ));
break;
}
/* compute final result */
dwork[i] = max_nan( max_nan( max_nan( rmax[0], rmax[1] ), rmax[2] ), rmax[3] );
}
}
/* Computes col sums dwork[j] = sum( abs( A(:,j) )), j=0:n-1, for || A ||_one,
* where m and n are any size.
* Has n blocks of NB threads each. Block j sums one column, A(:,j) into dwork[j].
* Thread i accumulates A(i,j) + A(i+NB,j) + A(i+2*NB,j) + ... into ssum[i],
* then threads collectively do a sum-reduction of ssum,
* and finally thread 0 saves to dwork[j]. */
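/* Worked example (editorial, not in the original source): with NB_X = 64 and
* m = 1024, thread tx of block j accumulates the 16 elements A(tx,j),
* A(tx+64,j), ..., A(tx+960,j) into ssum[tx]; magma_sum_reduce then combines
* the 64 partial sums and thread 0 writes the column sum to dwork[j]. */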
extern "C" __global__ void
dlange_one_kernel(
int m, int n,
const double * __restrict__ A, int lda,
double * __restrict__ dwork )
{
__shared__ double ssum[NB_X];
int tx = threadIdx.x;
A += blockIdx.x*lda; // column j
ssum[tx] = 0;
for( int i = tx; i < m; i += NB_X ) {
ssum[tx] += MAGMA_D_ABS( A[i] );
}
magma_sum_reduce< NB_X >( tx, ssum );
if ( tx == 0 ) {
dwork[ blockIdx.x ] = ssum[0];
}
}
/* Based on dlange_one_kernel code, above.
* Computes col sums dwork[j] = sum( abs( A(:,j) )^2 ), j=0:n-1, for || A ||_F,
* where m and n are any size.
* Has n blocks of NB threads each. Block j sums one column, A(:,j) into dwork[j].
* Thread i accumulates A(i,j) + A(i+NB,j) + A(i+2*NB,j) + ... into ssum[i],
* then threads collectively do a sum-reduction of ssum,
* and finally thread 0 saves to dwork[j]. */
extern "C" __global__ void
dlange_fro_kernel(
int m, int n,
const double * __restrict__ A, int lda,
double * __restrict__ dwork )
{
__shared__ double ssum[NB_X];
int tx = threadIdx.x;
A += blockIdx.x*lda; // column j
ssum[tx] = 0;
for( int i = tx; i < m; i += NB_X ) {
#ifdef COMPLEX
double a = MAGMA_D_ABS( A[i] );
#else
double a = A[i];
#endif
ssum[tx] += a*a;
}
magma_sum_reduce< NB_X >( tx, ssum );
if ( tx == 0 ) {
dwork[ blockIdx.x ] = ssum[0];
}
}
/***************************************************************************//**
Purpose
-------
DLANGE returns the value of the one norm, or the Frobenius norm, or
the infinity norm, or the element of largest absolute value of a
real matrix A.
Description
-----------
DLANGE returns the value
DLANGE = ( max(abs(A(i,j))), NORM = MagmaMaxNorm
(
( norm1(A), NORM = MagmaOneNorm
(
( normI(A), NORM = MagmaInfNorm
(
( normF(A), NORM = MagmaFrobeniusNorm
where norm1 denotes the one norm of a matrix (maximum column sum),
normI denotes the infinity norm of a matrix (maximum row sum) and
normF denotes the Frobenius norm of a matrix (square root of sum of
squares). Note that max(abs(A(i,j))) is not a consistent matrix norm.
Arguments
---------
@param[in]
norm magma_norm_t
Specifies the value to be returned in DLANGE as described
above.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0. When M = 0,
DLANGE is set to zero.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0. When N = 0,
DLANGE is set to zero.
@param[in]
dA DOUBLE PRECISION array on the GPU, dimension (LDDA,N)
The m by n matrix A.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(M,1).
@param
dwork (workspace) DOUBLE PRECISION array on the GPU, dimension (LWORK).
@param[in]
lwork INTEGER
The dimension of the array WORK.
If NORM = MagmaInfNorm or MagmaMaxNorm, LWORK >= max( 1, M ).
If NORM = MagmaOneNorm, LWORK >= max( 1, N ).
Note this is different than LAPACK, which requires WORK only for
NORM = MagmaInfNorm, and does not pass LWORK.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lange
*******************************************************************************/
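/* Usage sketch (editorial illustration, not part of the original source).
* Computes the Frobenius norm of an m-by-n double matrix already resident on
* the device; magma_dmalloc/magma_free are assumed to be the usual MAGMA
* device-memory helpers, and dA, ldda, m, n, queue are assumed to exist:
*
*     double *dwork, nrm;
*     magma_dmalloc( &dwork, n );            // lwork >= n for this norm
*     nrm = magmablas_dlange( MagmaFrobeniusNorm, m, n,
*                             dA, ldda, dwork, n, queue );
*     magma_free( dwork );
*/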
extern "C" double
magmablas_dlange(
magma_norm_t norm, magma_int_t m, magma_int_t n,
magmaDouble_const_ptr dA, magma_int_t ldda,
magmaDouble_ptr dwork, magma_int_t lwork,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( ! (norm == MagmaInfNorm || norm == MagmaMaxNorm ||
norm == MagmaOneNorm || norm == MagmaFrobeniusNorm) )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < m )
info = -5;
else if ( ((norm == MagmaInfNorm || norm == MagmaMaxNorm) && (lwork < m)) ||
((norm == MagmaOneNorm || norm == MagmaFrobeniusNorm ) && (lwork < n)) )
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return info;
}
/* Quick return */
if ( m == 0 || n == 0 )
return 0;
//int i;
dim3 threads( NB_X );
double result = -1;
if ( norm == MagmaInfNorm ) {
dim3 grid( magma_ceildiv( m, NB_X ) );
hipLaunchKernelGGL(( dlange_inf_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA, ldda, dwork );
hipLaunchKernelGGL(( magma_max_nan_kernel), dim3(1), dim3(512), 0, queue->cuda_stream() , m, dwork );
}
else if ( norm == MagmaMaxNorm ) {
dim3 grid( magma_ceildiv( m, NB_X ) );
hipLaunchKernelGGL(( dlange_max_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA, ldda, dwork );
hipLaunchKernelGGL(( magma_max_nan_kernel), dim3(1), dim3(512), 0, queue->cuda_stream() , m, dwork );
}
else if ( norm == MagmaOneNorm ) {
dim3 grid( n );
hipLaunchKernelGGL(( dlange_one_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA, ldda, dwork );
hipLaunchKernelGGL(( magma_max_nan_kernel), dim3(1), dim3(512), 0, queue->cuda_stream() , n, dwork ); // note n instead of m
}
else if ( norm == MagmaFrobeniusNorm ) {
dim3 grid( n );
hipLaunchKernelGGL(( dlange_fro_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA, ldda, dwork );
hipLaunchKernelGGL(( magma_sum_reduce_kernel), dim3(1), dim3(512), 0, queue->cuda_stream() , n, dwork ); // note n instead of m
}
magma_dgetvector( 1, &dwork[0], 1, &result, 1, queue );
if( norm == MagmaFrobeniusNorm ) {
result = sqrt(result); // Square root for final result.
}
return result;
}
| ff1a7994a0ebf0b6dadde9f6760c6c9d09948a52.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from magmablas/zlange.cu, normal z -> d, Thu Oct 8 23:05:33 2020
@author Mark Gates
*/
#include "magma_internal.h"
#include "magma_templates.h"
#define REAL
#define NB_X 64
/* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:m-1, for || A ||_inf,
* where m and n are any size.
* Has ceil( m/NB_X ) blocks of NB_X threads. Each thread does one row.
* See also dlange_max_kernel code, below. */
extern "C" __global__ void
dlange_inf_kernel(
int m, int n,
const double * __restrict__ A, int lda,
double * __restrict__ dwork )
{
int i = blockIdx.x*NB_X + threadIdx.x;
double rsum[4] = {0, 0, 0, 0};
int n_mod_4 = n % 4;
n -= n_mod_4;
// if beyond last row, skip row
if ( i < m ) {
A += i;
if ( n >= 4 ) {
const double *Aend = A + lda*n;
double rA[4] = { A[0], A[lda], A[2*lda], A[3*lda] };
A += 4*lda;
while( A < Aend ) {
rsum[0] += MAGMA_D_ABS( rA[0] ); rA[0] = A[0];
rsum[1] += MAGMA_D_ABS( rA[1] ); rA[1] = A[lda];
rsum[2] += MAGMA_D_ABS( rA[2] ); rA[2] = A[2*lda];
rsum[3] += MAGMA_D_ABS( rA[3] ); rA[3] = A[3*lda];
A += 4*lda;
}
rsum[0] += MAGMA_D_ABS( rA[0] );
rsum[1] += MAGMA_D_ABS( rA[1] );
rsum[2] += MAGMA_D_ABS( rA[2] );
rsum[3] += MAGMA_D_ABS( rA[3] );
}
/* clean up code */
switch( n_mod_4 ) {
case 0:
break;
case 1:
rsum[0] += MAGMA_D_ABS( A[0] );
break;
case 2:
rsum[0] += MAGMA_D_ABS( A[0] );
rsum[1] += MAGMA_D_ABS( A[lda] );
break;
case 3:
rsum[0] += MAGMA_D_ABS( A[0] );
rsum[1] += MAGMA_D_ABS( A[lda] );
rsum[2] += MAGMA_D_ABS( A[2*lda] );
break;
}
/* compute final result */
dwork[i] = rsum[0] + rsum[1] + rsum[2] + rsum[3];
}
}
/* Computes max of row dwork[i] = max( abs( A(i,:) )), i=0:m-1, for || A ||_max,
* where m and n are any size.
* Has ceil( m/NB_X ) blocks of NB_X threads. Each thread does one row.
* Based on dlange_inf_kernel code, above. */
extern "C" __global__ void
dlange_max_kernel(
int m, int n,
const double * __restrict__ A, int lda,
double * __restrict__ dwork )
{
int i = blockIdx.x*NB_X + threadIdx.x;
double rmax[4] = {0, 0, 0, 0};
int n_mod_4 = n % 4;
n -= n_mod_4;
// if beyond last row, skip row
if ( i < m ) {
A += i;
if ( n >= 4 ) {
const double *Aend = A + lda*n;
double rA[4] = { A[0], A[lda], A[2*lda], A[3*lda] };
A += 4*lda;
while( A < Aend ) {
rmax[0] = max_nan( rmax[0], MAGMA_D_ABS( rA[0] )); rA[0] = A[0];
rmax[1] = max_nan( rmax[1], MAGMA_D_ABS( rA[1] )); rA[1] = A[lda];
rmax[2] = max_nan( rmax[2], MAGMA_D_ABS( rA[2] )); rA[2] = A[2*lda];
rmax[3] = max_nan( rmax[3], MAGMA_D_ABS( rA[3] )); rA[3] = A[3*lda];
A += 4*lda;
}
rmax[0] = max_nan( rmax[0], MAGMA_D_ABS( rA[0] ));
rmax[1] = max_nan( rmax[1], MAGMA_D_ABS( rA[1] ));
rmax[2] = max_nan( rmax[2], MAGMA_D_ABS( rA[2] ));
rmax[3] = max_nan( rmax[3], MAGMA_D_ABS( rA[3] ));
}
/* clean up code */
switch( n_mod_4 ) {
case 0:
break;
case 1:
rmax[0] = max_nan( rmax[0], MAGMA_D_ABS( A[0] ));
break;
case 2:
rmax[0] = max_nan( rmax[0], MAGMA_D_ABS( A[ 0] ));
rmax[1] = max_nan( rmax[1], MAGMA_D_ABS( A[lda] ));
break;
case 3:
rmax[0] = max_nan( rmax[0], MAGMA_D_ABS( A[ 0] ));
rmax[1] = max_nan( rmax[1], MAGMA_D_ABS( A[ lda] ));
rmax[2] = max_nan( rmax[2], MAGMA_D_ABS( A[2*lda] ));
break;
}
/* compute final result */
dwork[i] = max_nan( max_nan( max_nan( rmax[0], rmax[1] ), rmax[2] ), rmax[3] );
}
}
/* Computes col sums dwork[j] = sum( abs( A(:,j) )), j=0:n-1, for || A ||_one,
* where m and n are any size.
* Has n blocks of NB threads each. Block j sums one column, A(:,j) into dwork[j].
* Thread i accumulates A(i,j) + A(i+NB,j) + A(i+2*NB,j) + ... into ssum[i],
* then threads collectively do a sum-reduction of ssum,
* and finally thread 0 saves to dwork[j]. */
extern "C" __global__ void
dlange_one_kernel(
int m, int n,
const double * __restrict__ A, int lda,
double * __restrict__ dwork )
{
__shared__ double ssum[NB_X];
int tx = threadIdx.x;
A += blockIdx.x*lda; // column j
ssum[tx] = 0;
for( int i = tx; i < m; i += NB_X ) {
ssum[tx] += MAGMA_D_ABS( A[i] );
}
magma_sum_reduce< NB_X >( tx, ssum );
if ( tx == 0 ) {
dwork[ blockIdx.x ] = ssum[0];
}
}
/* Based on dlange_one_kernel code, above.
* Computes col sums dwork[j] = sum( abs( A(:,j) )^2 ), j=0:n-1, for || A ||_F,
* where m and n are any size.
* Has n blocks of NB threads each. Block j sums one column, A(:,j) into dwork[j].
* Thread i accumulates A(i,j) + A(i+NB,j) + A(i+2*NB,j) + ... into ssum[i],
* then threads collectively do a sum-reduction of ssum,
* and finally thread 0 saves to dwork[j]. */
extern "C" __global__ void
dlange_fro_kernel(
int m, int n,
const double * __restrict__ A, int lda,
double * __restrict__ dwork )
{
__shared__ double ssum[NB_X];
int tx = threadIdx.x;
A += blockIdx.x*lda; // column j
ssum[tx] = 0;
for( int i = tx; i < m; i += NB_X ) {
#ifdef COMPLEX
double a = MAGMA_D_ABS( A[i] );
#else
double a = A[i];
#endif
ssum[tx] += a*a;
}
magma_sum_reduce< NB_X >( tx, ssum );
if ( tx == 0 ) {
dwork[ blockIdx.x ] = ssum[0];
}
}
/***************************************************************************//**
Purpose
-------
DLANGE returns the value of the one norm, or the Frobenius norm, or
the infinity norm, or the element of largest absolute value of a
real matrix A.
Description
-----------
DLANGE returns the value
DLANGE = ( max(abs(A(i,j))), NORM = MagmaMaxNorm
(
( norm1(A), NORM = MagmaOneNorm
(
( normI(A), NORM = MagmaInfNorm
(
( normF(A), NORM = MagmaFrobeniusNorm
where norm1 denotes the one norm of a matrix (maximum column sum),
normI denotes the infinity norm of a matrix (maximum row sum) and
normF denotes the Frobenius norm of a matrix (square root of sum of
squares). Note that max(abs(A(i,j))) is not a consistent matrix norm.
Arguments
---------
@param[in]
norm magma_norm_t
Specifies the value to be returned in DLANGE as described
above.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0. When M = 0,
DLANGE is set to zero.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0. When N = 0,
DLANGE is set to zero.
@param[in]
dA DOUBLE PRECISION array on the GPU, dimension (LDDA,N)
The m by n matrix A.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(M,1).
@param
dwork (workspace) DOUBLE PRECISION array on the GPU, dimension (LWORK).
@param[in]
lwork INTEGER
The dimension of the array WORK.
If NORM = MagmaInfNorm or MagmaMaxNorm, LWORK >= max( 1, M ).
If NORM = MagmaOneNorm, LWORK >= max( 1, N ).
Note this is different than LAPACK, which requires WORK only for
NORM = MagmaInfNorm, and does not pass LWORK.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lange
*******************************************************************************/
extern "C" double
magmablas_dlange(
magma_norm_t norm, magma_int_t m, magma_int_t n,
magmaDouble_const_ptr dA, magma_int_t ldda,
magmaDouble_ptr dwork, magma_int_t lwork,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( ! (norm == MagmaInfNorm || norm == MagmaMaxNorm ||
norm == MagmaOneNorm || norm == MagmaFrobeniusNorm) )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < m )
info = -5;
else if ( ((norm == MagmaInfNorm || norm == MagmaMaxNorm) && (lwork < m)) ||
((norm == MagmaOneNorm || norm == MagmaFrobeniusNorm ) && (lwork < n)) )
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return info;
}
/* Quick return */
if ( m == 0 || n == 0 )
return 0;
//int i;
dim3 threads( NB_X );
double result = -1;
if ( norm == MagmaInfNorm ) {
dim3 grid( magma_ceildiv( m, NB_X ) );
dlange_inf_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( m, n, dA, ldda, dwork );
magma_max_nan_kernel<<< 1, 512, 0, queue->cuda_stream() >>>( m, dwork );
}
else if ( norm == MagmaMaxNorm ) {
dim3 grid( magma_ceildiv( m, NB_X ) );
dlange_max_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( m, n, dA, ldda, dwork );
magma_max_nan_kernel<<< 1, 512, 0, queue->cuda_stream() >>>( m, dwork );
}
else if ( norm == MagmaOneNorm ) {
dim3 grid( n );
dlange_one_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( m, n, dA, ldda, dwork );
magma_max_nan_kernel<<< 1, 512, 0, queue->cuda_stream() >>>( n, dwork ); // note n instead of m
}
else if ( norm == MagmaFrobeniusNorm ) {
dim3 grid( n );
dlange_fro_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( m, n, dA, ldda, dwork );
magma_sum_reduce_kernel<<< 1, 512, 0, queue->cuda_stream() >>>( n, dwork ); // note n instead of m
}
magma_dgetvector( 1, &dwork[0], 1, &result, 1, queue );
if( norm == MagmaFrobeniusNorm ) {
result = sqrt(result); // Square root for final result.
}
return result;
}
|
20242a682574e4d118052e2eaf8ffb061a08368b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// statistical kernel
__global__ void compress(
long d_Ne,
fp *d_I) { // pointer to output image (DEVICE GLOBAL MEMORY)
// indexes
int bx = blockIdx.x; // get current horizontal block index
int tx = threadIdx.x; // get current horizontal thread index
int ei = (bx * NUMBER_THREADS) +
tx; // unique thread id, more threads than actual elements !!!
// copy input to output & log-compress
if (ei < d_Ne) { // do only for the number of elements, omit extra threads
d_I[ei] = log(d_I[ei]) *
255; // take the log of the input IMAGE, scale by 255 and copy to output image
}
}
| 20242a682574e4d118052e2eaf8ffb061a08368b.cu | // statistical kernel
__global__ void compress(
long d_Ne,
fp *d_I) { // pointer to output image (DEVICE GLOBAL MEMORY)
// indexes
int bx = blockIdx.x; // get current horizontal block index
int tx = threadIdx.x; // get current horizontal thread index
int ei = (bx * NUMBER_THREADS) +
tx; // unique thread id, more threads than actual elements !!!
// copy input to output & log-compress
if (ei < d_Ne) { // do only for the number of elements, omit extra threads
d_I[ei] = log(d_I[ei]) *
255; // take the log of the input IMAGE, scale by 255 and copy to output image
}
}
|
56a63b940d2fb82e8d5516302bfd0cd18b77486d.hip | // !!! This is a file automatically generated by hipify!!!
#include <Device/SafeCudaAPI.cuh>
#include <Device/SimpleKernels.cuh>
#include <Device/Timer.cuh>
#include <cmath>
#include <limits>
#include <iomanip>
#include <iostream>
#include <vector>
using namespace timer;
using xlib::byte_t;
using ttime_t = float;
int main() {
size_t size = 1024;
Timer<DEVICE> TM;
std::vector<ttime_t> allocation_time;
std::vector<ttime_t> allocation_pinned_time;
std::vector<ttime_t> H2D_time;
std::vector<ttime_t> H2D_pinned_time;
std::vector<ttime_t> D2D_time;
std::vector<ttime_t> memcpy_kernel_time;
std::vector<ttime_t> memset_time;
std::vector<ttime_t> memset_kernel_time;
byte_t* d_array, *h_array_pinned;
std::cout << "Computing";
while (true) {
std::cout << "." << std::flush;
//======================================================================
TM.start();
if (hipMalloc(&d_array, size) != hipSuccess)
break;
TM.stop();
allocation_time.push_back(TM.duration());
//----------------------------------------------------------------------
auto h_array = new byte_t[size];
TM.start();
cuMemcpyToDevice(h_array, size, d_array);
TM.stop();
delete[] h_array;
H2D_time.push_back(TM.duration());
//----------------------------------------------------------------------
TM.start();
hipHostMalloc(&h_array_pinned, size);
TM.stop();
allocation_pinned_time.push_back(TM.duration());
//----------------------------------------------------------------------
TM.start();
cuMemcpyToDevice(h_array_pinned, size, d_array);
TM.stop();
hipHostFree(h_array_pinned);
H2D_pinned_time.push_back(TM.duration());
//----------------------------------------------------------------------
TM.start();
hipMemset(d_array, 0x00, size);
TM.stop();
memset_time.push_back(TM.duration());
//----------------------------------------------------------------------
TM.start();
cu::memset(reinterpret_cast<unsigned char*>(d_array), size,
(unsigned char) 0);
TM.stop();
CHECK_CUDA_ERROR
memset_kernel_time.push_back(TM.duration());
//----------------------------------------------------------------------
byte_t* d_array2;
if (hipMalloc(&d_array2, size) == hipSuccess) {
TM.start();
hipMemcpy(d_array2, d_array, size, hipMemcpyDeviceToDevice);
TM.stop();
D2D_time.push_back(TM.duration());
//----------------------------------------------------------------------
TM.start();
cu::memcpy(d_array, size, d_array2);
TM.stop();
memcpy_kernel_time.push_back(TM.duration());
cuFree(d_array2);
}
else {
D2D_time.push_back(std::nan(""));
memcpy_kernel_time.push_back(std::nan(""));
}
cuFree(d_array);
//----------------------------------------------------------------------
size *= 2;
}
size = 1024;
std::cout << "\n\n" << std::setprecision(2) << std::right << std::fixed
<< std::setw(8) << "SIZE"
<< std::setw(11) << "MemcpyHtD"
<< std::setw(14) << "MemcpyHtDPin"
<< std::setw(11) << "MemcpyDtD"
<< std::setw(14) << "MemcpyKernel"
<< std::setw(8) << "Memset"
<< std::setw(14) << "MemsetKernel" << std::endl;
xlib::char_sequence('-', 80);
for (size_t i = 0; i < H2D_time.size(); i++) {
std::cout << std::setw(8) << xlib::human_readable(size)
<< std::setw(11) << H2D_time[i]
<< std::setw(14) << H2D_pinned_time[i]
<< std::setw(11) << D2D_time[i]
<< std::setw(14) << memcpy_kernel_time[i]
<< std::setw(8) << memset_time[i]
<< std::setw(14) << memset_kernel_time[i] << "\n";
size *= 2;
}
}
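// Illustrative follow-up (sketch; assumes Timer<DEVICE>::duration() reports
// milliseconds, as is typical for cudaEvent-based timers): any copy column can
// be turned into an effective bandwidth via
//   GB/s = bytes / (time_ms * 1e6)
// e.g. a 1 GiB host-to-device copy reported at 90 ms is about
// 1073741824 / (90 * 1e6) ~= 11.9 GB/s.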
| 56a63b940d2fb82e8d5516302bfd0cd18b77486d.cu | #include <Device/SafeCudaAPI.cuh>
#include <Device/SimpleKernels.cuh>
#include <Device/Timer.cuh>
#include <cmath>
#include <limits>
#include <iomanip>
#include <iostream>
#include <vector>
using namespace timer;
using xlib::byte_t;
using ttime_t = float;
int main() {
size_t size = 1024;
Timer<DEVICE> TM;
std::vector<ttime_t> allocation_time;
std::vector<ttime_t> allocation_pinned_time;
std::vector<ttime_t> H2D_time;
std::vector<ttime_t> H2D_pinned_time;
std::vector<ttime_t> D2D_time;
std::vector<ttime_t> memcpy_kernel_time;
std::vector<ttime_t> memset_time;
std::vector<ttime_t> memset_kernel_time;
byte_t* d_array, *h_array_pinned;
std::cout << "Computing";
while (true) {
std::cout << "." << std::flush;
//======================================================================
TM.start();
if (cudaMalloc(&d_array, size) != cudaSuccess)
break;
TM.stop();
allocation_time.push_back(TM.duration());
//----------------------------------------------------------------------
auto h_array = new byte_t[size];
TM.start();
cuMemcpyToDevice(h_array, size, d_array);
TM.stop();
delete[] h_array;
H2D_time.push_back(TM.duration());
//----------------------------------------------------------------------
TM.start();
cudaMallocHost(&h_array_pinned, size);
TM.stop();
allocation_pinned_time.push_back(TM.duration());
//----------------------------------------------------------------------
TM.start();
cuMemcpyToDevice(h_array_pinned, size, d_array);
TM.stop();
cudaFreeHost(h_array_pinned);
H2D_pinned_time.push_back(TM.duration());
//----------------------------------------------------------------------
TM.start();
cudaMemset(d_array, 0x00, size);
TM.stop();
memset_time.push_back(TM.duration());
//----------------------------------------------------------------------
TM.start();
cu::memset(reinterpret_cast<unsigned char*>(d_array), size,
(unsigned char) 0);
TM.stop();
CHECK_CUDA_ERROR
memset_kernel_time.push_back(TM.duration());
//----------------------------------------------------------------------
byte_t* d_array2;
if (cudaMalloc(&d_array2, size) == cudaSuccess) {
TM.start();
cudaMemcpy(d_array2, d_array, size, cudaMemcpyDeviceToDevice);
TM.stop();
D2D_time.push_back(TM.duration());
//----------------------------------------------------------------------
TM.start();
cu::memcpy(d_array, size, d_array2);
TM.stop();
memcpy_kernel_time.push_back(TM.duration());
cuFree(d_array2);
}
else {
D2D_time.push_back(std::nan(""));
memcpy_kernel_time.push_back(std::nan(""));
}
cuFree(d_array);
//----------------------------------------------------------------------
size *= 2;
}
size = 1024;
std::cout << "\n\n" << std::setprecision(2) << std::right << std::fixed
<< std::setw(8) << "SIZE"
<< std::setw(11) << "MemcpyHtD"
<< std::setw(14) << "MemcpyHtDPin"
<< std::setw(11) << "MemcpyDtD"
<< std::setw(14) << "MemcpyKernel"
<< std::setw(8) << "Memset"
<< std::setw(14) << "MemsetKernel" << std::endl;
xlib::char_sequence('-', 80);
for (size_t i = 0; i < H2D_time.size(); i++) {
std::cout << std::setw(8) << xlib::human_readable(size)
<< std::setw(11) << H2D_time[i]
<< std::setw(14) << H2D_pinned_time[i]
<< std::setw(11) << D2D_time[i]
<< std::setw(14) << memcpy_kernel_time[i]
<< std::setw(8) << memset_time[i]
<< std::setw(14) << memset_kernel_time[i] << "\n";
size *= 2;
}
}
|
79a2a8bb1a5a602660dc8266374279bfd34b451b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/detail/null_mask.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/integer_utils.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <thrust/binary_search.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/extrema.h>
#include <hipcub/hipcub.hpp>
#include <algorithm>
#include <numeric>
#include <type_traits>
namespace cudf {
size_type state_null_count(mask_state state, size_type size)
{
switch (state) {
case mask_state::UNALLOCATED: return 0;
case mask_state::UNINITIALIZED: return UNKNOWN_NULL_COUNT;
case mask_state::ALL_NULL: return size;
case mask_state::ALL_VALID: return 0;
default: CUDF_FAIL("Invalid null mask state.");
}
}
// Computes required allocation size of a bitmask
std::size_t bitmask_allocation_size_bytes(size_type number_of_bits, std::size_t padding_boundary)
{
CUDF_EXPECTS(padding_boundary > 0, "Invalid padding boundary");
auto necessary_bytes = cudf::util::div_rounding_up_safe<size_type>(number_of_bits, CHAR_BIT);
auto padded_bytes = padding_boundary * cudf::util::div_rounding_up_safe<size_type>(
necessary_bytes, padding_boundary);
return padded_bytes;
}
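// Worked example (sketch; assumes the 64-byte default padding boundary declared
// in the header): number_of_bits = 100 -> necessary_bytes = ceil(100 / 8) = 13
// -> padded_bytes = 64 * ceil(13 / 64) = 64, i.e. a 100-row null mask is
// allocated as a single 64-byte slab.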
// Computes number of *actual* bitmask_type elements needed
size_type num_bitmask_words(size_type number_of_bits)
{
return cudf::util::div_rounding_up_safe<size_type>(number_of_bits,
detail::size_in_bits<bitmask_type>());
}
namespace detail {
// Create a device_buffer for a null mask
rmm::device_buffer create_null_mask(size_type size,
mask_state state,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
size_type mask_size{0};
if (state != mask_state::UNALLOCATED) { mask_size = bitmask_allocation_size_bytes(size); }
rmm::device_buffer mask(mask_size, stream, mr);
if (state != mask_state::UNINITIALIZED) {
uint8_t fill_value = (state == mask_state::ALL_VALID) ? 0xff : 0x00;
CUDA_TRY(hipMemsetAsync(
static_cast<bitmask_type*>(mask.data()), fill_value, mask_size, stream.value()));
}
return mask;
}
namespace {
__global__ void set_null_mask_kernel(bitmask_type* __restrict__ destination,
size_type begin_bit,
size_type end_bit,
bool valid,
size_type number_of_mask_words)
{
auto x = destination + word_index(begin_bit);
const auto last_word = word_index(end_bit) - word_index(begin_bit);
bitmask_type fill_value = (valid == true) ? 0xffffffff : 0x00;
for (size_type destination_word_index = threadIdx.x + blockIdx.x * blockDim.x;
destination_word_index < number_of_mask_words;
destination_word_index += blockDim.x * gridDim.x) {
if (destination_word_index == 0 || destination_word_index == last_word) {
bitmask_type mask = ~bitmask_type{0};
if (destination_word_index == 0) {
mask = ~(set_least_significant_bits(intra_word_index(begin_bit)));
}
if (destination_word_index == last_word) {
mask = mask & set_least_significant_bits(intra_word_index(end_bit));
}
x[destination_word_index] =
(valid == true) ? x[destination_word_index] | mask : x[destination_word_index] & ~mask;
} else {
x[destination_word_index] = fill_value;
}
}
}
} // namespace
// Set pre-allocated null mask of given bit range [begin_bit, end_bit) to valid, if valid==true,
// or null, otherwise;
void set_null_mask(bitmask_type* bitmask,
size_type begin_bit,
size_type end_bit,
bool valid,
rmm::cuda_stream_view stream)
{
CUDF_FUNC_RANGE();
CUDF_EXPECTS(begin_bit >= 0, "Invalid range.");
CUDF_EXPECTS(begin_bit <= end_bit, "Invalid bit range.");
if (begin_bit == end_bit) return;
if (bitmask != nullptr) {
auto number_of_mask_words =
num_bitmask_words(end_bit) - begin_bit / detail::size_in_bits<bitmask_type>();
cudf::detail::grid_1d config(number_of_mask_words, 256);
hipLaunchKernelGGL(( set_null_mask_kernel), dim3(config.num_blocks), dim3(config.num_threads_per_block), 0, stream.value(),
static_cast<bitmask_type*>(bitmask), begin_bit, end_bit, valid, number_of_mask_words);
CHECK_CUDA(stream.value());
}
}
} // namespace detail
// Create a device_buffer for a null mask
rmm::device_buffer create_null_mask(size_type size,
mask_state state,
rmm::mr::device_memory_resource* mr)
{
return detail::create_null_mask(size, state, rmm::cuda_stream_default, mr);
}
// Set pre-allocated null mask of given bit range [begin_bit, end_bit) to valid, if valid==true,
// or null, otherwise;
void set_null_mask(bitmask_type* bitmask, size_type begin_bit, size_type end_bit, bool valid)
{
return detail::set_null_mask(bitmask, begin_bit, end_bit, valid);
}
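// Usage sketch (illustrative only; `mask` is a hypothetical local and the
// memory-resource argument is assumed to be defaulted in the header): create an
// all-valid mask for 1000 rows, then mark rows [100, 200) as null with the
// public overloads defined above.
//
//   rmm::device_buffer mask = cudf::create_null_mask(1000, cudf::mask_state::ALL_VALID);
//   cudf::set_null_mask(static_cast<cudf::bitmask_type*>(mask.data()),
//                       100, 200, /*valid=*/false);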
namespace {
/**
* @brief Counts the number of non-zero bits in a bitmask in the range
* `[first_bit_index, last_bit_index]`.
*
* Expects `0 <= first_bit_index <= last_bit_index`.
*
* @param[in] bitmask The bitmask whose non-zero bits will be counted.
* @param[in] first_bit_index The index (inclusive) of the first bit to count
* @param[in] last_bit_index The index (inclusive) of the last bit to count
* @param[out] global_count The number of non-zero bits in the specified range
*/
template <size_type block_size>
__global__ void count_set_bits_kernel(bitmask_type const* bitmask,
size_type first_bit_index,
size_type last_bit_index,
size_type* global_count)
{
constexpr auto const word_size{detail::size_in_bits<bitmask_type>()};
auto const first_word_index{word_index(first_bit_index)};
auto const last_word_index{word_index(last_bit_index)};
auto const tid = threadIdx.x + blockIdx.x * blockDim.x;
auto thread_word_index = tid + first_word_index;
size_type thread_count{0};
// First, just count the bits in all words
while (thread_word_index <= last_word_index) {
thread_count += __popc(bitmask[thread_word_index]);
thread_word_index += blockDim.x * gridDim.x;
}
// Subtract any slack bits counted from the first and last word
// Two threads handle this -- one for first word, one for last
if (tid < 2) {
bool const first{tid == 0};
bool const last{not first};
size_type bit_index = (first) ? first_bit_index : last_bit_index;
size_type word_index = (first) ? first_word_index : last_word_index;
size_type num_slack_bits = bit_index % word_size;
if (last) { num_slack_bits = word_size - num_slack_bits - 1; }
if (num_slack_bits > 0) {
bitmask_type word = bitmask[word_index];
auto slack_mask = (first) ? set_least_significant_bits(num_slack_bits)
: set_most_significant_bits(num_slack_bits);
thread_count -= __popc(word & slack_mask);
}
}
using BlockReduce = hipcub::BlockReduce<size_type, block_size>;
__shared__ typename BlockReduce::TempStorage temp_storage;
size_type block_count{BlockReduce(temp_storage).Sum(thread_count)};
if (threadIdx.x == 0) { atomicAdd(global_count, block_count); }
}
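// Worked example (with 32-bit bitmask words): counting the range
// [first_bit_index = 5, last_bit_index = 70] touches words 0..2. Every thread
// popc-sums whole words; then thread 0 subtracts the 5 low slack bits of word 0
// (5 % 32 = 5) and thread 1 subtracts the 32 - (70 % 32) - 1 = 25 high slack
// bits of word 2, leaving exactly bits 5..70 counted.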
/**
* @brief Copies the bits starting at the specified offset from a source
* bitmask into the destination bitmask.
*
* Bit `i` in `destination` will be equal to bit `i + offset` from `source`.
*
* @param destination The mask to copy into
* @param source The mask to copy from
* @param source_begin_bit The offset into `source` from which to begin the copy
* @param source_end_bit The offset into `source` till which copying is done
* @param number_of_mask_words The number of `cudf::bitmask_type` words to copy
*/
// TODO: Also make binops test that uses offset in column_view
__global__ void copy_offset_bitmask(bitmask_type* __restrict__ destination,
bitmask_type const* __restrict__ source,
size_type source_begin_bit,
size_type source_end_bit,
size_type number_of_mask_words)
{
for (size_type destination_word_index = threadIdx.x + blockIdx.x * blockDim.x;
destination_word_index < number_of_mask_words;
destination_word_index += blockDim.x * gridDim.x) {
destination[destination_word_index] = detail::get_mask_offset_word(
source, destination_word_index, source_begin_bit, source_end_bit);
}
}
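// Worked example (conceptual): with source_begin_bit = 3, bit i of destination
// equals bit i + 3 of source, so destination word 0 packs source bits 3..34 --
// roughly (source[0] >> 3) | (source[1] << 29) for 32-bit words -- which is what
// detail::get_mask_offset_word assembles for each destination word.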
} // namespace
namespace detail {
// Create a bitmask from a specific range
rmm::device_buffer copy_bitmask(bitmask_type const* mask,
size_type begin_bit,
size_type end_bit,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
CUDF_EXPECTS(begin_bit >= 0, "Invalid range.");
CUDF_EXPECTS(begin_bit <= end_bit, "Invalid bit range.");
rmm::device_buffer dest_mask{};
auto num_bytes = bitmask_allocation_size_bytes(end_bit - begin_bit);
if ((mask == nullptr) || (num_bytes == 0)) { return dest_mask; }
if (begin_bit == 0) {
dest_mask = rmm::device_buffer{static_cast<void const*>(mask), num_bytes, stream, mr};
} else {
auto number_of_mask_words = num_bitmask_words(end_bit - begin_bit);
dest_mask = rmm::device_buffer{num_bytes, stream, mr};
cudf::detail::grid_1d config(number_of_mask_words, 256);
hipLaunchKernelGGL(( copy_offset_bitmask), dim3(config.num_blocks), dim3(config.num_threads_per_block), 0, stream.value(),
static_cast<bitmask_type*>(dest_mask.data()), mask, begin_bit, end_bit, number_of_mask_words);
CHECK_CUDA(stream.value());
}
return dest_mask;
}
// Create a bitmask from a column view
rmm::device_buffer copy_bitmask(column_view const& view,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
rmm::device_buffer null_mask{0, stream, mr};
if (view.nullable()) {
null_mask =
copy_bitmask(view.null_mask(), view.offset(), view.offset() + view.size(), stream, mr);
}
return null_mask;
}
cudf::size_type count_set_bits(bitmask_type const* bitmask,
size_type start,
size_type stop,
rmm::cuda_stream_view stream = rmm::cuda_stream_default)
{
if (nullptr == bitmask) { return 0; }
CUDF_EXPECTS(start >= 0, "Invalid range.");
CUDF_EXPECTS(start <= stop, "Invalid bit range.");
std::size_t num_bits_to_count = stop - start;
if (num_bits_to_count == 0) { return 0; }
auto num_words = num_bitmask_words(num_bits_to_count);
constexpr size_type block_size{256};
cudf::detail::grid_1d grid(num_words, block_size);
rmm::device_scalar<size_type> non_zero_count(0, stream);
hipLaunchKernelGGL(( count_set_bits_kernel<block_size>)
, dim3(grid.num_blocks), dim3(grid.num_threads_per_block), 0, stream.value(),
bitmask, start, stop - 1, non_zero_count.data());
return non_zero_count.value(stream);
}
cudf::size_type count_unset_bits(bitmask_type const* bitmask,
size_type start,
size_type stop,
rmm::cuda_stream_view stream = rmm::cuda_stream_default)
{
if (nullptr == bitmask) { return 0; }
auto num_bits = (stop - start);
return (num_bits - detail::count_set_bits(bitmask, start, stop, stream));
}
// Inplace Bitwise AND of the masks
void inplace_bitmask_and(device_span<bitmask_type> dest_mask,
host_span<bitmask_type const*> masks,
host_span<size_type const> begin_bits,
size_type mask_size,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
inplace_bitmask_binop(
[] __device__(bitmask_type left, bitmask_type right) { return left & right; },
dest_mask,
masks,
begin_bits,
mask_size,
stream,
mr);
}
// Bitwise AND of the masks
std::pair<rmm::device_buffer, size_type> bitmask_and(host_span<bitmask_type const*> masks,
host_span<size_type const> begin_bits,
size_type mask_size,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return bitmask_binop(
[] __device__(bitmask_type left, bitmask_type right) { return left & right; },
masks,
begin_bits,
mask_size,
stream,
mr);
}
// Returns the bitwise AND of the null masks of all columns in the table view
std::pair<rmm::device_buffer, size_type> bitmask_and(table_view const& view,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
rmm::device_buffer null_mask{0, stream, mr};
if (view.num_rows() == 0 or view.num_columns() == 0) {
return std::make_pair(std::move(null_mask), 0);
}
std::vector<bitmask_type const*> masks;
std::vector<size_type> offsets;
for (auto&& col : view) {
if (col.nullable()) {
masks.push_back(col.null_mask());
offsets.push_back(col.offset());
}
}
if (masks.size() > 0) {
return cudf::detail::bitmask_binop(
[] __device__(bitmask_type left, bitmask_type right) { return left & right; },
masks,
offsets,
view.num_rows(),
stream,
mr);
}
return std::make_pair(std::move(null_mask), 0);
}
// Returns the bitwise OR of the null masks of all columns in the table view
std::pair<rmm::device_buffer, size_type> bitmask_or(table_view const& view,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
rmm::device_buffer null_mask{0, stream, mr};
if (view.num_rows() == 0 or view.num_columns() == 0) {
return std::make_pair(std::move(null_mask), 0);
}
std::vector<bitmask_type const*> masks;
std::vector<size_type> offsets;
for (auto&& col : view) {
if (col.nullable()) {
masks.push_back(col.null_mask());
offsets.push_back(col.offset());
}
}
if (static_cast<size_type>(masks.size()) == view.num_columns()) {
return cudf::detail::bitmask_binop(
[] __device__(bitmask_type left, bitmask_type right) { return left | right; },
masks,
offsets,
view.num_rows(),
stream,
mr);
}
return std::make_pair(std::move(null_mask), 0);
}
/**
* @copydoc cudf::segmented_count_set_bits
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::vector<size_type> segmented_count_set_bits(bitmask_type const* bitmask,
host_span<size_type const> indices,
rmm::cuda_stream_view stream)
{
CUDF_FUNC_RANGE();
return detail::segmented_count_set_bits(bitmask, indices.begin(), indices.end(), stream);
}
// Count zero bits in the specified ranges
std::vector<size_type> segmented_count_unset_bits(bitmask_type const* bitmask,
host_span<size_type const> indices,
rmm::cuda_stream_view stream)
{
CUDF_FUNC_RANGE();
return detail::segmented_count_unset_bits(bitmask, indices.begin(), indices.end(), stream);
}
} // namespace detail
// Count non-zero bits in the specified range
cudf::size_type count_set_bits(bitmask_type const* bitmask, size_type start, size_type stop)
{
CUDF_FUNC_RANGE();
return detail::count_set_bits(bitmask, start, stop);
}
// Count zero bits in the specified range
cudf::size_type count_unset_bits(bitmask_type const* bitmask, size_type start, size_type stop)
{
CUDF_FUNC_RANGE();
return detail::count_unset_bits(bitmask, start, stop);
}
// Count non-zero bits in the specified ranges
std::vector<size_type> segmented_count_set_bits(bitmask_type const* bitmask,
host_span<size_type const> indices)
{
CUDF_FUNC_RANGE();
return detail::segmented_count_set_bits(bitmask, indices, rmm::cuda_stream_default);
}
// Count zero bits in the specified ranges
std::vector<size_type> segmented_count_unset_bits(bitmask_type const* bitmask,
host_span<size_type const> indices)
{
CUDF_FUNC_RANGE();
return detail::segmented_count_unset_bits(bitmask, indices, rmm::cuda_stream_default);
}
// Create a bitmask from a specific range
rmm::device_buffer copy_bitmask(bitmask_type const* mask,
size_type begin_bit,
size_type end_bit,
rmm::mr::device_memory_resource* mr)
{
return detail::copy_bitmask(mask, begin_bit, end_bit, rmm::cuda_stream_default, mr);
}
// Create a bitmask from a column view
rmm::device_buffer copy_bitmask(column_view const& view, rmm::mr::device_memory_resource* mr)
{
return detail::copy_bitmask(view, rmm::cuda_stream_default, mr);
}
std::pair<rmm::device_buffer, size_type> bitmask_and(table_view const& view,
rmm::mr::device_memory_resource* mr)
{
return detail::bitmask_and(view, rmm::cuda_stream_default, mr);
}
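// Usage sketch (illustrative only; `tbl` is a hypothetical cudf::table_view):
// AND the null masks of all columns of a table into one validity mask plus its
// null count, using the overload defined above.
//
//   auto [merged_mask, null_count] = cudf::bitmask_and(tbl);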
std::pair<rmm::device_buffer, size_type> bitmask_or(table_view const& view,
rmm::mr::device_memory_resource* mr)
{
return detail::bitmask_or(view, rmm::cuda_stream_default, mr);
}
} // namespace cudf
| 79a2a8bb1a5a602660dc8266374279bfd34b451b.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/detail/null_mask.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/integer_utils.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <thrust/binary_search.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/extrema.h>
#include <cub/cub.cuh>
#include <algorithm>
#include <numeric>
#include <type_traits>
namespace cudf {
size_type state_null_count(mask_state state, size_type size)
{
switch (state) {
case mask_state::UNALLOCATED: return 0;
case mask_state::UNINITIALIZED: return UNKNOWN_NULL_COUNT;
case mask_state::ALL_NULL: return size;
case mask_state::ALL_VALID: return 0;
default: CUDF_FAIL("Invalid null mask state.");
}
}
// Computes required allocation size of a bitmask
std::size_t bitmask_allocation_size_bytes(size_type number_of_bits, std::size_t padding_boundary)
{
CUDF_EXPECTS(padding_boundary > 0, "Invalid padding boundary");
auto necessary_bytes = cudf::util::div_rounding_up_safe<size_type>(number_of_bits, CHAR_BIT);
auto padded_bytes = padding_boundary * cudf::util::div_rounding_up_safe<size_type>(
necessary_bytes, padding_boundary);
return padded_bytes;
}
// Computes number of *actual* bitmask_type elements needed
size_type num_bitmask_words(size_type number_of_bits)
{
return cudf::util::div_rounding_up_safe<size_type>(number_of_bits,
detail::size_in_bits<bitmask_type>());
}
namespace detail {
// Create a device_buffer for a null mask
rmm::device_buffer create_null_mask(size_type size,
mask_state state,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
size_type mask_size{0};
if (state != mask_state::UNALLOCATED) { mask_size = bitmask_allocation_size_bytes(size); }
rmm::device_buffer mask(mask_size, stream, mr);
if (state != mask_state::UNINITIALIZED) {
uint8_t fill_value = (state == mask_state::ALL_VALID) ? 0xff : 0x00;
CUDA_TRY(cudaMemsetAsync(
static_cast<bitmask_type*>(mask.data()), fill_value, mask_size, stream.value()));
}
return mask;
}
namespace {
__global__ void set_null_mask_kernel(bitmask_type* __restrict__ destination,
size_type begin_bit,
size_type end_bit,
bool valid,
size_type number_of_mask_words)
{
auto x = destination + word_index(begin_bit);
const auto last_word = word_index(end_bit) - word_index(begin_bit);
bitmask_type fill_value = (valid == true) ? 0xffffffff : 0x00;
for (size_type destination_word_index = threadIdx.x + blockIdx.x * blockDim.x;
destination_word_index < number_of_mask_words;
destination_word_index += blockDim.x * gridDim.x) {
if (destination_word_index == 0 || destination_word_index == last_word) {
bitmask_type mask = ~bitmask_type{0};
if (destination_word_index == 0) {
mask = ~(set_least_significant_bits(intra_word_index(begin_bit)));
}
if (destination_word_index == last_word) {
mask = mask & set_least_significant_bits(intra_word_index(end_bit));
}
x[destination_word_index] =
(valid == true) ? x[destination_word_index] | mask : x[destination_word_index] & ~mask;
} else {
x[destination_word_index] = fill_value;
}
}
}
} // namespace
// Set pre-allocated null mask of given bit range [begin_bit, end_bit) to valid, if valid==true,
// or null, otherwise;
void set_null_mask(bitmask_type* bitmask,
size_type begin_bit,
size_type end_bit,
bool valid,
rmm::cuda_stream_view stream)
{
CUDF_FUNC_RANGE();
CUDF_EXPECTS(begin_bit >= 0, "Invalid range.");
CUDF_EXPECTS(begin_bit <= end_bit, "Invalid bit range.");
if (begin_bit == end_bit) return;
if (bitmask != nullptr) {
auto number_of_mask_words =
num_bitmask_words(end_bit) - begin_bit / detail::size_in_bits<bitmask_type>();
cudf::detail::grid_1d config(number_of_mask_words, 256);
set_null_mask_kernel<<<config.num_blocks, config.num_threads_per_block, 0, stream.value()>>>(
static_cast<bitmask_type*>(bitmask), begin_bit, end_bit, valid, number_of_mask_words);
CHECK_CUDA(stream.value());
}
}
} // namespace detail
// Create a device_buffer for a null mask
rmm::device_buffer create_null_mask(size_type size,
mask_state state,
rmm::mr::device_memory_resource* mr)
{
return detail::create_null_mask(size, state, rmm::cuda_stream_default, mr);
}
// Set pre-allocated null mask of given bit range [begin_bit, end_bit) to valid, if valid==true,
// or null, otherwise;
void set_null_mask(bitmask_type* bitmask, size_type begin_bit, size_type end_bit, bool valid)
{
return detail::set_null_mask(bitmask, begin_bit, end_bit, valid);
}
namespace {
/**
* @brief Counts the number of non-zero bits in a bitmask in the range
* `[first_bit_index, last_bit_index]`.
*
* Expects `0 <= first_bit_index <= last_bit_index`.
*
* @param[in] bitmask The bitmask whose non-zero bits will be counted.
* @param[in] first_bit_index The index (inclusive) of the first bit to count
* @param[in] last_bit_index The index (inclusive) of the last bit to count
* @param[out] global_count The number of non-zero bits in the specified range
*/
template <size_type block_size>
__global__ void count_set_bits_kernel(bitmask_type const* bitmask,
size_type first_bit_index,
size_type last_bit_index,
size_type* global_count)
{
constexpr auto const word_size{detail::size_in_bits<bitmask_type>()};
auto const first_word_index{word_index(first_bit_index)};
auto const last_word_index{word_index(last_bit_index)};
auto const tid = threadIdx.x + blockIdx.x * blockDim.x;
auto thread_word_index = tid + first_word_index;
size_type thread_count{0};
// First, just count the bits in all words
while (thread_word_index <= last_word_index) {
thread_count += __popc(bitmask[thread_word_index]);
thread_word_index += blockDim.x * gridDim.x;
}
// Subtract any slack bits counted from the first and last word
// Two threads handle this -- one for first word, one for last
if (tid < 2) {
bool const first{tid == 0};
bool const last{not first};
size_type bit_index = (first) ? first_bit_index : last_bit_index;
size_type word_index = (first) ? first_word_index : last_word_index;
size_type num_slack_bits = bit_index % word_size;
if (last) { num_slack_bits = word_size - num_slack_bits - 1; }
if (num_slack_bits > 0) {
bitmask_type word = bitmask[word_index];
auto slack_mask = (first) ? set_least_significant_bits(num_slack_bits)
: set_most_significant_bits(num_slack_bits);
thread_count -= __popc(word & slack_mask);
}
}
using BlockReduce = cub::BlockReduce<size_type, block_size>;
__shared__ typename BlockReduce::TempStorage temp_storage;
size_type block_count{BlockReduce(temp_storage).Sum(thread_count)};
if (threadIdx.x == 0) { atomicAdd(global_count, block_count); }
}
/**
* @brief Copies the bits starting at the specified offset from a source
* bitmask into the destination bitmask.
*
* Bit `i` in `destination` will be equal to bit `i + offset` from `source`.
*
* @param destination The mask to copy into
* @param source The mask to copy from
* @param source_begin_bit The offset into `source` from which to begin the copy
* @param source_end_bit The offset into `source` till which copying is done
* @param number_of_mask_words The number of `cudf::bitmask_type` words to copy
*/
// TODO: Also make binops test that uses offset in column_view
__global__ void copy_offset_bitmask(bitmask_type* __restrict__ destination,
bitmask_type const* __restrict__ source,
size_type source_begin_bit,
size_type source_end_bit,
size_type number_of_mask_words)
{
for (size_type destination_word_index = threadIdx.x + blockIdx.x * blockDim.x;
destination_word_index < number_of_mask_words;
destination_word_index += blockDim.x * gridDim.x) {
destination[destination_word_index] = detail::get_mask_offset_word(
source, destination_word_index, source_begin_bit, source_end_bit);
}
}
} // namespace
namespace detail {
// Create a bitmask from a specific range
rmm::device_buffer copy_bitmask(bitmask_type const* mask,
size_type begin_bit,
size_type end_bit,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
CUDF_EXPECTS(begin_bit >= 0, "Invalid range.");
CUDF_EXPECTS(begin_bit <= end_bit, "Invalid bit range.");
rmm::device_buffer dest_mask{};
auto num_bytes = bitmask_allocation_size_bytes(end_bit - begin_bit);
if ((mask == nullptr) || (num_bytes == 0)) { return dest_mask; }
if (begin_bit == 0) {
dest_mask = rmm::device_buffer{static_cast<void const*>(mask), num_bytes, stream, mr};
} else {
auto number_of_mask_words = num_bitmask_words(end_bit - begin_bit);
dest_mask = rmm::device_buffer{num_bytes, stream, mr};
cudf::detail::grid_1d config(number_of_mask_words, 256);
copy_offset_bitmask<<<config.num_blocks, config.num_threads_per_block, 0, stream.value()>>>(
static_cast<bitmask_type*>(dest_mask.data()), mask, begin_bit, end_bit, number_of_mask_words);
CHECK_CUDA(stream.value());
}
return dest_mask;
}
// Create a bitmask from a column view
rmm::device_buffer copy_bitmask(column_view const& view,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
rmm::device_buffer null_mask{0, stream, mr};
if (view.nullable()) {
null_mask =
copy_bitmask(view.null_mask(), view.offset(), view.offset() + view.size(), stream, mr);
}
return null_mask;
}
cudf::size_type count_set_bits(bitmask_type const* bitmask,
size_type start,
size_type stop,
rmm::cuda_stream_view stream = rmm::cuda_stream_default)
{
if (nullptr == bitmask) { return 0; }
CUDF_EXPECTS(start >= 0, "Invalid range.");
CUDF_EXPECTS(start <= stop, "Invalid bit range.");
std::size_t num_bits_to_count = stop - start;
if (num_bits_to_count == 0) { return 0; }
auto num_words = num_bitmask_words(num_bits_to_count);
constexpr size_type block_size{256};
cudf::detail::grid_1d grid(num_words, block_size);
rmm::device_scalar<size_type> non_zero_count(0, stream);
count_set_bits_kernel<block_size>
<<<grid.num_blocks, grid.num_threads_per_block, 0, stream.value()>>>(
bitmask, start, stop - 1, non_zero_count.data());
return non_zero_count.value(stream);
}
cudf::size_type count_unset_bits(bitmask_type const* bitmask,
size_type start,
size_type stop,
rmm::cuda_stream_view stream = rmm::cuda_stream_default)
{
if (nullptr == bitmask) { return 0; }
auto num_bits = (stop - start);
return (num_bits - detail::count_set_bits(bitmask, start, stop, stream));
}
// Inplace Bitwise AND of the masks
void inplace_bitmask_and(device_span<bitmask_type> dest_mask,
host_span<bitmask_type const*> masks,
host_span<size_type const> begin_bits,
size_type mask_size,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
inplace_bitmask_binop(
[] __device__(bitmask_type left, bitmask_type right) { return left & right; },
dest_mask,
masks,
begin_bits,
mask_size,
stream,
mr);
}
// Bitwise AND of the masks
std::pair<rmm::device_buffer, size_type> bitmask_and(host_span<bitmask_type const*> masks,
host_span<size_type const> begin_bits,
size_type mask_size,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return bitmask_binop(
[] __device__(bitmask_type left, bitmask_type right) { return left & right; },
masks,
begin_bits,
mask_size,
stream,
mr);
}
// Returns the bitwise AND of the null masks of all columns in the table view
std::pair<rmm::device_buffer, size_type> bitmask_and(table_view const& view,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
rmm::device_buffer null_mask{0, stream, mr};
if (view.num_rows() == 0 or view.num_columns() == 0) {
return std::make_pair(std::move(null_mask), 0);
}
std::vector<bitmask_type const*> masks;
std::vector<size_type> offsets;
for (auto&& col : view) {
if (col.nullable()) {
masks.push_back(col.null_mask());
offsets.push_back(col.offset());
}
}
if (masks.size() > 0) {
return cudf::detail::bitmask_binop(
[] __device__(bitmask_type left, bitmask_type right) { return left & right; },
masks,
offsets,
view.num_rows(),
stream,
mr);
}
return std::make_pair(std::move(null_mask), 0);
}
// Returns the bitwise OR of the null masks of all columns in the table view
std::pair<rmm::device_buffer, size_type> bitmask_or(table_view const& view,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
rmm::device_buffer null_mask{0, stream, mr};
if (view.num_rows() == 0 or view.num_columns() == 0) {
return std::make_pair(std::move(null_mask), 0);
}
std::vector<bitmask_type const*> masks;
std::vector<size_type> offsets;
for (auto&& col : view) {
if (col.nullable()) {
masks.push_back(col.null_mask());
offsets.push_back(col.offset());
}
}
if (static_cast<size_type>(masks.size()) == view.num_columns()) {
return cudf::detail::bitmask_binop(
[] __device__(bitmask_type left, bitmask_type right) { return left | right; },
masks,
offsets,
view.num_rows(),
stream,
mr);
}
return std::make_pair(std::move(null_mask), 0);
}
/**
* @copydoc cudf::segmented_count_set_bits
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::vector<size_type> segmented_count_set_bits(bitmask_type const* bitmask,
host_span<size_type const> indices,
rmm::cuda_stream_view stream)
{
CUDF_FUNC_RANGE();
return detail::segmented_count_set_bits(bitmask, indices.begin(), indices.end(), stream);
}
// Count zero bits in the specified ranges
std::vector<size_type> segmented_count_unset_bits(bitmask_type const* bitmask,
host_span<size_type const> indices,
rmm::cuda_stream_view stream)
{
CUDF_FUNC_RANGE();
return detail::segmented_count_unset_bits(bitmask, indices.begin(), indices.end(), stream);
}
} // namespace detail
// Count non-zero bits in the specified range
cudf::size_type count_set_bits(bitmask_type const* bitmask, size_type start, size_type stop)
{
CUDF_FUNC_RANGE();
return detail::count_set_bits(bitmask, start, stop);
}
// Count zero bits in the specified range
cudf::size_type count_unset_bits(bitmask_type const* bitmask, size_type start, size_type stop)
{
CUDF_FUNC_RANGE();
return detail::count_unset_bits(bitmask, start, stop);
}
// Count non-zero bits in the specified ranges
std::vector<size_type> segmented_count_set_bits(bitmask_type const* bitmask,
host_span<size_type const> indices)
{
CUDF_FUNC_RANGE();
return detail::segmented_count_set_bits(bitmask, indices, rmm::cuda_stream_default);
}
// Count zero bits in the specified ranges
std::vector<size_type> segmented_count_unset_bits(bitmask_type const* bitmask,
host_span<size_type const> indices)
{
CUDF_FUNC_RANGE();
return detail::segmented_count_unset_bits(bitmask, indices, rmm::cuda_stream_default);
}
// Create a bitmask from a specific range
rmm::device_buffer copy_bitmask(bitmask_type const* mask,
size_type begin_bit,
size_type end_bit,
rmm::mr::device_memory_resource* mr)
{
return detail::copy_bitmask(mask, begin_bit, end_bit, rmm::cuda_stream_default, mr);
}
// Create a bitmask from a column view
rmm::device_buffer copy_bitmask(column_view const& view, rmm::mr::device_memory_resource* mr)
{
return detail::copy_bitmask(view, rmm::cuda_stream_default, mr);
}
std::pair<rmm::device_buffer, size_type> bitmask_and(table_view const& view,
rmm::mr::device_memory_resource* mr)
{
return detail::bitmask_and(view, rmm::cuda_stream_default, mr);
}
std::pair<rmm::device_buffer, size_type> bitmask_or(table_view const& view,
rmm::mr::device_memory_resource* mr)
{
return detail::bitmask_or(view, rmm::cuda_stream_default, mr);
}
} // namespace cudf
|
c76be60774702a712a80c1a9729ebee8a3f79606.hip | // !!! This is a file automatically generated by hipify!!!
//#include <hip/hip_runtime.h>
//#include "device_launch_parameters.h"
//#include <helper_cuda.h>
////#include "sm_20_atomic_functions.h"
//
//#include <thrust/host_vector.h>
//#include <thrust/device_vector.h>
//#include <thrust/count.h>
//#include <stdio.h>
//
//#define REAL float
////#define USE_CONST_MEM
//#define HANDLE_ERROR checkCudaErrors
//
//float elapsedTime;
//#define START_GPU {\
//elapsedTime = 0.0;\
//hipEvent_t start, stop;\
//checkCudaErrors(hipEventCreate(&start)); \
//checkCudaErrors(hipEventCreate(&stop));\
//checkCudaErrors(hipEventRecord(start, 0));\
//
//#define END_GPU \
//checkCudaErrors(hipEventRecord(stop, 0));\
//checkCudaErrors(hipEventSynchronize(stop));\
//checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop)); \
//printf("GPU Time used: %3.2f ms\n", elapsedTime);\
//checkCudaErrors(hipEventDestroy(start));\
//checkCudaErrors(hipEventDestroy(stop));}
//
//#define START_CPU {\
//double start = omp_get_wtime();
//
//#define END_CPU \
//double end = omp_get_wtime();\
//double duration = end - start;\
//printf("CPU Time used: %3.1f ms\n", duration * 1000);}
//
////############################################################################
//#ifdef _WIN64
//#define GLUT_NO_LIB_PRAGMA
//#pragma comment (lib, "opengl32.lib")
//#pragma comment (lib, "glut64.lib")
//#endif //_WIN64
//
///* On Windows, include the local copy of glut.h and glext.h */
//#include "GL/glut.h"
//#include "GL/glext.h"
//#define GET_PROC_ADDRESS( str ) wglGetProcAddress( str )
//
////----------------------bitmap------------------------------
//struct CPUAnimBitmap {
// //
// unsigned char *pixels;
// int width, height;
// //
// void *dataBlock;
//
// //
// void(*fAnim)(void*, int);
// void(*animExit)(void*);
// void(*clickDrag)(void*, int, int, int, int);
// int dragStartX, dragStartY;
//
// CPUAnimBitmap(int w, int h, void *d = NULL) {
// width = w;
// height = h;
// //r g b alph
// pixels = new unsigned char[width * height * 4];
// dataBlock = d;
// clickDrag = NULL;
// }
//
// ~CPUAnimBitmap() {
// delete[] pixels;
// }
//
// unsigned char* get_ptr(void) const { return pixels; }
// long image_size(void) const { return width * height * 4; }
//
// void click_drag(void(*f)(void*, int, int, int, int)) {
// clickDrag = f;
// }
//
// //
// //input: fGPUbitmap
// // ecuda
// void anim_and_exit(void(*f)(void*, int), void(*e)(void*)) {
// CPUAnimBitmap** bitmap = get_bitmap_ptr();
// *bitmap = this;
// fAnim = f;
// animExit = e;
// // a bug in the Windows GLUT implementation prevents us from
// // passing zero arguments to glutInit()
// int c = 1;
// char* dummy = "";
// glutInit(&c, &dummy);
// glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
// glutInitWindowSize(width, height);
// glutCreateWindow("bitmap");
// glutKeyboardFunc(Key);
// glutDisplayFunc(Draw);
//
// if (clickDrag != NULL)
// glutMouseFunc(mouse_func);
//
// //glutIdleFunc
// //GLUT
// //idle function
// glutIdleFunc(idle_func);
// glutMainLoop();
// }
//
// // static method used for glut callbacks
// static CPUAnimBitmap** get_bitmap_ptr(void) {
// static CPUAnimBitmap* gBitmap;
// return &gBitmap;
// }
//
// // static method used for glut callbacks
// static void mouse_func(int button, int state,
// int mx, int my) {
// if (button == GLUT_LEFT_BUTTON) {
// CPUAnimBitmap* bitmap = *(get_bitmap_ptr());
// if (state == GLUT_DOWN) {
// bitmap->dragStartX = mx;
// bitmap->dragStartY = my;
// }
// else if (state == GLUT_UP) {
// bitmap->clickDrag(bitmap->dataBlock,
// bitmap->dragStartX,
// bitmap->dragStartY,
// mx, my);
// }
// }
// }
//
// // static method used for glut callbacks
// static void idle_func(void) {
// static int ticks = 1;
// CPUAnimBitmap* bitmap = *(get_bitmap_ptr());
// bitmap->fAnim(bitmap->dataBlock, ticks++);
// glutPostRedisplay();
// }
//
// // static method used for glut callbacks
// static void Key(unsigned char key, int x, int y) {
// switch (key) {
// case 27:
// CPUAnimBitmap* bitmap = *(get_bitmap_ptr());
// bitmap->animExit(bitmap->dataBlock);
// //delete bitmap;
// exit(0);
// }
// }
//
// // static method used for glut callbacks
// static void Draw(void) {
// CPUAnimBitmap* bitmap = *(get_bitmap_ptr());
// glClearColor(0.0, 0.0, 0.0, 1.0);
// glClear(GL_COLOR_BUFFER_BIT);
// glDrawPixels(bitmap->width, bitmap->height, GL_RGBA, GL_UNSIGNED_BYTE, bitmap->pixels);
// glutSwapBuffers();
// }
//};
//
////
//#define DIM 1024
//#define rnd( x ) (x * rand() / RAND_MAX)
//#define INF 2e10f
//
////-----------------------------------------------------------
//struct Sphere {
// REAL r, b, g;
// REAL radius;
// //
// REAL x, y, z;
// //
// REAL dx, dy, dz;
// bool isCrash;
// // ox,oy
// //
// //
// __device__ REAL hit(REAL ox, REAL oy, REAL *n) {
// REAL dx = ox - x;
// REAL dy = oy - y;
// //
// if (dx*dx + dy*dy < radius*radius) {
// REAL dz = sqrtf(radius*radius - dx*dx - dy*dy);
// *n = dz / sqrtf(radius * radius);
// return dz + z;
// }
// //
// return -INF;
// }
//};
//
////----------------------
//#define SPHERES 2000
//
//int *d_crashnum, *h_crashnum;
//
//#ifdef USE_CONST_MEM
//__constant__ Sphere d_spheres[SPHERES];
//#else
//Sphere *d_spheres;
//#endif
//
////------------------------cuda kernel --------------------------
//
//#define STEP_SIZE REAL(20.0)
//
////
//__global__ void crash(Sphere *s, int num_sphere, int*d_crashnum)
//{
// //
// int s1 = threadIdx.x + blockIdx.x * blockDim.x;
// int s2 = threadIdx.y + blockIdx.y * blockDim.y;
//
// //x,y,
// if (s1 < num_sphere && s2 < num_sphere && s1 < s2)
// {
// REAL dx = s[s1].x - s[s2].x;
// REAL dy = s[s1].y - s[s2].y;
// REAL dz = s[s1].z - s[s2].z;
// REAL totalRadius = s[s1].radius + s[s2].radius;
// //
// if (dx*dx + dy*dy + dz*dz <= totalRadius * totalRadius)
// {
// s[s1].isCrash = true;
// s[s2].isCrash = true;
// atomicAdd(d_crashnum, 1);
// }
// }
//}
//
//__global__ void crashWithCountList(Sphere *s, int num_sphere, int * d_crashNumList)
//{
// //
// int s1 = threadIdx.x + blockIdx.x * blockDim.x;
// int s2 = threadIdx.y + blockIdx.y * blockDim.y;
//
// //x,y,
// if (s1 < num_sphere && s2 < num_sphere && s1 < s2)
// {
// REAL dx = s[s1].x - s[s2].x;
// REAL dy = s[s1].y - s[s2].y;
// REAL dz = s[s1].z - s[s2].z;
// REAL totalRadius = s[s1].radius + s[s2].radius;
// //
// if (dx*dx + dy*dy + dz*dz <= totalRadius * totalRadius)
// {
// //int i = countCrashNumList[s1];
// s[s1].isCrash = true;
// s[s2].isCrash = true;
// //i++;
// //countCrashNumList[s1]=i;
// atomicAdd(&(d_crashNumList[s1]), 1);
// }
// }
//}
//__global__ void copyCrashCount(int *countCrashNumList,
// int * d_crashNumList)
//{
// //
// int x = threadIdx.x + blockIdx.x * blockDim.x;
//
// //x,y,
// if (x < SPHERES)
// {
// countCrashNumList[x] = d_crashNumList[x];
// }
//}
//
////
//__global__ void kernelMoving(Sphere *s, int len)
//{
// int x = threadIdx.x + blockIdx.x * blockDim.x;
// //x
// while (x < len) {
//
// s[x].isCrash = false;
// s[x].x += s[x].dx;
// s[x].y += s[x].dy;
// s[x].z += s[x].dz;
// x += gridDim.x*blockDim.x;
// }
//}
//
//#ifdef USE_CONST_MEM
//__global__ void kernel(unsigned char *ptr) {
//#else
//__global__ void kernel(Sphere *d_spheres, unsigned char *ptr) {
//#endif
// //pixel
// int x = threadIdx.x + blockIdx.x * blockDim.x;
// int y = threadIdx.y + blockIdx.y * blockDim.y;
// //
// int offset = x + y * blockDim.x * gridDim.x;
// REAL ox = (x - DIM / 2);
// REAL oy = (y - DIM / 2);
//
// REAL r = 0, g = 0, b = 0;
// REAL maxz = -INF;
// for (int i = 0; i < SPHERES; i++) {
// REAL n;
// REAL t = d_spheres[i].hit(ox, oy, &n);
// if (t > maxz) {
// REAL fscale = n;
// if (d_spheres[i].isCrash)
// {
// r = 1.0f *fscale;
// g = 0.0f*fscale;
// b = 0.0f*fscale;
// }
// else
// {
// r = d_spheres[i].r * fscale;
// g = d_spheres[i].g * fscale;
// b = d_spheres[i].b * fscale;
// maxz = t;
// }
// }
// }
//
// ptr[offset * 4 + 0] = (int)(r * 255);
// ptr[offset * 4 + 1] = (int)(g * 255);
// ptr[offset * 4 + 2] = (int)(b * 255);
// ptr[offset * 4 + 3] = 255;
//}
//
//// globals needed by the update routine
//struct DataBlock {
// // gpu bitmap
// unsigned char *dev_bitmap;
// //cpubitmap
// CPUAnimBitmap *bitmap;
//};
//
//void generate_frame(DataBlock *d, int ticks) {
//
// // initialize all integers of a device_vector to 0
// /*int * d_crashNumList;
// hipMalloc(&d_crashNumList, sizeof(int)* SPHERES);
// hipMemset(d_crashNumList, 0, sizeof(int)* SPHERES);*/
//
//
// float totalTime = 0.0;
// //0
// HANDLE_ERROR(hipMemset(d_crashnum, 0, sizeof(int)));
// // copyhost
// HANDLE_ERROR(hipMemcpy(h_crashnum, d_crashnum,
// sizeof(int), hipMemcpyDeviceToHost));
// printf("init num of crash: %d\n", (*h_crashnum));
//
// START_GPU
//
// //------------ --2000 ----------------
// kernelMoving << <64, 32 >> > (d_spheres, SPHERES);
// END_GPU
// totalTime += elapsedTime;
// thrust::device_ptr<int> countCrashNumList = thrust::device_malloc<int>(SPHERES);
// START_GPU
// //--------------------------------
// //SPHERES 2000 grid 64 * 64
// //dim3 crashGrids(64, 64);
// dim3 crashGrids(64, 64);
// dim3 crashBlock(32, 32);
// //
// //crash << <crashGrids, crashBlock >> > (d_spheres, SPHERES, d_crashnum);
// //
// int * raw_ptr = thrust::raw_pointer_cast(countCrashNumList);
// crashWithCountList << <crashGrids, crashBlock >> > (d_spheres, SPHERES, raw_ptr);
//
//
// //thrust::device_vector<int> countCrashNumList(SPHERES, 0);
// END_GPU
// START_GPU
// thrust::device_ptr<int> first = countCrashNumList;
// thrust::device_ptr<int> last = countCrashNumList + SPHERES;
// //copyCrashCount << <64, 32 >> > (raw_ptr, d_crashNumList);
// int sum = thrust::reduce(first, last, (int)0, thrust::plus<int>());
// printf("num of pair sphere (thrust): %d\n", sum);
//
// END_GPU
// totalTime += elapsedTime;
//
// //----------- bitmap--------
// START_GPU
// dim3 grids(DIM / 16, DIM / 16);
// dim3 threads(16, 16);
//#ifdef USE_CONST_MEM
// kernel << <grids, threads >> > (d->dev_bitmap);
//#else
// kernel << <grids, threads >> > (d_spheres, d->dev_bitmap);
//#endif
//
// END_GPU
// totalTime += elapsedTime;
//
// //-----bitmap device host -----------
// HANDLE_ERROR(hipMemcpy(d->bitmap->get_ptr(), d->dev_bitmap,
// d->bitmap->image_size(), hipMemcpyDeviceToHost));
//
// HANDLE_ERROR(hipMemcpy(h_crashnum, d_crashnum,
// sizeof(int), hipMemcpyDeviceToHost));
// //printf("num of pair sphere crash: %d\n", (*h_crashnum));
// printf("total time: %3.1f\n", totalTime);
// printf("---------------------------------------------\n");
//
//}
//
//// clean up memory allocated on the GPU
//void cleanup(DataBlock *d) {
// HANDLE_ERROR(hipFree(d->dev_bitmap));
// //
// HANDLE_ERROR(hipFree(d_crashnum));
// free(h_crashnum);
//}
//
////-------------------------main-------------------------------
//
//int main(void) {
// //-------------------
// DataBlock data;
// CPUAnimBitmap bitmap(DIM, DIM, &data);
// data.bitmap = &bitmap;
//
// //
// h_crashnum = (int *)malloc(sizeof(int));
// *h_crashnum = 0;
//
// HANDLE_ERROR(hipMalloc((void**)&d_crashnum, sizeof(int)));
// HANDLE_ERROR(hipMemcpy(d_crashnum, h_crashnum,
// sizeof(int), hipMemcpyHostToDevice));
// /*HANDLE_ERROR(hipMemset(d_crashnum, 0, sizeof(int)));
// int temp = 100;
// HANDLE_ERROR(hipMemcpy(&temp, d_crashnum,
// sizeof(int), hipMemcpyDeviceToHost));
// printf("num of sphere crash: %d\n", temp);*/
//
//
// //---------gpu-------------
// HANDLE_ERROR(hipMalloc((void**)&data.dev_bitmap, bitmap.image_size()));
//
//#ifdef USE_CONST_MEM
//#else
// HANDLE_ERROR(hipMalloc((void**)&d_spheres, sizeof(Sphere) * SPHERES));
//#endif
//
// // allocate temp memory, initialize it, copy to constant
// // memory on the GPU, then free our temp memory
// Sphere *temp_s = (Sphere*)malloc(sizeof(Sphere) * SPHERES);
// for (int i = 0; i < SPHERES; i++) {
// temp_s[i].r = rnd(1.0f);
// temp_s[i].g = rnd(1.0f);
// temp_s[i].b = rnd(1.0f);
//
// temp_s[i].x = rnd(1000.0f) - 500;
// temp_s[i].y = rnd(1000.0f) - 500;
// temp_s[i].z = rnd(1000.0f) - 500;
// temp_s[i].radius = rnd(10.0f) + 5;
//
// //
// temp_s[i].dx = STEP_SIZE * ((rand() / (float)RAND_MAX) * 2 - 1);
// temp_s[i].dy = STEP_SIZE * ((rand() / (float)RAND_MAX) * 2 - 1);
// temp_s[i].dz = STEP_SIZE * ((rand() / (float)RAND_MAX) * 2 - 1);
// }
//
//#ifdef USE_CONST_MEM
// HANDLE_ERROR(hipMemcpyToSymbol(d_spheres, temp_s, sizeof(Sphere) * SPHERES));
//#else
// HANDLE_ERROR(hipMemcpy(d_spheres, temp_s, sizeof(Sphere)*SPHERES, hipMemcpyHostToDevice));
//#endif
//
// free(temp_s);
//
// // display
// bitmap.anim_and_exit((void(*)(void*, int))generate_frame, (void(*)(void*))cleanup);
//} | c76be60774702a712a80c1a9729ebee8a3f79606.cu | //#include <cuda_runtime.h>
//#include "device_launch_parameters.h"
//#include <helper_cuda.h>
////#include "sm_20_atomic_functions.h"
//
//#include <thrust/host_vector.h>
//#include <thrust/device_vector.h>
//#include <thrust/count.h>
//#include <stdio.h>
//
//#define REAL float
////#define USE_CONST_MEM
//#define HANDLE_ERROR checkCudaErrors
//
//float elapsedTime;
//#define START_GPU {\
//elapsedTime = 0.0;\
//cudaEvent_t start, stop;\
//checkCudaErrors(cudaEventCreate(&start)); \
//checkCudaErrors(cudaEventCreate(&stop));\
//checkCudaErrors(cudaEventRecord(start, 0));\
//
//#define END_GPU \
//checkCudaErrors(cudaEventRecord(stop, 0));\
//checkCudaErrors(cudaEventSynchronize(stop));\
//checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop)); \
//printf("GPU Time used: %3.2f ms\n", elapsedTime);\
//checkCudaErrors(cudaEventDestroy(start));\
//checkCudaErrors(cudaEventDestroy(stop));}
//
//#define START_CPU {\
//double start = omp_get_wtime();
//
//#define END_CPU \
//double end = omp_get_wtime();\
//double duration = end - start;\
//printf("CPU Time used: %3.1f ms\n", duration * 1000);}
//
////############################################################################
//#ifdef _WIN64
//#define GLUT_NO_LIB_PRAGMA
//#pragma comment (lib, "opengl32.lib")
//#pragma comment (lib, "glut64.lib")
//#endif //_WIN64
//
///* On Windows, include the local copy of glut.h and glext.h */
//#include "GL/glut.h"
//#include "GL/glext.h"
//#define GET_PROC_ADDRESS( str ) wglGetProcAddress( str )
//
////---------------------- bitmap wrapper class ------------------------------
//struct CPUAnimBitmap {
//	// image data
// unsigned char *pixels;
// int width, height;
//	// a pointer to user data
// void *dataBlock;
//
//	// function pointers that can be set dynamically
// void(*fAnim)(void*, int);
// void(*animExit)(void*);
// void(*clickDrag)(void*, int, int, int, int);
// int dragStartX, dragStartY;
//
// CPUAnimBitmap(int w, int h, void *d = NULL) {
// width = w;
// height = h;
// //r g b alph
// pixels = new unsigned char[width * height * 4];
// dataBlock = d;
// clickDrag = NULL;
// }
//
// ~CPUAnimBitmap() {
// delete[] pixels;
// }
//
// unsigned char* get_ptr(void) const { return pixels; }
// long image_size(void) const { return width * height * 4; }
//
// void click_drag(void(*f)(void*, int, int, int, int)) {
// clickDrag = f;
// }
//
//	// render this image
//	// input: f is the function that computes the bitmap image on the GPU
//	//        e is the cuda cleanup function
// void anim_and_exit(void(*f)(void*, int), void(*e)(void*)) {
// CPUAnimBitmap** bitmap = get_bitmap_ptr();
// *bitmap = this;
// fAnim = f;
// animExit = e;
// // a bug in the Windows GLUT implementation prevents us from
// // passing zero arguments to glutInit()
// int c = 1;
// char* dummy = "";
// glutInit(&c, &dummy);
// glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
// glutInitWindowSize(width, height);
// glutCreateWindow("bitmap");
// glutKeyboardFunc(Key);
// glutDisplayFunc(Draw);
//
// if (clickDrag != NULL)
// glutMouseFunc(mouse_func);
//
//		// glutIdleFunc registers a global callback: when no window events arrive,
//		// the GLUT program can run background tasks or continuous animation.
//		// If enabled, this idle function is called repeatedly until a window event occurs.
// glutIdleFunc(idle_func);
// glutMainLoop();
// }
//
// // static method used for glut callbacks
// static CPUAnimBitmap** get_bitmap_ptr(void) {
// static CPUAnimBitmap* gBitmap;
// return &gBitmap;
// }
//
// // static method used for glut callbacks
// static void mouse_func(int button, int state,
// int mx, int my) {
// if (button == GLUT_LEFT_BUTTON) {
// CPUAnimBitmap* bitmap = *(get_bitmap_ptr());
// if (state == GLUT_DOWN) {
// bitmap->dragStartX = mx;
// bitmap->dragStartY = my;
// }
// else if (state == GLUT_UP) {
// bitmap->clickDrag(bitmap->dataBlock,
// bitmap->dragStartX,
// bitmap->dragStartY,
// mx, my);
// }
// }
// }
//
// // static method used for glut callbacks
// static void idle_func(void) {
// static int ticks = 1;
// CPUAnimBitmap* bitmap = *(get_bitmap_ptr());
// bitmap->fAnim(bitmap->dataBlock, ticks++);
// glutPostRedisplay();
// }
//
// // static method used for glut callbacks
// static void Key(unsigned char key, int x, int y) {
// switch (key) {
// case 27:
// CPUAnimBitmap* bitmap = *(get_bitmap_ptr());
// bitmap->animExit(bitmap->dataBlock);
// //delete bitmap;
// exit(0);
// }
// }
//
// // static method used for glut callbacks
// static void Draw(void) {
// CPUAnimBitmap* bitmap = *(get_bitmap_ptr());
// glClearColor(0.0, 0.0, 0.0, 1.0);
// glClear(GL_COLOR_BUFFER_BIT);
// glDrawPixels(bitmap->width, bitmap->height, GL_RGBA, GL_UNSIGNED_BYTE, bitmap->pixels);
// glutSwapBuffers();
// }
//};
//
//// image size in pixels
//#define DIM 1024
//#define rnd( x ) (x * rand() / RAND_MAX)
//#define INF 2e10f
//
////---------------------------- sphere wrapper struct -------------------------------
//struct Sphere {
// REAL r, b, g;
// REAL radius;
//	// position of the sphere
// REAL x, y, z;
//	// per-frame velocity of the sphere
// REAL dx, dy, dz;
// bool isCrash;
//	// Does the ray coming from the pixel at (ox, oy) hit this sphere?
//	// If the ray intersects the sphere, this method computes the distance from the
//	// camera to the point where the ray hits the surface; when several spheres are
//	// hit, only the sphere closest to the camera is visible.
// __device__ REAL hit(REAL ox, REAL oy, REAL *n) {
// REAL dx = ox - x;
// REAL dy = oy - y;
//		// the ray hits the sphere only when the distance is smaller than the radius
// if (dx*dx + dy*dy < radius*radius) {
// REAL dz = sqrtf(radius*radius - dx*dx - dy*dy);
// *n = dz / sqrtf(radius * radius);
// return dz + z;
// }
//		// infinitely far away (no hit)
// return -INF;
// }
//};
//
////------------ number of sphere collisions ----------
//#define SPHERES 2000
//
//int *d_crashnum, *h_crashnum;
//
//#ifdef USE_CONST_MEM
//__constant__ Sphere d_spheres[SPHERES];
//#else
//Sphere *d_spheres;
//#endif
//
////------------------------cuda kernel --------------------------
//
//#define STEP_SIZE REAL(20.0)
//
//// count how many spheres collide
//__global__ void crash(Sphere *s, int num_sphere, int*d_crashnum)
//{
//	// get the indices of the two spheres being tested
// int s1 = threadIdx.x + blockIdx.x * blockDim.x;
// int s2 = threadIdx.y + blockIdx.y * blockDim.y;
//
//	// collision test for the spheres with indices x, y; the matrix is symmetric, so only half of it is computed
// if (s1 < num_sphere && s2 < num_sphere && s1 < s2)
// {
// REAL dx = s[s1].x - s[s2].x;
// REAL dy = s[s1].y - s[s2].y;
// REAL dz = s[s1].z - s[s2].z;
// REAL totalRadius = s[s1].radius + s[s2].radius;
//		//check whether the two spheres overlap
// if (dx*dx + dy*dy + dz*dz <= totalRadius * totalRadius)
// {
// s[s1].isCrash = true;
// s[s2].isCrash = true;
// atomicAdd(d_crashnum, 1);
// }
// }
//}
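////note: with the s1 and s2 ordering guard above every unordered pair is tested exactly once, so after the
////kernel *d_crashnum holds the number of colliding pairs; the 64 x 64 grid of 32 x 32 blocks launched in
////generate_frame spans 2048 x 2048 thread indices, enough to cover all SPHERES x SPHERES = 2000 x 2000
////pair combinations.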
//
//__global__ void crashWithCountList(Sphere *s, int num_sphere, int * d_crashNumList)
//{
//	//indices of the two spheres to test for collision
// int s1 = threadIdx.x + blockIdx.x * blockDim.x;
// int s2 = threadIdx.y + blockIdx.y * blockDim.y;
//
//	//collision test for the two spheres with indices s1 and s2; the pair matrix is symmetric, so only half of it is computed
// if (s1 < num_sphere && s2 < num_sphere && s1 < s2)
// {
// REAL dx = s[s1].x - s[s2].x;
// REAL dy = s[s1].y - s[s2].y;
// REAL dz = s[s1].z - s[s2].z;
// REAL totalRadius = s[s1].radius + s[s2].radius;
//		//check whether the two spheres overlap
// if (dx*dx + dy*dy + dz*dz <= totalRadius * totalRadius)
// {
// //int i = countCrashNumList[s1];
// s[s1].isCrash = true;
// s[s2].isCrash = true;
// //i++;
// //countCrashNumList[s1]=i;
// atomicAdd(&(d_crashNumList[s1]), 1);
// }
// }
//}
//__global__ void copyCrashCount(int *countCrashNumList,
// int * d_crashNumList)
//{
//	//index of the sphere handled by this thread
// int x = threadIdx.x + blockIdx.x * blockDim.x;
//
//	//copy the collision count of sphere x into the output list
// if (x < SPHERES)
// {
// countCrashNumList[x] = d_crashNumList[x];
// }
//}
//
////update the positions of the spheres
//__global__ void kernelMoving(Sphere *s, int len)
//{
// int x = threadIdx.x + blockIdx.x * blockDim.x;
//	//update the position of sphere x
// while (x < len) {
//
// s[x].isCrash = false;
// s[x].x += s[x].dx;
// s[x].y += s[x].dy;
// s[x].z += s[x].dz;
// x += gridDim.x*blockDim.x;
// }
//}
//
//#ifdef USE_CONST_MEM
//__global__ void kernel(unsigned char *ptr) {
//#else
//__global__ void kernel(Sphere *d_spheres, unsigned char *ptr) {
//#endif
//	//compute the pixel coordinates of this thread
// int x = threadIdx.x + blockIdx.x * blockDim.x;
// int y = threadIdx.y + blockIdx.y * blockDim.y;
//	//linear index of this pixel
// int offset = x + y * blockDim.x * gridDim.x;
// REAL ox = (x - DIM / 2);
// REAL oy = (y - DIM / 2);
//
// REAL r = 0, g = 0, b = 0;
// REAL maxz = -INF;
// for (int i = 0; i < SPHERES; i++) {
// REAL n;
// REAL t = d_spheres[i].hit(ox, oy, &n);
// if (t > maxz) {
// REAL fscale = n;
// if (d_spheres[i].isCrash)
// {
// r = 1.0f *fscale;
// g = 0.0f*fscale;
// b = 0.0f*fscale;
// }
// else
// {
// r = d_spheres[i].r * fscale;
// g = d_spheres[i].g * fscale;
// b = d_spheres[i].b * fscale;
// maxz = t;
// }
// }
// }
//
// ptr[offset * 4 + 0] = (int)(r * 255);
// ptr[offset * 4 + 1] = (int)(g * 255);
// ptr[offset * 4 + 2] = (int)(b * 255);
// ptr[offset * 4 + 3] = 255;
//}
//
//// globals needed by the update routine
//struct DataBlock {
//	//bitmap data stored on the GPU
// unsigned char *dev_bitmap;
//	//bitmap data stored on the CPU
// CPUAnimBitmap *bitmap;
//};
//
//void generate_frame(DataBlock *d, int ticks) {
//
// // initialize all integers of a device_vector to 0
// /*int * d_crashNumList;
// cudaMalloc(&d_crashNumList, sizeof(int)* SPHERES);
// cudaMemset(d_crashNumList, 0, sizeof(int)* SPHERES);*/
//
//
// float totalTime = 0.0;
//	//reset the sphere-collision counter to 0
// HANDLE_ERROR(cudaMemset(d_crashnum, 0, sizeof(int)));
//	//copy the collision count back to the host and print it
// HANDLE_ERROR(cudaMemcpy(h_crashnum, d_crashnum,
// sizeof(int), cudaMemcpyDeviceToHost));
// printf("init num of crash: %d\n", (*h_crashnum));
//
// START_GPU
//
//	//------------ move the spheres -- 2000 of them ----------------
// kernelMoving << <64, 32 >> > (d_spheres, SPHERES);
// END_GPU
// totalTime += elapsedTime;
// thrust::device_ptr<int> countCrashNumList = thrust::device_malloc<int>(SPHERES);
// START_GPU
//	//-------------- detect collisions between the spheres ------------------
//	//with SPHERES == 2000 the grid is 64 * 64
// //dim3 crashGrids(64, 64);
// dim3 crashGrids(64, 64);
// dim3 crashBlock(32, 32);
//	//option 1
// //crash << <crashGrids, crashBlock >> > (d_spheres, SPHERES, d_crashnum);
//	//option 2
// int * raw_ptr = thrust::raw_pointer_cast(countCrashNumList);
// crashWithCountList << <crashGrids, crashBlock >> > (d_spheres, SPHERES, raw_ptr);
//
//
// //thrust::device_vector<int> countCrashNumList(SPHERES, 0);
// END_GPU
// START_GPU
// thrust::device_ptr<int> first = countCrashNumList;
// thrust::device_ptr<int> last = countCrashNumList + SPHERES;
// //copyCrashCount << <64, 32 >> > (raw_ptr, d_crashNumList);
// int sum = thrust::reduce(first, last, (int)0, thrust::plus<int>());
// printf("num of pair sphere (thrust): %d\n", sum);
//
// END_GPU
// totalTime += elapsedTime;
//
//	//----------- generate one bitmap frame from the sphere data --------
// START_GPU
// dim3 grids(DIM / 16, DIM / 16);
// dim3 threads(16, 16);
//#ifdef USE_CONST_MEM
// kernel << <grids, threads >> > (d->dev_bitmap);
//#else
// kernel << <grids, threads >> > (d_spheres, d->dev_bitmap);
//#endif
//
// END_GPU
// totalTime += elapsedTime;
//
//	//----- copy the bitmap data from the device to the host -----------
// HANDLE_ERROR(cudaMemcpy(d->bitmap->get_ptr(), d->dev_bitmap,
// d->bitmap->image_size(), cudaMemcpyDeviceToHost));
//
// HANDLE_ERROR(cudaMemcpy(h_crashnum, d_crashnum,
// sizeof(int), cudaMemcpyDeviceToHost));
// //printf("num of pair sphere crash: %d\n", (*h_crashnum));
// printf("total time: %3.1f\n", totalTime);
// printf("---------------------------------------------\n");
//
//}
//
//// clean up memory allocated on the GPU
//void cleanup(DataBlock *d) {
// HANDLE_ERROR(cudaFree(d->dev_bitmap));
//	//free the memory used for the collision counter
// HANDLE_ERROR(cudaFree(d_crashnum));
// free(h_crashnum);
//}
//
////-------------------------main-------------------------------
//
//int main(void) {
//	//--------- allocate space for the image ----------
// DataBlock data;
// CPUAnimBitmap bitmap(DIM, DIM, &data);
// data.bitmap = &bitmap;
//
//	//allocate space for the sphere-collision counter
// h_crashnum = (int *)malloc(sizeof(int));
// *h_crashnum = 0;
//
// HANDLE_ERROR(cudaMalloc((void**)&d_crashnum, sizeof(int)));
// HANDLE_ERROR(cudaMemcpy(d_crashnum, h_crashnum,
// sizeof(int), cudaMemcpyHostToDevice));
// /*HANDLE_ERROR(cudaMemset(d_crashnum, 0, sizeof(int)));
// int temp = 100;
// HANDLE_ERROR(cudaMemcpy(&temp, d_crashnum,
// sizeof(int), cudaMemcpyDeviceToHost));
// printf("num of sphere crash: %d\n", temp);*/
//
//
//	//--------- allocate GPU memory -------------
// HANDLE_ERROR(cudaMalloc((void**)&data.dev_bitmap, bitmap.image_size()));
//
//#ifdef USE_CONST_MEM
//#else
// HANDLE_ERROR(cudaMalloc((void**)&d_spheres, sizeof(Sphere) * SPHERES));
//#endif
//
// // allocate temp memory, initialize it, copy to constant
// // memory on the GPU, then free our temp memory
// Sphere *temp_s = (Sphere*)malloc(sizeof(Sphere) * SPHERES);
// for (int i = 0; i < SPHERES; i++) {
// temp_s[i].r = rnd(1.0f);
// temp_s[i].g = rnd(1.0f);
// temp_s[i].b = rnd(1.0f);
//
// temp_s[i].x = rnd(1000.0f) - 500;
// temp_s[i].y = rnd(1000.0f) - 500;
// temp_s[i].z = rnd(1000.0f) - 500;
// temp_s[i].radius = rnd(10.0f) + 5;
//
//		//initialize the per-frame velocity of the sphere
// temp_s[i].dx = STEP_SIZE * ((rand() / (float)RAND_MAX) * 2 - 1);
// temp_s[i].dy = STEP_SIZE * ((rand() / (float)RAND_MAX) * 2 - 1);
// temp_s[i].dz = STEP_SIZE * ((rand() / (float)RAND_MAX) * 2 - 1);
// }
//
//#ifdef USE_CONST_MEM
// HANDLE_ERROR(cudaMemcpyToSymbol(d_spheres, temp_s, sizeof(Sphere) * SPHERES));
//#else
// HANDLE_ERROR(cudaMemcpy(d_spheres, temp_s, sizeof(Sphere)*SPHERES, cudaMemcpyHostToDevice));
//#endif
//
// free(temp_s);
//
// // display
// bitmap.anim_and_exit((void(*)(void*, int))generate_frame, (void(*)(void*))cleanup);
//} |
10f7774cebb60b0e39d361c2663d86d2ef087807.hip | // !!! This is a file automatically generated by hipify!!!
/*********************** LDL Decomposition using CUDA ************************/
/*********************** Name: Balaji Rajasekaran ******************************/
/*********************** UFID:1918-2684 ****************************************/
/*********************** Email : [email protected] **************************/
/*********************** Team-number : 14***************************************/
#include<stdio.h>
#include<stdlib.h>
#include<assert.h>
#include<math.h>
#include<time.h>
#include<cusolverDn.h>
#include <rocblas.h>
#include<sys/time.h>
#define EPISILON (0.0001)
#define MAX_VALUE (1e6)
//function to compute the execution time
double timerval()
{
struct timeval st;
gettimeofday(&st, NULL);
return (st.tv_sec+st.tv_usec*1e-6);
}
//function to generate a positive definite matrix
void init(int n, double* A, double* B)
{
int i, j, k;
//creating a random matrix
for(i=0; i<n; i++)
{
for(j=i; j<n; j++)
{
double r = rand() % 100;
A[i*n+j] = r;
A[j*n+i] = A[i*n+j];
}
}
//converting to positive definite matrix
for(i=0; i<n; i++)
{
for(j=0; j<n; j++)
{
double s = 0;
for(k=0; k<n; k++)
{
s += A[i*n+k]*A[j*n+k];
}
B[i*n+j] = s;
}
}
}
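//note: B[i*n+j] = sum_k A[i*n+k]*A[j*n+k] is B = A*A^T for the random symmetric A above, so
//x^T*B*x = ||A^T*x||^2 >= 0 and B is symmetric positive semi-definite (positive definite when A
//is nonsingular), which is what the LDL^T (sytrf) factorization below expects.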
//Main function
int main()
{
double startTime = 0;
double endTime = 0;
int i, j, n;
int info;
cusolverStatus_t buffer_Status, solve_status;
for(i=1; i<13; i++)
{
n = pow(2, i); //Run the routine for matrix of order 2 to 4096
double *A = (double *)malloc(n* n * sizeof(double));
double *B = (double *)malloc(n* n * sizeof(double));
init(n, A, B); //initialize matrix function call
double *M;
hipMalloc(&M, n* n * sizeof(double)); //Allocate memory for M in GPU
hipMemcpy(M, B, n * n * sizeof(double), hipMemcpyHostToDevice); //Copy contents of the initialised array from host to device memory
int *devInfo;
hipMalloc(&devInfo, sizeof(int));
hipsolverDnHandle_t handle; //Initializing the CUDA solver
hipsolverDnCreate(&handle);
int work_size = 0;
		buffer_Status = hipsolverDnDsytrf_bufferSize(handle, n, M, n, &work_size); //CUDA sytrf workspace-size query
double *work;
hipMalloc(&work, work_size * sizeof(double));
int I[n];
startTime = timerval();
for(j=0; j<1000; j++) //Running the code 1000 times
{
solve_status = hipsolverDnDsytrf(handle, HIPBLAS_FILL_MODE_LOWER, n, M, n, I, work, work_size, devInfo); //CUDA sytrf function execution
}
endTime = timerval();
freopen("ldl_CUDA_results.txt","a",stdout);
printf("\n The computation time for %d order matrix is : %f \n", n, ((endTime - startTime)/1000)); //Print the execution time
hipsolverDnDestroy(handle);
hipFree(M);
free(B);
free(A);
}
return 0;
}
| 10f7774cebb60b0e39d361c2663d86d2ef087807.cu | /*********************** LDL Decomposition using CUDA ************************/
/*********************** Name: Balaji Rajasekaran ******************************/
/*********************** UFID:1918-2684 ****************************************/
/*********************** Email : [email protected] **************************/
/*********************** Team-number : 14***************************************/
#include<stdio.h>
#include<stdlib.h>
#include<assert.h>
#include<math.h>
#include<time.h>
#include<cusolverDn.h>
#include <cublas_v2.h>
#include<sys/time.h>
#define EPISILON (0.0001)
#define MAX_VALUE (1e6)
//function to compute the execution time
double timerval()
{
struct timeval st;
gettimeofday(&st, NULL);
return (st.tv_sec+st.tv_usec*1e-6);
}
//function to generate a positive definite matrix
void init(int n, double* A, double* B)
{
int i, j, k;
//creating a random matrix
for(i=0; i<n; i++)
{
for(j=i; j<n; j++)
{
double r = rand() % 100;
A[i*n+j] = r;
A[j*n+i] = A[i*n+j];
}
}
//converting to positive definite matrix
for(i=0; i<n; i++)
{
for(j=0; j<n; j++)
{
double s = 0;
for(k=0; k<n; k++)
{
s += A[i*n+k]*A[j*n+k];
}
B[i*n+j] = s;
}
}
}
//Main function
int main()
{
double startTime = 0;
double endTime = 0;
int i, j, n;
int info;
cusolverStatus_t buffer_Status, solve_status;
for(i=1; i<13; i++)
{
n = pow(2, i); //Run the routine for matrix of order 2 to 4096
double *A = (double *)malloc(n* n * sizeof(double));
double *B = (double *)malloc(n* n * sizeof(double));
init(n, A, B); //initialize matrix function call
double *M;
cudaMalloc(&M, n* n * sizeof(double)); //Allocate memory for M in GPU
cudaMemcpy(M, B, n * n * sizeof(double), cudaMemcpyHostToDevice); //Copy contents of the initialised array from host to device memory
int *devInfo;
cudaMalloc(&devInfo, sizeof(int));
cusolverDnHandle_t handle; //Initializing the CUDA solver
cusolverDnCreate(&handle);
int work_size = 0;
		buffer_Status = cusolverDnDsytrf_bufferSize(handle, n, M, n, &work_size); //CUDA sytrf workspace-size query
double *work;
cudaMalloc(&work, work_size * sizeof(double));
int I[n];
startTime = timerval();
for(j=0; j<1000; j++) //Running the code 1000 times
{
solve_status = cusolverDnDsytrf(handle, CUBLAS_FILL_MODE_LOWER, n, M, n, I, work, work_size, devInfo); //CUDA sytrf function execution
}
endTime = timerval();
freopen("ldl_CUDA_results.txt","a",stdout);
printf("\n The computation time for %d order matrix is : %f \n", n, ((endTime - startTime)/1000)); //Print the execution time
cusolverDnDestroy(handle);
cudaFree(M);
free(B);
free(A);
}
return 0;
}
|
b95f59b12765678f63793d0478e1fd2de9043b79.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2009-2018 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
/*! \file ReactionFieldDriverPotentialPairGPU.cu
\brief Defines the driver functions for computing all types of pair forces on the GPU
*/
#include "EvaluatorPairReactionField.h"
#include "AllDriverPotentialPairGPU.cuh"
hipError_t gpu_compute_reaction_field_forces(const pair_args_t & args,
const Scalar3 *d_params)
{
return gpu_compute_pair_forces<EvaluatorPairReactionField>(args,
d_params);
}
| b95f59b12765678f63793d0478e1fd2de9043b79.cu | // Copyright (c) 2009-2018 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
/*! \file ReactionFieldDriverPotentialPairGPU.cu
\brief Defines the driver functions for computing all types of pair forces on the GPU
*/
#include "EvaluatorPairReactionField.h"
#include "AllDriverPotentialPairGPU.cuh"
cudaError_t gpu_compute_reaction_field_forces(const pair_args_t & args,
const Scalar3 *d_params)
{
return gpu_compute_pair_forces<EvaluatorPairReactionField>(args,
d_params);
}
|
bef2f5e7af75fcbbf76a82347a29a9c90b9eba55.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Grean and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
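//As a quick worked example of the formula: a pure red pixel (R=255, G=0, B=0) maps to
//.299f * 255, about 76; pure green maps to about 150 and pure blue to about 29, which is
//why green dominates the perceived intensity.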
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
unsigned absX = (blockIdx.x * blockDim.x) + threadIdx.x;
unsigned absY = (blockIdx.y * blockDim.y) + threadIdx.y;
unsigned offset = (absY * numCols) + absX;
const uchar4 rgba = rgbaImage[offset];
float intensity = (.299f * rgba.x) + (.587f * rgba.y) + (.114f * rgba.z);
greyImage[offset] = intensity;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
  //blockSize and gridSize are set below: one thread per block over a
  //numCols x numRows grid, i.e. one block (with a single thread) per pixel
const dim3 blockSize( 1, 1, 1); //TODO
const dim3 gridSize( numCols, numRows, 1); //TODO
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| bef2f5e7af75fcbbf76a82347a29a9c90b9eba55.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Grean and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
unsigned absX = (blockIdx.x * blockDim.x) + threadIdx.x;
unsigned absY = (blockIdx.y * blockDim.y) + threadIdx.y;
unsigned offset = (absY * numCols) + absX;
const uchar4 rgba = rgbaImage[offset];
float intensity = (.299f * rgba.x) + (.587f * rgba.y) + (.114f * rgba.z);
greyImage[offset] = intensity;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
  //blockSize and gridSize are set below: one thread per block over a
  //numCols x numRows grid, i.e. one block (with a single thread) per pixel
const dim3 blockSize( 1, 1, 1); //TODO
const dim3 gridSize( numCols, numRows, 1); //TODO
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
78275733b930def0a36a2ee6e14a70122c4d18d1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <time.h>
#include "cuHeader.h"
#include <math.h>
#include <hip/hip_runtime.h>
void initRand(){
time_t t;
srand((unsigned) time(&t));
}
void initData(float *ip,int size){
for (int i=0;i<size;i++){
ip[i] = (float)( rand() & 0xFF )/256*10;
}
}
/// using malloced nElem*3*sizeof(float) buffer,write uniform particles postiion.
/// and Mold buffer pointer to V3Buf
/// nElem must be 4(k)^3 !!! now Implemention is nElem == 256
V3Buf CreateUniformParticles(float* buf,float rho,int nElem,float* p_length){
int nloop = (int)powf((float)(nElem/4)+0.1f,1.0f/3);
printf("%d\n",nloop);
float length = powf(nElem/rho,1.0f/3);
*p_length = length;
float a = length/nloop;
float ah = a/2;
float px[4] = {0,0,ah,ah};
float py[4] = {0,ah,ah,0};
float pz[4] = {0,ah,0,ah};
float *h_x,*h_y,*h_z;
h_x = buf;
h_y = h_x + nElem;
h_z = h_y + nElem;
for (int i=0; i<nElem;i++){
h_x[i]=0;
h_y[i]=0;
h_z[i]=0;
}
int i=0;
for (int ix = 0;ix<nloop;ix++){
for (int iy = 0;iy<nloop;iy++){
for(int iz=0;iz<nloop;iz++){
for (int ia=0;ia<4;ia++){
h_x[i] = px[ia] + ix * a;
h_y[i] = py[ia] + iy * a;
h_z[i] = pz[ia] + iz * a;
i++;
}
}
}
}
V3Buf v3Buf = {h_x,h_y,h_z,nElem};
return v3Buf;
}
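/// note: the offsets (px, py, pz) above are the 4-atom basis of an FCC unit cell of edge
/// a = length/nloop, with box edge length = (nElem/rho)^(1/3); this is why nElem has to be 4*k^3.
/// illustrative sanity check (optional, not called anywhere in this file):
static int isFourTimesCube(int nElem){
	int k = (int)powf((float)(nElem/4)+0.1f, 1.0f/3);
	return 4*k*k*k == nElem;
}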
/// using malloced nElem*3*sizeof(float) buffer,write random particle position.
/// and Mold buffer pointer to V3Buf
/// nElem must be integer.
V3Buf CreateRandomVelocity(float* buf,int nElem){
float *h_x,*h_y,*h_z;
h_x = buf;
h_y = h_x + nElem;
h_z = h_y + nElem;
for(int i=0;i<nElem;i++){
float x = ((float)rand() / ((float)RAND_MAX + 1)) *2 -1;
float y = ((float)rand() / ((float)RAND_MAX + 1)) *2 -1;
float z = ((float)rand() / ((float)RAND_MAX + 1)) *2 -1;
h_x[i]=x;
h_y[i]=y;
h_z[i]=z;
}
V3Buf v3Buf = {h_x,h_y,h_z,nElem};
return v3Buf;
}
V3Buf CalculateForce(float *force,float *pos,int nElem,float length,double *potential){
float *h_fx,*h_fy,*h_fz,*h_px,*h_py,*h_pz;
h_fx = force;
h_fy = h_fx + nElem;
h_fz = h_fy + nElem;
h_px = pos;
h_py = h_px + nElem;
h_pz = h_py + nElem;
	for (int i=0;i<nElem*3;i++){// initialize forces to zero
force[i]=0.0f;
}
*potential = 0.0;
float eps = 1.0f;
float sigma = 1.0f;
float ce12 = 4.0f*eps*powf(sigma,12);
float ce06 = 4.0f*eps*powf(sigma, 6);
float cf12 = ce12 * 12.0f;
float cf06 = ce06 * 6.0f;
for (int j=0;j<nElem;j++){
for (int i=0;i<j;i++){
float dx,dy,dz,r2,r2i,r06i,r12i,fc,fx,fy,fz;
dx = h_px[i]-h_px[j];
dy = h_py[i]-h_py[j];
dz = h_pz[i]-h_pz[j];
if(dx<-length/2) dx+=length;
if(dx> length/2) dx-=length;
if(dy<-length/2) dy+=length;
if(dy> length/2) dy-=length;
if(dz<-length/2) dz+=length;
if(dz> length/2) dz-=length;
r2 = dx*dx+dy*dy+dz*dz;
if (r2 > 4*4)continue;
r2i = 1.0f/r2;
r06i = r2i * r2i * r2i;
r12i = r06i * r06i;
*potential += (ce12 * r12i - ce06 * r06i);
fc = (cf12*r12i-cf06*r06i)*r2i;
fx = fc * dx;
fy = fc * dy;
fz = fc * dz;
h_fx[i]+=fx;
h_fy[i]+=fy;
h_fz[i]+=fz;
h_fx[j]-=fx;
h_fy[j]-=fy;
h_fz[j]-=fz;
}
}
V3Buf v3Buf = {h_fx,h_fy,h_fz,nElem};
return v3Buf;
}
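// note: the constants above encode the Lennard-Jones pair potential
//   V(r) = 4*eps*((sigma/r)^12 - (sigma/r)^6),
// whose force magnitude is F(r) = 24*eps*(2*(sigma/r)^12 - (sigma/r)^6)/r, so
// fc = (cf12*r12i - cf06*r06i)*r2i equals F(r)/r and (fx,fy,fz) = fc*(dx,dy,dz) is the pair force;
// *potential sums V(r) over all pairs inside the cutoff radius of 4.0.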
double CalculateHamiltonian(float* pos,float* vel,int nElem,double potential){
float *h_px,*h_py,*h_pz,*h_vx,*h_vy,*h_vz;
h_px = pos;
h_py = h_px + nElem;
h_pz = h_py + nElem;
h_vx = vel;
h_vy = h_vx + nElem;
h_vz = h_vy + nElem;
double energy = 0.0;
for(int i=0;i<nElem;i++){
float px=h_px[i],py=h_py[i],pz=h_pz[i];
float vx=h_vx[i],vy=h_vy[i],vz=h_vz[i];
float r = sqrtf(px*px+py*py+pz*pz);
float v2= vx*vx+vy*vy+vz*vz;
energy += (double)(v2/2);
}
printf("%lf %lf %lf\n",energy+potential,energy,potential);
return energy+potential;
}
void SwapFloatPointer(float** a,float **b){
float *tmp = *b;
*b = *a;
*a = tmp;
}
__global__ void CalculateForce_GPUNaive(float *force,float *pos,int nElem,float length,float *pot){
int idx = threadIdx.x + blockIdx.x * blockDim.x; //1dim grid ,1im block
float *d_fx,*d_fy,*d_fz,*d_px,*d_py,*d_pz;
d_fx = force;
d_fy = d_fx + nElem;
d_fz = d_fy + nElem;
d_px = pos;
d_py = d_px + nElem;
d_pz = d_py + nElem;
	// initialize per-thread force accumulators to zero
float t_fx=0.0f,t_fy=0.0f,t_fz=0.0f;
double potential = 0.0; //must implement to calculate Hamiltoniam...
//Is it better to load this constants from ConstantMemory?
float eps = 1.0f;
float sigma = 1.0f;
float ce12 = 4.0f*eps*powf(sigma,12);
float ce06 = 4.0f*eps*powf(sigma, 6);
float cf12 = ce12 * 12.0f;
float cf06 = ce06 * 6.0f;
int i = idx;
for (int j=0;j<nElem;j++){// j->i
if (i==j)continue;
float dx,dy,dz,r2,r2i,r06i,r12i,fc,fx,fy,fz;
dx = d_px[i]-d_px[j];
dy = d_py[i]-d_py[j];
dz = d_pz[i]-d_pz[j];
if(dx<-length/2) dx+=length;
if(dx> length/2) dx-=length;
if(dy<-length/2) dy+=length;
if(dy> length/2) dy-=length;
if(dz<-length/2) dz+=length;
if(dz> length/2) dz-=length;
r2 = dx*dx+dy*dy+dz*dz;
//if (r2 > 4*4)continue; //Cut force with far from cut radius;this may be MEANINGLESS in GPU.
r2i = 1.0f/r2;
r06i = r2i * r2i * r2i;
r12i = r06i * r06i;
potential += (ce12 * r12i - ce06 * r06i);
fc = (cf12*r12i-cf06*r06i)*r2i;
fx = fc * dx;
fy = fc * dy;
fz = fc * dz;
t_fx+=fx;
t_fy+=fy;
t_fz+=fz;
}
d_fx[i]=t_fx;
d_fy[i]=t_fy;
d_fz[i]=t_fz;
pot[i]=(float)potential;
}
void cuMain(void (*grpc)(V3Buf buf) ){
initRand();
//Buffer Initialization (CPU)
int nElem = 256*8;
int nBytes = nElem * sizeof(float);
float *h_p,*h_v,*h_f,*h_fd,*h_pot;
h_p = (float*)malloc(nBytes*3);
h_v = (float*)malloc(nBytes*3);
h_f = (float*)malloc(nBytes*3);
h_fd= (float*)malloc(nBytes*3);
h_pot = (float*)malloc(nBytes);
//Buffer Initialization (GPU)
float *d_p,*d_f,*d_pot;
hipMalloc(&d_p,nBytes*3);
hipMalloc(&d_f,nBytes*3);
hipMalloc(&d_pot,nBytes);
float *h_df; //Test Buf;
h_df = (float *)malloc(nBytes*3);
//Buffer Setting
float length;
V3Buf h_v3pos = CreateUniformParticles(h_p,1.0f,nElem,&length);
V3Buf h_v3vel = CreateRandomVelocity(h_v,nElem);
for (int i=0;i<nElem;i++){
h_v[i]*=10.0f;
}
double potential;
CalculateForce(h_f,h_p,nElem,length,&potential);//Zeroth Calculation;
float dt = 0.005;
int it = 0;
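	// the loop below is a velocity Verlet integrator:
	//   x(t+dt) = x(t) + dt*v(t) + 0.5*dt*dt*f(t)
	//   v(t+dt) = v(t) + 0.5*dt*(f(t) + f(t+dt))
	// h_fd keeps the previous step's forces so both force terms are available, and positions are
	// wrapped back into the periodic box of edge length.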
while(true){
//Graphics Functon(External) :transfer postion buffer to graphics function;
if (it%1==0) (*grpc)(h_v3pos);
for (int i=0;i<nElem*3;i++){
float p = h_p[i];
p+=dt*(h_v[i]+0.5f*dt*h_f[i]);
p = p- floorf(p/length)*length;
h_p[i] = p;
}
SwapFloatPointer(&h_f,&h_fd);
{
hipMemcpy(d_p,h_p,nBytes*3,hipMemcpyHostToDevice);
dim3 block(32);
dim3 grid((nElem+block.x-1)/block.x);
hipLaunchKernelGGL(( CalculateForce_GPUNaive), dim3(grid),dim3(block), 0, 0, d_f,d_p,nElem,length,d_pot);
hipMemcpy(h_f,d_f,nBytes*3,hipMemcpyDeviceToHost);
hipMemcpy(h_pot,d_pot,nBytes,hipMemcpyDeviceToHost);
//CalculateForce(h_f,h_p,nElem,length,&potential);
//printf("%f,%f\n",h_f[6000],h_df[6000]);
}
for (int i=0;i<nElem*3;i++){
h_v[i]+=dt*0.5*(h_f[i]+h_fd[i]);
}
potential = 0;
for (int i=0;i<nElem;i++){
potential += h_pot[i];
}
potential /=2.;
CalculateHamiltonian(h_p,h_v,nElem,potential);
it++;
}
} | 78275733b930def0a36a2ee6e14a70122c4d18d1.cu | #include <stdio.h>
#include <time.h>
#include "cuHeader.h"
#include <math.h>
#include <cuda_runtime.h>
void initRand(){
time_t t;
srand((unsigned) time(&t));
}
void initData(float *ip,int size){
for (int i=0;i<size;i++){
ip[i] = (float)( rand() & 0xFF )/256*10;
}
}
/// using malloced nElem*3*sizeof(float) buffer,write uniform particles postiion.
/// and Mold buffer pointer to V3Buf
/// nElem must be 4(k)^3 !!! now Implemention is nElem == 256
V3Buf CreateUniformParticles(float* buf,float rho,int nElem,float* p_length){
int nloop = (int)powf((float)(nElem/4)+0.1f,1.0f/3);
printf("%d\n",nloop);
float length = powf(nElem/rho,1.0f/3);
*p_length = length;
float a = length/nloop;
float ah = a/2;
float px[4] = {0,0,ah,ah};
float py[4] = {0,ah,ah,0};
float pz[4] = {0,ah,0,ah};
float *h_x,*h_y,*h_z;
h_x = buf;
h_y = h_x + nElem;
h_z = h_y + nElem;
for (int i=0; i<nElem;i++){
h_x[i]=0;
h_y[i]=0;
h_z[i]=0;
}
int i=0;
for (int ix = 0;ix<nloop;ix++){
for (int iy = 0;iy<nloop;iy++){
for(int iz=0;iz<nloop;iz++){
for (int ia=0;ia<4;ia++){
h_x[i] = px[ia] + ix * a;
h_y[i] = py[ia] + iy * a;
h_z[i] = pz[ia] + iz * a;
i++;
}
}
}
}
V3Buf v3Buf = {h_x,h_y,h_z,nElem};
return v3Buf;
}
/// using malloced nElem*3*sizeof(float) buffer,write random particle position.
/// and Mold buffer pointer to V3Buf
/// nElem must be integer.
V3Buf CreateRandomVelocity(float* buf,int nElem){
float *h_x,*h_y,*h_z;
h_x = buf;
h_y = h_x + nElem;
h_z = h_y + nElem;
for(int i=0;i<nElem;i++){
float x = ((float)rand() / ((float)RAND_MAX + 1)) *2 -1;
float y = ((float)rand() / ((float)RAND_MAX + 1)) *2 -1;
float z = ((float)rand() / ((float)RAND_MAX + 1)) *2 -1;
h_x[i]=x;
h_y[i]=y;
h_z[i]=z;
}
V3Buf v3Buf = {h_x,h_y,h_z,nElem};
return v3Buf;
}
V3Buf CalculateForce(float *force,float *pos,int nElem,float length,double *potential){
float *h_fx,*h_fy,*h_fz,*h_px,*h_py,*h_pz;
h_fx = force;
h_fy = h_fx + nElem;
h_fz = h_fy + nElem;
h_px = pos;
h_py = h_px + nElem;
h_pz = h_py + nElem;
	for (int i=0;i<nElem*3;i++){// initialize forces to zero
force[i]=0.0f;
}
*potential = 0.0;
float eps = 1.0f;
float sigma = 1.0f;
float ce12 = 4.0f*eps*powf(sigma,12);
float ce06 = 4.0f*eps*powf(sigma, 6);
float cf12 = ce12 * 12.0f;
float cf06 = ce06 * 6.0f;
for (int j=0;j<nElem;j++){
for (int i=0;i<j;i++){
float dx,dy,dz,r2,r2i,r06i,r12i,fc,fx,fy,fz;
dx = h_px[i]-h_px[j];
dy = h_py[i]-h_py[j];
dz = h_pz[i]-h_pz[j];
if(dx<-length/2) dx+=length;
if(dx> length/2) dx-=length;
if(dy<-length/2) dy+=length;
if(dy> length/2) dy-=length;
if(dz<-length/2) dz+=length;
if(dz> length/2) dz-=length;
r2 = dx*dx+dy*dy+dz*dz;
if (r2 > 4*4)continue;
r2i = 1.0f/r2;
r06i = r2i * r2i * r2i;
r12i = r06i * r06i;
*potential += (ce12 * r12i - ce06 * r06i);
fc = (cf12*r12i-cf06*r06i)*r2i;
fx = fc * dx;
fy = fc * dy;
fz = fc * dz;
h_fx[i]+=fx;
h_fy[i]+=fy;
h_fz[i]+=fz;
h_fx[j]-=fx;
h_fy[j]-=fy;
h_fz[j]-=fz;
}
}
V3Buf v3Buf = {h_fx,h_fy,h_fz,nElem};
return v3Buf;
}
double CalculateHamiltonian(float* pos,float* vel,int nElem,double potential){
float *h_px,*h_py,*h_pz,*h_vx,*h_vy,*h_vz;
h_px = pos;
h_py = h_px + nElem;
h_pz = h_py + nElem;
h_vx = vel;
h_vy = h_vx + nElem;
h_vz = h_vy + nElem;
double energy = 0.0;
for(int i=0;i<nElem;i++){
float px=h_px[i],py=h_py[i],pz=h_pz[i];
float vx=h_vx[i],vy=h_vy[i],vz=h_vz[i];
float r = sqrtf(px*px+py*py+pz*pz);
float v2= vx*vx+vy*vy+vz*vz;
energy += (double)(v2/2);
}
printf("%lf %lf %lf\n",energy+potential,energy,potential);
return energy+potential;
}
void SwapFloatPointer(float** a,float **b){
float *tmp = *b;
*b = *a;
*a = tmp;
}
__global__ void CalculateForce_GPUNaive(float *force,float *pos,int nElem,float length,float *pot){
int idx = threadIdx.x + blockIdx.x * blockDim.x; //1dim grid ,1im block
float *d_fx,*d_fy,*d_fz,*d_px,*d_py,*d_pz;
d_fx = force;
d_fy = d_fx + nElem;
d_fz = d_fy + nElem;
d_px = pos;
d_py = d_px + nElem;
d_pz = d_py + nElem;
	// initialize per-thread force accumulators to zero
float t_fx=0.0f,t_fy=0.0f,t_fz=0.0f;
double potential = 0.0; //must implement to calculate Hamiltoniam...
//Is it better to load this constants from ConstantMemory?
float eps = 1.0f;
float sigma = 1.0f;
float ce12 = 4.0f*eps*powf(sigma,12);
float ce06 = 4.0f*eps*powf(sigma, 6);
float cf12 = ce12 * 12.0f;
float cf06 = ce06 * 6.0f;
int i = idx;
for (int j=0;j<nElem;j++){// j->i
if (i==j)continue;
float dx,dy,dz,r2,r2i,r06i,r12i,fc,fx,fy,fz;
dx = d_px[i]-d_px[j];
dy = d_py[i]-d_py[j];
dz = d_pz[i]-d_pz[j];
if(dx<-length/2) dx+=length;
if(dx> length/2) dx-=length;
if(dy<-length/2) dy+=length;
if(dy> length/2) dy-=length;
if(dz<-length/2) dz+=length;
if(dz> length/2) dz-=length;
r2 = dx*dx+dy*dy+dz*dz;
//if (r2 > 4*4)continue; //Cut force with far from cut radius;this may be MEANINGLESS in GPU.
r2i = 1.0f/r2;
r06i = r2i * r2i * r2i;
r12i = r06i * r06i;
potential += (ce12 * r12i - ce06 * r06i);
fc = (cf12*r12i-cf06*r06i)*r2i;
fx = fc * dx;
fy = fc * dy;
fz = fc * dz;
t_fx+=fx;
t_fy+=fy;
t_fz+=fz;
}
d_fx[i]=t_fx;
d_fy[i]=t_fy;
d_fz[i]=t_fz;
pot[i]=(float)potential;
}
void cuMain(void (*grpc)(V3Buf buf) ){
initRand();
//Buffer Initialization (CPU)
int nElem = 256*8;
int nBytes = nElem * sizeof(float);
float *h_p,*h_v,*h_f,*h_fd,*h_pot;
h_p = (float*)malloc(nBytes*3);
h_v = (float*)malloc(nBytes*3);
h_f = (float*)malloc(nBytes*3);
h_fd= (float*)malloc(nBytes*3);
h_pot = (float*)malloc(nBytes);
//Buffer Initialization (GPU)
float *d_p,*d_f,*d_pot;
cudaMalloc(&d_p,nBytes*3);
cudaMalloc(&d_f,nBytes*3);
cudaMalloc(&d_pot,nBytes);
float *h_df; //Test Buf;
h_df = (float *)malloc(nBytes*3);
//Buffer Setting
float length;
V3Buf h_v3pos = CreateUniformParticles(h_p,1.0f,nElem,&length);
V3Buf h_v3vel = CreateRandomVelocity(h_v,nElem);
for (int i=0;i<nElem;i++){
h_v[i]*=10.0f;
}
double potential;
CalculateForce(h_f,h_p,nElem,length,&potential);//Zeroth Calculation;
float dt = 0.005;
int it = 0;
while(true){
//Graphics Functon(External) :transfer postion buffer to graphics function;
if (it%1==0) (*grpc)(h_v3pos);
for (int i=0;i<nElem*3;i++){
float p = h_p[i];
p+=dt*(h_v[i]+0.5f*dt*h_f[i]);
p = p- floorf(p/length)*length;
h_p[i] = p;
}
SwapFloatPointer(&h_f,&h_fd);
{
cudaMemcpy(d_p,h_p,nBytes*3,cudaMemcpyHostToDevice);
dim3 block(32);
dim3 grid((nElem+block.x-1)/block.x);
CalculateForce_GPUNaive<<<grid,block>>>(d_f,d_p,nElem,length,d_pot);
cudaMemcpy(h_f,d_f,nBytes*3,cudaMemcpyDeviceToHost);
cudaMemcpy(h_pot,d_pot,nBytes,cudaMemcpyDeviceToHost);
//CalculateForce(h_f,h_p,nElem,length,&potential);
//printf("%f,%f\n",h_f[6000],h_df[6000]);
}
for (int i=0;i<nElem*3;i++){
h_v[i]+=dt*0.5*(h_f[i]+h_fd[i]);
}
potential = 0;
for (int i=0;i<nElem;i++){
potential += h_pot[i];
}
potential /=2.;
CalculateHamiltonian(h_p,h_v,nElem,potential);
it++;
}
} |
3a6e10bea2ba83782c6b0e1f4f249c5c62c9cbef.hip | // !!! This is a file automatically generated by hipify!!!
// https://github.com/thrust/thrust/blob/8551c97870cd722486ba7834ae9d867f13e299ad/examples/sum_rows.cu
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/random.h>
#include <iostream>
// convert a linear index to a row index
template <typename T>
struct linear_index_to_row_index : public thrust::unary_function<T,T>
{
T C; // number of columns
__host__ __device__
linear_index_to_row_index(T C) : C(C) {}
__host__ __device__
T operator()(T i)
{
return i / C;
}
};
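// note: wrapping a counting_iterator with this functor via make_transform_iterator turns the flat
// index 0,1,2,... into the key sequence 0,...,0,1,...,1,... with C equal keys per row (e.g. for
// C = 4: 0 0 0 0 1 1 1 1 2 2 2 2 ...), so the reduce_by_key calls below collapse each row of the
// row-major array into a single sum.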
const int NUM_REPS = 10;
hipEvent_t startEvent, stopEvent;
float ms;
int test(thrust::device_vector<int>& array, int R, int C)
{
// int R = 5; // number of rows
// int C = 8; // number of columns
// allocate storage for row sums and indices
thrust::device_vector<int> row_sums(R);
thrust::device_vector<int> row_indices(R);
thrust::reduce_by_key
(thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(C)),
thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(C)) + (R*C),
array.begin(),
row_indices.begin(),
row_sums.begin(),
thrust::equal_to<int>(),
thrust::plus<int>());
hipEventRecord(startEvent, 0);
for (int i = 0; i < NUM_REPS; i++) {
// compute row sums by summing values with equal row indices
thrust::reduce_by_key
(thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(C)),
thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(C)) + (R*C),
array.begin(),
row_indices.begin(),
row_sums.begin(),
thrust::equal_to<int>(),
thrust::plus<int>());
}
hipEventRecord(stopEvent, 0);
hipEventSynchronize(stopEvent);
hipEventElapsedTime(&ms, startEvent, stopEvent);
printf("%15.3f\n", (ms / NUM_REPS) * 1e3 );
return 0;
}
void dothatbench(int THEPOWER, int start) {
// int start=0;
int end=THEPOWER;
int total_elems = 1 << THEPOWER;
printf("Benchmarking Thrust %i.%i.%i TotalElems=%i\n",
THRUST_MAJOR_VERSION, THRUST_MINOR_VERSION, THRUST_SUBMINOR_VERSION, total_elems);
thrust::default_random_engine rng;
thrust::uniform_int_distribution<int> dist(10, 99);
// initialize data
thrust::device_vector<int> array(total_elems);
for (size_t i = 0; i < array.size(); i++)
array[i] = dist(rng);
printf("initialized array\n");
for(int powy=start; powy<=end; powy++) {
int powx = THEPOWER-powy;
int num_segments = 1 << powy;
int segment_size = 1 << powx;
char buf[16];
snprintf(buf, 16, "2^%i 2^%i", powy, powx);
printf("%15s", buf);
test(array, num_segments, segment_size);
}
}
int main(int argc, char** argv) {
hipEventCreate(&startEvent);
hipEventCreate(&stopEvent);
dothatbench(20, 0);
dothatbench(26, 0);
hipEventDestroy(startEvent);
hipEventDestroy(stopEvent);
}
| 3a6e10bea2ba83782c6b0e1f4f249c5c62c9cbef.cu | // https://github.com/thrust/thrust/blob/8551c97870cd722486ba7834ae9d867f13e299ad/examples/sum_rows.cu
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/random.h>
#include <iostream>
// convert a linear index to a row index
template <typename T>
struct linear_index_to_row_index : public thrust::unary_function<T,T>
{
T C; // number of columns
__host__ __device__
linear_index_to_row_index(T C) : C(C) {}
__host__ __device__
T operator()(T i)
{
return i / C;
}
};
const int NUM_REPS = 10;
cudaEvent_t startEvent, stopEvent;
float ms;
int test(thrust::device_vector<int>& array, int R, int C)
{
// int R = 5; // number of rows
// int C = 8; // number of columns
// allocate storage for row sums and indices
thrust::device_vector<int> row_sums(R);
thrust::device_vector<int> row_indices(R);
thrust::reduce_by_key
(thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(C)),
thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(C)) + (R*C),
array.begin(),
row_indices.begin(),
row_sums.begin(),
thrust::equal_to<int>(),
thrust::plus<int>());
cudaEventRecord(startEvent, 0);
for (int i = 0; i < NUM_REPS; i++) {
// compute row sums by summing values with equal row indices
thrust::reduce_by_key
(thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(C)),
thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(C)) + (R*C),
array.begin(),
row_indices.begin(),
row_sums.begin(),
thrust::equal_to<int>(),
thrust::plus<int>());
}
cudaEventRecord(stopEvent, 0);
cudaEventSynchronize(stopEvent);
cudaEventElapsedTime(&ms, startEvent, stopEvent);
printf("%15.3f\n", (ms / NUM_REPS) * 1e3 );
return 0;
}
void dothatbench(int THEPOWER, int start) {
// int start=0;
int end=THEPOWER;
int total_elems = 1 << THEPOWER;
printf("Benchmarking Thrust %i.%i.%i TotalElems=%i\n",
THRUST_MAJOR_VERSION, THRUST_MINOR_VERSION, THRUST_SUBMINOR_VERSION, total_elems);
thrust::default_random_engine rng;
thrust::uniform_int_distribution<int> dist(10, 99);
// initialize data
thrust::device_vector<int> array(total_elems);
for (size_t i = 0; i < array.size(); i++)
array[i] = dist(rng);
printf("initialized array\n");
for(int powy=start; powy<=end; powy++) {
int powx = THEPOWER-powy;
int num_segments = 1 << powy;
int segment_size = 1 << powx;
char buf[16];
snprintf(buf, 16, "2^%i 2^%i", powy, powx);
printf("%15s", buf);
test(array, num_segments, segment_size);
}
}
int main(int argc, char** argv) {
cudaEventCreate(&startEvent);
cudaEventCreate(&stopEvent);
dothatbench(20, 0);
dothatbench(26, 0);
cudaEventDestroy(startEvent);
cudaEventDestroy(stopEvent);
}
|
ff94d25506b226e9da616080c4ef5832a38e8874.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "rism3d.h"
void RISM3D :: cal_Coulomb () {
__global__ void coulomb(double * de, double * dfr,
double4 * dru, double * dqu,
double dx, double dy, double dz,
int nx, int ny, int nz, int natu);
__global__ void fk(double2 *, const double4 * __restrict__ ,
const double4 * __restrict__ , const double * __restrict__,
int);
__global__ void beta(double * de, double * dfr, double2 * dfk, double ubeta);
cout << "synthesizing solute Coulomb potential ..." << endl;
hipMalloc(&de, ce -> ngrid * sizeof(double));
hipMalloc(&dfr, ce -> ngrid * sizeof(double));
hipMalloc(&dfk, ce -> ngrid * sizeof(double2));
hipMemset(de, 0.0, ce -> ngrid * sizeof(double));
hipMemset(dfr, 0.0, ce -> ngrid * sizeof(double));
hipMemset(dfk, 0.0, ce -> ngrid * sizeof(double2));
hipLaunchKernelGGL(( coulomb) , dim3(g), dim3(b) , 0, 0, de, dfr, su -> dr, su -> dq,
ce -> dr[0], ce -> dr[1], ce -> dr[2],
ce -> grid[0], ce -> grid[1], ce -> grid[2], su -> num);
hipLaunchKernelGGL(( fk) , dim3(g), dim3(b) , 0, 0, dfk, dgv, su -> dr, su -> dq, su -> num);
double ubeta = hartree * bohr / (boltzmann * sv -> temper);
hipLaunchKernelGGL(( beta) , dim3(g), dim3(b) , 0, 0, de, dfr, dfk, ubeta);
}
__global__ void coulomb(double * de, double * dfr,
double4 * dru, double * dqu,
double bx, double by, double bz,
int nx, int ny, int nz, int natu) {
unsigned int ip = threadIdx.x + blockIdx.x * blockDim.x
+ blockIdx.y * blockDim.x * gridDim.x;
double rx = ((int)threadIdx.x - nx / 2) * bx;
double ry = ((int)blockIdx.x - ny / 2) * by;
double rz = ((int)blockIdx.y - nz / 2) * bz;
for (int iu = 0; iu < natu; ++iu) {
double delx = rx - dru[iu].x;
double dely = ry - dru[iu].y;
double delz = rz - dru[iu].z;
double ra = sqrt(delx * delx + dely * dely + delz * delz) ;
if (ra >= 1.0e-5) {
double qr = dqu[iu] / ra ;
de[ip] += qr ;
dfr[ip] += qr * (1 - exp(- ra)) ;
} else {
dfr[ip] += dqu[iu] ;
}
}
}
__global__ void fk(double2 * dfk, const double4 * __restrict__ dgv,
const double4 * __restrict__ dru,
const double * __restrict__ dqu, int natu) {
unsigned int ip = threadIdx.x + blockIdx.x * blockDim.x
+ blockIdx.y * blockDim.x * gridDim.x;
double rk2 = dgv[ip].x * dgv[ip].x
+ dgv[ip].y * dgv[ip].y + dgv[ip].z * dgv[ip].z;
double rk4i = 1.0 / (rk2 * (rk2 + 1.0));
for (int iu = 0; iu < natu; ++iu) {
double ruk = dgv[ip].x * dru[iu].x
+ dgv[ip].y * dru[iu].y + dgv[ip].z * dru[iu].z;
double tmp = 4.0 * M_PI * dqu[iu] * rk4i;
dfk[ip].x += tmp * cos(ruk);
dfk[ip].y -= tmp * sin(ruk);
}
}
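// note on the splitting above: in real space dfr accumulates q*(1 - exp(-r))/r (which tends to q
// as r -> 0, matching the else branch in coulomb()), while fk() accumulates
//   4*pi*q / (k^2*(k^2+1)) * exp(-i k.r_u) = (4*pi*q/k^2 - 4*pi*q/(k^2+1)) * exp(-i k.r_u),
// which appears to be the reciprocal-space form of that same screened term; de keeps the bare q/r
// sum, and beta() scales all three by ubeta = hartree*bohr/(boltzmann*temper).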
__global__ void beta(double * de, double * dfr, double2 * dfk, double ubeta) {
unsigned int ip = threadIdx.x + blockIdx.x * blockDim.x
+ blockIdx.y * blockDim.x * gridDim.x;
de[ip] *= ubeta;
dfr[ip] *= ubeta;
dfk[ip].x *= ubeta;
dfk[ip].y *= ubeta;
}
| ff94d25506b226e9da616080c4ef5832a38e8874.cu | #include <iostream>
#include "rism3d.h"
void RISM3D :: cal_Coulomb () {
__global__ void coulomb(double * de, double * dfr,
double4 * dru, double * dqu,
double dx, double dy, double dz,
int nx, int ny, int nz, int natu);
__global__ void fk(double2 *, const double4 * __restrict__ ,
const double4 * __restrict__ , const double * __restrict__,
int);
__global__ void beta(double * de, double * dfr, double2 * dfk, double ubeta);
cout << "synthesizing solute Coulomb potential ..." << endl;
cudaMalloc(&de, ce -> ngrid * sizeof(double));
cudaMalloc(&dfr, ce -> ngrid * sizeof(double));
cudaMalloc(&dfk, ce -> ngrid * sizeof(double2));
cudaMemset(de, 0.0, ce -> ngrid * sizeof(double));
cudaMemset(dfr, 0.0, ce -> ngrid * sizeof(double));
cudaMemset(dfk, 0.0, ce -> ngrid * sizeof(double2));
coulomb <<< g, b >>> (de, dfr, su -> dr, su -> dq,
ce -> dr[0], ce -> dr[1], ce -> dr[2],
ce -> grid[0], ce -> grid[1], ce -> grid[2], su -> num);
fk <<< g, b >>> (dfk, dgv, su -> dr, su -> dq, su -> num);
double ubeta = hartree * bohr / (boltzmann * sv -> temper);
beta <<< g, b >>> (de, dfr, dfk, ubeta);
}
__global__ void coulomb(double * de, double * dfr,
double4 * dru, double * dqu,
double bx, double by, double bz,
int nx, int ny, int nz, int natu) {
unsigned int ip = threadIdx.x + blockIdx.x * blockDim.x
+ blockIdx.y * blockDim.x * gridDim.x;
double rx = ((int)threadIdx.x - nx / 2) * bx;
double ry = ((int)blockIdx.x - ny / 2) * by;
double rz = ((int)blockIdx.y - nz / 2) * bz;
for (int iu = 0; iu < natu; ++iu) {
double delx = rx - dru[iu].x;
double dely = ry - dru[iu].y;
double delz = rz - dru[iu].z;
double ra = sqrt(delx * delx + dely * dely + delz * delz) ;
if (ra >= 1.0e-5) {
double qr = dqu[iu] / ra ;
de[ip] += qr ;
dfr[ip] += qr * (1 - exp(- ra)) ;
} else {
dfr[ip] += dqu[iu] ;
}
}
}
__global__ void fk(double2 * dfk, const double4 * __restrict__ dgv,
const double4 * __restrict__ dru,
const double * __restrict__ dqu, int natu) {
unsigned int ip = threadIdx.x + blockIdx.x * blockDim.x
+ blockIdx.y * blockDim.x * gridDim.x;
double rk2 = dgv[ip].x * dgv[ip].x
+ dgv[ip].y * dgv[ip].y + dgv[ip].z * dgv[ip].z;
double rk4i = 1.0 / (rk2 * (rk2 + 1.0));
for (int iu = 0; iu < natu; ++iu) {
double ruk = dgv[ip].x * dru[iu].x
+ dgv[ip].y * dru[iu].y + dgv[ip].z * dru[iu].z;
double tmp = 4.0 * M_PI * dqu[iu] * rk4i;
dfk[ip].x += tmp * cos(ruk);
dfk[ip].y -= tmp * sin(ruk);
}
}
__global__ void beta(double * de, double * dfr, double2 * dfk, double ubeta) {
unsigned int ip = threadIdx.x + blockIdx.x * blockDim.x
+ blockIdx.y * blockDim.x * gridDim.x;
de[ip] *= ubeta;
dfr[ip] *= ubeta;
dfk[ip].x *= ubeta;
dfk[ip].y *= ubeta;
}
|
8d18e7be7bd544d4556506bc7173cde814620521.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#define BLOCK_SIZE 16
#define GRID_SIZE 160
#define SIZE BLOCK_SIZE*BLOCK_SIZE*GRID_SIZE*GRID_SIZE
__constant__ int const_width;
void checkresult(float *ref, float *in, float *out, float *mul, int width){
for(int i = 0 ; i < GRID_SIZE; i++){
for(int j = 0; j < GRID_SIZE; j++){
float sum = 0.0f;
int start = j * BLOCK_SIZE * width + i * BLOCK_SIZE;
for(int ii = 0; ii < BLOCK_SIZE; ii++){
for(int jj = 0; jj < BLOCK_SIZE; jj++){
sum += in[start + ii * width + jj] * mul[jj];
}
}
for(int ii = 0; ii < BLOCK_SIZE; ii++){
for(int jj = 0; jj < BLOCK_SIZE; jj++){
if(jj % 2 == 0 && ii % 2 == 0)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 2.0 * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum;
else if(jj % 2 == 1 && ii % 2 == 0)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum;
else if(jj % 2 == 1 && ii % 2 == 1)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = (-1.0) * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum;
else
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 0.0f;
}
}
}
}
for(int i = 0; i < SIZE; i++){
if(abs(ref[i]-out[i]) > 1.e-6){
printf("results checking failed at %d ref %f out %f\n", i, ref[i], out[i]);
return;
}
}
printf("results checking passed!\n");
}
/* Optimization 4: Remove the function parameter width, constant used instead */
__global__ void norm(float *in, float *out, float *mul){
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if(tx >= const_width || ty >= SIZE/const_width) return;
int start = blockIdx.x * blockDim.x * const_width + blockIdx.y * blockDim.y;
float sum = 0.0f;
for(int i = 0; i < BLOCK_SIZE; i++){
for(int j = 0; j < BLOCK_SIZE; j++){
sum += in[start + i * const_width + j] * mul[j];
}
}
if(tx % 2 == 0 && ty % 2 == 0)
out[tx * const_width + ty] = 2.0 * in[tx * const_width + ty]/sum;
else if(tx % 2 == 1 && ty % 2 == 0)
out[tx * const_width + ty] = in[tx * const_width + ty]/sum;
else if(tx % 2 == 1 && ty % 2 == 1)
out[tx * const_width + ty] = (-1.0) * in[tx * const_width + ty]/sum;
else
out[tx * const_width + ty] = 0.0f;
}
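/* Note on Optimization 4: width now lives in __constant__ memory (const_width), which is cached
   and broadcast when all threads of a warp read the same value, so it no longer has to be passed
   as a kernel argument. Each thread still recomputes the same BLOCK_SIZE x BLOCK_SIZE weighted
   tile sum before scaling its own element by the parity pattern in the kernel above. */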
int main(){
float *hA_in = (float *)malloc(SIZE * sizeof(float));
float *hA_out = (float *)malloc(SIZE * sizeof(float));
float *hB_in = (float *)malloc(BLOCK_SIZE * sizeof(float));
float *ref = (float *)malloc(SIZE * sizeof(float));
float *dA_in, *dA_out, *dB_in;
int width = BLOCK_SIZE * GRID_SIZE;
srand(2016);
for(int i = 0; i < SIZE; i++){
hA_in[i] = (float)rand()/(float)RAND_MAX;
}
for(int i = 0; i < BLOCK_SIZE; i++){
hB_in[i] = (float)rand()/(float)RAND_MAX;
}
hipMalloc((void **)&dA_in, SIZE * sizeof(float));
hipMalloc((void **)&dA_out, SIZE * sizeof(float));
hipMalloc((void **)&dB_in, BLOCK_SIZE * sizeof(float));
hipMemcpy(dA_in, hA_in, SIZE * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dB_in, hB_in, BLOCK_SIZE * sizeof(float), hipMemcpyHostToDevice);
hipMemcpyToSymbol(const_width, &width, sizeof(int));
struct timespec start, end;
dim3 grid(GRID_SIZE, GRID_SIZE, 1);
dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1);
hipDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, &start);
hipLaunchKernelGGL(( norm), dim3(grid), dim3(block), 0, 0, dA_in, dA_out, dB_in);
hipDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, &end);
printf("kernel time %fs\n", end.tv_sec - start.tv_sec + (end.tv_nsec - start.tv_nsec)/1.e9);
hipMemcpy(hA_out, dA_out, SIZE * sizeof(float), hipMemcpyDeviceToHost);
checkresult(ref, hA_in, hA_out, hB_in, BLOCK_SIZE * GRID_SIZE);
}
| 8d18e7be7bd544d4556506bc7173cde814620521.cu | #include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#define BLOCK_SIZE 16
#define GRID_SIZE 160
#define SIZE BLOCK_SIZE*BLOCK_SIZE*GRID_SIZE*GRID_SIZE
__constant__ int const_width;
void checkresult(float *ref, float *in, float *out, float *mul, int width){
for(int i = 0 ; i < GRID_SIZE; i++){
for(int j = 0; j < GRID_SIZE; j++){
float sum = 0.0f;
int start = j * BLOCK_SIZE * width + i * BLOCK_SIZE;
for(int ii = 0; ii < BLOCK_SIZE; ii++){
for(int jj = 0; jj < BLOCK_SIZE; jj++){
sum += in[start + ii * width + jj] * mul[jj];
}
}
for(int ii = 0; ii < BLOCK_SIZE; ii++){
for(int jj = 0; jj < BLOCK_SIZE; jj++){
if(jj % 2 == 0 && ii % 2 == 0)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 2.0 * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum;
else if(jj % 2 == 1 && ii % 2 == 0)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum;
else if(jj % 2 == 1 && ii % 2 == 1)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = (-1.0) * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum;
else
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 0.0f;
}
}
}
}
for(int i = 0; i < SIZE; i++){
if(abs(ref[i]-out[i]) > 1.e-6){
printf("results checking failed at %d ref %f out %f\n", i, ref[i], out[i]);
return;
}
}
printf("results checking passed!\n");
}
/* Optimization 4: Remove the function parameter width, constant used instead */
__global__ void norm(float *in, float *out, float *mul){
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if(tx >= const_width || ty >= SIZE/const_width) return;
int start = blockIdx.x * blockDim.x * const_width + blockIdx.y * blockDim.y;
float sum = 0.0f;
for(int i = 0; i < BLOCK_SIZE; i++){
for(int j = 0; j < BLOCK_SIZE; j++){
sum += in[start + i * const_width + j] * mul[j];
}
}
if(tx % 2 == 0 && ty % 2 == 0)
out[tx * const_width + ty] = 2.0 * in[tx * const_width + ty]/sum;
else if(tx % 2 == 1 && ty % 2 == 0)
out[tx * const_width + ty] = in[tx * const_width + ty]/sum;
else if(tx % 2 == 1 && ty % 2 == 1)
out[tx * const_width + ty] = (-1.0) * in[tx * const_width + ty]/sum;
else
out[tx * const_width + ty] = 0.0f;
}
int main(){
float *hA_in = (float *)malloc(SIZE * sizeof(float));
float *hA_out = (float *)malloc(SIZE * sizeof(float));
float *hB_in = (float *)malloc(BLOCK_SIZE * sizeof(float));
float *ref = (float *)malloc(SIZE * sizeof(float));
float *dA_in, *dA_out, *dB_in;
int width = BLOCK_SIZE * GRID_SIZE;
srand(2016);
for(int i = 0; i < SIZE; i++){
hA_in[i] = (float)rand()/(float)RAND_MAX;
}
for(int i = 0; i < BLOCK_SIZE; i++){
hB_in[i] = (float)rand()/(float)RAND_MAX;
}
cudaMalloc((void **)&dA_in, SIZE * sizeof(float));
cudaMalloc((void **)&dA_out, SIZE * sizeof(float));
cudaMalloc((void **)&dB_in, BLOCK_SIZE * sizeof(float));
cudaMemcpy(dA_in, hA_in, SIZE * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dB_in, hB_in, BLOCK_SIZE * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(const_width, &width, sizeof(int));
struct timespec start, end;
dim3 grid(GRID_SIZE, GRID_SIZE, 1);
dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1);
cudaDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, &start);
norm<<<grid, block>>>(dA_in, dA_out, dB_in);
cudaDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, &end);
printf("kernel time %fs\n", end.tv_sec - start.tv_sec + (end.tv_nsec - start.tv_nsec)/1.e9);
cudaMemcpy(hA_out, dA_out, SIZE * sizeof(float), cudaMemcpyDeviceToHost);
checkresult(ref, hA_in, hA_out, hB_in, BLOCK_SIZE * GRID_SIZE);
}
|
377022fb54bd9003d637d28b1efaf4f2c5f46461.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_drhoEpudx_kernel;
int xdim0_drhoEpudx_kernel_h = -1;
int ydim0_drhoEpudx_kernel_h = -1;
__constant__ int xdim1_drhoEpudx_kernel;
int xdim1_drhoEpudx_kernel_h = -1;
int ydim1_drhoEpudx_kernel_h = -1;
__constant__ int xdim2_drhoEpudx_kernel;
int xdim2_drhoEpudx_kernel_h = -1;
int ydim2_drhoEpudx_kernel_h = -1;
__constant__ int xdim3_drhoEpudx_kernel;
int xdim3_drhoEpudx_kernel_h = -1;
int ydim3_drhoEpudx_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#define OPS_ACC0(x) (x)
#define OPS_ACC1(x) (x)
#define OPS_ACC2(x) (x)
#define OPS_ACC3(x) (x)
// user function
__device__
void
drhoEpudx_kernel(const double *rhou_new, const double *rho_new,
const double *rhoE_new, double *rhoE_res) {
double fni =
rhou_new[OPS_ACC0(0)] * rhou_new[OPS_ACC0(0)] / rho_new[OPS_ACC1(0)];
double p = gam1 * (rhoE_new[OPS_ACC2(0)] - 0.5 * fni);
fni = (rhoE_new[OPS_ACC2(0)] + p) * rhou_new[OPS_ACC0(0)] /
rho_new[OPS_ACC1(0)];
double fnim1 =
rhou_new[OPS_ACC0(-1)] * rhou_new[OPS_ACC0(-1)] / rho_new[OPS_ACC1(-1)];
p = gam1 * (rhoE_new[OPS_ACC2(-1)] - 0.5 * fnim1);
fnim1 = (rhoE_new[OPS_ACC2(-1)] + p) * rhou_new[OPS_ACC0(-1)] /
rho_new[OPS_ACC1(-1)];
double fnim2 =
rhou_new[OPS_ACC0(-2)] * rhou_new[OPS_ACC0(-2)] / rho_new[OPS_ACC1(-2)];
p = gam1 * (rhoE_new[OPS_ACC2(-2)] - 0.5 * fnim2);
fnim2 = (rhoE_new[OPS_ACC2(-2)] + p) * rhou_new[OPS_ACC0(-2)] /
rho_new[OPS_ACC1(-2)];
double fnip1 =
rhou_new[OPS_ACC0(1)] * rhou_new[OPS_ACC0(1)] / rho_new[OPS_ACC1(1)];
p = gam1 * (rhoE_new[OPS_ACC2(1)] - 0.5 * fnip1);
fnip1 = (rhoE_new[OPS_ACC2(1)] + p) * rhou_new[OPS_ACC0(1)] /
rho_new[OPS_ACC1(1)];
double fnip2 =
rhou_new[OPS_ACC0(2)] * rhou_new[OPS_ACC0(2)] / rho_new[OPS_ACC1(2)];
p = gam1 * (rhoE_new[OPS_ACC2(2)] - 0.5 * fnip2);
fnip2 = (rhoE_new[OPS_ACC2(2)] + p) * rhou_new[OPS_ACC0(2)] /
rho_new[OPS_ACC1(2)];
double deriv = (fnim2 - fnip2 + 8.0 * (fnip1 - fnim1)) / (12.00 * dx);
rhoE_res[OPS_ACC3(0)] = deriv;
}
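// note: deriv is the standard fourth-order central difference
//   df/dx at i ~ (-f[i+2] + 8*f[i+1] - 8*f[i-1] + f[i-2]) / (12*dx)
// applied to the energy flux f = (rhoE_new + p) * rhou_new / rho_new, with the pressure recovered
// from p = gam1 * (rhoE_new - 0.5 * rhou_new^2 / rho_new) at each of the five stencil points.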
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
__global__ void ops_drhoEpudx_kernel(const double *__restrict arg0,
const double *__restrict arg1,
const double *__restrict arg2,
double *__restrict arg3, int size0) {
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1;
arg1 += idx_x * 1 * 1;
arg2 += idx_x * 1 * 1;
arg3 += idx_x * 1 * 1;
if (idx_x < size0) {
drhoEpudx_kernel(arg0, arg1, arg2, arg3);
}
}
// host stub function
void ops_par_loop_drhoEpudx_kernel(char const *name, ops_block block, int dim,
int *range, ops_arg arg0, ops_arg arg1,
ops_arg arg2, ops_arg arg3) {
// Timing
double t1, t2, c1, c2;
ops_arg args[4] = {arg0, arg1, arg2, arg3};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 4, range, 5))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(5, "drhoEpudx_kernel");
OPS_kernels[5].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[1];
int end[1];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 1; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 1; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
if (xdim0 != xdim0_drhoEpudx_kernel_h || xdim1 != xdim1_drhoEpudx_kernel_h ||
xdim2 != xdim2_drhoEpudx_kernel_h || xdim3 != xdim3_drhoEpudx_kernel_h) {
hipMemcpyToSymbol(xdim0_drhoEpudx_kernel, &xdim0, sizeof(int));
xdim0_drhoEpudx_kernel_h = xdim0;
hipMemcpyToSymbol(xdim1_drhoEpudx_kernel, &xdim1, sizeof(int));
xdim1_drhoEpudx_kernel_h = xdim1;
hipMemcpyToSymbol(xdim2_drhoEpudx_kernel, &xdim2, sizeof(int));
xdim2_drhoEpudx_kernel_h = xdim2;
hipMemcpyToSymbol(xdim3_drhoEpudx_kernel, &xdim3, sizeof(int));
xdim3_drhoEpudx_kernel_h = xdim3;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1, 1, 1);
dim3 tblock(OPS_block_size_x, 1, 1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
char *p_a[4];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[3].dat->d_m[d];
#endif
int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] -
args[3].dat->base[0] - d_m[0]);
p_a[3] = (char *)args[3].data_d + base3;
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args, 4, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[5].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_drhoEpudx_kernel), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
x_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[5].time += t1 - t2;
}
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[3], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[5].mpi_time += t2 - t1;
OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg3);
}
}
| 377022fb54bd9003d637d28b1efaf4f2c5f46461.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_drhoEpudx_kernel;
int xdim0_drhoEpudx_kernel_h = -1;
int ydim0_drhoEpudx_kernel_h = -1;
__constant__ int xdim1_drhoEpudx_kernel;
int xdim1_drhoEpudx_kernel_h = -1;
int ydim1_drhoEpudx_kernel_h = -1;
__constant__ int xdim2_drhoEpudx_kernel;
int xdim2_drhoEpudx_kernel_h = -1;
int ydim2_drhoEpudx_kernel_h = -1;
__constant__ int xdim3_drhoEpudx_kernel;
int xdim3_drhoEpudx_kernel_h = -1;
int ydim3_drhoEpudx_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#define OPS_ACC0(x) (x)
#define OPS_ACC1(x) (x)
#define OPS_ACC2(x) (x)
#define OPS_ACC3(x) (x)
// user function
__device__
void
drhoEpudx_kernel(const double *rhou_new, const double *rho_new,
const double *rhoE_new, double *rhoE_res) {
double fni =
rhou_new[OPS_ACC0(0)] * rhou_new[OPS_ACC0(0)] / rho_new[OPS_ACC1(0)];
double p = gam1 * (rhoE_new[OPS_ACC2(0)] - 0.5 * fni);
fni = (rhoE_new[OPS_ACC2(0)] + p) * rhou_new[OPS_ACC0(0)] /
rho_new[OPS_ACC1(0)];
double fnim1 =
rhou_new[OPS_ACC0(-1)] * rhou_new[OPS_ACC0(-1)] / rho_new[OPS_ACC1(-1)];
p = gam1 * (rhoE_new[OPS_ACC2(-1)] - 0.5 * fnim1);
fnim1 = (rhoE_new[OPS_ACC2(-1)] + p) * rhou_new[OPS_ACC0(-1)] /
rho_new[OPS_ACC1(-1)];
double fnim2 =
rhou_new[OPS_ACC0(-2)] * rhou_new[OPS_ACC0(-2)] / rho_new[OPS_ACC1(-2)];
p = gam1 * (rhoE_new[OPS_ACC2(-2)] - 0.5 * fnim2);
fnim2 = (rhoE_new[OPS_ACC2(-2)] + p) * rhou_new[OPS_ACC0(-2)] /
rho_new[OPS_ACC1(-2)];
double fnip1 =
rhou_new[OPS_ACC0(1)] * rhou_new[OPS_ACC0(1)] / rho_new[OPS_ACC1(1)];
p = gam1 * (rhoE_new[OPS_ACC2(1)] - 0.5 * fnip1);
fnip1 = (rhoE_new[OPS_ACC2(1)] + p) * rhou_new[OPS_ACC0(1)] /
rho_new[OPS_ACC1(1)];
double fnip2 =
rhou_new[OPS_ACC0(2)] * rhou_new[OPS_ACC0(2)] / rho_new[OPS_ACC1(2)];
p = gam1 * (rhoE_new[OPS_ACC2(2)] - 0.5 * fnip2);
fnip2 = (rhoE_new[OPS_ACC2(2)] + p) * rhou_new[OPS_ACC0(2)] /
rho_new[OPS_ACC1(2)];
double deriv = (fnim2 - fnip2 + 8.0 * (fnip1 - fnim1)) / (12.00 * dx);
rhoE_res[OPS_ACC3(0)] = deriv;
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
__global__ void ops_drhoEpudx_kernel(const double *__restrict arg0,
const double *__restrict arg1,
const double *__restrict arg2,
double *__restrict arg3, int size0) {
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1;
arg1 += idx_x * 1 * 1;
arg2 += idx_x * 1 * 1;
arg3 += idx_x * 1 * 1;
if (idx_x < size0) {
drhoEpudx_kernel(arg0, arg1, arg2, arg3);
}
}
// host stub function
void ops_par_loop_drhoEpudx_kernel(char const *name, ops_block block, int dim,
int *range, ops_arg arg0, ops_arg arg1,
ops_arg arg2, ops_arg arg3) {
// Timing
double t1, t2, c1, c2;
ops_arg args[4] = {arg0, arg1, arg2, arg3};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 4, range, 5))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(5, "drhoEpudx_kernel");
OPS_kernels[5].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[1];
int end[1];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 1; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 1; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
if (xdim0 != xdim0_drhoEpudx_kernel_h || xdim1 != xdim1_drhoEpudx_kernel_h ||
xdim2 != xdim2_drhoEpudx_kernel_h || xdim3 != xdim3_drhoEpudx_kernel_h) {
cudaMemcpyToSymbol(xdim0_drhoEpudx_kernel, &xdim0, sizeof(int));
xdim0_drhoEpudx_kernel_h = xdim0;
cudaMemcpyToSymbol(xdim1_drhoEpudx_kernel, &xdim1, sizeof(int));
xdim1_drhoEpudx_kernel_h = xdim1;
cudaMemcpyToSymbol(xdim2_drhoEpudx_kernel, &xdim2, sizeof(int));
xdim2_drhoEpudx_kernel_h = xdim2;
cudaMemcpyToSymbol(xdim3_drhoEpudx_kernel, &xdim3, sizeof(int));
xdim3_drhoEpudx_kernel_h = xdim3;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1, 1, 1);
dim3 tblock(OPS_block_size_x, 1, 1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
char *p_a[4];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[3].dat->d_m[d];
#endif
int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] -
args[3].dat->base[0] - d_m[0]);
p_a[3] = (char *)args[3].data_d + base3;
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args, 4, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[5].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_drhoEpudx_kernel<<<grid, tblock>>>((double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
x_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[5].time += t1 - t2;
}
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[3], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[5].mpi_time += t2 - t1;
OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg3);
}
}
|
8c6b20038a431df60c0652b1b357e2084554de5f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "neuron_layer.hpp"
#include "prelu_layer.hpp"
namespace caffe {
// CUDA kernel for forward
template <typename Dtype>
__global__ void PReLUForward(const int n, const int channels, const int dim,
const Dtype* in, Dtype* out, const Dtype* slope_data,
const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];
}
}
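// Illustrative note (derived from the kernel above, not an addition to its
// behaviour): this computes PReLU, f(x) = x for x > 0 and f(x) = a_c * x
// otherwise. For an (N, C, H, W) blob, dim == H * W, so an element's channel
// is (index / dim) % channels; when the slope is shared across channels,
// div_factor == channels and c collapses to 0, i.e. a single learned slope.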
template <typename Dtype>
void PReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
const int div_factor = channel_shared_ ? channels : 1;
// For in-place computation
if (top[0] == bottom[0]) {
caffe_copy(count, bottom_data, bottom_memory_.mutable_gpu_data());
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( PReLUForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, channels, dim, bottom_data, top_data, slope_data, div_factor);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(PReLULayer);
} // namespace caffe
| 8c6b20038a431df60c0652b1b357e2084554de5f.cu | #include <algorithm>
#include <vector>
#include "neuron_layer.hpp"
#include "prelu_layer.hpp"
namespace caffe {
// CUDA kernel for forward
template <typename Dtype>
__global__ void PReLUForward(const int n, const int channels, const int dim,
const Dtype* in, Dtype* out, const Dtype* slope_data,
const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];
}
}
template <typename Dtype>
void PReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
const int div_factor = channel_shared_ ? channels : 1;
// For in-place computation
if (top[0] == bottom[0]) {
caffe_copy(count, bottom_data, bottom_memory_.mutable_gpu_data());
}
// NOLINT_NEXT_LINE(whitespace/operators)
PReLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, channels, dim, bottom_data, top_data, slope_data, div_factor);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(PReLULayer);
} // namespace caffe
|
a01dc7e9e6ea9708afbe0a4c5578f63bbb05ee3e.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2018-2023 by XGBoost Contributors
* \author Rory Mitchell
*/
#include <thrust/execution_policy.h>
#include <thrust/inner_product.h>
#include <xgboost/data.h>
#include <xgboost/linear_updater.h>
#include "xgboost/span.h"
#include "coordinate_common.h"
#include "../common/common.h"
#include "../common/device_helpers.cuh"
#include "../common/timer.h"
#include "./param.h"
namespace xgboost {
namespace linear {
DMLC_REGISTRY_FILE_TAG(updater_gpu_coordinate);
/**
* \class GPUCoordinateUpdater
*
* \brief Coordinate descent algorithm that updates one feature per iteration
*/
class GPUCoordinateUpdater : public LinearUpdater { // NOLINT
public:
// set training parameter
void Configure(Args const& args) override {
tparam_.UpdateAllowUnknown(args);
coord_param_.UpdateAllowUnknown(args);
selector_.reset(FeatureSelector::Create(tparam_.feature_selector));
monitor_.Init("GPUCoordinateUpdater");
}
void LoadConfig(Json const& in) override {
auto const& config = get<Object const>(in);
FromJson(config.at("linear_train_param"), &tparam_);
FromJson(config.at("coordinate_param"), &coord_param_);
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["linear_train_param"] = ToJson(tparam_);
out["coordinate_param"] = ToJson(coord_param_);
}
void LazyInitDevice(DMatrix *p_fmat, const LearnerModelParam &model_param) {
if (ctx_->gpu_id < 0) return;
num_row_ = static_cast<size_t>(p_fmat->Info().num_row_);
CHECK(p_fmat->SingleColBlock());
SparsePage const &batch = *(p_fmat->GetBatches<CSCPage>(ctx_).begin());
auto page = batch.GetView();
if (IsEmpty()) {
return;
}
dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
// The begin and end indices for the section of each column associated with
// this device
std::vector<std::pair<bst_uint, bst_uint>> column_segments;
row_ptr_ = {0};
// iterate through columns
for (size_t fidx = 0; fidx < batch.Size(); fidx++) {
common::Span<Entry const> col = page[fidx];
auto cmp = [](Entry e1, Entry e2) {
return e1.index < e2.index;
};
auto column_begin =
std::lower_bound(col.cbegin(), col.cend(),
xgboost::Entry(0, 0.0f), cmp);
auto column_end =
std::lower_bound(col.cbegin(), col.cend(),
xgboost::Entry(num_row_, 0.0f), cmp);
column_segments.emplace_back(static_cast<bst_uint>(column_begin - col.cbegin()),
static_cast<bst_uint>(column_end - col.cbegin()));
row_ptr_.push_back(row_ptr_.back() + (column_end - column_begin));
}
data_.resize(row_ptr_.back());
gpair_.resize(num_row_ * model_param.num_output_group);
for (size_t fidx = 0; fidx < batch.Size(); fidx++) {
auto col = page[fidx];
auto seg = column_segments[fidx];
dh::safe_cuda(hipMemcpy(
data_.data().get() + row_ptr_[fidx],
col.data() + seg.first,
sizeof(Entry) * (seg.second - seg.first), hipMemcpyHostToDevice));
}
}
void Update(HostDeviceVector<GradientPair> *in_gpair, DMatrix *p_fmat,
gbm::GBLinearModel *model, double sum_instance_weight) override {
tparam_.DenormalizePenalties(sum_instance_weight);
monitor_.Start("LazyInitDevice");
this->LazyInitDevice(p_fmat, *(model->learner_model_param));
monitor_.Stop("LazyInitDevice");
monitor_.Start("UpdateGpair");
auto &in_gpair_host = in_gpair->ConstHostVector();
// Update gpair
if (ctx_->gpu_id >= 0) {
this->UpdateGpair(in_gpair_host);
}
monitor_.Stop("UpdateGpair");
monitor_.Start("UpdateBias");
this->UpdateBias(model);
monitor_.Stop("UpdateBias");
// prepare for updating the weights
selector_->Setup(ctx_, *model, in_gpair->ConstHostVector(), p_fmat, tparam_.reg_alpha_denorm,
tparam_.reg_lambda_denorm, coord_param_.top_k);
monitor_.Start("UpdateFeature");
for (uint32_t group_idx = 0; group_idx < model->learner_model_param->num_output_group;
++group_idx) {
for (auto i = 0U; i < model->learner_model_param->num_feature; i++) {
auto fidx =
selector_->NextFeature(ctx_, i, *model, group_idx, in_gpair->ConstHostVector(), p_fmat,
tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm);
if (fidx < 0) break;
this->UpdateFeature(fidx, group_idx, model);
}
}
monitor_.Stop("UpdateFeature");
}
void UpdateBias(gbm::GBLinearModel *model) {
for (uint32_t group_idx = 0; group_idx < model->learner_model_param->num_output_group;
++group_idx) {
// Get gradient
auto grad = GradientPair(0, 0);
if (ctx_->gpu_id >= 0) {
grad = GetBiasGradient(group_idx, model->learner_model_param->num_output_group);
}
auto dbias = static_cast<float>(
tparam_.learning_rate *
CoordinateDeltaBias(grad.GetGrad(), grad.GetHess()));
model->Bias()[group_idx] += dbias;
// Update residual
if (ctx_->gpu_id >= 0) {
UpdateBiasResidual(dbias, group_idx, model->learner_model_param->num_output_group);
}
}
}
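  // Illustrative note (hedged; based on the call sites above rather than on
  // the definition of CoordinateDeltaBias in coordinate_common.h): the bias
  // update is expected to be a Newton step, roughly
  //   dbias = learning_rate * (-sum_grad / sum_hess),
  // after which UpdateBiasResidual() shifts every row's gradient component by
  // hess * dbias so later coordinate updates see the refreshed residuals.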
void UpdateFeature(int fidx, int group_idx,
gbm::GBLinearModel *model) {
bst_float &w = (*model)[fidx][group_idx];
// Get gradient
auto grad = GradientPair(0, 0);
if (ctx_->gpu_id >= 0) {
grad = GetGradient(group_idx, model->learner_model_param->num_output_group, fidx);
}
auto dw = static_cast<float>(tparam_.learning_rate *
CoordinateDelta(grad.GetGrad(), grad.GetHess(),
w, tparam_.reg_alpha_denorm,
tparam_.reg_lambda_denorm));
w += dw;
if (ctx_->gpu_id >= 0) {
UpdateResidual(dw, group_idx, model->learner_model_param->num_output_group, fidx);
}
}
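  // Illustrative note (hedged, inferred from the arguments passed above):
  // CoordinateDelta() performs the single-coordinate elastic-net update for
  // weight w, combining a Newton step -grad / hess with the L1
  // (reg_alpha_denorm) and L2 (reg_lambda_denorm) penalties; UpdateResidual()
  // then folds dw back into the per-row gradients.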
// This needs to be public because of the __device__ lambda.
GradientPair GetBiasGradient(int group_idx, int num_group) {
dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
auto counting = thrust::make_counting_iterator(0ull);
auto f = [=] __device__(size_t idx) {
return idx * num_group + group_idx;
}; // NOLINT
thrust::transform_iterator<decltype(f), decltype(counting), size_t> skip(
counting, f);
auto perm = thrust::make_permutation_iterator(gpair_.data(), skip);
return dh::SumReduction(perm, num_row_);
}
// This needs to be public because of the __device__ lambda.
void UpdateBiasResidual(float dbias, int group_idx, int num_groups) {
if (dbias == 0.0f) return;
auto d_gpair = dh::ToSpan(gpair_);
dh::LaunchN(num_row_, [=] __device__(size_t idx) {
auto &g = d_gpair[idx * num_groups + group_idx];
g += GradientPair(g.GetHess() * dbias, 0);
});
}
// This needs to be public because of the __device__ lambda.
GradientPair GetGradient(int group_idx, int num_group, int fidx) {
dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
common::Span<xgboost::Entry> d_col = dh::ToSpan(data_).subspan(row_ptr_[fidx]);
size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx];
common::Span<GradientPair> d_gpair = dh::ToSpan(gpair_);
auto counting = thrust::make_counting_iterator(0ull);
auto f = [=] __device__(size_t idx) {
auto entry = d_col[idx];
auto g = d_gpair[entry.index * num_group + group_idx];
return GradientPair{g.GetGrad() * entry.fvalue, g.GetHess() * entry.fvalue * entry.fvalue};
}; // NOLINT
thrust::transform_iterator<decltype(f), decltype(counting), GradientPair>
multiply_iterator(counting, f);
return dh::SumReduction(multiply_iterator, col_size);
}
// This needs to be public because of the __device__ lambda.
void UpdateResidual(float dw, int group_idx, int num_groups, int fidx) {
common::Span<GradientPair> d_gpair = dh::ToSpan(gpair_);
common::Span<Entry> d_col = dh::ToSpan(data_).subspan(row_ptr_[fidx]);
size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx];
dh::LaunchN(col_size, [=] __device__(size_t idx) {
auto entry = d_col[idx];
auto &g = d_gpair[entry.index * num_groups + group_idx];
g += GradientPair(g.GetHess() * dw * entry.fvalue, 0);
});
}
private:
bool IsEmpty() {
return num_row_ == 0;
}
void UpdateGpair(const std::vector<GradientPair> &host_gpair) {
dh::safe_cuda(hipMemcpyAsync(
gpair_.data().get(),
host_gpair.data(),
gpair_.size() * sizeof(GradientPair), hipMemcpyHostToDevice));
}
// training parameter
LinearTrainParam tparam_;
CoordinateParam coord_param_;
std::unique_ptr<FeatureSelector> selector_;
common::Monitor monitor_;
std::vector<size_t> row_ptr_;
dh::device_vector<xgboost::Entry> data_;
dh::caching_device_vector<GradientPair> gpair_;
size_t num_row_;
};
XGBOOST_REGISTER_LINEAR_UPDATER(GPUCoordinateUpdater, "gpu_coord_descent")
.describe(
"Update linear model according to coordinate descent algorithm. GPU "
"accelerated.")
.set_body([]() { return new GPUCoordinateUpdater(); });
} // namespace linear
} // namespace xgboost
| a01dc7e9e6ea9708afbe0a4c5578f63bbb05ee3e.cu | /**
* Copyright 2018-2023 by XGBoost Contributors
* \author Rory Mitchell
*/
#include <thrust/execution_policy.h>
#include <thrust/inner_product.h>
#include <xgboost/data.h>
#include <xgboost/linear_updater.h>
#include "xgboost/span.h"
#include "coordinate_common.h"
#include "../common/common.h"
#include "../common/device_helpers.cuh"
#include "../common/timer.h"
#include "./param.h"
namespace xgboost {
namespace linear {
DMLC_REGISTRY_FILE_TAG(updater_gpu_coordinate);
/**
* \class GPUCoordinateUpdater
*
* \brief Coordinate descent algorithm that updates one feature per iteration
*/
class GPUCoordinateUpdater : public LinearUpdater { // NOLINT
public:
// set training parameter
void Configure(Args const& args) override {
tparam_.UpdateAllowUnknown(args);
coord_param_.UpdateAllowUnknown(args);
selector_.reset(FeatureSelector::Create(tparam_.feature_selector));
monitor_.Init("GPUCoordinateUpdater");
}
void LoadConfig(Json const& in) override {
auto const& config = get<Object const>(in);
FromJson(config.at("linear_train_param"), &tparam_);
FromJson(config.at("coordinate_param"), &coord_param_);
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["linear_train_param"] = ToJson(tparam_);
out["coordinate_param"] = ToJson(coord_param_);
}
void LazyInitDevice(DMatrix *p_fmat, const LearnerModelParam &model_param) {
if (ctx_->gpu_id < 0) return;
num_row_ = static_cast<size_t>(p_fmat->Info().num_row_);
CHECK(p_fmat->SingleColBlock());
SparsePage const &batch = *(p_fmat->GetBatches<CSCPage>(ctx_).begin());
auto page = batch.GetView();
if (IsEmpty()) {
return;
}
dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
// The begin and end indices for the section of each column associated with
// this device
std::vector<std::pair<bst_uint, bst_uint>> column_segments;
row_ptr_ = {0};
// iterate through columns
for (size_t fidx = 0; fidx < batch.Size(); fidx++) {
common::Span<Entry const> col = page[fidx];
auto cmp = [](Entry e1, Entry e2) {
return e1.index < e2.index;
};
auto column_begin =
std::lower_bound(col.cbegin(), col.cend(),
xgboost::Entry(0, 0.0f), cmp);
auto column_end =
std::lower_bound(col.cbegin(), col.cend(),
xgboost::Entry(num_row_, 0.0f), cmp);
column_segments.emplace_back(static_cast<bst_uint>(column_begin - col.cbegin()),
static_cast<bst_uint>(column_end - col.cbegin()));
row_ptr_.push_back(row_ptr_.back() + (column_end - column_begin));
}
data_.resize(row_ptr_.back());
gpair_.resize(num_row_ * model_param.num_output_group);
for (size_t fidx = 0; fidx < batch.Size(); fidx++) {
auto col = page[fidx];
auto seg = column_segments[fidx];
dh::safe_cuda(cudaMemcpy(
data_.data().get() + row_ptr_[fidx],
col.data() + seg.first,
sizeof(Entry) * (seg.second - seg.first), cudaMemcpyHostToDevice));
}
}
void Update(HostDeviceVector<GradientPair> *in_gpair, DMatrix *p_fmat,
gbm::GBLinearModel *model, double sum_instance_weight) override {
tparam_.DenormalizePenalties(sum_instance_weight);
monitor_.Start("LazyInitDevice");
this->LazyInitDevice(p_fmat, *(model->learner_model_param));
monitor_.Stop("LazyInitDevice");
monitor_.Start("UpdateGpair");
auto &in_gpair_host = in_gpair->ConstHostVector();
// Update gpair
if (ctx_->gpu_id >= 0) {
this->UpdateGpair(in_gpair_host);
}
monitor_.Stop("UpdateGpair");
monitor_.Start("UpdateBias");
this->UpdateBias(model);
monitor_.Stop("UpdateBias");
// prepare for updating the weights
selector_->Setup(ctx_, *model, in_gpair->ConstHostVector(), p_fmat, tparam_.reg_alpha_denorm,
tparam_.reg_lambda_denorm, coord_param_.top_k);
monitor_.Start("UpdateFeature");
for (uint32_t group_idx = 0; group_idx < model->learner_model_param->num_output_group;
++group_idx) {
for (auto i = 0U; i < model->learner_model_param->num_feature; i++) {
auto fidx =
selector_->NextFeature(ctx_, i, *model, group_idx, in_gpair->ConstHostVector(), p_fmat,
tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm);
if (fidx < 0) break;
this->UpdateFeature(fidx, group_idx, model);
}
}
monitor_.Stop("UpdateFeature");
}
void UpdateBias(gbm::GBLinearModel *model) {
for (uint32_t group_idx = 0; group_idx < model->learner_model_param->num_output_group;
++group_idx) {
// Get gradient
auto grad = GradientPair(0, 0);
if (ctx_->gpu_id >= 0) {
grad = GetBiasGradient(group_idx, model->learner_model_param->num_output_group);
}
auto dbias = static_cast<float>(
tparam_.learning_rate *
CoordinateDeltaBias(grad.GetGrad(), grad.GetHess()));
model->Bias()[group_idx] += dbias;
// Update residual
if (ctx_->gpu_id >= 0) {
UpdateBiasResidual(dbias, group_idx, model->learner_model_param->num_output_group);
}
}
}
void UpdateFeature(int fidx, int group_idx,
gbm::GBLinearModel *model) {
bst_float &w = (*model)[fidx][group_idx];
// Get gradient
auto grad = GradientPair(0, 0);
if (ctx_->gpu_id >= 0) {
grad = GetGradient(group_idx, model->learner_model_param->num_output_group, fidx);
}
auto dw = static_cast<float>(tparam_.learning_rate *
CoordinateDelta(grad.GetGrad(), grad.GetHess(),
w, tparam_.reg_alpha_denorm,
tparam_.reg_lambda_denorm));
w += dw;
if (ctx_->gpu_id >= 0) {
UpdateResidual(dw, group_idx, model->learner_model_param->num_output_group, fidx);
}
}
// This needs to be public because of the __device__ lambda.
GradientPair GetBiasGradient(int group_idx, int num_group) {
dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
auto counting = thrust::make_counting_iterator(0ull);
auto f = [=] __device__(size_t idx) {
return idx * num_group + group_idx;
}; // NOLINT
thrust::transform_iterator<decltype(f), decltype(counting), size_t> skip(
counting, f);
auto perm = thrust::make_permutation_iterator(gpair_.data(), skip);
return dh::SumReduction(perm, num_row_);
}
// This needs to be public because of the __device__ lambda.
void UpdateBiasResidual(float dbias, int group_idx, int num_groups) {
if (dbias == 0.0f) return;
auto d_gpair = dh::ToSpan(gpair_);
dh::LaunchN(num_row_, [=] __device__(size_t idx) {
auto &g = d_gpair[idx * num_groups + group_idx];
g += GradientPair(g.GetHess() * dbias, 0);
});
}
// This needs to be public because of the __device__ lambda.
GradientPair GetGradient(int group_idx, int num_group, int fidx) {
dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
common::Span<xgboost::Entry> d_col = dh::ToSpan(data_).subspan(row_ptr_[fidx]);
size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx];
common::Span<GradientPair> d_gpair = dh::ToSpan(gpair_);
auto counting = thrust::make_counting_iterator(0ull);
auto f = [=] __device__(size_t idx) {
auto entry = d_col[idx];
auto g = d_gpair[entry.index * num_group + group_idx];
return GradientPair{g.GetGrad() * entry.fvalue, g.GetHess() * entry.fvalue * entry.fvalue};
}; // NOLINT
thrust::transform_iterator<decltype(f), decltype(counting), GradientPair>
multiply_iterator(counting, f);
return dh::SumReduction(multiply_iterator, col_size);
}
// This needs to be public because of the __device__ lambda.
void UpdateResidual(float dw, int group_idx, int num_groups, int fidx) {
common::Span<GradientPair> d_gpair = dh::ToSpan(gpair_);
common::Span<Entry> d_col = dh::ToSpan(data_).subspan(row_ptr_[fidx]);
size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx];
dh::LaunchN(col_size, [=] __device__(size_t idx) {
auto entry = d_col[idx];
auto &g = d_gpair[entry.index * num_groups + group_idx];
g += GradientPair(g.GetHess() * dw * entry.fvalue, 0);
});
}
private:
bool IsEmpty() {
return num_row_ == 0;
}
void UpdateGpair(const std::vector<GradientPair> &host_gpair) {
dh::safe_cuda(cudaMemcpyAsync(
gpair_.data().get(),
host_gpair.data(),
gpair_.size() * sizeof(GradientPair), cudaMemcpyHostToDevice));
}
// training parameter
LinearTrainParam tparam_;
CoordinateParam coord_param_;
std::unique_ptr<FeatureSelector> selector_;
common::Monitor monitor_;
std::vector<size_t> row_ptr_;
dh::device_vector<xgboost::Entry> data_;
dh::caching_device_vector<GradientPair> gpair_;
size_t num_row_;
};
XGBOOST_REGISTER_LINEAR_UPDATER(GPUCoordinateUpdater, "gpu_coord_descent")
.describe(
"Update linear model according to coordinate descent algorithm. GPU "
"accelerated.")
.set_body([]() { return new GPUCoordinateUpdater(); });
} // namespace linear
} // namespace xgboost
|
69fb3ce5569b27016c2cea6bc2e339855f8722d0.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @brief
* ragged_ops
*
* @copyright
* Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu)
* Mobvoi Inc. (authors: Fangjun Kuang)
*
* @copyright
* See LICENSE for clarification regarding multiple authors
*/
#include <algorithm>
#include <memory>
#include <vector>
#include "hipcub/hipcub.hpp"
#include "k2/csrc/array_ops.h"
#include "k2/csrc/macros.h"
#include "k2/csrc/math.h"
#include "k2/csrc/moderngpu_allocator.h"
#include "k2/csrc/ragged.h"
#include "k2/csrc/ragged_ops.h"
#include "moderngpu/kernel_mergesort.hxx"
namespace {
/*
A helper function used in RaggedShape3;
  if both first and second are non-NULL, it will check whether their contexts
  are compatible (and return that context if so);
  if one of them is NULL, it returns the other one's context.
*/
static k2::ContextPtr GetContext(const k2::Array1<int32_t> *first,
const k2::Array1<int32_t> *second) {
K2_CHECK(first != nullptr || second != nullptr)
<< "At least one of first and second must be non-NULL";
if (first == nullptr)
return second->Context();
else if (second == nullptr)
return first->Context();
else
return k2::GetContext(*first, *second);
}
} // namespace
namespace k2 {
RaggedShape RandomRaggedShape(bool set_row_ids, int32_t min_num_axes,
int32_t max_num_axes, int32_t min_num_elements,
int32_t max_num_elements) {
NVTX_RANGE(K2_FUNC);
ContextPtr c = GetCpuContext();
K2_CHECK(min_num_axes >= 2 && max_num_axes >= min_num_axes &&
min_num_elements >= 0 && max_num_elements >= min_num_elements);
int32_t num_axes = RandInt(min_num_axes, max_num_axes);
int32_t num_elements = RandIntGeometric(min_num_elements, max_num_elements);
bool done_repeats = false;
std::vector<RaggedShapeDim> axes(num_axes - 1);
for (int32_t axis = num_axes - 2; axis >= 0; axis--) {
// this axis will have row_ids of length num_elements and
// row_splits of length to be determined.
int32_t cur_row_split = 0;
std::vector<int32_t> row_splits_vec;
std::vector<int32_t> row_ids_vec;
row_splits_vec.push_back(cur_row_split);
// The reason for "|| RandInt(0, 2) == 0)" is so that even if there
// are no elements we can still potentially generate empty row-splits.
while (cur_row_split < num_elements || RandInt(0, 2) == 0) {
int32_t split_size = RandIntGeometric(0, num_elements - cur_row_split);
cur_row_split += split_size;
// sometimes we have a bunch of empty rows in a row (this will test out
// more of the code), so here we generate a bunch of empty rows, but we
      // do this only once (that's why we declare `done_repeats` here).
if (split_size == 0 && RandInt(0, 30) == 0 && !done_repeats) {
int32_t num_repeats = RandIntGeometric(1, 128);
row_splits_vec.insert(row_splits_vec.end(), num_repeats, cur_row_split);
// don't need to set `row_ids_vec` as there's no element.
done_repeats = true;
}
row_splits_vec.push_back(cur_row_split);
if (set_row_ids) {
int32_t cur_row = static_cast<int32_t>(row_splits_vec.size()) - 2;
row_ids_vec.insert(row_ids_vec.end(), split_size, cur_row);
}
}
axes[axis].row_splits = Array1<int32_t>(c, row_splits_vec);
if (set_row_ids) axes[axis].row_ids = Array1<int32_t>(c, row_ids_vec);
axes[axis].cached_tot_size = num_elements;
num_elements = axes[axis].row_splits.Dim() - 1;
}
// RaggedShape(axes, true) will check the returned RaggedShape for
// consistency.
return RaggedShape(axes, true);
}
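// Illustrative example (not taken from the original comments): a two-axis
// RaggedShape with rows of sizes 2, 1 and 0, i.e. [ [ x x ] [ x ] [ ] ], has
// row_splits == [ 0 2 3 3 ] and row_ids == [ 0 0 1 ]. RaggedShape2() below
// accepts either (or both) of these arrays; when only row_ids is supplied,
// the row_splits array is reconstructed with RowIdsToRowSplits().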
RaggedShape RaggedShape2(Array1<int32_t> *row_splits, Array1<int32_t> *row_ids,
int32_t cached_tot_size) {
NVTX_RANGE(K2_FUNC);
K2_CHECK(row_splits != nullptr || row_ids != nullptr)
<< "At least one of row_splits and row_ids must be defined";
ContextPtr ctx = ::GetContext(row_splits, row_ids);
if (cached_tot_size != -1) {
if (row_ids != nullptr) K2_CHECK_EQ(cached_tot_size, row_ids->Dim());
if (row_splits != nullptr) {
// may be slow as it may copy memory from device to host
K2_DCHECK_EQ(cached_tot_size, row_splits->Back()) << "Bad row splits is: "
<< *row_splits;
}
}
std::vector<RaggedShapeDim> axes(1);
if (row_splits != nullptr) {
axes[0].row_splits = *row_splits;
} else {
    // we need to work out row_splits as we always require that row_splits be
    // non-empty for RaggedShape. Note that here we assume the last element in
    // row_ids is num_rows - 1, i.e. there are no empty rows after row
    // `row_ids[-1]`.
int32_t num_rows = row_ids->Dim() == 0 ? 0 : row_ids->Back() + 1;
Array1<int32_t> row_splits_array(ctx, num_rows + 1);
RowIdsToRowSplits(*row_ids, &row_splits_array);
axes[0].row_splits = row_splits_array;
}
if (row_ids != nullptr) axes[0].row_ids = *row_ids;
if (cached_tot_size == -1) {
cached_tot_size =
row_ids != nullptr ? row_ids->Dim() : axes[0].row_splits.Back();
}
axes[0].cached_tot_size = cached_tot_size;
// note below line will check if row_splits and row_ids are valid and agree
// with each other.
return RaggedShape(axes);
}
RaggedShape ComposeRaggedShapes(const RaggedShape &a, const RaggedShape &b) {
NVTX_RANGE(K2_FUNC);
if (a.NumElements() != b.Dim0()) {
K2_LOG(FATAL) << "ComposeRaggedShapes: shape mismatch: " << a.NumElements()
<< " vs. " << b.Dim0();
}
const auto &a_axes = a.Axes();
const auto &b_axes = b.Axes();
std::vector<RaggedShapeDim> axes(a_axes.size() + b_axes.size());
std::size_t a_size = a_axes.size(), b_size = b_axes.size();
for (std::size_t i = 0; i < a_size; ++i) axes[i] = a_axes[i];
for (std::size_t i = 0; i < b_size; ++i) axes[i + a_size] = b_axes[i];
return RaggedShape(axes);
}
RaggedShape RaggedShape3(Array1<int32_t> *row_splits1,
Array1<int32_t> *row_ids1, int32_t cached_tot_size1,
Array1<int32_t> *row_splits2,
Array1<int32_t> *row_ids2, int32_t cached_tot_size2) {
NVTX_RANGE(K2_FUNC);
RaggedShape shape1 = RaggedShape2(row_splits1, row_ids1, cached_tot_size1);
Array1<int32_t> temp_array;
if (row_splits2 == nullptr) {
K2_CHECK_NE(row_ids2, nullptr)
<< "Either row-splits or row-ids must be defined";
temp_array = Array1<int32_t>(row_ids2->Context(), shape1.NumElements() + 1);
row_splits2 = &temp_array;
RowIdsToRowSplits(*row_ids2, row_splits2);
}
return ComposeRaggedShapes(
shape1, RaggedShape2(row_splits2, row_ids2, cached_tot_size2));
}
RaggedShape RaggedShape4(Array1<int32_t> *row_splits1,
Array1<int32_t> *row_ids1, int32_t cached_tot_size1,
Array1<int32_t> *row_splits2,
Array1<int32_t> *row_ids2, int32_t cached_tot_size2,
Array1<int32_t> *row_splits3,
Array1<int32_t> *row_ids3, int32_t cached_tot_size3) {
NVTX_RANGE(K2_FUNC);
RaggedShape shape12 = RaggedShape3(row_splits1, row_ids1, cached_tot_size1,
row_splits2, row_ids2, cached_tot_size2);
Array1<int32_t> temp_array;
if (row_splits3 == nullptr) {
K2_CHECK_NE(row_ids3, nullptr)
<< "Either row-splits or row-ids must be defined";
temp_array =
Array1<int32_t>(row_ids3->Context(), shape12.NumElements() + 1);
row_splits3 = &temp_array;
RowIdsToRowSplits(*row_ids3, row_splits3);
}
return ComposeRaggedShapes(
shape12, RaggedShape2(row_splits3, row_ids3, cached_tot_size3));
}
RaggedShape RaggedShapeFromTotSizes(ContextPtr c, int32_t num_axes,
int32_t *tot_sizes) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(num_axes, 2);
std::vector<RaggedShapeDim> axes(num_axes - 1);
// In future we might choose to allocate everything in one big array, to avoid
// multiple allocations, but for now just do it the simple way.
for (int32_t axis = 1; axis < num_axes; ++axis) {
axes[axis - 1].row_splits = Array1<int32_t>(c, tot_sizes[axis - 1] + 1);
axes[axis - 1].row_ids = Array1<int32_t>(c, tot_sizes[axis]);
axes[axis - 1].cached_tot_size = tot_sizes[axis];
}
// Not check here as we did not set the values of row_splits and row_ids
return RaggedShape(axes, false);
}
Array1<int32_t *> GetRowSplitsPtr(RaggedShape &src) {
NVTX_RANGE(K2_FUNC);
int32_t axes = src.NumAxes();
K2_CHECK_GE(axes, 2);
std::vector<int32_t *> row_splits_start(axes - 1);
for (int32_t i = 1; i != axes; ++i) {
Array1<int32_t> &cur_splits = src.RowSplits(i);
row_splits_start[i - 1] = cur_splits.Data();
}
return Array1<int32_t *>(src.Context(), row_splits_start);
}
// See declaration in ragged.h for documentation of its purpose and interface.
RaggedShape Unsqueeze(const RaggedShape &src, int32_t axis) {
// If axis == 0, initial row_splits and row_ids will look like the following,
// if for example src.Dim0() was 5: [ 0 5 ], [ 0 0 0 0 0 ]. The other axes
// would be pushed forward.
//
// If 0 < axis <= src.NumAxes(), the inserted row_splits and row_ids would
// look like the following, if for instance the src.TotSize(axis) = 8:
// [ 0 1 2 3 4 5 6 7 8 ], [ 0 1 2 3 4 5 6 7 ].
//
  // The reason why the code is different for axis == 0 is that in that case we
// are really making visible an "implicit" axis of the input `src`; we could
// call it axis 0 of the original RaggedShape. Imagine that "implicit" axis's
// row_splits and row_ids map respectively from an idx_minus1 -> idx0 and from
// an idx_0 to idx_minus1, where idx_minus1 is always 0 and 0 <= idx0 <
// Dim0().
NVTX_RANGE(K2_FUNC);
ContextPtr c = src.Context();
K2_CHECK(axis >= 0 && axis <= src.NumAxes());
const std::vector<RaggedShapeDim> &axes_in = src.Axes();
int32_t num_axes_in = src.NumAxes();
// Note: in RaggedShape, the vector of RaggedShapeDim is of length
// num_axes - 1, so the output will have one more axis than the input.
std::vector<RaggedShapeDim> axes_out(num_axes_in);
int32_t row_splits_dim, row_ids_dim;
Array1<int32_t> mem;
if (axis == 0) {
row_splits_dim = 2; // e.g. [ 0 5 ]
row_ids_dim = src.Dim0(); // e.g. [ 0 0 0 0 0 ]
mem = Array1<int32_t>(c, row_splits_dim + row_ids_dim);
int32_t *mem_data = mem.Data();
K2_EVAL(
c, mem.Dim(), lambda_set_mem, (int32_t i)->void {
if (i == 1)
mem_data[i] = row_ids_dim;
else
mem_data[i] = 0;
});
} else {
int32_t tot_size = src.TotSize(axis);
row_splits_dim = tot_size + 1;
row_ids_dim = tot_size;
mem = Array1<int32_t>(c, row_splits_dim + row_ids_dim);
int32_t *mem_data = mem.Data();
K2_EVAL(
c, mem.Dim(), lambda_set_mem2,
(int32_t i)->void { mem_data[i] = i % (tot_size + 1); });
}
axes_out[axis].row_splits = mem.Range(0, row_splits_dim);
axes_out[axis].row_ids = mem.Range(row_splits_dim, row_ids_dim);
axes_out[axis].cached_tot_size = row_ids_dim;
for (int32_t i = 0; i < axis; ++i) axes_out[i] = axes_in[i];
// Note: the returned array has `num_axes_in + 1` axes, so its
// array of RaggedShapeDim is of length `num_axes_in`.
for (int32_t i = axis + 1; i < num_axes_in; ++i) axes_out[i] = axes_in[i - 1];
return RaggedShape(axes_out);
}
std::vector<RaggedShape> UnsqueezeParallel(int32_t num_srcs, RaggedShape **src,
int32_t axis) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(axis, 0);
std::vector<RaggedShape> ans;
if (num_srcs == 0) return ans;
ans.reserve(num_srcs);
ContextPtr c = src[0]->Context();
std::vector<int32_t> all_row_splits_vec(num_srcs * 2);
int32_t max_dim = 0;
// all_row_splits_vec will contain [ 0 d0 0 d1 0 d2 .. ]
// where d0 == src[0]->Dim0(), d1 == src[1]->Dim0()..
for (int32_t i = 0; i < num_srcs; i++) {
int32_t this_dim0 = src[i]->Dim0();
if (this_dim0 > max_dim) max_dim = this_dim0;
all_row_splits_vec[i * 2] = 0;
all_row_splits_vec[i * 2 + 1] = this_dim0;
}
Array1<int32_t> all_row_splits(c, all_row_splits_vec);
Array1<int32_t> all_row_ids(c, max_dim, 0);
for (int32_t i = 0; i < num_srcs; i++) {
int32_t num_axes = src[i]->NumAxes();
std::vector<RaggedShapeDim> axes;
axes.reserve(num_axes); // note, the size of the `axes` of a RaggedShape
// is its NumAxes() - 1.
axes.resize(1);
int32_t this_old_dim0 = all_row_splits_vec[i * 2 + 1];
axes[0].row_splits = all_row_splits.Range(i * 2, 2);
axes[0].row_ids = all_row_ids.Range(0, this_old_dim0);
axes[0].cached_tot_size = this_old_dim0;
axes.insert(axes.end(), src[i]->Axes().begin(), src[i]->Axes().end());
ans.emplace_back(axes);
}
return ans;
}
/*
Internal function used in Index(), which gets certain arrays used internally.
@param [in] src Source shape to be indexed
@param [in] src_row_splits_ptrs Result of calling GetRowSplitsPtr(src)
@param [in] new2old Array of indexes into axis 0 of src
@param [out] old_offsets Will be set to new Array2 with dimension
(src.NumAxes(), new2old.Dim()), whose (i,j)'th
element contains the offset into axis i of `src`
where the slice of `src` with index0 (i.e. index
into 0'th-axis of `src`) equal to `new2old[j]`
begins.
@param [out] new_offsets Will be set to new Array2 with dimension
(src.NumAxes(), new2old.Dim()+1), whose (i,j)'th
element contains the offset into axis i of `ans`
where the data in `ans` corresponding to
index j (i.e. index j into axis 0 of `ans`) begins.
Note: `ans` is the result of Index(), with
ans.Dim0() == new2old.Dim().
*/
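/*
  Illustrative example (not part of the original documentation): suppose `src`
  has two axes with row_splits1 == [ 0 2 3 5 ] (three rows of sizes 2, 1, 2)
  and new2old == [ 2 0 ]. The lambda below then sets
    old_offsets == [ [ 2 0 ], [ 3 0 ] ]  (where each chosen slice starts in src)
  and writes the slice sizes [ [ 1 1 ], [ 2 2 ] ] into new_offsets, which the
  row-wise ExclusiveSum turns into
    new_offsets == [ [ 0 1 2 ], [ 0 2 4 ] ],
  i.e. the answer has 2 rows and 4 elements; correspondingly Index() would
  produce elem_indexes == [ 3 4 0 1 ].
*/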
inline void GetOldAndNewOffsets(RaggedShape &src,
const Array1<int32_t *> &src_row_splits_ptrs,
const Array1<int32_t> &new2old,
Array2<int32_t> *old_offsets,
Array2<int32_t> *new_offsets) {
NVTX_RANGE(K2_FUNC);
K2_CHECK(src.NumAxes() > 1);
ContextPtr &c = src.Context();
int32_t num_axes = src.NumAxes(), ans_dim0 = new2old.Dim();
int32_t *const *src_row_splits_ptrs_data = src_row_splits_ptrs.Data();
const int32_t *new2old_data = new2old.Data();
*old_offsets = Array2<int32_t>(c, num_axes, ans_dim0);
*new_offsets = Array2<int32_t>(c, num_axes, ans_dim0 + 1);
auto old_offsets_acc = old_offsets->Accessor(),
new_offsets_acc = new_offsets->Accessor();
// Set old_offsets; and for now, set new_offsets to the corresponding
// sizes of the output slices.
K2_EVAL(
c, ans_dim0, lambda_set_offsets, (int32_t i)->void {
// 0 <= i < ans_dim0
int32_t old_offset = new2old_data[i], old_offset_next = old_offset + 1;
for (int32_t axis = 0;; axis++) {
old_offsets_acc(axis, i) = old_offset;
// Below, 'new_offsets_acc' currently contains the size rather
// than the offset; we need to do exclusive-sum.
new_offsets_acc(axis, i) = old_offset_next - old_offset;
if (axis + 1 == num_axes) return;
old_offset = src_row_splits_ptrs_data[axis][old_offset];
old_offset_next = src_row_splits_ptrs_data[axis][old_offset_next];
}
});
ExclusiveSum(*new_offsets, new_offsets);
}
RaggedShape Index(RaggedShape &src, const Array1<int32_t> &new2old,
Array1<int32_t> *elem_indexes /*=nullptr*/) {
NVTX_RANGE(K2_FUNC);
ContextPtr c = src.Context();
bool is_cpu = (c->GetDeviceType() == kCpu);
K2_CHECK(IsCompatible(src, new2old));
int32_t num_axes = src.NumAxes(), src_dim0 = src.Dim0(),
ans_dim0 = new2old.Dim();
if (ans_dim0 == 0) {
if (elem_indexes) *elem_indexes = Array1<int32_t>(c, 0);
return EmptyRaggedShape(c, num_axes);
}
Array1<int32_t *> src_row_splits_ptrs = GetRowSplitsPtr(src);
Array2<int32_t> old_offsets, // num_axes by ans_dim0
new_offsets; // num_axes by (ans_dim0 + 1).
GetOldAndNewOffsets(src, src_row_splits_ptrs, new2old, &old_offsets,
&new_offsets);
Array1<int32_t> tot_sizes_out =
Array1<int32_t>(new_offsets.Col(ans_dim0)).To(GetCpuContext());
if (elem_indexes) *elem_indexes = Array1<int32_t>(c, tot_sizes_out.Back());
RaggedShape ans = RaggedShapeFromTotSizes(c, num_axes, tot_sizes_out.Data());
auto old_offsets_acc = old_offsets.Accessor(),
new_offsets_acc = new_offsets.Accessor();
ParallelRunner pr(c);
std::vector<hipStream_t> streams(num_axes);
int32_t num_jobs = ans_dim0 * 2; // note: this formula is not a heuristic;
// it's how TaskRedirect works..
Array2<TaskRedirect> task_redirects(c, num_axes, num_jobs);
auto task_redirects_acc = task_redirects.Accessor();
for (int32_t axis = 0; axis < num_axes; ++axis) {
streams[axis] = pr.NewStream();
With w(streams[axis]);
const int32_t *new_offsets_ptr = new_offsets_acc.Row(axis);
TaskRedirect *task_redirect_ptr = task_redirects_acc.Row(axis);
GetTaskRedirect(c, ans_dim0, new_offsets_ptr, task_redirect_ptr);
}
for (int32_t axis = 0; axis < num_axes - 1; ++axis) {
{
int32_t *this_new_row_splits = ans.RowSplits(axis + 1).Data();
const int32_t *this_old_row_splits = src.RowSplits(axis + 1).Data();
auto lambda_set_row_splits = [=] __host__ __device__(
int32_t ans_idx0, int32_t num_threads,
int32_t thread_idx) -> void {
// 0 <= ans_idx0 < ans_dim0; and 0 <= thread_idx < num_threads,
// num_threads may have any value > 0 as far as this code is concerned.
//
// Reminder of how row_splits work dimensionally: they are a map
// from, e.g. an idx0 to an idx0x. An offsets_acc(0,n) is
// dimensionally an idx0; an offsets_acc(1,n) an idx01, and so on.
// The locations in the row_splits array are as given by
// the `axis`'th row of `offsets`; the values in the array
// are related to those in the `axis+1`'th row.
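        // (Illustrative note on the index naming, inferred from usage here:
        // an "idx0" indexes axis 0, an "idx01" is a flat index into the
        // elements of axis 1, and an "idx0x" is a row_splits value, i.e. the
        // idx01 at which row idx0 begins.)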
int32_t this_new_offset = new_offsets_acc(axis, ans_idx0),
next_new_offset = new_offsets_acc(axis, ans_idx0 + 1),
num_rows = next_new_offset - this_new_offset,
this_old_offset = old_offsets_acc(axis, ans_idx0),
value_offset = new_offsets_acc(axis + 1, ans_idx0) -
old_offsets_acc(axis + 1, ans_idx0);
// Using <= instead of < below causes threads for different ans_idx0 to
// write a single overlapping value, but also ensures that the
// terminating value is written. This only works because row_splits
// vectors always start with 0, which is not necessarily the case
// for row-ids.
for (; thread_idx <= num_rows; thread_idx += num_threads) {
this_new_row_splits[this_new_offset + thread_idx] =
value_offset + this_old_row_splits[this_old_offset + thread_idx];
}
};
int32_t min_threads_per_job = 2, tot_work = tot_sizes_out[axis],
target_num_loops = (is_cpu || tot_work > 1000000 ? 8 : 2);
EvalWithRedirect(streams[axis], num_jobs, task_redirects_acc.Row(axis),
min_threads_per_job, tot_work, target_num_loops,
lambda_set_row_splits);
}
{
int32_t *this_new_row_ids = ans.RowIds(axis + 1).Data();
const int32_t *this_old_row_ids = src.RowIds(axis + 1).Data();
int32_t min_threads_per_job = 2, tot_work = tot_sizes_out[axis],
target_num_loops = (is_cpu || tot_work > 1000000 ? 8 : 2);
if (elem_indexes == nullptr || axis != num_axes - 2) {
// If we don't need to write to `elem_indexes`... [caution: the next
// code block differs from this only by a statement that sets
// `elem_indexes` and they should be kept in sync.]
auto lambda_set_row_ids = [=] __host__ __device__(
int32_t ans_idx0, int32_t num_threads,
int32_t thread_idx) -> void {
// Reminder of how row_ids work dimensionally: they are a map
// from, e.g. an idx01 to an idx0. An offsets_acc(0,n) is
// dimensionally an idx0; an offsets_acc(1,n) an idx01, and so on.
// The locations in the row_ids array are as given by
// the `axis+1`'th row of `offsets`; the values in the array
// are related to those in the `axis`'th row.
int32_t this_new_offset = new_offsets_acc(axis + 1, ans_idx0),
next_new_offset = new_offsets_acc(axis + 1, ans_idx0 + 1),
num_elems = next_new_offset - this_new_offset,
this_old_offset = old_offsets_acc(axis + 1, ans_idx0),
value_offset = new_offsets_acc(axis, ans_idx0) -
old_offsets_acc(axis, ans_idx0);
for (; thread_idx < num_elems; thread_idx += num_threads) {
this_new_row_ids[this_new_offset + thread_idx] =
value_offset + this_old_row_ids[this_old_offset + thread_idx];
}
};
EvalWithRedirect(streams[axis + 1], num_jobs,
task_redirects_acc.Row(axis + 1), min_threads_per_job,
tot_work, target_num_loops, lambda_set_row_ids);
} else {
int32_t *elem_indexes_data = elem_indexes->Data();
// We need to write to `elem_indexes`. Note: this code block only
// differs from the above by an extra statement regarding
// `elem_indexes`. Comments have been removed.
auto lambda_set_row_ids_and_elem_indexes =
[=] __host__ __device__(int32_t ans_idx0, int32_t num_threads,
int32_t thread_idx) -> void {
int32_t this_new_offset = new_offsets_acc(axis + 1, ans_idx0),
next_new_offset = new_offsets_acc(axis + 1, ans_idx0 + 1),
num_elems = next_new_offset - this_new_offset,
this_old_offset = old_offsets_acc(axis + 1, ans_idx0),
value_offset = new_offsets_acc(axis, ans_idx0) -
old_offsets_acc(axis, ans_idx0);
for (; thread_idx < num_elems; thread_idx += num_threads) {
this_new_row_ids[this_new_offset + thread_idx] =
value_offset + this_old_row_ids[this_old_offset + thread_idx];
elem_indexes_data[this_new_offset + thread_idx] =
this_old_offset + thread_idx;
}
};
EvalWithRedirect(streams[axis + 1], num_jobs,
task_redirects_acc.Row(axis + 1), min_threads_per_job,
tot_work, target_num_loops,
lambda_set_row_ids_and_elem_indexes);
}
}
}
#if !defined(NDEBUG)
ans.Check();
#endif
return ans;
}
Array2<int32_t> GetOffsets(int32_t num_srcs, RaggedShape **src) {
K2_CHECK_GT(num_srcs, 0);
int32_t num_axes_in = src[0]->NumAxes();
ContextPtr ctx = src[0]->Context();
Array2<int32_t> src_offsets(GetCpuContext(), num_axes_in + 1, num_srcs + 1);
int32_t *src_offsets_data = src_offsets.Data();
int32_t src_offsets_stride0 = src_offsets.ElemStride0();
// Check if they have same num-axes and compatible context
for (int32_t i = 1; i < num_srcs; ++i) {
K2_CHECK_EQ(src[i]->NumAxes(), num_axes_in);
K2_CHECK(ctx->IsCompatible(*src[i]->Context()));
}
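  // Illustrative example (derived from the loop below): with two sources of
  // two axes each, where src[0] has Dim0() == 3, TotSize(1) == 5 and src[1]
  // has Dim0() == 2, TotSize(1) == 4, the rows of src_offsets become
  //   axis 0: [ 0 1 2 ]   (each source contributes 1)
  //   axis 1: [ 0 3 5 ]   (cumulative Dim0())
  //   axis 2: [ 0 5 9 ]   (cumulative TotSize(1))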
for (int32_t axis = 0; axis <= num_axes_in; ++axis) {
int32_t sum = 0;
for (int32_t i = 0; i <= num_srcs; ++i) { // i is the column
src_offsets_data[axis * src_offsets_stride0 + i] = sum;
if (i < num_srcs) {
sum += (axis == 0 ? 1 : src[i]->TotSize(axis - 1));
}
}
}
return src_offsets;
}
void GetRowInfo(RaggedShape &src, Array1<int32_t *> *row_splits,
Array1<int32_t *> *row_ids) {
NVTX_RANGE(K2_FUNC);
int32_t axes = src.NumAxes();
K2_CHECK_GE(axes, 2);
src.Populate();
std::vector<int32_t *> row_splits_ptrs(axes - 1);
std::vector<int32_t *> row_ids_ptrs(axes - 1);
for (int32_t i = 1; i != axes; ++i) {
row_splits_ptrs[i - 1] = src.RowSplits(i).Data();
row_ids_ptrs[i - 1] = src.RowIds(i).Data();
}
ContextPtr ctx = src.Context();
*row_splits = Array1<int32_t *>(ctx, row_splits_ptrs);
*row_ids = Array1<int32_t *>(ctx, row_ids_ptrs);
}
void GetRowInfoMulti(int32_t num_srcs, RaggedShape **src,
Array2<int32_t *> *row_splits,
Array2<int32_t *> *row_ids) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(num_srcs, 0);
int32_t num_axes_in = src[0]->NumAxes();
K2_CHECK_GE(num_axes_in, 2);
ContextPtr ctx = src[0]->Context();
// check if they have same num-axes and compatible context
for (int32_t i = 1; i < num_srcs; ++i) {
K2_CHECK_EQ(src[i]->NumAxes(), num_axes_in);
K2_CHECK(ctx->IsCompatible(*src[i]->Context()));
}
Array2<int32_t *> row_splits_ptrs(GetCpuContext(), num_axes_in - 1, num_srcs);
Array2<int32_t *> row_ids_ptrs(GetCpuContext(), num_axes_in - 1, num_srcs);
int32_t **splits_ptr_data = row_splits_ptrs.Data();
int32_t **ids_ptr_data = row_ids_ptrs.Data();
int32_t stride0 = row_splits_ptrs.ElemStride0();
K2_CHECK_EQ(stride0, row_ids_ptrs.ElemStride0());
for (int32_t axis = 0; axis != num_axes_in - 1; ++axis) {
for (int32_t i = 0; i != num_srcs; ++i) {
splits_ptr_data[axis * stride0 + i] = src[i]->RowSplits(axis + 1).Data();
ids_ptr_data[axis * stride0 + i] = src[i]->RowIds(axis + 1).Data();
}
}
*row_splits = row_splits_ptrs.To(ctx);
*row_ids = row_ids_ptrs.To(ctx);
}
RaggedShape Append(int32_t axis, int32_t num_srcs, RaggedShape **src) {
NVTX_RANGE(K2_FUNC);
if (num_srcs == 1) return **src;
K2_CHECK_GT(num_srcs, 1);
if (axis == 1) {
RaggedShape temp = Stack(axis, num_srcs, src);
return RemoveAxis(temp, axis);
}
K2_CHECK_EQ(axis, 0) << "Append() with axis > 1 not yet supported";
int32_t num_axes = src[0]->NumAxes();
ContextPtr c = src[0]->Context();
bool is_cpu = (c->GetDeviceType() == kCpu);
// Check if they have same num-axes and compatible context
for (int32_t i = 1; i < num_srcs; ++i) {
K2_CHECK_EQ(num_axes, src[i]->NumAxes());
K2_CHECK(IsCompatible(*src[0], *src[i]));
}
// `offsets` will be on CPU for now.
Array2<int32_t> offsets = GetOffsets(num_srcs, src);
auto offsets_acc = offsets.Accessor();
std::vector<int32_t> tot_sizes_out(num_axes);
for (int32_t axis = 0; axis < num_axes; ++axis)
tot_sizes_out[axis] = offsets_acc(axis + 1, num_srcs);
RaggedShape ans = RaggedShapeFromTotSizes(c, num_axes, tot_sizes_out.data());
Array2<int32_t *> src_row_splits, src_row_ids;
GetRowInfoMulti(num_srcs, src, &src_row_splits, &src_row_ids);
auto src_row_splits_acc = src_row_splits.Accessor(),
src_row_ids_acc = src_row_ids.Accessor();
offsets = offsets.To(c);
offsets_acc = offsets.Accessor(); // on GPU now (if we're using one)
ParallelRunner pr(c);
std::vector<hipStream_t> streams(num_axes);
int32_t num_jobs = num_srcs * 2;
// task_redirects is a device array (if using GPU).
// We have `num_axes - 1` different sets of row_splits/row_ids to
// populate but they have different sizes; the total number of distinct
// sizes is `num_axes`.
Array2<TaskRedirect> task_redirects(c, num_axes, num_jobs);
auto task_redirects_acc = task_redirects.Accessor();
  // populate task_redirects (these allocate blocks of threads roughly
  // proportionally to the amount of data to process from each source).
for (int32_t axis = 0; axis < num_axes; ++axis) {
streams[axis] = pr.NewStream();
With w(streams[axis]);
const int32_t *offsets = offsets_acc.Row(axis + 1);
// c->GetCudaStream() == stream[axis] as it has been overridden by With
GetTaskRedirect(c, num_srcs, offsets, task_redirects_acc.Row(axis));
}
for (int32_t axis = 0; axis < num_axes - 1; axis++) {
// first set the row-splits.
int32_t **this_src_row_splits = src_row_splits_acc.Row(axis),
**this_src_row_ids = src_row_ids_acc.Row(axis);
int32_t *this_dest_row_splits = ans.RowSplits(axis + 1).Data(),
*this_dest_row_ids = ans.RowIds(axis + 1).Data();
const int32_t *offsets_this_axis = offsets_acc.Row(axis + 1),
*offsets_next_axis = offsets_acc.Row(axis + 2);
auto lambda_set_row_splits = [=] __host__ __device__(
int32_t src_idx, int32_t num_threads,
int32_t thread_idx) -> void {
// Reminder of how row_splits work dimensionally: they are a map
// from, e.g. an idx0 to an idx0x. An offsets_acc(0,n) is
// dimensionally an idx0; an offsets_acc(1,n) an idx01, and so on.
int32_t this_offset = offsets_this_axis[src_idx],
next_offset = offsets_this_axis[src_idx + 1],
this_value_offset = offsets_next_axis[src_idx],
num_rows = next_offset - this_offset;
int32_t *src_row_splits_ptr = this_src_row_splits[src_idx];
// Using <= instead of < below causes threads for different src_idx to
// write a single overlapping value, but also ensures that the
// terminating value is written. This only works because row_splits
// vectors always start with 0, which is not necessarily the case
// for row-ids.
for (; thread_idx <= num_rows; thread_idx += num_threads) {
this_dest_row_splits[this_offset + thread_idx] =
this_value_offset + src_row_splits_ptr[thread_idx];
}
};
int32_t min_threads_per_job = 2, tot_work = tot_sizes_out[axis],
target_num_loops = (is_cpu || tot_work > 1000000 ? 8 : 2);
EvalWithRedirect(streams[axis], num_jobs, task_redirects_acc.Row(axis),
min_threads_per_job, tot_work, target_num_loops,
lambda_set_row_splits);
{ // set the row-ids
auto lambda_set_row_ids = [=] __host__ __device__(
int32_t src_idx, int32_t num_threads,
int32_t thread_idx) -> void {
// Reminder of how row_ids work dimensionally: they are a map
// from, e.g. an idx01 to an idx0. An offsets_acc(0,n) is
// dimensionally an idx0; an offsets_acc(1,n) an idx01, and so on.
int32_t this_offset = offsets_next_axis[src_idx],
next_offset = offsets_next_axis[src_idx + 1],
this_value_offset = offsets_this_axis[src_idx],
num_elems = next_offset - this_offset;
int32_t *src_row_ids_ptr = this_src_row_ids[src_idx];
for (; thread_idx < num_elems; thread_idx += num_threads) {
this_dest_row_ids[this_offset + thread_idx] =
this_value_offset + src_row_ids_ptr[thread_idx];
}
};
int32_t min_threads_per_job = 2, tot_work = tot_sizes_out[axis + 1],
target_num_loops = (tot_work > 1000000 ? 4 : 2);
// TODO(haowen): maybe we should launch kernels for row_splits and row_ids
// in different streams
EvalWithRedirect(streams[axis + 1], num_jobs,
task_redirects_acc.Row(axis + 1), min_threads_per_job,
tot_work, target_num_loops, lambda_set_row_ids);
}
}
return ans;
}
RaggedShape RemoveAxis(RaggedShape &src, int32_t axis) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(src.NumAxes(), 2);
K2_CHECK(axis >= 0 && axis < src.NumAxes());
// note, `axes_in` is of dim src.NumAxes() - 1.
// Also note: axes_in[i] pertains to the relationship between
// axes i and i+1 in the source.
src.Populate();
const std::vector<RaggedShapeDim> &axes_in = src.Axes();
std::vector<RaggedShapeDim> axes_out(axes_in.size() - 1);
int32_t axes_out_size = static_cast<int32_t>(axes_out.size());
for (int32_t i = 0; i < axis - 1; ++i) axes_out[i] = axes_in[i];
if (axis > 0 && axis + 1 < src.NumAxes()) {
axes_out[axis - 1].row_ids =
axes_in[axis - 1].row_ids[axes_in[axis].row_ids];
axes_out[axis - 1].row_splits =
axes_in[axis].row_splits[axes_in[axis - 1].row_splits];
axes_out[axis - 1].cached_tot_size = axes_out[axis - 1].row_ids.Dim();
}
for (int32_t i = axis; i < axes_out_size; ++i) axes_out[i] = axes_in[i + 1];
return RaggedShape(axes_out);
}
RaggedShape MakeTransposable(RaggedShape &src) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(src.NumAxes(), 2);
int32_t src_dim0 = src.Dim0(), src_tot_size1 = src.TotSize(1);
if (src_dim0 <= 1) return src;
ContextPtr c = src.Context();
int32_t num_axes = src.NumAxes();
int32_t max_size = src.MaxSize(1);
if (max_size <= 0) return src;
int32_t ans_tot_size1 = max_size * src_dim0;
src.Populate();
const std::vector<RaggedShapeDim> &axes_in = src.Axes();
std::vector<RaggedShapeDim> axes_out(num_axes - 1);
const int32_t *src_row_splits1_data = src.RowSplits(1).Data();
const int32_t *src_row_ids1_data = src.RowIds(1).Data();
{
ParallelRunner pr(c);
RaggedShapeDim &axis1_shape = axes_out[0];
{
// set ans.RowSplits(1);
With w(pr.NewStream());
axis1_shape.row_splits = Range(c, src_dim0 + 1, 0, max_size);
}
{
// set ans.RowIds(1);
With w(pr.NewStream());
axis1_shape.row_ids = Array1<int32_t>(c, ans_tot_size1);
int32_t *row_ids1_data = axis1_shape.row_ids.Data();
axis1_shape.cached_tot_size = ans_tot_size1;
K2_EVAL(
c, ans_tot_size1, lambda_set_row_ids1,
(int32_t i)->void { row_ids1_data[i] = i / max_size; });
}
if (num_axes > 2) {
RaggedShapeDim &axis2_shape = axes_out[1];
const int32_t *src_row_splits2_data = src.RowSplits(2).Data();
{
// set ans.RowSplits(2);
With w(pr.NewStream());
axis2_shape.cached_tot_size = src.TotSize(2);
axis2_shape.row_splits = Array1<int32_t>(c, ans_tot_size1 + 1);
int32_t *ans_row_splits2_data = axis2_shape.row_splits.Data();
K2_EVAL(
c, ans_tot_size1 + 1, lambda_set_row_splits2,
(int32_t idx01)->void {
if (idx01 == ans_tot_size1) {
ans_row_splits2_data[idx01] =
src_row_splits2_data[src_tot_size1];
return;
}
int32_t idx0 = idx01 / max_size, idx1 = idx01 % max_size;
int32_t idx0x = src_row_splits1_data[idx0],
idx0x_next = src_row_splits1_data[idx0 + 1];
int32_t num_elems_this_row = idx0x_next - idx0x;
if (idx1 < num_elems_this_row)
ans_row_splits2_data[idx01] =
src_row_splits2_data[idx0x + idx1];
else
ans_row_splits2_data[idx01] =
src_row_splits2_data[idx0x_next]; // append empty row
});
}
{
// set ans.RowIds(2);
With w(pr.NewStream());
int32_t tot_size2 = src.TotSize(2);
axis2_shape.row_ids = Array1<int32_t>(c, tot_size2);
int32_t *ans_row_ids2_data = axis2_shape.row_ids.Data();
const int32_t *src_row_ids2_data = src.RowIds(2).Data();
K2_EVAL(
c, tot_size2, lambda_set_row_ids2, (int32_t idx012)->void {
int32_t src_idx01 = src_row_ids2_data[idx012];
int32_t src_idx0 = src_row_ids1_data[src_idx01];
int32_t src_idx1 = src_idx01 - src_row_splits1_data[src_idx0];
ans_row_ids2_data[idx012] = (src_idx0 * max_size) + src_idx1;
});
}
}
}
  // copy the remaining row_splits and row_ids;
for (int32_t i = 2; i < num_axes - 1; ++i) axes_out[i] = axes_in[i];
return RaggedShape(axes_out);
}
// transpose axes 0 and 1.
RaggedShape Transpose(RaggedShape &src, Array1<int32_t> *value_indexes) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(src.NumAxes(), 2);
ContextPtr c = src.Context();
int32_t src_dim0 = src.Dim0(), src_tot_size1 = src.TotSize(1);
if (src_dim0 <= 0) return src;
int32_t src_dim1 = src_tot_size1 / src_dim0;
K2_CHECK_EQ(src_tot_size1 % src_dim0, 0)
<< "Transpose(): all dims on axis 0 must be the same.\n"
<< "src_tot_size1: " << src_tot_size1 << "\n"
<< "src_dim0: " << src_dim0 << ", array is: " << src;
K2_DCHECK(
Equal(src.RowSplits(1), Range(c, src.RowSplits(1).Dim(), 0, src_dim1)))
<< " Expected row-splits to be evenly spaced: " << src.RowSplits(1);
RaggedShape src_no_axis0 = RemoveAxis(src, 0);
K2_CHECK_EQ(src_no_axis0.Dim0(), src_tot_size1);
// `renumbering` is a `new2old` map, that maps from the first index in
// src_no_axis0_renumbered
// to the first index into src_no_axis0.
Array1<int32_t> renumbering(c, src_tot_size1);
int32_t *renumbering_data = renumbering.Data();
K2_EVAL(
c, src_tot_size1, lambda_set_renumbering, (int32_t i)->void {
int32_t j = i % src_dim0, k = i / src_dim0, i_old = j * src_dim1 + k;
renumbering_data[i] = i_old;
});
RaggedShape src_no_axis0_renumbered =
Index(src_no_axis0, renumbering, value_indexes);
int32_t num_rows = src_dim1, row_splits_dim = num_rows + 1,
row_ids_dim = src_tot_size1;
std::vector<RaggedShapeDim> ans_axis0(1);
Array1<int32_t> mem(c, row_splits_dim + row_ids_dim);
int32_t *mem_data = mem.Data();
K2_EVAL(
c, row_splits_dim + row_ids_dim, lambda_set_row_info, (int32_t i)->void {
int32_t val;
if (i >= row_splits_dim) {
// row_ids
int32_t elem_idx = i - row_splits_dim;
val = elem_idx / src_dim0;
} else {
// row_splits
int32_t row_idx = i;
val = row_idx * src_dim0;
}
mem_data[i] = val;
});
ans_axis0[0].row_splits = mem.Range(0, row_splits_dim);
ans_axis0[0].row_ids = mem.Range(row_splits_dim, row_ids_dim);
ans_axis0[0].cached_tot_size = row_ids_dim;
RaggedShape temp(ans_axis0);
return ComposeRaggedShapes(temp, src_no_axis0_renumbered);
}
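// Worked example of the renumbering above: with src_dim0 = 2 and src_dim1 = 3
// the lambda produces renumbering = [ 0 3 1 4 2 5 ], so the regular ragged
// array [ [ a b c ] [ d e f ] ] is transposed to [ [ a d ] [ b e ] [ c f ] ].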
RaggedShape Stack(int32_t axis, int32_t num_srcs, RaggedShape **src) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(num_srcs, 0);
K2_CHECK(axis >= 0 && axis <= 1);
ContextPtr c = src[0]->Context();
int32_t num_axes = src[0]->NumAxes();
// Check if they have the same num-axes and compatible context
for (int32_t i = 1; i < num_srcs; ++i) {
K2_CHECK_EQ(num_axes, src[i]->NumAxes());
K2_CHECK(c->IsCompatible(*src[i]->Context()));
}
std::vector<RaggedShape> unsqueezed = UnsqueezeParallel(num_srcs, src, 0);
std::vector<RaggedShape *> unsqueezed_ptrs(num_srcs);
for (int32_t i = 0; i < num_srcs; i++) unsqueezed_ptrs[i] = &(unsqueezed[i]);
RaggedShape ans = Append(0, num_srcs, unsqueezed_ptrs.data());
// Transpose will check if all src->Dim0() has the same value.
if (axis == 1) ans = Transpose(ans);
return ans;
}
RaggedShape TrivialShape(ContextPtr &c, int32_t num_elems) {
NVTX_RANGE(K2_FUNC);
  // row_splits = [ 0 num_elems ]
Array1<int32_t> row_splits = Range<int32_t>(c, 2, 0, num_elems);
Array1<int32_t> row_ids(c, num_elems, 0);
return RaggedShape2(&row_splits, &row_ids, num_elems);
}
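// Illustrative sketch: a hypothetical helper (not part of the k2 API) showing
// how TrivialShape() can wrap a flat array into a single-row ragged array.
// For values = [ 10 20 30 ] the result is [ [ 10 20 30 ] ], with
// row_splits = [ 0 3 ] and row_ids = [ 0 0 0 ].
inline Ragged<int32_t> ToSingleRowRagged(Array1<int32_t> &values) {
  ContextPtr c = values.Context();
  return Ragged<int32_t>(TrivialShape(c, values.Dim()), values);
}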
RaggedShape RegularRaggedShape(ContextPtr &c, int32_t dim0, int32_t dim1) {
NVTX_RANGE(K2_FUNC);
Array1<int32_t> row_splits = Range<int32_t>(c, dim0 + 1, 0, dim1);
int32_t *row_splits_data = row_splits.Data();
Array1<int32_t> row_ids(c, dim0 * dim1);
int32_t *row_ids_data = row_ids.Data();
K2_EVAL2(
c, dim0, dim1, lambda_set_row_ids,
(int32_t i, int32_t j)->void { row_ids_data[i * dim1 + j] = i; });
return RaggedShape2(&row_splits, &row_ids, dim0 * dim1);
}
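// Worked example of the shape above: RegularRaggedShape(c, 2, 3) produces
// row_splits = [ 0 3 6 ] and row_ids = [ 0 0 0 1 1 1 ], i.e. the regular
// ragged layout [ [ x x x ] [ x x x ] ].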
Ragged<int32_t> GetCountsPartitioned(Ragged<int32_t> &src,
RaggedShape &ans_ragged_shape) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(src.NumAxes(), 2);
K2_CHECK_EQ(ans_ragged_shape.NumAxes(), 2);
K2_CHECK(IsCompatible(src, ans_ragged_shape));
K2_CHECK_EQ(src.Dim0(), ans_ragged_shape.Dim0());
const Array1<int32_t> &values = src.values;
const Array1<int32_t> &row_splits = ans_ragged_shape.RowSplits(1);
int32_t n = ans_ragged_shape.NumElements();
Array1<int32_t> counts = GetCounts(values, n);
return Ragged<int32_t>(ans_ragged_shape, counts);
}
static Array1<int32_t> GetTransposeReorderingCpu(Ragged<int32_t> &src,
int32_t num_cols) {
NVTX_RANGE(K2_FUNC);
std::vector<std::vector<int32_t>> column_indexes(num_cols); // [column][row]
const int32_t *values_data = src.values.Data();
int32_t n = src.values.Dim();
for (int32_t i = 0; i != n; ++i) {
int32_t bucket = values_data[i];
column_indexes[bucket].push_back(i);
}
Array1<int32_t> ans(src.Context(), n);
int32_t *ans_data = ans.Data();
for (int32_t i = 0; i != num_cols; ++i) {
std::copy(column_indexes[i].begin(), column_indexes[i].end(), ans_data);
ans_data += column_indexes[i].size();
}
return ans;
}
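// Worked example of the bucketing above: for src = [ [ 2 0 ] [ 1 2 ] ] (the
// values are column indexes) and num_cols = 3, the buckets are
// column 0 -> { 1 }, column 1 -> { 2 }, column 2 -> { 0 3 }, so the returned
// reordering is [ 1 2 0 3 ]: it lists element positions column by column,
// keeping the original (row) order within each column.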
static Array1<int32_t> GetTransposeReorderingThreeAxesCuda(Ragged<int32_t> &src,
int32_t num_cols) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(src.NumAxes(), 3);
ContextPtr &context = src.Context();
K2_CHECK_EQ(context->GetDeviceType(), kCuda);
const Array1<int32_t> &row_splits1 = src.RowSplits(1);
const int32_t *row_ids2_data = src.RowIds(2).Data();
const int32_t *value_data = src.values.Data();
Array1<int32_t> segments = src.RowSplits(2)[row_splits1];
auto lambda_comp = [=] __device__(int32_t a_idx012,
int32_t b_idx012) -> bool {
int32_t a_col_index = value_data[a_idx012];
int32_t b_col_index = value_data[b_idx012];
if (a_col_index < b_col_index) return true; // sort by column indexes
if (a_col_index > b_col_index) return false;
// at this point, a_idx012 and b_idx012 belong to the same column;
// then we sort by its row indexes
int32_t a_idx01 = row_ids2_data[a_idx012];
int32_t b_idx01 = row_ids2_data[b_idx012];
if (a_idx01 < b_idx01) return true;
if (a_idx01 > b_idx01) return false;
// at this point, a_idx012 and b_idx012 are duplicate elements
return false; // either true or false is fine
};
std::unique_ptr<mgpu::context_t> mgpu_context =
GetModernGpuAllocator(context);
int32_t n = src.values.Dim();
Array1<int32_t> ans = Range(context, n, 0);
if (n == 0) return ans;
K2_CUDA_SAFE_CALL(mgpu::segmented_sort(ans.Data(), // keys
ans.Dim(), // count
segments.Data(), // segments
segments.Dim() - 1, // num_segments
lambda_comp, *mgpu_context));
return ans;
}
Array1<int32_t> GetTransposeReordering(Ragged<int32_t> &src, int32_t num_cols) {
NVTX_RANGE(K2_FUNC);
ContextPtr &context = src.Context();
if (src.NumAxes() < 2) {
// src is empty
return Array1<int32_t>(context, 0);
}
DeviceType device_type = context->GetDeviceType();
if (device_type == kCpu) return GetTransposeReorderingCpu(src, num_cols);
K2_CHECK_EQ(device_type, kCuda);
if (src.NumAxes() == 3)
return GetTransposeReorderingThreeAxesCuda(src, num_cols);
const int32_t *row_splits1_data = src.RowSplits(src.NumAxes() - 1).Data();
const int32_t *row_ids1_data = src.RowIds(src.NumAxes() - 1).Data();
const int32_t *value_data = src.values.Data();
int32_t n = src.values.Dim();
Array1<int32_t> ans = Range(context, n, 0);
if (n == 0) return ans;
auto lambda_comp = [=] __device__(int32_t a_idx01, int32_t b_idx01) -> bool {
int32_t a_idx0 = row_ids1_data[a_idx01];
int32_t b_idx0 = row_ids1_data[b_idx01];
int32_t a_col_index = value_data[a_idx01];
int32_t b_col_index = value_data[b_idx01];
if (a_col_index < b_col_index) return true; // sort by column indexes
if (a_col_index > b_col_index) return false;
// now we have a_col_index == b_col_index
if (a_idx0 < b_idx0) return true; // sort by row indexes
if (a_idx0 > b_idx0) return false;
// now we have a_idx0 == b_idx0 && a_col_index == b_col_index
// this entry is duplicated in the sparse matrix.
return false; // we can return either true or false here.
};
std::unique_ptr<mgpu::context_t> mgpu_context =
GetModernGpuAllocator(context);
K2_CUDA_SAFE_CALL(mgpu::mergesort(ans.Data(), n, lambda_comp, *mgpu_context));
return ans;
}
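// Illustrative usage sketch: a hypothetical helper (not part of the k2 API)
// showing one way the reordering might be applied. `src.values` hold column
// indexes, so gathering them with the returned permutation yields a
// non-decreasing sequence; the same permutation can be applied to any array
// that is parallel to `src.values`.
inline Array1<int32_t> ColumnMajorColumnIndexes(Ragged<int32_t> &src,
                                                int32_t num_cols) {
  Array1<int32_t> order = GetTransposeReordering(src, num_cols);
  return src.values[order];
}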
RaggedShape ChangeSublistSize(RaggedShape &src, int32_t size_delta) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(src.NumAxes(), 2);
// the result will have the same num-axes as `src` (the NumAxes() of the
// object is not the same as the number of RaggedShapeDim axes).
std::vector<RaggedShapeDim> ans_axes(src.NumAxes() - 1);
int32_t last_axis = src.NumAxes() - 1;
// The following will only do something if src.NumAxes() > 2.
for (int32_t i = 0; i + 1 < last_axis; ++i) ans_axes[i] = src.Axes()[i];
ContextPtr &c = src.Context();
int32_t num_rows = src.TotSize(last_axis - 1),
src_num_elems = src.TotSize(last_axis),
num_elems = src_num_elems + size_delta * num_rows;
ans_axes.back().row_splits = Array1<int32_t>(c, num_rows + 1);
ans_axes.back().row_ids = Array1<int32_t>(c, num_elems);
ans_axes.back().cached_tot_size = num_elems;
const int32_t *src_row_splits_data = src.RowSplits(last_axis).Data(),
*src_row_ids_data = src.RowIds(last_axis).Data();
int32_t *row_splits_data = ans_axes.back().row_splits.Data(),
*row_ids_data = ans_axes.back().row_ids.Data();
{
ParallelRunner pr(c);
{
With w(pr.NewStream());
K2_EVAL(
c, num_rows + 1, lambda_set_row_splits, (int32_t idx0)->void {
row_splits_data[idx0] =
src_row_splits_data[idx0] + size_delta * idx0;
});
}
{
With w(pr.NewStream());
K2_EVAL(
c, src_num_elems, lambda_set_row_ids1, (int32_t src_idx01)->void {
int32_t src_idx0 = src_row_ids_data[src_idx01],
src_idx0x = src_row_splits_data[src_idx0],
src_idx1 = src_idx01 - src_idx0x,
new_idx0x = row_splits_data[src_idx0],
new_idx0x_next = row_splits_data[src_idx0 + 1],
new_idx01 = new_idx0x + src_idx1;
            // it's only necessary to guard the next statement with an 'if'
            // because size_delta might be negative.
if (new_idx01 < new_idx0x_next) row_ids_data[new_idx01] = src_idx0;
});
}
if (size_delta > 0) {
// This sets the row-ids that are not set by lambda_set_row_ids1.
With w(pr.NewStream());
K2_EVAL(
c, num_rows * size_delta, lambda_set_row_ids2, (int32_t i)->void {
int32_t idx0 = i / size_delta, n = i % size_delta,
next_idx0 = idx0 + 1;
// The following formula is the same as the one in
// lambda_set_row_splits; we want to compute the new value of
// row_splits_data[next_idx0] without waiting for that kernel to
// terminate.
int32_t next_idx0x =
src_row_splits_data[next_idx0] + size_delta * next_idx0;
row_ids_data[next_idx0x - 1 - n] = idx0;
});
}
// make the ParallelRunner go out of scope (should do this before any
// validation code that gets invoked by the constructor of RaggedShape
// below).
}
return RaggedShape(ans_axes);
}
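// Worked example of the row_splits formula above: for a last axis with
// row_splits = [ 0 2 3 3 ] (sublist sizes 2, 1, 0) and size_delta = 1 the new
// row_splits are [ 0 3 5 6 ] (sizes 3, 2, 1). With a negative size_delta each
// sublist needs at least -size_delta elements, otherwise the new row_splits
// would not be non-decreasing.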
// TODO(dan): this could definitely be made more efficient.
RaggedShape ChangeSublistSizePinned(RaggedShape &src, int32_t size_delta) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(src.NumAxes(), 2);
// the result will have the same num-axes as `src` (the NumAxes() of the
// object is not the same as the number of RaggedShapeDim axes).
std::vector<RaggedShapeDim> ans_axes(src.NumAxes() - 1);
int32_t last_axis = src.NumAxes() - 1;
// The following will only do something if src.NumAxes() > 2.
for (int32_t i = 0; i + 1 < last_axis; ++i) ans_axes[i] = src.Axes()[i];
ContextPtr &c = src.Context();
int32_t num_rows = src.TotSize(last_axis - 1);
ans_axes.back().row_splits = Array1<int32_t>(c, num_rows + 1);
const int32_t *src_row_splits_data = src.RowSplits(last_axis).Data();
int32_t *row_splits_data = ans_axes.back().row_splits.Data();
  K2_EVAL(
      c, num_rows, lambda_set_row_sizes, (int32_t idx0)->void {
        int32_t orig_size = src_row_splits_data[idx0 + 1] -
                            src_row_splits_data[idx0],
                size;
        if (orig_size == 0 || orig_size + size_delta <= 0)
          size = 0;
        else
          size = orig_size + size_delta;
        row_splits_data[idx0] = size;
      });
ExclusiveSum(ans_axes.back().row_splits, &ans_axes.back().row_splits);
  ans_axes.back().row_ids =
      Array1<int32_t>(c, ans_axes.back().row_splits.Back());
RowSplitsToRowIds(ans_axes.back().row_splits,
&ans_axes.back().row_ids);
ans_axes.back().cached_tot_size = ans_axes.back().row_ids.Dim();
return RaggedShape(ans_axes);
}
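// Worked example of the clamping above: for last-axis sublist sizes [ 2 0 1 ]
// and size_delta = -1 the new sizes are [ 1 0 0 ]; unlike ChangeSublistSize(),
// empty sublists stay empty and sizes that would drop to zero or below are
// clamped to zero.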
RaggedShape Prefix(RaggedShape &src, int32_t n) {
NVTX_RANGE(K2_FUNC);
int32_t dim0 = src.Dim0();
K2_CHECK(n >= 0 && n <= dim0);
src.Populate();
int32_t num_axes = src.NumAxes();
K2_CHECK_GE(num_axes, 2);
const std::vector<RaggedShapeDim> &axes_in = src.Axes();
std::vector<RaggedShapeDim> axes_out(axes_in.size());
int32_t row_end = n;
for (int32_t axis = 0; axis < num_axes - 1; ++axis) {
axes_out[axis].row_splits = axes_in[axis].row_splits.Arange(0, row_end + 1);
// notice here we may do a memory copy from GPU to CPU.
row_end = axes_in[axis].row_splits[row_end];
axes_out[axis].row_ids = axes_in[axis].row_ids.Arange(0, row_end);
axes_out[axis].cached_tot_size = row_end;
}
return RaggedShape(axes_out);
}
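// Worked example of the prefix logic above: for
// src = [ [ x x ] [ x ] [ x x x ] ] and n = 2, axis 0 keeps
// row_splits = [ 0 2 3 ], row_end becomes row_splits[2] = 3, and the result
// is [ [ x x ] [ x ] ] with its 3 elements.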
std::vector<RaggedShape> GetPrefixes(RaggedShape &src,
const std::vector<int32_t> &sizes) {
NVTX_RANGE(K2_FUNC);
src.Populate();
int32_t dim0 = src.Dim0();
int32_t num_axes = src.NumAxes();
K2_CHECK_GE(num_axes, 2);
ContextPtr &c = src.Context();
const std::vector<RaggedShapeDim> &axes_in = src.Axes();
  // compute the row_end value at each axis, for each requested prefix size.
int32_t ans_size = static_cast<int32_t>(sizes.size());
Array1<int32_t> row_ends(c, num_axes * ans_size);
Array1<int32_t> sizes_array(GetCpuContext(), sizes);
Array1<int32_t> indexes = row_ends.Arange(0, ans_size);
indexes.CopyFrom(sizes_array);
for (int32_t axis = 1; axis < num_axes; ++axis) {
Array1<int32_t> curr_axis_row_ends =
row_ends.Arange(axis * ans_size, (axis + 1) * ans_size);
axes_in[axis - 1].row_splits.Index(indexes, &curr_axis_row_ends);
indexes = curr_axis_row_ends;
}
row_ends = row_ends.To(GetCpuContext());
std::vector<RaggedShape> ans(ans_size);
for (int32_t i = 0; i != ans_size; ++i) {
std::vector<RaggedShapeDim> axes_out(axes_in.size());
int32_t row_end = row_ends[i];
K2_CHECK(row_end >= 0 && row_end <= dim0);
for (int32_t axis = 0; axis < num_axes - 1; ++axis) {
axes_out[axis].row_splits =
axes_in[axis].row_splits.Arange(0, row_end + 1);
row_end = row_ends[i + (axis + 1) * ans_size];
axes_out[axis].row_ids = axes_in[axis].row_ids.Arange(0, row_end);
axes_out[axis].cached_tot_size = row_end;
}
ans[i] = RaggedShape(axes_out, false);
}
return ans;
}
RaggedShape SubsampleRaggedShape(RaggedShape &src, Renumbering &renumbering) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(renumbering.NumOldElems(), src.NumElements());
// Make sure final row-ids are populated.
src.RowIds(src.NumAxes() - 1);
std::vector<RaggedShapeDim> axes = src.Axes();
axes.back().row_ids = axes.back().row_ids[renumbering.New2Old()];
axes.back().row_splits = renumbering.Old2New()[axes.back().row_splits];
axes.back().cached_tot_size = axes.back().row_ids.Dim();
return RaggedShape(axes);
}
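// Worked example of the subsampling above: for src = [ [ x x ] [ x ] ] and a
// renumbering that keeps elements { 0, 2 }, the last-axis row_ids become
// [ 0 1 ] and the row_splits become [ 0 1 2 ], i.e. the result is
// [ [ x ] [ x ] ]; rows are kept, only elements of the last axis are dropped.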
RaggedShape SubsampleRaggedShape(RaggedShape &src, Renumbering &r_before_last,
Renumbering &r_last) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(r_before_last.NumOldElems(), src.TotSize(src.NumAxes() - 2));
K2_CHECK_EQ(r_last.NumOldElems(), src.NumElements());
// Make sure final and before-final row-ids are populated.
src.RowIds(src.NumAxes() - 2);
src.RowIds(src.NumAxes() - 1);
std::vector<RaggedShapeDim> axes = src.Axes();
// Suppose this shape has 3 axes (0,1,2). Its NumAxes()==3;
// axes.size()==2.
// r_before_last deals with the numbering on axis 1.
// r_last deals with the numbering on axis 2.
RaggedShapeDim &before_last = axes[axes.size() - 2],
&last = axes[axes.size() - 1];
int32_t new_tot_size1 = r_before_last.NumNewElems(),
new_tot_size2 = r_last.NumNewElems();
ContextPtr c = src.Context();
Array1<int32_t> before_last_row_ids(c, new_tot_size1),
last_row_splits(c, new_tot_size1 + 1), last_row_ids(c, new_tot_size2);
// The variable names below use this 3-axis assumption but the
// code will work for greater number of axes.
int32_t *new_row_ids1_data = before_last_row_ids.Data(),
*new_row_splits2_data = last_row_splits.Data(),
*new_row_ids2_data = last_row_ids.Data();
const int32_t *old_row_ids1_data = before_last.row_ids.Data(),
*old_row_splits2_data = last.row_splits.Data(),
*old_row_ids2_data = last.row_ids.Data();
const int32_t *idx01_new2old_data = r_before_last.New2Old().Data(),
*idx01_old2new_data = r_before_last.Old2New().Data(),
*idx012_new2old_data = r_last.New2Old().Data(),
*idx012_old2new_data = r_last.Old2New().Data();
ParallelRunner pr(c);
{
With w(pr.NewStream());
// before_last.row_splits maps from idx0 -> idx01 (contains idx01's). Map
// the idx01's; the idx0s stay the same.
before_last.row_splits = r_before_last.Old2New()[before_last.row_splits];
}
{
With w(pr.NewStream());
K2_EVAL(
c, new_tot_size1 + 1, lambda_set_row_ids1_and_row_splits2,
(int32_t new_idx01)->void {
// row_ids1 maps from idx01 -> idx0. Select subset of
// idx01's; the idx0 stays the same.
int32_t old_idx01 = idx01_new2old_data[new_idx01];
if (new_idx01 < new_tot_size1)
new_row_ids1_data[new_idx01] = old_row_ids1_data[old_idx01];
          // row_splits2 maps from idx01 -> idx012. Map both the index
          // (the idx01) and the value (the idx012).
new_row_splits2_data[new_idx01] =
idx012_old2new_data[old_row_splits2_data[old_idx01]];
});
}
{
With w(pr.NewStream());
K2_EVAL(
c, new_tot_size2, lambda_set_row_ids2, (int32_t new_idx012)->void {
// row_ids2 maps from idx012 -> idx01. Both must be mapped.
int32_t old_idx012 = idx012_new2old_data[new_idx012];
int32_t old_idx01 = old_row_ids2_data[old_idx012],
new_idx01 = idx01_old2new_data[old_idx01];
new_row_ids2_data[new_idx012] = new_idx01;
});
}
before_last.row_ids = before_last_row_ids;
before_last.cached_tot_size = new_tot_size1;
last.row_splits = last_row_splits;
last.row_ids = last_row_ids;
last.cached_tot_size = new_tot_size2;
return RaggedShape(axes);
}
RaggedShape EmptyRaggedShape(ContextPtr &c, int32_t num_axes) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(num_axes, 2);
std::vector<RaggedShapeDim> axes(num_axes - 1);
axes[0].row_splits = Array1<int32_t>(c, 1, 0);
// row_ids will be the empty vector, with context `c`.
axes[0].row_ids = axes[0].row_splits.Range(0, 0);
axes[0].cached_tot_size = 0;
for (int32_t a = 1; a + 1 < num_axes; a++) axes[a] = axes[0];
return RaggedShape(axes);
}
Array1<int32_t> GetDecreasingSizeOrder(RaggedShape &shape) {
ContextPtr c = shape.Context();
Array1<int32_t> sizes = RowSplitsToSizes(shape.RowSplits(1));
Array1<int32_t> index_map;
Sort<int32_t, GreaterThan<int32_t>>(&sizes, &index_map);
return index_map;
}
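// Worked example of the ordering above: for a shape whose axis-0 row sizes
// are [ 2 5 3 ], the sorted sizes are [ 5 3 2 ] and the returned index_map
// is [ 1 2 0 ].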
} // namespace k2
| 69fb3ce5569b27016c2cea6bc2e339855f8722d0.cu | /**
* @brief
* ragged_ops
*
* @copyright
* Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu)
* Mobvoi Inc. (authors: Fangjun Kuang)
*
* @copyright
* See LICENSE for clarification regarding multiple authors
*/
#include <algorithm>
#include <memory>
#include <vector>
#include "cub/cub.cuh"
#include "k2/csrc/array_ops.h"
#include "k2/csrc/macros.h"
#include "k2/csrc/math.h"
#include "k2/csrc/moderngpu_allocator.h"
#include "k2/csrc/ragged.h"
#include "k2/csrc/ragged_ops.h"
#include "moderngpu/kernel_mergesort.hxx"
namespace {
/*
A helper function used in RaggedShape3;
if both first and second are non-NULL, it will check if the context of them
is compatible or not and return that context if compatible;
if one of them is NULL, returns the other one's context.
*/
static k2::ContextPtr GetContext(const k2::Array1<int32_t> *first,
const k2::Array1<int32_t> *second) {
K2_CHECK(first != nullptr || second != nullptr)
<< "At least one of first and second must be non-NULL";
if (first == nullptr)
return second->Context();
else if (second == nullptr)
return first->Context();
else
return k2::GetContext(*first, *second);
}
} // namespace
namespace k2 {
RaggedShape RandomRaggedShape(bool set_row_ids, int32_t min_num_axes,
int32_t max_num_axes, int32_t min_num_elements,
int32_t max_num_elements) {
NVTX_RANGE(K2_FUNC);
ContextPtr c = GetCpuContext();
K2_CHECK(min_num_axes >= 2 && max_num_axes >= min_num_axes &&
min_num_elements >= 0 && max_num_elements >= min_num_elements);
int32_t num_axes = RandInt(min_num_axes, max_num_axes);
int32_t num_elements = RandIntGeometric(min_num_elements, max_num_elements);
bool done_repeats = false;
std::vector<RaggedShapeDim> axes(num_axes - 1);
for (int32_t axis = num_axes - 2; axis >= 0; axis--) {
// this axis will have row_ids of length num_elements and
// row_splits of length to be determined.
int32_t cur_row_split = 0;
std::vector<int32_t> row_splits_vec;
std::vector<int32_t> row_ids_vec;
row_splits_vec.push_back(cur_row_split);
// The reason for "|| RandInt(0, 2) == 0)" is so that even if there
// are no elements we can still potentially generate empty row-splits.
while (cur_row_split < num_elements || RandInt(0, 2) == 0) {
int32_t split_size = RandIntGeometric(0, num_elements - cur_row_split);
cur_row_split += split_size;
// sometimes we have a bunch of empty rows in a row (this will test out
// more of the code), so here we generate a bunch of empty rows, but we
// just do this only once (that's why we declare `done_repeats` here).
if (split_size == 0 && RandInt(0, 30) == 0 && !done_repeats) {
int32_t num_repeats = RandIntGeometric(1, 128);
row_splits_vec.insert(row_splits_vec.end(), num_repeats, cur_row_split);
// don't need to set `row_ids_vec` as there's no element.
done_repeats = true;
}
row_splits_vec.push_back(cur_row_split);
if (set_row_ids) {
int32_t cur_row = static_cast<int32_t>(row_splits_vec.size()) - 2;
row_ids_vec.insert(row_ids_vec.end(), split_size, cur_row);
}
}
axes[axis].row_splits = Array1<int32_t>(c, row_splits_vec);
if (set_row_ids) axes[axis].row_ids = Array1<int32_t>(c, row_ids_vec);
axes[axis].cached_tot_size = num_elements;
num_elements = axes[axis].row_splits.Dim() - 1;
}
// RaggedShape(axes, true) will check the returned RaggedShape for
// consistency.
return RaggedShape(axes, true);
}
RaggedShape RaggedShape2(Array1<int32_t> *row_splits, Array1<int32_t> *row_ids,
int32_t cached_tot_size) {
NVTX_RANGE(K2_FUNC);
K2_CHECK(row_splits != nullptr || row_ids != nullptr)
<< "At least one of row_splits and row_ids must be defined";
ContextPtr ctx = ::GetContext(row_splits, row_ids);
if (cached_tot_size != -1) {
if (row_ids != nullptr) K2_CHECK_EQ(cached_tot_size, row_ids->Dim());
if (row_splits != nullptr) {
// may be slow as it may copy memory from device to host
K2_DCHECK_EQ(cached_tot_size, row_splits->Back()) << "Bad row splits is: "
<< *row_splits;
}
}
std::vector<RaggedShapeDim> axes(1);
if (row_splits != nullptr) {
axes[0].row_splits = *row_splits;
} else {
    // we need to work out row_splits as we always require that row_splits is
    // not empty for RaggedShape. Note that here we assume the last element in
    // row_ids is num_rows - 1, i.e. there are no empty rows after row
    // `row_ids[-1]`.
int32_t num_rows = row_ids->Dim() == 0 ? 0 : row_ids->Back() + 1;
Array1<int32_t> row_splits_array(ctx, num_rows + 1);
RowIdsToRowSplits(*row_ids, &row_splits_array);
axes[0].row_splits = row_splits_array;
}
if (row_ids != nullptr) axes[0].row_ids = *row_ids;
if (cached_tot_size == -1) {
cached_tot_size =
row_ids != nullptr ? row_ids->Dim() : axes[0].row_splits.Back();
}
axes[0].cached_tot_size = cached_tot_size;
// note below line will check if row_splits and row_ids are valid and agree
// with each other.
return RaggedShape(axes);
}
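// Illustrative sketch: a hypothetical helper (not part of the original file)
// that builds the 2-axis ragged shape [ [ x x ] [ x ] [ ] ] from an explicit
// row_splits vector; row_ids and the cached total size are derived inside
// RaggedShape2() when they are not supplied.
inline RaggedShape TwoAxisExampleShape(ContextPtr &c) {
  std::vector<int32_t> row_splits_vec = {0, 2, 3, 3};
  Array1<int32_t> row_splits(c, row_splits_vec);
  return RaggedShape2(&row_splits, nullptr, -1);
}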
RaggedShape ComposeRaggedShapes(const RaggedShape &a, const RaggedShape &b) {
NVTX_RANGE(K2_FUNC);
if (a.NumElements() != b.Dim0()) {
K2_LOG(FATAL) << "ComposeRaggedShapes: shape mismatch: " << a.NumElements()
<< " vs. " << b.Dim0();
}
const auto &a_axes = a.Axes();
const auto &b_axes = b.Axes();
std::vector<RaggedShapeDim> axes(a_axes.size() + b_axes.size());
std::size_t a_size = a_axes.size(), b_size = b_axes.size();
for (std::size_t i = 0; i < a_size; ++i) axes[i] = a_axes[i];
for (std::size_t i = 0; i < b_size; ++i) axes[i + a_size] = b_axes[i];
return RaggedShape(axes);
}
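// Worked example of the composition above: if a = [ [ x x ] [ x ] ] (three
// elements) and b = [ [ x ] [ x x ] [ ] ] (so b.Dim0() == 3), the axes are
// simply concatenated and the result is the 3-axis shape
// [ [ [ x ] [ x x ] ] [ [ ] ] ].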
RaggedShape RaggedShape3(Array1<int32_t> *row_splits1,
Array1<int32_t> *row_ids1, int32_t cached_tot_size1,
Array1<int32_t> *row_splits2,
Array1<int32_t> *row_ids2, int32_t cached_tot_size2) {
NVTX_RANGE(K2_FUNC);
RaggedShape shape1 = RaggedShape2(row_splits1, row_ids1, cached_tot_size1);
Array1<int32_t> temp_array;
if (row_splits2 == nullptr) {
K2_CHECK_NE(row_ids2, nullptr)
<< "Either row-splits or row-ids must be defined";
temp_array = Array1<int32_t>(row_ids2->Context(), shape1.NumElements() + 1);
row_splits2 = &temp_array;
RowIdsToRowSplits(*row_ids2, row_splits2);
}
return ComposeRaggedShapes(
shape1, RaggedShape2(row_splits2, row_ids2, cached_tot_size2));
}
RaggedShape RaggedShape4(Array1<int32_t> *row_splits1,
Array1<int32_t> *row_ids1, int32_t cached_tot_size1,
Array1<int32_t> *row_splits2,
Array1<int32_t> *row_ids2, int32_t cached_tot_size2,
Array1<int32_t> *row_splits3,
Array1<int32_t> *row_ids3, int32_t cached_tot_size3) {
NVTX_RANGE(K2_FUNC);
RaggedShape shape12 = RaggedShape3(row_splits1, row_ids1, cached_tot_size1,
row_splits2, row_ids2, cached_tot_size2);
Array1<int32_t> temp_array;
if (row_splits3 == nullptr) {
K2_CHECK_NE(row_ids3, nullptr)
<< "Either row-splits or row-ids must be defined";
temp_array =
Array1<int32_t>(row_ids3->Context(), shape12.NumElements() + 1);
row_splits3 = &temp_array;
RowIdsToRowSplits(*row_ids3, row_splits3);
}
return ComposeRaggedShapes(
shape12, RaggedShape2(row_splits3, row_ids3, cached_tot_size3));
}
RaggedShape RaggedShapeFromTotSizes(ContextPtr c, int32_t num_axes,
int32_t *tot_sizes) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(num_axes, 2);
std::vector<RaggedShapeDim> axes(num_axes - 1);
// In future we might choose to allocate everything in one big array, to avoid
// multiple allocations, but for now just do it the simple way.
for (int32_t axis = 1; axis < num_axes; ++axis) {
axes[axis - 1].row_splits = Array1<int32_t>(c, tot_sizes[axis - 1] + 1);
axes[axis - 1].row_ids = Array1<int32_t>(c, tot_sizes[axis]);
axes[axis - 1].cached_tot_size = tot_sizes[axis];
}
// Not check here as we did not set the values of row_splits and row_ids
return RaggedShape(axes, false);
}
Array1<int32_t *> GetRowSplitsPtr(RaggedShape &src) {
NVTX_RANGE(K2_FUNC);
int32_t axes = src.NumAxes();
K2_CHECK_GE(axes, 2);
std::vector<int32_t *> row_splits_start(axes - 1);
for (int32_t i = 1; i != axes; ++i) {
Array1<int32_t> &cur_splits = src.RowSplits(i);
row_splits_start[i - 1] = cur_splits.Data();
}
return Array1<int32_t *>(src.Context(), row_splits_start);
}
// See declaration in ragged.h for documentation of its purpose and interface.
RaggedShape Unsqueeze(const RaggedShape &src, int32_t axis) {
// If axis == 0, initial row_splits and row_ids will look like the following,
// if for example src.Dim0() was 5: [ 0 5 ], [ 0 0 0 0 0 ]. The other axes
// would be pushed forward.
//
// If 0 < axis <= src.NumAxes(), the inserted row_splits and row_ids would
// look like the following, if for instance the src.TotSize(axis) = 8:
// [ 0 1 2 3 4 5 6 7 8 ], [ 0 1 2 3 4 5 6 7 ].
//
// The reason why the code is different for axis == 0, is that in that case we
// are really making visible an "implicit" axis of the input `src`; we could
// call it axis 0 of the original RaggedShape. Imagine that "implicit" axis's
// row_splits and row_ids map respectively from an idx_minus1 -> idx0 and from
// an idx_0 to idx_minus1, where idx_minus1 is always 0 and 0 <= idx0 <
// Dim0().
NVTX_RANGE(K2_FUNC);
ContextPtr c = src.Context();
K2_CHECK(axis >= 0 && axis <= src.NumAxes());
const std::vector<RaggedShapeDim> &axes_in = src.Axes();
int32_t num_axes_in = src.NumAxes();
// Note: in RaggedShape, the vector of RaggedShapeDim is of length
// num_axes - 1, so the output will have one more axis than the input.
std::vector<RaggedShapeDim> axes_out(num_axes_in);
int32_t row_splits_dim, row_ids_dim;
Array1<int32_t> mem;
if (axis == 0) {
row_splits_dim = 2; // e.g. [ 0 5 ]
row_ids_dim = src.Dim0(); // e.g. [ 0 0 0 0 0 ]
mem = Array1<int32_t>(c, row_splits_dim + row_ids_dim);
int32_t *mem_data = mem.Data();
K2_EVAL(
c, mem.Dim(), lambda_set_mem, (int32_t i)->void {
if (i == 1)
mem_data[i] = row_ids_dim;
else
mem_data[i] = 0;
});
} else {
int32_t tot_size = src.TotSize(axis);
row_splits_dim = tot_size + 1;
row_ids_dim = tot_size;
mem = Array1<int32_t>(c, row_splits_dim + row_ids_dim);
int32_t *mem_data = mem.Data();
K2_EVAL(
c, mem.Dim(), lambda_set_mem2,
(int32_t i)->void { mem_data[i] = i % (tot_size + 1); });
}
axes_out[axis].row_splits = mem.Range(0, row_splits_dim);
axes_out[axis].row_ids = mem.Range(row_splits_dim, row_ids_dim);
axes_out[axis].cached_tot_size = row_ids_dim;
for (int32_t i = 0; i < axis; ++i) axes_out[i] = axes_in[i];
// Note: the returned array has `num_axes_in + 1` axes, so its
// array of RaggedShapeDim is of length `num_axes_in`.
for (int32_t i = axis + 1; i < num_axes_in; ++i) axes_out[i] = axes_in[i - 1];
return RaggedShape(axes_out);
}
std::vector<RaggedShape> UnsqueezeParallel(int32_t num_srcs, RaggedShape **src,
int32_t axis) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(axis, 0);
std::vector<RaggedShape> ans;
if (num_srcs == 0) return ans;
ans.reserve(num_srcs);
ContextPtr c = src[0]->Context();
std::vector<int32_t> all_row_splits_vec(num_srcs * 2);
int32_t max_dim = 0;
// all_row_splits_vec will contain [ 0 d0 0 d1 0 d2 .. ]
// where d0 == src[0]->Dim0(), d1 == src[1]->Dim0()..
for (int32_t i = 0; i < num_srcs; i++) {
int32_t this_dim0 = src[i]->Dim0();
if (this_dim0 > max_dim) max_dim = this_dim0;
all_row_splits_vec[i * 2] = 0;
all_row_splits_vec[i * 2 + 1] = this_dim0;
}
Array1<int32_t> all_row_splits(c, all_row_splits_vec);
Array1<int32_t> all_row_ids(c, max_dim, 0);
for (int32_t i = 0; i < num_srcs; i++) {
int32_t num_axes = src[i]->NumAxes();
std::vector<RaggedShapeDim> axes;
axes.reserve(num_axes); // note, the size of the `axes` of a RaggedShape
// is its NumAxes() - 1.
axes.resize(1);
int32_t this_old_dim0 = all_row_splits_vec[i * 2 + 1];
axes[0].row_splits = all_row_splits.Range(i * 2, 2);
axes[0].row_ids = all_row_ids.Range(0, this_old_dim0);
axes[0].cached_tot_size = this_old_dim0;
axes.insert(axes.end(), src[i]->Axes().begin(), src[i]->Axes().end());
ans.emplace_back(axes);
}
return ans;
}
/*
Internal function used in Index(), which gets certain arrays used internally.
@param [in] src Source shape to be indexed
@param [in] src_row_splits_ptrs Result of calling GetRowSplitsPtr(src)
@param [in] new2old Array of indexes into axis 0 of src
@param [out] old_offsets Will be set to new Array2 with dimension
(src.NumAxes(), new2old.Dim()), whose (i,j)'th
element contains the offset into axis i of `src`
where the slice of `src` with index0 (i.e. index
into 0'th-axis of `src`) equal to `new2old[j]`
begins.
@param [out] new_offsets Will be set to new Array2 with dimension
(src.NumAxes(), new2old.Dim()+1), whose (i,j)'th
element contains the offset into axis i of `ans`
where the data in `ans` corresponding to
index j (i.e. index j into axis 0 of `ans`) begins.
Note: `ans` is the result of Index(), with
ans.Dim0() == new2old.Dim().
*/
inline void GetOldAndNewOffsets(RaggedShape &src,
const Array1<int32_t *> &src_row_splits_ptrs,
const Array1<int32_t> &new2old,
Array2<int32_t> *old_offsets,
Array2<int32_t> *new_offsets) {
NVTX_RANGE(K2_FUNC);
K2_CHECK(src.NumAxes() > 1);
ContextPtr &c = src.Context();
int32_t num_axes = src.NumAxes(), ans_dim0 = new2old.Dim();
int32_t *const *src_row_splits_ptrs_data = src_row_splits_ptrs.Data();
const int32_t *new2old_data = new2old.Data();
*old_offsets = Array2<int32_t>(c, num_axes, ans_dim0);
*new_offsets = Array2<int32_t>(c, num_axes, ans_dim0 + 1);
auto old_offsets_acc = old_offsets->Accessor(),
new_offsets_acc = new_offsets->Accessor();
// Set old_offsets; and for now, set new_offsets to the corresponding
// sizes of the output slices.
K2_EVAL(
c, ans_dim0, lambda_set_offsets, (int32_t i)->void {
// 0 <= i < ans_dim0
int32_t old_offset = new2old_data[i], old_offset_next = old_offset + 1;
for (int32_t axis = 0;; axis++) {
old_offsets_acc(axis, i) = old_offset;
// Below, 'new_offsets_acc' currently contains the size rather
// than the offset; we need to do exclusive-sum.
new_offsets_acc(axis, i) = old_offset_next - old_offset;
if (axis + 1 == num_axes) return;
old_offset = src_row_splits_ptrs_data[axis][old_offset];
old_offset_next = src_row_splits_ptrs_data[axis][old_offset_next];
}
});
ExclusiveSum(*new_offsets, new_offsets);
}
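// Worked example of the offsets above: for the 2-axis shape
// src = [ [ x x ] [ x ] [ x x x ] ] (row_splits = [ 0 2 3 6 ]) and
// new2old = [ 2 0 ]:
//   old_offsets = [[ 2 0 ]     (axis 0: where each selected slice begins)
//                  [ 3 0 ]]    (axis 1: the corresponding element offsets)
//   new_offsets = [[ 0 1 2 ]   (axis 0: each slice contributes one row)
//                  [ 0 3 5 ]]  (axis 1: slice sizes 3 and 2, exclusive-summed)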
RaggedShape Index(RaggedShape &src, const Array1<int32_t> &new2old,
Array1<int32_t> *elem_indexes /*=nullptr*/) {
NVTX_RANGE(K2_FUNC);
ContextPtr c = src.Context();
bool is_cpu = (c->GetDeviceType() == kCpu);
K2_CHECK(IsCompatible(src, new2old));
int32_t num_axes = src.NumAxes(), src_dim0 = src.Dim0(),
ans_dim0 = new2old.Dim();
if (ans_dim0 == 0) {
if (elem_indexes) *elem_indexes = Array1<int32_t>(c, 0);
return EmptyRaggedShape(c, num_axes);
}
Array1<int32_t *> src_row_splits_ptrs = GetRowSplitsPtr(src);
Array2<int32_t> old_offsets, // num_axes by ans_dim0
new_offsets; // num_axes by (ans_dim0 + 1).
GetOldAndNewOffsets(src, src_row_splits_ptrs, new2old, &old_offsets,
&new_offsets);
Array1<int32_t> tot_sizes_out =
Array1<int32_t>(new_offsets.Col(ans_dim0)).To(GetCpuContext());
if (elem_indexes) *elem_indexes = Array1<int32_t>(c, tot_sizes_out.Back());
RaggedShape ans = RaggedShapeFromTotSizes(c, num_axes, tot_sizes_out.Data());
auto old_offsets_acc = old_offsets.Accessor(),
new_offsets_acc = new_offsets.Accessor();
ParallelRunner pr(c);
std::vector<cudaStream_t> streams(num_axes);
int32_t num_jobs = ans_dim0 * 2; // note: this formula is not a heuristic;
                                    // it's how TaskRedirect works.
Array2<TaskRedirect> task_redirects(c, num_axes, num_jobs);
auto task_redirects_acc = task_redirects.Accessor();
for (int32_t axis = 0; axis < num_axes; ++axis) {
streams[axis] = pr.NewStream();
With w(streams[axis]);
const int32_t *new_offsets_ptr = new_offsets_acc.Row(axis);
TaskRedirect *task_redirect_ptr = task_redirects_acc.Row(axis);
GetTaskRedirect(c, ans_dim0, new_offsets_ptr, task_redirect_ptr);
}
for (int32_t axis = 0; axis < num_axes - 1; ++axis) {
{
int32_t *this_new_row_splits = ans.RowSplits(axis + 1).Data();
const int32_t *this_old_row_splits = src.RowSplits(axis + 1).Data();
auto lambda_set_row_splits = [=] __host__ __device__(
int32_t ans_idx0, int32_t num_threads,
int32_t thread_idx) -> void {
// 0 <= ans_idx0 < ans_dim0; and 0 <= thread_idx < num_threads,
// num_threads may have any value > 0 as far as this code is concerned.
//
// Reminder of how row_splits work dimensionally: they are a map
// from, e.g. an idx0 to an idx0x. An offsets_acc(0,n) is
// dimensionally an idx0; an offsets_acc(1,n) an idx01, and so on.
// The locations in the row_splits array are as given by
// the `axis`'th row of `offsets`; the values in the array
// are related to those in the `axis+1`'th row.
int32_t this_new_offset = new_offsets_acc(axis, ans_idx0),
next_new_offset = new_offsets_acc(axis, ans_idx0 + 1),
num_rows = next_new_offset - this_new_offset,
this_old_offset = old_offsets_acc(axis, ans_idx0),
value_offset = new_offsets_acc(axis + 1, ans_idx0) -
old_offsets_acc(axis + 1, ans_idx0);
// Using <= instead of < below causes threads for different ans_idx0 to
// write a single overlapping value, but also ensures that the
// terminating value is written. This only works because row_splits
// vectors always start with 0, which is not necessarily the case
// for row-ids.
for (; thread_idx <= num_rows; thread_idx += num_threads) {
this_new_row_splits[this_new_offset + thread_idx] =
value_offset + this_old_row_splits[this_old_offset + thread_idx];
}
};
int32_t min_threads_per_job = 2, tot_work = tot_sizes_out[axis],
target_num_loops = (is_cpu || tot_work > 1000000 ? 8 : 2);
EvalWithRedirect(streams[axis], num_jobs, task_redirects_acc.Row(axis),
min_threads_per_job, tot_work, target_num_loops,
lambda_set_row_splits);
}
{
int32_t *this_new_row_ids = ans.RowIds(axis + 1).Data();
const int32_t *this_old_row_ids = src.RowIds(axis + 1).Data();
int32_t min_threads_per_job = 2, tot_work = tot_sizes_out[axis],
target_num_loops = (is_cpu || tot_work > 1000000 ? 8 : 2);
if (elem_indexes == nullptr || axis != num_axes - 2) {
// If we don't need to write to `elem_indexes`... [caution: the next
// code block differs from this only by a statement that sets
// `elem_indexes` and they should be kept in sync.]
auto lambda_set_row_ids = [=] __host__ __device__(
int32_t ans_idx0, int32_t num_threads,
int32_t thread_idx) -> void {
// Reminder of how row_ids work dimensionally: they are a map
// from, e.g. an idx01 to an idx0. An offsets_acc(0,n) is
// dimensionally an idx0; an offsets_acc(1,n) an idx01, and so on.
// The locations in the row_ids array are as given by
// the `axis+1`'th row of `offsets`; the values in the array
// are related to those in the `axis`'th row.
int32_t this_new_offset = new_offsets_acc(axis + 1, ans_idx0),
next_new_offset = new_offsets_acc(axis + 1, ans_idx0 + 1),
num_elems = next_new_offset - this_new_offset,
this_old_offset = old_offsets_acc(axis + 1, ans_idx0),
value_offset = new_offsets_acc(axis, ans_idx0) -
old_offsets_acc(axis, ans_idx0);
for (; thread_idx < num_elems; thread_idx += num_threads) {
this_new_row_ids[this_new_offset + thread_idx] =
value_offset + this_old_row_ids[this_old_offset + thread_idx];
}
};
EvalWithRedirect(streams[axis + 1], num_jobs,
task_redirects_acc.Row(axis + 1), min_threads_per_job,
tot_work, target_num_loops, lambda_set_row_ids);
} else {
int32_t *elem_indexes_data = elem_indexes->Data();
// We need to write to `elem_indexes`. Note: this code block only
// differs from the above by an extra statement regarding
// `elem_indexes`. Comments have been removed.
auto lambda_set_row_ids_and_elem_indexes =
[=] __host__ __device__(int32_t ans_idx0, int32_t num_threads,
int32_t thread_idx) -> void {
int32_t this_new_offset = new_offsets_acc(axis + 1, ans_idx0),
next_new_offset = new_offsets_acc(axis + 1, ans_idx0 + 1),
num_elems = next_new_offset - this_new_offset,
this_old_offset = old_offsets_acc(axis + 1, ans_idx0),
value_offset = new_offsets_acc(axis, ans_idx0) -
old_offsets_acc(axis, ans_idx0);
for (; thread_idx < num_elems; thread_idx += num_threads) {
this_new_row_ids[this_new_offset + thread_idx] =
value_offset + this_old_row_ids[this_old_offset + thread_idx];
elem_indexes_data[this_new_offset + thread_idx] =
this_old_offset + thread_idx;
}
};
EvalWithRedirect(streams[axis + 1], num_jobs,
task_redirects_acc.Row(axis + 1), min_threads_per_job,
tot_work, target_num_loops,
lambda_set_row_ids_and_elem_indexes);
}
}
}
#if !defined(NDEBUG)
ans.Check();
#endif
return ans;
}
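// Illustrative usage sketch: a hypothetical helper (not part of the original
// file) that keeps axis-0 rows 2 and 0 of `src`, in that order; it assumes
// src.Dim0() >= 3. If `elem_indexes` is non-NULL it is filled with the source
// position of every element of the result.
inline RaggedShape SelectAndReorderRows(RaggedShape &src,
                                        Array1<int32_t> *elem_indexes) {
  std::vector<int32_t> new2old_vec = {2, 0};
  Array1<int32_t> new2old(src.Context(), new2old_vec);
  return Index(src, new2old, elem_indexes);
}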
Array2<int32_t> GetOffsets(int32_t num_srcs, RaggedShape **src) {
K2_CHECK_GT(num_srcs, 0);
int32_t num_axes_in = src[0]->NumAxes();
ContextPtr ctx = src[0]->Context();
Array2<int32_t> src_offsets(GetCpuContext(), num_axes_in + 1, num_srcs + 1);
int32_t *src_offsets_data = src_offsets.Data();
int32_t src_offsets_stride0 = src_offsets.ElemStride0();
// Check if they have same num-axes and compatible context
for (int32_t i = 1; i < num_srcs; ++i) {
K2_CHECK_EQ(src[i]->NumAxes(), num_axes_in);
K2_CHECK(ctx->IsCompatible(*src[i]->Context()));
}
for (int32_t axis = 0; axis <= num_axes_in; ++axis) {
int32_t sum = 0;
for (int32_t i = 0; i <= num_srcs; ++i) { // i is the column
src_offsets_data[axis * src_offsets_stride0 + i] = sum;
if (i < num_srcs) {
sum += (axis == 0 ? 1 : src[i]->TotSize(axis - 1));
}
}
}
return src_offsets;
}
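// Worked example of the offsets above: for num_srcs = 2 with
// src[0] = [ [ x x ] [ x ] ] and src[1] = [ [ x ] ], the returned array is
//   [[ 0 1 2 ]   (row 0: each source counts as one item at the top level)
//    [ 0 2 3 ]   (row 1: cumulative Dim0(), i.e. 2 then 1)
//    [ 0 3 4 ]]  (row 2: cumulative TotSize(1), i.e. 3 then 1)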
void GetRowInfo(RaggedShape &src, Array1<int32_t *> *row_splits,
Array1<int32_t *> *row_ids) {
NVTX_RANGE(K2_FUNC);
int32_t axes = src.NumAxes();
K2_CHECK_GE(axes, 2);
src.Populate();
std::vector<int32_t *> row_splits_ptrs(axes - 1);
std::vector<int32_t *> row_ids_ptrs(axes - 1);
for (int32_t i = 1; i != axes; ++i) {
row_splits_ptrs[i - 1] = src.RowSplits(i).Data();
row_ids_ptrs[i - 1] = src.RowIds(i).Data();
}
ContextPtr ctx = src.Context();
*row_splits = Array1<int32_t *>(ctx, row_splits_ptrs);
*row_ids = Array1<int32_t *>(ctx, row_ids_ptrs);
}
void GetRowInfoMulti(int32_t num_srcs, RaggedShape **src,
Array2<int32_t *> *row_splits,
Array2<int32_t *> *row_ids) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(num_srcs, 0);
int32_t num_axes_in = src[0]->NumAxes();
K2_CHECK_GE(num_axes_in, 2);
ContextPtr ctx = src[0]->Context();
// check if they have same num-axes and compatible context
for (int32_t i = 1; i < num_srcs; ++i) {
K2_CHECK_EQ(src[i]->NumAxes(), num_axes_in);
K2_CHECK(ctx->IsCompatible(*src[i]->Context()));
}
Array2<int32_t *> row_splits_ptrs(GetCpuContext(), num_axes_in - 1, num_srcs);
Array2<int32_t *> row_ids_ptrs(GetCpuContext(), num_axes_in - 1, num_srcs);
int32_t **splits_ptr_data = row_splits_ptrs.Data();
int32_t **ids_ptr_data = row_ids_ptrs.Data();
int32_t stride0 = row_splits_ptrs.ElemStride0();
K2_CHECK_EQ(stride0, row_ids_ptrs.ElemStride0());
for (int32_t axis = 0; axis != num_axes_in - 1; ++axis) {
for (int32_t i = 0; i != num_srcs; ++i) {
splits_ptr_data[axis * stride0 + i] = src[i]->RowSplits(axis + 1).Data();
ids_ptr_data[axis * stride0 + i] = src[i]->RowIds(axis + 1).Data();
}
}
*row_splits = row_splits_ptrs.To(ctx);
*row_ids = row_ids_ptrs.To(ctx);
}
RaggedShape Append(int32_t axis, int32_t num_srcs, RaggedShape **src) {
NVTX_RANGE(K2_FUNC);
if (num_srcs == 1) return **src;
K2_CHECK_GT(num_srcs, 1);
if (axis == 1) {
RaggedShape temp = Stack(axis, num_srcs, src);
return RemoveAxis(temp, axis);
}
K2_CHECK_EQ(axis, 0) << "Append() with axis > 1 not yet supported";
int32_t num_axes = src[0]->NumAxes();
ContextPtr c = src[0]->Context();
bool is_cpu = (c->GetDeviceType() == kCpu);
// Check if they have same num-axes and compatible context
for (int32_t i = 1; i < num_srcs; ++i) {
K2_CHECK_EQ(num_axes, src[i]->NumAxes());
K2_CHECK(IsCompatible(*src[0], *src[i]));
}
// `offsets` will be on CPU for now.
Array2<int32_t> offsets = GetOffsets(num_srcs, src);
auto offsets_acc = offsets.Accessor();
std::vector<int32_t> tot_sizes_out(num_axes);
for (int32_t axis = 0; axis < num_axes; ++axis)
tot_sizes_out[axis] = offsets_acc(axis + 1, num_srcs);
RaggedShape ans = RaggedShapeFromTotSizes(c, num_axes, tot_sizes_out.data());
Array2<int32_t *> src_row_splits, src_row_ids;
GetRowInfoMulti(num_srcs, src, &src_row_splits, &src_row_ids);
auto src_row_splits_acc = src_row_splits.Accessor(),
src_row_ids_acc = src_row_ids.Accessor();
offsets = offsets.To(c);
offsets_acc = offsets.Accessor(); // on GPU now (if we're using one)
ParallelRunner pr(c);
std::vector<cudaStream_t> streams(num_axes);
int32_t num_jobs = num_srcs * 2;
// task_redirects is a device array (if using GPU).
// We have `num_axes - 1` different sets of row_splits/row_ids to
// populate but they have different sizes; the total number of distinct
// sizes is `num_axes`.
Array2<TaskRedirect> task_redirects(c, num_axes, num_jobs);
auto task_redirects_acc = task_redirects.Accessor();
  // populate task_redirects (these allocate blocks of threads roughly
  // proportionally to the amount of data to process from each source).
for (int32_t axis = 0; axis < num_axes; ++axis) {
streams[axis] = pr.NewStream();
With w(streams[axis]);
const int32_t *offsets = offsets_acc.Row(axis + 1);
// c->GetCudaStream() == stream[axis] as it has been overridden by With
GetTaskRedirect(c, num_srcs, offsets, task_redirects_acc.Row(axis));
}
for (int32_t axis = 0; axis < num_axes - 1; axis++) {
// first set the row-splits.
int32_t **this_src_row_splits = src_row_splits_acc.Row(axis),
**this_src_row_ids = src_row_ids_acc.Row(axis);
int32_t *this_dest_row_splits = ans.RowSplits(axis + 1).Data(),
*this_dest_row_ids = ans.RowIds(axis + 1).Data();
const int32_t *offsets_this_axis = offsets_acc.Row(axis + 1),
*offsets_next_axis = offsets_acc.Row(axis + 2);
auto lambda_set_row_splits = [=] __host__ __device__(
int32_t src_idx, int32_t num_threads,
int32_t thread_idx) -> void {
// Reminder of how row_splits work dimensionally: they are a map
// from, e.g. an idx0 to an idx0x. An offsets_acc(0,n) is
// dimensionally an idx0; an offsets_acc(1,n) an idx01, and so on.
int32_t this_offset = offsets_this_axis[src_idx],
next_offset = offsets_this_axis[src_idx + 1],
this_value_offset = offsets_next_axis[src_idx],
num_rows = next_offset - this_offset;
int32_t *src_row_splits_ptr = this_src_row_splits[src_idx];
// Using <= instead of < below causes threads for different src_idx to
// write a single overlapping value, but also ensures that the
// terminating value is written. This only works because row_splits
// vectors always start with 0, which is not necessarily the case
// for row-ids.
for (; thread_idx <= num_rows; thread_idx += num_threads) {
this_dest_row_splits[this_offset + thread_idx] =
this_value_offset + src_row_splits_ptr[thread_idx];
}
};
int32_t min_threads_per_job = 2, tot_work = tot_sizes_out[axis],
target_num_loops = (is_cpu || tot_work > 1000000 ? 8 : 2);
EvalWithRedirect(streams[axis], num_jobs, task_redirects_acc.Row(axis),
min_threads_per_job, tot_work, target_num_loops,
lambda_set_row_splits);
{ // set the row-ids
auto lambda_set_row_ids = [=] __host__ __device__(
int32_t src_idx, int32_t num_threads,
int32_t thread_idx) -> void {
// Reminder of how row_ids work dimensionally: they are a map
// from, e.g. an idx01 to an idx0. An offsets_acc(0,n) is
// dimensionally an idx0; an offsets_acc(1,n) an idx01, and so on.
int32_t this_offset = offsets_next_axis[src_idx],
next_offset = offsets_next_axis[src_idx + 1],
this_value_offset = offsets_this_axis[src_idx],
num_elems = next_offset - this_offset;
int32_t *src_row_ids_ptr = this_src_row_ids[src_idx];
for (; thread_idx < num_elems; thread_idx += num_threads) {
this_dest_row_ids[this_offset + thread_idx] =
this_value_offset + src_row_ids_ptr[thread_idx];
}
};
int32_t min_threads_per_job = 2, tot_work = tot_sizes_out[axis + 1],
target_num_loops = (tot_work > 1000000 ? 4 : 2);
// TODO(haowen): maybe we should launch kernels for row_splits and row_ids
// in different streams
EvalWithRedirect(streams[axis + 1], num_jobs,
task_redirects_acc.Row(axis + 1), min_threads_per_job,
tot_work, target_num_loops, lambda_set_row_ids);
}
}
return ans;
}
RaggedShape RemoveAxis(RaggedShape &src, int32_t axis) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(src.NumAxes(), 2);
K2_CHECK(axis >= 0 && axis < src.NumAxes());
// note, `axes_in` is of dim src.NumAxes() - 1.
// Also note: axes_in[i] pertains to the relationship between
// axes i and i+1 in the source.
src.Populate();
const std::vector<RaggedShapeDim> &axes_in = src.Axes();
std::vector<RaggedShapeDim> axes_out(axes_in.size() - 1);
int32_t axes_out_size = static_cast<int32_t>(axes_out.size());
for (int32_t i = 0; i < axis - 1; ++i) axes_out[i] = axes_in[i];
if (axis > 0 && axis + 1 < src.NumAxes()) {
axes_out[axis - 1].row_ids =
axes_in[axis - 1].row_ids[axes_in[axis].row_ids];
axes_out[axis - 1].row_splits =
axes_in[axis].row_splits[axes_in[axis - 1].row_splits];
axes_out[axis - 1].cached_tot_size = axes_out[axis - 1].row_ids.Dim();
}
for (int32_t i = axis; i < axes_out_size; ++i) axes_out[i] = axes_in[i + 1];
return RaggedShape(axes_out);
}
RaggedShape MakeTransposable(RaggedShape &src) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(src.NumAxes(), 2);
int32_t src_dim0 = src.Dim0(), src_tot_size1 = src.TotSize(1);
if (src_dim0 <= 1) return src;
ContextPtr c = src.Context();
int32_t num_axes = src.NumAxes();
int32_t max_size = src.MaxSize(1);
if (max_size <= 0) return src;
int32_t ans_tot_size1 = max_size * src_dim0;
src.Populate();
const std::vector<RaggedShapeDim> &axes_in = src.Axes();
std::vector<RaggedShapeDim> axes_out(num_axes - 1);
const int32_t *src_row_splits1_data = src.RowSplits(1).Data();
const int32_t *src_row_ids1_data = src.RowIds(1).Data();
{
ParallelRunner pr(c);
RaggedShapeDim &axis1_shape = axes_out[0];
{
// set ans.RowSplits(1);
With w(pr.NewStream());
axis1_shape.row_splits = Range(c, src_dim0 + 1, 0, max_size);
}
{
// set ans.RowIds(1);
With w(pr.NewStream());
axis1_shape.row_ids = Array1<int32_t>(c, ans_tot_size1);
int32_t *row_ids1_data = axis1_shape.row_ids.Data();
axis1_shape.cached_tot_size = ans_tot_size1;
K2_EVAL(
c, ans_tot_size1, lambda_set_row_ids1,
(int32_t i)->void { row_ids1_data[i] = i / max_size; });
}
if (num_axes > 2) {
RaggedShapeDim &axis2_shape = axes_out[1];
const int32_t *src_row_splits2_data = src.RowSplits(2).Data();
{
// set ans.RowSplits(2);
With w(pr.NewStream());
axis2_shape.cached_tot_size = src.TotSize(2);
axis2_shape.row_splits = Array1<int32_t>(c, ans_tot_size1 + 1);
int32_t *ans_row_splits2_data = axis2_shape.row_splits.Data();
K2_EVAL(
c, ans_tot_size1 + 1, lambda_set_row_splits2,
(int32_t idx01)->void {
if (idx01 == ans_tot_size1) {
ans_row_splits2_data[idx01] =
src_row_splits2_data[src_tot_size1];
return;
}
int32_t idx0 = idx01 / max_size, idx1 = idx01 % max_size;
int32_t idx0x = src_row_splits1_data[idx0],
idx0x_next = src_row_splits1_data[idx0 + 1];
int32_t num_elems_this_row = idx0x_next - idx0x;
if (idx1 < num_elems_this_row)
ans_row_splits2_data[idx01] =
src_row_splits2_data[idx0x + idx1];
else
ans_row_splits2_data[idx01] =
src_row_splits2_data[idx0x_next]; // append empty row
});
}
{
// set ans.RowIds(2);
With w(pr.NewStream());
int32_t tot_size2 = src.TotSize(2);
axis2_shape.row_ids = Array1<int32_t>(c, tot_size2);
int32_t *ans_row_ids2_data = axis2_shape.row_ids.Data();
const int32_t *src_row_ids2_data = src.RowIds(2).Data();
K2_EVAL(
c, tot_size2, lambda_set_row_ids2, (int32_t idx012)->void {
int32_t src_idx01 = src_row_ids2_data[idx012];
int32_t src_idx0 = src_row_ids1_data[src_idx01];
int32_t src_idx1 = src_idx01 - src_row_splits1_data[src_idx0];
ans_row_ids2_data[idx012] = (src_idx0 * max_size) + src_idx1;
});
}
}
}
  // copy the remaining row_splits and row_ids;
for (int32_t i = 2; i < num_axes - 1; ++i) axes_out[i] = axes_in[i];
return RaggedShape(axes_out);
}
// transpose axes 0 and 1.
RaggedShape Transpose(RaggedShape &src, Array1<int32_t> *value_indexes) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(src.NumAxes(), 2);
ContextPtr c = src.Context();
int32_t src_dim0 = src.Dim0(), src_tot_size1 = src.TotSize(1);
if (src_dim0 <= 0) return src;
int32_t src_dim1 = src_tot_size1 / src_dim0;
K2_CHECK_EQ(src_tot_size1 % src_dim0, 0)
<< "Transpose(): all dims on axis 0 must be the same.\n"
<< "src_tot_size1: " << src_tot_size1 << "\n"
<< "src_dim0: " << src_dim0 << ", array is: " << src;
K2_DCHECK(
Equal(src.RowSplits(1), Range(c, src.RowSplits(1).Dim(), 0, src_dim1)))
<< " Expected row-splits to be evenly spaced: " << src.RowSplits(1);
RaggedShape src_no_axis0 = RemoveAxis(src, 0);
K2_CHECK_EQ(src_no_axis0.Dim0(), src_tot_size1);
// `renumbering` is a `new2old` map, that maps from the first index in
// src_no_axis0_renumbered
// to the first index into src_no_axis0.
Array1<int32_t> renumbering(c, src_tot_size1);
int32_t *renumbering_data = renumbering.Data();
K2_EVAL(
c, src_tot_size1, lambda_set_renumbering, (int32_t i)->void {
int32_t j = i % src_dim0, k = i / src_dim0, i_old = j * src_dim1 + k;
renumbering_data[i] = i_old;
});
RaggedShape src_no_axis0_renumbered =
Index(src_no_axis0, renumbering, value_indexes);
int32_t num_rows = src_dim1, row_splits_dim = num_rows + 1,
row_ids_dim = src_tot_size1;
std::vector<RaggedShapeDim> ans_axis0(1);
Array1<int32_t> mem(c, row_splits_dim + row_ids_dim);
int32_t *mem_data = mem.Data();
K2_EVAL(
c, row_splits_dim + row_ids_dim, lambda_set_row_info, (int32_t i)->void {
int32_t val;
if (i >= row_splits_dim) {
// row_ids
int32_t elem_idx = i - row_splits_dim;
val = elem_idx / src_dim0;
} else {
// row_splits
int32_t row_idx = i;
val = row_idx * src_dim0;
}
mem_data[i] = val;
});
ans_axis0[0].row_splits = mem.Range(0, row_splits_dim);
ans_axis0[0].row_ids = mem.Range(row_splits_dim, row_ids_dim);
ans_axis0[0].cached_tot_size = row_ids_dim;
RaggedShape temp(ans_axis0);
return ComposeRaggedShapes(temp, src_no_axis0_renumbered);
}
RaggedShape Stack(int32_t axis, int32_t num_srcs, RaggedShape **src) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(num_srcs, 0);
K2_CHECK(axis >= 0 && axis <= 1);
ContextPtr c = src[0]->Context();
int32_t num_axes = src[0]->NumAxes();
// Check if they have the same num-axes and compatible context
for (int32_t i = 1; i < num_srcs; ++i) {
K2_CHECK_EQ(num_axes, src[i]->NumAxes());
K2_CHECK(c->IsCompatible(*src[i]->Context()));
}
std::vector<RaggedShape> unsqueezed = UnsqueezeParallel(num_srcs, src, 0);
std::vector<RaggedShape *> unsqueezed_ptrs(num_srcs);
for (int32_t i = 0; i < num_srcs; i++) unsqueezed_ptrs[i] = &(unsqueezed[i]);
RaggedShape ans = Append(0, num_srcs, unsqueezed_ptrs.data());
// Transpose will check if all src->Dim0() has the same value.
if (axis == 1) ans = Transpose(ans);
return ans;
}
RaggedShape TrivialShape(ContextPtr &c, int32_t num_elems) {
NVTX_RANGE(K2_FUNC);
  // row_splits = [ 0 num_elems ]
Array1<int32_t> row_splits = Range<int32_t>(c, 2, 0, num_elems);
Array1<int32_t> row_ids(c, num_elems, 0);
return RaggedShape2(&row_splits, &row_ids, num_elems);
}
RaggedShape RegularRaggedShape(ContextPtr &c, int32_t dim0, int32_t dim1) {
NVTX_RANGE(K2_FUNC);
Array1<int32_t> row_splits = Range<int32_t>(c, dim0 + 1, 0, dim1);
int32_t *row_splits_data = row_splits.Data();
Array1<int32_t> row_ids(c, dim0 * dim1);
int32_t *row_ids_data = row_ids.Data();
K2_EVAL2(
c, dim0, dim1, lambda_set_row_ids,
(int32_t i, int32_t j)->void { row_ids_data[i * dim1 + j] = i; });
return RaggedShape2(&row_splits, &row_ids, dim0 * dim1);
}
Ragged<int32_t> GetCountsPartitioned(Ragged<int32_t> &src,
RaggedShape &ans_ragged_shape) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(src.NumAxes(), 2);
K2_CHECK_EQ(ans_ragged_shape.NumAxes(), 2);
K2_CHECK(IsCompatible(src, ans_ragged_shape));
K2_CHECK_EQ(src.Dim0(), ans_ragged_shape.Dim0());
const Array1<int32_t> &values = src.values;
const Array1<int32_t> &row_splits = ans_ragged_shape.RowSplits(1);
int32_t n = ans_ragged_shape.NumElements();
Array1<int32_t> counts = GetCounts(values, n);
return Ragged<int32_t>(ans_ragged_shape, counts);
}
static Array1<int32_t> GetTransposeReorderingCpu(Ragged<int32_t> &src,
int32_t num_cols) {
NVTX_RANGE(K2_FUNC);
std::vector<std::vector<int32_t>> column_indexes(num_cols); // [column][row]
const int32_t *values_data = src.values.Data();
int32_t n = src.values.Dim();
for (int32_t i = 0; i != n; ++i) {
int32_t bucket = values_data[i];
column_indexes[bucket].push_back(i);
}
Array1<int32_t> ans(src.Context(), n);
int32_t *ans_data = ans.Data();
for (int32_t i = 0; i != num_cols; ++i) {
std::copy(column_indexes[i].begin(), column_indexes[i].end(), ans_data);
ans_data += column_indexes[i].size();
}
return ans;
}
static Array1<int32_t> GetTransposeReorderingThreeAxesCuda(Ragged<int32_t> &src,
int32_t num_cols) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(src.NumAxes(), 3);
ContextPtr &context = src.Context();
K2_CHECK_EQ(context->GetDeviceType(), kCuda);
const Array1<int32_t> &row_splits1 = src.RowSplits(1);
const int32_t *row_ids2_data = src.RowIds(2).Data();
const int32_t *value_data = src.values.Data();
Array1<int32_t> segments = src.RowSplits(2)[row_splits1];
auto lambda_comp = [=] __device__(int32_t a_idx012,
int32_t b_idx012) -> bool {
int32_t a_col_index = value_data[a_idx012];
int32_t b_col_index = value_data[b_idx012];
if (a_col_index < b_col_index) return true; // sort by column indexes
if (a_col_index > b_col_index) return false;
// at this point, a_idx012 and b_idx012 belong to the same column;
// then we sort by its row indexes
int32_t a_idx01 = row_ids2_data[a_idx012];
int32_t b_idx01 = row_ids2_data[b_idx012];
if (a_idx01 < b_idx01) return true;
if (a_idx01 > b_idx01) return false;
// at this point, a_idx012 and b_idx012 are duplicate elements
return false; // either true or false is fine
};
std::unique_ptr<mgpu::context_t> mgpu_context =
GetModernGpuAllocator(context);
int32_t n = src.values.Dim();
Array1<int32_t> ans = Range(context, n, 0);
if (n == 0) return ans;
K2_CUDA_SAFE_CALL(mgpu::segmented_sort(ans.Data(), // keys
ans.Dim(), // count
segments.Data(), // segments
segments.Dim() - 1, // num_segments
lambda_comp, *mgpu_context));
return ans;
}
Array1<int32_t> GetTransposeReordering(Ragged<int32_t> &src, int32_t num_cols) {
NVTX_RANGE(K2_FUNC);
ContextPtr &context = src.Context();
if (src.NumAxes() < 2) {
// src is empty
return Array1<int32_t>(context, 0);
}
DeviceType device_type = context->GetDeviceType();
if (device_type == kCpu) return GetTransposeReorderingCpu(src, num_cols);
K2_CHECK_EQ(device_type, kCuda);
if (src.NumAxes() == 3)
return GetTransposeReorderingThreeAxesCuda(src, num_cols);
const int32_t *row_splits1_data = src.RowSplits(src.NumAxes() - 1).Data();
const int32_t *row_ids1_data = src.RowIds(src.NumAxes() - 1).Data();
const int32_t *value_data = src.values.Data();
int32_t n = src.values.Dim();
Array1<int32_t> ans = Range(context, n, 0);
if (n == 0) return ans;
auto lambda_comp = [=] __device__(int32_t a_idx01, int32_t b_idx01) -> bool {
int32_t a_idx0 = row_ids1_data[a_idx01];
int32_t b_idx0 = row_ids1_data[b_idx01];
int32_t a_col_index = value_data[a_idx01];
int32_t b_col_index = value_data[b_idx01];
if (a_col_index < b_col_index) return true; // sort by column indexes
if (a_col_index > b_col_index) return false;
// now we have a_col_index == b_col_index
if (a_idx0 < b_idx0) return true; // sort by row indexes
if (a_idx0 > b_idx0) return false;
// now we have a_idx0 == b_idx0 && a_col_index == b_col_index
// this entry is duplicated in the sparse matrix.
return false; // we can return either true or false here.
};
std::unique_ptr<mgpu::context_t> mgpu_context =
GetModernGpuAllocator(context);
K2_CUDA_SAFE_CALL(mgpu::mergesort(ans.Data(), n, lambda_comp, *mgpu_context));
return ans;
}
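// For example (illustration only): if src is [ [ 1 0 ] [ 2 0 ] ] with
// num_cols == 3, grouping element positions by (column, row) gives
// col 0 -> positions 1, 3; col 1 -> position 0; col 2 -> position 2,
// so the returned reordering is [1, 3, 0, 2]; reading the elements in that
// order visits a CSR-style matrix in column-major (CSC) order.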
RaggedShape ChangeSublistSize(RaggedShape &src, int32_t size_delta) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(src.NumAxes(), 2);
// the result will have the same num-axes as `src` (the NumAxes() of the
// object is not the same as the number of RaggedShapeDim axes).
std::vector<RaggedShapeDim> ans_axes(src.NumAxes() - 1);
int32_t last_axis = src.NumAxes() - 1;
// The following will only do something if src.NumAxes() > 2.
for (int32_t i = 0; i + 1 < last_axis; ++i) ans_axes[i] = src.Axes()[i];
ContextPtr &c = src.Context();
int32_t num_rows = src.TotSize(last_axis - 1),
src_num_elems = src.TotSize(last_axis),
num_elems = src_num_elems + size_delta * num_rows;
ans_axes.back().row_splits = Array1<int32_t>(c, num_rows + 1);
ans_axes.back().row_ids = Array1<int32_t>(c, num_elems);
ans_axes.back().cached_tot_size = num_elems;
const int32_t *src_row_splits_data = src.RowSplits(last_axis).Data(),
*src_row_ids_data = src.RowIds(last_axis).Data();
int32_t *row_splits_data = ans_axes.back().row_splits.Data(),
*row_ids_data = ans_axes.back().row_ids.Data();
{
ParallelRunner pr(c);
{
With w(pr.NewStream());
K2_EVAL(
c, num_rows + 1, lambda_set_row_splits, (int32_t idx0)->void {
row_splits_data[idx0] =
src_row_splits_data[idx0] + size_delta * idx0;
});
}
{
With w(pr.NewStream());
K2_EVAL(
c, src_num_elems, lambda_set_row_ids1, (int32_t src_idx01)->void {
int32_t src_idx0 = src_row_ids_data[src_idx01],
src_idx0x = src_row_splits_data[src_idx0],
src_idx1 = src_idx01 - src_idx0x,
new_idx0x = row_splits_data[src_idx0],
new_idx0x_next = row_splits_data[src_idx0 + 1],
new_idx01 = new_idx0x + src_idx1;
// it's only necessary to guard the next statement with an 'if'
// because size_delta might be negative.
if (new_idx01 < new_idx0x_next) row_ids_data[new_idx01] = src_idx0;
});
}
if (size_delta > 0) {
// This sets the row-ids that are not set by lambda_set_row_ids1.
With w(pr.NewStream());
K2_EVAL(
c, num_rows * size_delta, lambda_set_row_ids2, (int32_t i)->void {
int32_t idx0 = i / size_delta, n = i % size_delta,
next_idx0 = idx0 + 1;
// The following formula is the same as the one in
// lambda_set_row_splits; we want to compute the new value of
// row_splits_data[next_idx0] without waiting for that kernel to
// terminate.
int32_t next_idx0x =
src_row_splits_data[next_idx0] + size_delta * next_idx0;
row_ids_data[next_idx0x - 1 - n] = idx0;
});
}
// make the ParallelRunner go out of scope (should do this before any
// validation code that gets invoked by the constructor of RaggedShape
// below).
}
return RaggedShape(ans_axes);
}
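// For example (illustration only): if the last axis of src has
// row_splits == [0, 2, 5] (rows of sizes 2 and 3) and size_delta == 1,
// the result has row_splits == [0, 3, 7] and
// row_ids == [0, 0, 0, 1, 1, 1, 1]; every sublist grows (or shrinks, for
// negative size_delta) by size_delta elements.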
// TODO(dan): this could definitely be made more efficient.
RaggedShape ChangeSublistSizePinned(RaggedShape &src, int32_t size_delta) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(src.NumAxes(), 2);
// the result will have the same num-axes as `src` (the NumAxes() of the
// object is not the same as the number of RaggedShapeDim axes).
std::vector<RaggedShapeDim> ans_axes(src.NumAxes() - 1);
int32_t last_axis = src.NumAxes() - 1;
// The following will only do something if src.NumAxes() > 2.
for (int32_t i = 0; i + 1 < last_axis; ++i) ans_axes[i] = src.Axes()[i];
ContextPtr &c = src.Context();
int32_t num_rows = src.TotSize(last_axis - 1);
ans_axes.back().row_splits = Array1<int32_t>(c, num_rows + 1);
const int32_t *src_row_splits_data = src.RowSplits(last_axis).Data();
int32_t *row_splits_data = ans_axes.back().row_splits.Data();
K2_EVAL(c, num_rows, lambda_set_row_sizes, (int32_t idx0) -> void {
int32_t orig_size = src_row_splits_data[idx0 + 1] -
src_row_splits_data[idx0],
size;
if (orig_size == 0 || orig_size + size_delta <= 0)
size = 0;
else
size = orig_size + size_delta;
row_splits_data[idx0] = size;
});
ExclusiveSum(ans_axes.back().row_splits, &ans_axes.back().row_splits);
ans_axes.back().row_ids = Array1<int32_t>(c, ans_axes.back().row_splits.Back());
RowSplitsToRowIds(ans_axes.back().row_splits,
&ans_axes.back().row_ids);
ans_axes.back().cached_tot_size = ans_axes.back().row_ids.Dim();
return RaggedShape(ans_axes);
}
RaggedShape Prefix(RaggedShape &src, int32_t n) {
NVTX_RANGE(K2_FUNC);
int32_t dim0 = src.Dim0();
K2_CHECK(n >= 0 && n <= dim0);
src.Populate();
int32_t num_axes = src.NumAxes();
K2_CHECK_GE(num_axes, 2);
const std::vector<RaggedShapeDim> &axes_in = src.Axes();
std::vector<RaggedShapeDim> axes_out(axes_in.size());
int32_t row_end = n;
for (int32_t axis = 0; axis < num_axes - 1; ++axis) {
axes_out[axis].row_splits = axes_in[axis].row_splits.Arange(0, row_end + 1);
// notice here we may do a memory copy from GPU to CPU.
row_end = axes_in[axis].row_splits[row_end];
axes_out[axis].row_ids = axes_in[axis].row_ids.Arange(0, row_end);
axes_out[axis].cached_tot_size = row_end;
}
return RaggedShape(axes_out);
}
std::vector<RaggedShape> GetPrefixes(RaggedShape &src,
const std::vector<int32_t> &sizes) {
NVTX_RANGE(K2_FUNC);
src.Populate();
int32_t dim0 = src.Dim0();
int32_t num_axes = src.NumAxes();
K2_CHECK_GE(num_axes, 2);
ContextPtr &c = src.Context();
const std::vector<RaggedShapeDim> &axes_in = src.Axes();
// get those row_end elements at each axis.
int32_t ans_size = static_cast<int32_t>(sizes.size());
Array1<int32_t> row_ends(c, num_axes * ans_size);
Array1<int32_t> sizes_array(GetCpuContext(), sizes);
Array1<int32_t> indexes = row_ends.Arange(0, ans_size);
indexes.CopyFrom(sizes_array);
for (int32_t axis = 1; axis < num_axes; ++axis) {
Array1<int32_t> curr_axis_row_ends =
row_ends.Arange(axis * ans_size, (axis + 1) * ans_size);
axes_in[axis - 1].row_splits.Index(indexes, &curr_axis_row_ends);
indexes = curr_axis_row_ends;
}
row_ends = row_ends.To(GetCpuContext());
std::vector<RaggedShape> ans(ans_size);
for (int32_t i = 0; i != ans_size; ++i) {
std::vector<RaggedShapeDim> axes_out(axes_in.size());
int32_t row_end = row_ends[i];
K2_CHECK(row_end >= 0 && row_end <= dim0);
for (int32_t axis = 0; axis < num_axes - 1; ++axis) {
axes_out[axis].row_splits =
axes_in[axis].row_splits.Arange(0, row_end + 1);
row_end = row_ends[i + (axis + 1) * ans_size];
axes_out[axis].row_ids = axes_in[axis].row_ids.Arange(0, row_end);
axes_out[axis].cached_tot_size = row_end;
}
ans[i] = RaggedShape(axes_out, false);
}
return ans;
}
RaggedShape SubsampleRaggedShape(RaggedShape &src, Renumbering &renumbering) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(renumbering.NumOldElems(), src.NumElements());
// Make sure final row-ids are populated.
src.RowIds(src.NumAxes() - 1);
std::vector<RaggedShapeDim> axes = src.Axes();
axes.back().row_ids = axes.back().row_ids[renumbering.New2Old()];
axes.back().row_splits = renumbering.Old2New()[axes.back().row_splits];
axes.back().cached_tot_size = axes.back().row_ids.Dim();
return RaggedShape(axes);
}
RaggedShape SubsampleRaggedShape(RaggedShape &src, Renumbering &r_before_last,
Renumbering &r_last) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(r_before_last.NumOldElems(), src.TotSize(src.NumAxes() - 2));
K2_CHECK_EQ(r_last.NumOldElems(), src.NumElements());
// Make sure final and before-final row-ids are populated.
src.RowIds(src.NumAxes() - 2);
src.RowIds(src.NumAxes() - 1);
std::vector<RaggedShapeDim> axes = src.Axes();
// Suppose this shape has 3 axes (0,1,2). Its NumAxes()==3;
// axes.size()==2.
// r_before_last deals with the numbering on axis 1.
// r_last deals with the numbering on axis 2.
RaggedShapeDim &before_last = axes[axes.size() - 2],
&last = axes[axes.size() - 1];
int32_t new_tot_size1 = r_before_last.NumNewElems(),
new_tot_size2 = r_last.NumNewElems();
ContextPtr c = src.Context();
Array1<int32_t> before_last_row_ids(c, new_tot_size1),
last_row_splits(c, new_tot_size1 + 1), last_row_ids(c, new_tot_size2);
// The variable names below use this 3-axis assumption but the
// code will work for greater number of axes.
int32_t *new_row_ids1_data = before_last_row_ids.Data(),
*new_row_splits2_data = last_row_splits.Data(),
*new_row_ids2_data = last_row_ids.Data();
const int32_t *old_row_ids1_data = before_last.row_ids.Data(),
*old_row_splits2_data = last.row_splits.Data(),
*old_row_ids2_data = last.row_ids.Data();
const int32_t *idx01_new2old_data = r_before_last.New2Old().Data(),
*idx01_old2new_data = r_before_last.Old2New().Data(),
*idx012_new2old_data = r_last.New2Old().Data(),
*idx012_old2new_data = r_last.Old2New().Data();
ParallelRunner pr(c);
{
With w(pr.NewStream());
// before_last.row_splits maps from idx0 -> idx01 (contains idx01's). Map
// the idx01's; the idx0s stay the same.
before_last.row_splits = r_before_last.Old2New()[before_last.row_splits];
}
{
With w(pr.NewStream());
K2_EVAL(
c, new_tot_size1 + 1, lambda_set_row_ids1_and_row_splits2,
(int32_t new_idx01)->void {
// row_ids1 maps from idx01 -> idx0. Select subset of
// idx01's; the idx0 stays the same.
int32_t old_idx01 = idx01_new2old_data[new_idx01];
if (new_idx01 < new_tot_size1)
new_row_ids1_data[new_idx01] = old_row_ids1_data[old_idx01];
// row_splits2 maps from idx01 -> idx012. Map both indexes.
// idx01's; the idx0 stays the same.
new_row_splits2_data[new_idx01] =
idx012_old2new_data[old_row_splits2_data[old_idx01]];
});
}
{
With w(pr.NewStream());
K2_EVAL(
c, new_tot_size2, lambda_set_row_ids2, (int32_t new_idx012)->void {
// row_ids2 maps from idx012 -> idx01. Both must be mapped.
int32_t old_idx012 = idx012_new2old_data[new_idx012];
int32_t old_idx01 = old_row_ids2_data[old_idx012],
new_idx01 = idx01_old2new_data[old_idx01];
new_row_ids2_data[new_idx012] = new_idx01;
});
}
before_last.row_ids = before_last_row_ids;
before_last.cached_tot_size = new_tot_size1;
last.row_splits = last_row_splits;
last.row_ids = last_row_ids;
last.cached_tot_size = new_tot_size2;
return RaggedShape(axes);
}
RaggedShape EmptyRaggedShape(ContextPtr &c, int32_t num_axes) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(num_axes, 2);
std::vector<RaggedShapeDim> axes(num_axes - 1);
axes[0].row_splits = Array1<int32_t>(c, 1, 0);
// row_ids will be the empty vector, with context `c`.
axes[0].row_ids = axes[0].row_splits.Range(0, 0);
axes[0].cached_tot_size = 0;
for (int32_t a = 1; a + 1 < num_axes; a++) axes[a] = axes[0];
return RaggedShape(axes);
}
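// For example (illustration only): EmptyRaggedShape(c, 3) is a 3-axis shape
// with Dim0() == 0 and no elements; every stored axis has row_splits == [0]
// and an empty row_ids array.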
Array1<int32_t> GetDecreasingSizeOrder(RaggedShape &shape) {
ContextPtr c = shape.Context();
Array1<int32_t> sizes = RowSplitsToSizes(shape.RowSplits(1));
Array1<int32_t> index_map;
Sort<int32_t, GreaterThan<int32_t>>(&sizes, &index_map);
return index_map;
}
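// For example (illustration only): if shape has row sizes [2, 5, 3], the
// returned index_map is [1, 2, 0], i.e. the rows listed from largest to
// smallest.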
} // namespace k2
|
bde9fbae7582ed64034d91fc21bfde55a4350d50.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Fast Lee Hologram computation using CUDA
Programmed by Shay Ohayon
DiCarlo Lab @ MIT
Revision History
Version 0.1 10/22/2014
*/
#include <stdio.h>
#include "mex.h"
#include <Windows.h>
#include <math.h>
#include <assert.h> // for the assert() calls in the kernels below
#define MIN(a,b) (a)<(b)?(a):(b)
#define M_PI 3.14159265358979323846
const int DMDwidth = 1024;
const int DMDheight = 768;
const int effectiveDMDwidth = DMDheight;
__global__ void computeCuda(double *inputPhases, bool *binaryPatterns, double *carrierWave, int patternSizeX, int patternSizeY, int numReferencePixels, int leeBlockSize, int numPatterns) {
int z = blockDim.x * blockIdx.x + threadIdx.x;
// Guard the extra threads launched when numPatterns is not a multiple of the block size.
if (z >= numPatterns) return;
long long output_offset = (long long)DMDwidth*DMDheight*z;
long long input_offset = (long long)patternSizeX*patternSizeY*z;
double phaseAngle = 0;
for (int x = 0; x < DMDwidth; x++)
{
int sampleX = (x - numReferencePixels) / leeBlockSize;
for (int y = 0; y < DMDheight; y++)
{
phaseAngle = 0.0;
if (y >= numReferencePixels && y < DMDheight - numReferencePixels && x >= numReferencePixels && x < effectiveDMDwidth - numReferencePixels)
{
int sampleY = (y - numReferencePixels) / leeBlockSize;
assert(sampleX >= 0 && sampleY >= 0 && sampleX < patternSizeX && sampleY < patternSizeY);
phaseAngle = inputPhases[input_offset + sampleX*patternSizeY + sampleY];
}
binaryPatterns[output_offset + x*DMDheight + y] = (0.5 * (1 + cos(carrierWave[x*DMDheight + y] - phaseAngle))) > 0.5;
}
}
}
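// Note on the thresholding above: 0.5 * (1 + cos(t)) > 0.5 is equivalent to
// cos(t) > 0, so each DMD pixel is simply the sign of
// cos(carrierWave - phaseAngle), i.e. a binary Lee grating whose fringes are
// locally shifted by the requested phase.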
void compute(int z, double *inputPhases, bool *binaryPatterns, double *carrierWave, int patternSizeX, int patternSizeY, int numReferencePixels, int leeBlockSize)
{
long long output_offset = DMDwidth*DMDheight*z;
long long input_offset = patternSizeX*patternSizeY*z;
double phaseAngle = 0;
for (int x = 0; x < DMDwidth; x++)
{
int sampleX = (x - numReferencePixels) / leeBlockSize;
for (int y = 0; y < DMDheight; y++)
{
phaseAngle = 0.0;
if (y >= numReferencePixels && y < DMDheight - numReferencePixels && x >= numReferencePixels && x < effectiveDMDwidth - numReferencePixels)
{
int sampleY = (y - numReferencePixels) / leeBlockSize;
assert(sampleX >= 0 && sampleY >= 0 && sampleX < patternSizeX && sampleY < patternSizeY);
phaseAngle = inputPhases[input_offset + sampleX*patternSizeY + sampleY];
}
binaryPatterns[output_offset + x*DMDheight + y] = (0.5 * (1 + cos(carrierWave[x*DMDheight + y] - phaseAngle))) > 0.5;
}
}
}
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, const mxArray *prhs[]) {
if (nrhs < 4 || nlhs != 1)
{
mexPrintf("Use: OutputBinaryPatterns = FastLeeHologram(inputPhases (NxNxM), numReferencePixels, leeBlockSize, selectedCarrier);");
return;
}
double *inputPhases = (double*) mxGetData(prhs[0]);
int numReferencePixels = *(double*)mxGetData(prhs[1]);
int leeBlockSize = *(double*)mxGetData(prhs[2]);
double selectedCarrier = *(double*) mxGetData(prhs[3]);
const int numDim = mxGetNumberOfDimensions(prhs[0]);
const int *dataSize = mxGetDimensions(prhs[0]);
int numPatterns = 1;
int patternSizeX = dataSize[0];
int patternSizeY = dataSize[1];
if (numDim > 2)
{
numPatterns = dataSize[2];
}
// allocate memory for output
const int outputDimSize[3] = { DMDheight, DMDwidth, numPatterns };
plhs[0] = mxCreateLogicalArray(3, outputDimSize);
bool* binaryPatterns = (bool*)mxGetData(plhs[0]);
// allocate memory for the reference wave
double *carrierWave = new double[DMDheight*DMDwidth];
for (int x = 0; x < DMDwidth; x++)
{
for (int y = 0; y < DMDheight; y++)
{
carrierWave[x*DMDheight +y] = 2.0 * M_PI*(x - y)*selectedCarrier;
}
}
// The kernel must only see device pointers: copy the input phases and carrier
// wave to the GPU, run the kernel, then copy the binary patterns back.
double* d_inputPhases;
double* d_carrierWave;
bool* d_binaryPatterns;
size_t inputSize = sizeof(double) * patternSizeX * patternSizeY * numPatterns;
size_t carrierSize = sizeof(double) * DMDwidth * DMDheight;
size_t outputSize = sizeof(bool) * DMDwidth * DMDheight * numPatterns;
hipMalloc(&d_inputPhases, inputSize);
hipMalloc(&d_carrierWave, carrierSize);
hipMalloc(&d_binaryPatterns, outputSize);
hipMemcpy(d_inputPhases, inputPhases, inputSize, hipMemcpyHostToDevice);
hipMemcpy(d_carrierWave, carrierWave, carrierSize, hipMemcpyHostToDevice);
int maxThreadsPerBlock = 256;
// Round up so that every pattern gets a thread.
int numBlocks = (numPatterns + maxThreadsPerBlock - 1) / maxThreadsPerBlock;
computeCuda << <numBlocks, maxThreadsPerBlock >> >(d_inputPhases, d_binaryPatterns, d_carrierWave, patternSizeX, patternSizeY, numReferencePixels, leeBlockSize, numPatterns);
hipMemcpy(binaryPatterns, d_binaryPatterns, outputSize, hipMemcpyDeviceToHost);
hipFree(d_inputPhases);
hipFree(d_carrierWave);
hipFree(d_binaryPatterns);
/*
for (int z = 0; z < numPatterns; z++)
{
compute(z, inputPhases, binaryPatterns, carrierWave, patternSizeX, patternSizeY, numReferencePixels, leeBlockSize);
}
*/
delete[] carrierWave;
}
| bde9fbae7582ed64034d91fc21bfde55a4350d50.cu | /*
Fast Lee Hologram computation using CUDA
Programmed by Shay Ohayon
DiCarlo Lab @ MIT
Revision History
Version 0.1 10/22/2014
*/
#include <stdio.h>
#include "mex.h"
#include <Windows.h>
#include <math.h>
#include <assert.h> // for the assert() calls in the kernels below
#define MIN(a,b) (a)<(b)?(a):(b)
#define M_PI 3.14159265358979323846
const int DMDwidth = 1024;
const int DMDheight = 768;
const int effectiveDMDwidth = DMDheight;
__global__ void computeCuda(double *inputPhases, bool *binaryPatterns, double *carrierWave, int patternSizeX, int patternSizeY, int numReferencePixels, int leeBlockSize, int numPatterns) {
int z = blockDim.x * blockIdx.x + threadIdx.x;
// Guard the extra threads launched when numPatterns is not a multiple of the block size.
if (z >= numPatterns) return;
long long output_offset = (long long)DMDwidth*DMDheight*z;
long long input_offset = (long long)patternSizeX*patternSizeY*z;
double phaseAngle = 0;
for (int x = 0; x < DMDwidth; x++)
{
int sampleX = (x - numReferencePixels) / leeBlockSize;
for (int y = 0; y < DMDheight; y++)
{
phaseAngle = 0.0;
if (y >= numReferencePixels && y < DMDheight - numReferencePixels && x >= numReferencePixels && x < effectiveDMDwidth - numReferencePixels)
{
int sampleY = (y - numReferencePixels) / leeBlockSize;
assert(sampleX >= 0 && sampleY >= 0 && sampleX < patternSizeX && sampleY < patternSizeY);
phaseAngle = inputPhases[input_offset + sampleX*patternSizeY + sampleY];
}
binaryPatterns[output_offset + x*DMDheight + y] = (0.5 * (1 + cos(carrierWave[x*DMDheight + y] - phaseAngle))) > 0.5;
}
}
}
void compute(int z, double *inputPhases, bool *binaryPatterns, double *carrierWave, int patternSizeX, int patternSizeY, int numReferencePixels, int leeBlockSize)
{
long long output_offset = DMDwidth*DMDheight*z;
long long input_offset = patternSizeX*patternSizeY*z;
double phaseAngle = 0;
for (int x = 0; x < DMDwidth; x++)
{
int sampleX = (x - numReferencePixels) / leeBlockSize;
for (int y = 0; y < DMDheight; y++)
{
phaseAngle = 0.0;
if (y >= numReferencePixels && y < DMDheight - numReferencePixels && x >= numReferencePixels && x < effectiveDMDwidth - numReferencePixels)
{
int sampleY = (y - numReferencePixels) / leeBlockSize;
assert(sampleX >= 0 && sampleY >= 0 && sampleX < patternSizeX && sampleY < patternSizeY);
phaseAngle = inputPhases[input_offset + sampleX*patternSizeY + sampleY];
}
binaryPatterns[output_offset + x*DMDheight + y] = (0.5 * (1 + cos(carrierWave[x*DMDheight + y] - phaseAngle))) > 0.5;
}
}
}
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, const mxArray *prhs[]) {
if (nrhs < 4 || nlhs != 1)
{
mexPrintf("Use: OutputBinaryPatterns = FastLeeHologram(inputPhases (NxNxM), numReferencePixels, leeBlockSize, selectedCarrier);");
return;
}
double *inputPhases = (double*) mxGetData(prhs[0]);
int numReferencePixels = *(double*)mxGetData(prhs[1]);
int leeBlockSize = *(double*)mxGetData(prhs[2]);
double selectedCarrier = *(double*) mxGetData(prhs[3]);
const int numDim = mxGetNumberOfDimensions(prhs[0]);
const int *dataSize = mxGetDimensions(prhs[0]);
int numPatterns = 1;
int patternSizeX = dataSize[0];
int patternSizeY = dataSize[1];
if (numDim > 2)
{
numPatterns = dataSize[2];
}
// allocate memory for output
const int outputDimSize[3] = { DMDheight, DMDwidth, numPatterns };
plhs[0] = mxCreateLogicalArray(3, outputDimSize);
bool* binaryPatterns = (bool*)mxGetData(plhs[0]);
// allocate memory for the reference wave
double *carrierWave = new double[DMDheight*DMDwidth];
for (int x = 0; x < DMDwidth; x++)
{
for (int y = 0; y < DMDheight; y++)
{
carrierWave[x*DMDheight +y] = 2.0 * M_PI*(x - y)*selectedCarrier;
}
}
// The kernel must only see device pointers: copy the input phases and carrier
// wave to the GPU, run the kernel, then copy the binary patterns back.
double* d_inputPhases;
double* d_carrierWave;
bool* d_binaryPatterns;
size_t inputSize = sizeof(double) * patternSizeX * patternSizeY * numPatterns;
size_t carrierSize = sizeof(double) * DMDwidth * DMDheight;
size_t outputSize = sizeof(bool) * DMDwidth * DMDheight * numPatterns;
cudaMalloc(&d_inputPhases, inputSize);
cudaMalloc(&d_carrierWave, carrierSize);
cudaMalloc(&d_binaryPatterns, outputSize);
cudaMemcpy(d_inputPhases, inputPhases, inputSize, cudaMemcpyHostToDevice);
cudaMemcpy(d_carrierWave, carrierWave, carrierSize, cudaMemcpyHostToDevice);
int maxThreadsPerBlock = 256;
// Round up so that every pattern gets a thread.
int numBlocks = (numPatterns + maxThreadsPerBlock - 1) / maxThreadsPerBlock;
computeCuda << <numBlocks, maxThreadsPerBlock >> >(d_inputPhases, d_binaryPatterns, d_carrierWave, patternSizeX, patternSizeY, numReferencePixels, leeBlockSize, numPatterns);
cudaMemcpy(binaryPatterns, d_binaryPatterns, outputSize, cudaMemcpyDeviceToHost);
cudaFree(d_inputPhases);
cudaFree(d_carrierWave);
cudaFree(d_binaryPatterns);
/*
for (int z = 0; z < numPatterns; z++)
{
compute(z, inputPhases, binaryPatterns, carrierWave, patternSizeX, patternSizeY, numReferencePixels, leeBlockSize);
}
*/
delete[] carrierWave;
}
|
419e97d56ce2425988a83d115e44c9de880c5630.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--gridDim=[192,48] --blockDim=[16,8]
// in host invocation
// assert(COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS >= KERNEL_RADIUS);
// assert(imageW % COLUMNS_BLOCKDIM_X == 0);
// assert(imageH % (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y) == 0);
#define KERNEL_RADIUS 8
#define KERNEL_LENGTH (2 * KERNEL_RADIUS + 1)
#define COLUMNS_BLOCKDIM_X 16
#define COLUMNS_BLOCKDIM_Y 8
#define COLUMNS_RESULT_STEPS 8
#define COLUMNS_HALO_STEPS 1
__constant__ float c_Kernel[KERNEL_LENGTH];
__global__ void convolutionColumnsKernel(
float *d_Dst,
float *d_Src,
int imageW,
int imageH,
int pitch
)
{
__requires(pitch == 3072);
__shared__ float s_Data[COLUMNS_BLOCKDIM_X][(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1];
//Offset to the upper halo edge
const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x;
const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y;
d_Src += baseY * pitch + baseX;
d_Dst += baseY * pitch + baseX;
//Main data
#pragma unroll
for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++)
{
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = d_Src[i * COLUMNS_BLOCKDIM_Y * pitch];
}
//Upper halo
#pragma unroll
for (int i = 0; i < COLUMNS_HALO_STEPS; i++)
{
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0;
}
//Lower halo
#pragma unroll
for (int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++)
{
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y]= (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0;
}
//Compute and store results
// __syncthreads();
// #pragma unroll
for (int i = COLUMNS_HALO_STEPS;
#define base (baseY * pitch + baseX)
__invariant(__write_implies(d_Dst, (__write_offset_bytes(d_Dst)/sizeof(float) - base)%(COLUMNS_BLOCKDIM_Y * pitch) == 0)),
__invariant(__write_implies(d_Dst, (__write_offset_bytes(d_Dst)/sizeof(float) - base)/(COLUMNS_BLOCKDIM_Y * pitch) >= COLUMNS_HALO_STEPS)),
__invariant(__write_implies(d_Dst, (__write_offset_bytes(d_Dst)/sizeof(float) - base)/(COLUMNS_BLOCKDIM_Y * pitch) < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS)),
i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++)
{
float sum = 0;
// #pragma unroll
for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++)
{
sum += c_Kernel[KERNEL_RADIUS - j] * s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j];
}
d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch] = sum;
}
}
| 419e97d56ce2425988a83d115e44c9de880c5630.cu | //pass
//--gridDim=[192,48] --blockDim=[16,8]
// in host invocation
// assert(COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS >= KERNEL_RADIUS);
// assert(imageW % COLUMNS_BLOCKDIM_X == 0);
// assert(imageH % (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y) == 0);
#define KERNEL_RADIUS 8
#define KERNEL_LENGTH (2 * KERNEL_RADIUS + 1)
#define COLUMNS_BLOCKDIM_X 16
#define COLUMNS_BLOCKDIM_Y 8
#define COLUMNS_RESULT_STEPS 8
#define COLUMNS_HALO_STEPS 1
__constant__ float c_Kernel[KERNEL_LENGTH];
__global__ void convolutionColumnsKernel(
float *d_Dst,
float *d_Src,
int imageW,
int imageH,
int pitch
)
{
__requires(pitch == 3072);
__shared__ float s_Data[COLUMNS_BLOCKDIM_X][(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1];
//Offset to the upper halo edge
const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x;
const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y;
d_Src += baseY * pitch + baseX;
d_Dst += baseY * pitch + baseX;
//Main data
#pragma unroll
for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++)
{
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = d_Src[i * COLUMNS_BLOCKDIM_Y * pitch];
}
//Upper halo
#pragma unroll
for (int i = 0; i < COLUMNS_HALO_STEPS; i++)
{
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0;
}
//Lower halo
#pragma unroll
for (int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++)
{
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y]= (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0;
}
//Compute and store results
// __syncthreads();
// #pragma unroll
for (int i = COLUMNS_HALO_STEPS;
#define base (baseY * pitch + baseX)
__invariant(__write_implies(d_Dst, (__write_offset_bytes(d_Dst)/sizeof(float) - base)%(COLUMNS_BLOCKDIM_Y * pitch) == 0)),
__invariant(__write_implies(d_Dst, (__write_offset_bytes(d_Dst)/sizeof(float) - base)/(COLUMNS_BLOCKDIM_Y * pitch) >= COLUMNS_HALO_STEPS)),
__invariant(__write_implies(d_Dst, (__write_offset_bytes(d_Dst)/sizeof(float) - base)/(COLUMNS_BLOCKDIM_Y * pitch) < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS)),
i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++)
{
float sum = 0;
// #pragma unroll
for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++)
{
sum += c_Kernel[KERNEL_RADIUS - j] * s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j];
}
d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch] = sum;
}
}
|
f195a2b93c0fbb8c48824e54682a4784eec09032.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zgeaxpy.cu normal z -> s, Fri Sep 11 18:29:42 2015
*/
#include "common_magma.h"
#include "common_magmasparse.h"
#define BLOCK_SIZE 256
// axpy kernel for matrices stored in the MAGMA format
__global__ void
sgeaxpy_kernel(
int num_rows,
int num_cols,
float alpha,
float * dx,
float beta,
float * dy)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if( row<num_rows ){
for( j=0; j<num_cols; j++ ){
int idx = row + j*num_rows;
dy[ idx ] = alpha * dx[ idx ] + beta * dy[ idx ];
}
}
}
/**
Purpose
-------
This routine computes Y = alpha * X + beta * Y on the GPU.
The input format is a dense matrix (vector block) stored in
magma_s_matrix format.
Arguments
---------
@param[in]
alpha float
scalar multiplier.
@param[in]
X magma_s_matrix
input matrix X.
@param[in]
beta float
scalar multiplier.
@param[in,out]
Y magma_s_matrix*
input/output matrix Y.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
extern "C"
magma_int_t
magma_sgeaxpy(
float alpha,
magma_s_matrix X,
float beta,
magma_s_matrix *Y,
magma_queue_t queue )
{
int m = X.num_rows;
int n = X.num_cols;
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( sgeaxpy_kernel), dim3(grid), dim3(threads), 0, queue ,
m, n, alpha, X.dval, beta, Y->dval );
return MAGMA_SUCCESS;
}
| f195a2b93c0fbb8c48824e54682a4784eec09032.cu | /*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zgeaxpy.cu normal z -> s, Fri Sep 11 18:29:42 2015
*/
#include "common_magma.h"
#include "common_magmasparse.h"
#define BLOCK_SIZE 256
// axpy kernel for matrices stored in the MAGMA format
__global__ void
sgeaxpy_kernel(
int num_rows,
int num_cols,
float alpha,
float * dx,
float beta,
float * dy)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if( row<num_rows ){
for( j=0; j<num_cols; j++ ){
int idx = row + j*num_rows;
dy[ idx ] = alpha * dx[ idx ] + beta * dy[ idx ];
}
}
}
/**
Purpose
-------
This routine computes Y = alpha * X + beta * Y on the GPU.
The input format is a dense matrix (vector block) stored in
magma_s_matrix format.
Arguments
---------
@param[in]
alpha float
scalar multiplier.
@param[in]
X magma_s_matrix
input/output matrix Y.
@param[in]
beta float
scalar multiplier.
@param[in,out]
Y magma_s_matrix*
input/output matrix Y.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
extern "C"
magma_int_t
magma_sgeaxpy(
float alpha,
magma_s_matrix X,
float beta,
magma_s_matrix *Y,
magma_queue_t queue )
{
int m = X.num_rows;
int n = X.num_cols;
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
sgeaxpy_kernel<<< grid, threads, 0, queue >>>
( m, n, alpha, X.dval, beta, Y->dval );
return MAGMA_SUCCESS;
}
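/*
    Usage sketch (hypothetical, not part of the MAGMA sources): assuming X and
    Y are already-allocated m x n magma_s_matrix objects on the GPU with
    matching sizes and a valid queue, the call

        magma_sgeaxpy( 2.0f, X, 1.0f, &Y, queue );   // Y := 2*X + Y

    scales X by 2 and accumulates it into Y in place.
*/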
|
5b4268b73e104908d8ff01c9190f36edf7fc0018.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
// for some reason, const float kBNLL_THRESHOLD = 50.; does not work with nvcc on windows
#define kBNLL_THRESHOLD 50.0f
template <typename Dtype>
__global__ void BNLLForward(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ?
in[index] + log(1. + exp(-in[index])) :
log(1. + exp(in[index]));
}
}
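// The branch above is a numerically stable evaluation of the BNLL output
// out = log(1 + exp(in)): for in > 0 the identity
// log(1 + exp(in)) == in + log(1 + exp(-in)) is used, so exp() is only ever
// applied to a non-positive argument and cannot overflow.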
template <typename Dtype>
void BNLLLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( BNLLForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void BNLLBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
Dtype expval = exp(min(in_data[index], Dtype(kBNLL_THRESHOLD)));
out_diff[index] = in_diff[index] * expval / (expval + 1.);
}
}
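// The backward pass uses d/dx log(1 + exp(x)) = exp(x) / (1 + exp(x))
// (the logistic sigmoid), hence out_diff = in_diff * expval / (expval + 1);
// clamping the argument at kBNLL_THRESHOLD only guards exp() against
// overflow and leaves the gradient essentially unchanged (it is ~1 there).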
template <typename Dtype>
void BNLLLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( BNLLBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, bottom_data, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(BNLLLayer);
} // namespace caffe
| 5b4268b73e104908d8ff01c9190f36edf7fc0018.cu | #include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
// for some reason, const float kBNLL_THRESHOLD = 50.; does not work with nvcc on windows
#define kBNLL_THRESHOLD 50.0f
template <typename Dtype>
__global__ void BNLLForward(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ?
in[index] + log(1. + exp(-in[index])) :
log(1. + exp(in[index]));
}
}
template <typename Dtype>
void BNLLLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
BNLLForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void BNLLBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
Dtype expval = exp(min(in_data[index], Dtype(kBNLL_THRESHOLD)));
out_diff[index] = in_diff[index] * expval / (expval + 1.);
}
}
template <typename Dtype>
void BNLLLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
BNLLBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, bottom_data, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(BNLLLayer);
} // namespace caffe
|
ae9f7959b69a78f7161a9fd4be5d8ca215cbc4e2.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <utility>
#include <vector>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/operators/conv_cudnn_helper.h"
#include "paddle/fluid/operators/conv_cudnn_op_cache.h"
#include "paddle/fluid/operators/conv_op.h"
#include "paddle/fluid/operators/math/padding.h"
#include "paddle/fluid/platform/cudnn_helper.h"
#include "paddle/fluid/platform/cudnn_workspace_helper.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/fluid/platform/profiler.h"
DECLARE_bool(cudnn_deterministic);
DECLARE_uint64(conv_workspace_size_limit);
DECLARE_bool(cudnn_exhaustive_search);
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using ScopedTensorDescriptor = platform::ScopedTensorDescriptor;
using ScopedFilterDescriptor = platform::ScopedFilterDescriptor;
using ScopedConvolutionDescriptor = platform::ScopedConvolutionDescriptor;
using DataLayout = platform::DataLayout;
static inline bool IsVoltaOrLater(const platform::CUDADeviceContext& dev_ctx) {
return dev_ctx.GetComputeCapability() >= 70;
}
template <typename T>
class CUDNNConvOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()), true,
paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
const Tensor* input = ctx.Input<Tensor>("Input");
auto* filter = ctx.Input<Tensor>("Filter");
auto* output = ctx.Output<Tensor>("Output");
output->mutable_data<T>(ctx.GetPlace());
const std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
int groups = ctx.Attr<int>("groups");
bool exhaustive_search =
FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
bool deterministic = FLAGS_cudnn_deterministic;
auto exhaustive_deterministic = exhaustive_search && deterministic;
PADDLE_ENFORCE_EQ(exhaustive_deterministic, false,
platform::errors::InvalidArgument(
"Cann't set exhaustive_search True and "
"FLAGS_cudnn_deterministic True at same time."));
const std::string padding_algorithm =
ctx.Attr<std::string>("padding_algorithm");
const std::string data_format = ctx.Attr<std::string>("data_format");
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
auto dtype = platform::CudnnDataType<T>::type;
// Tensor Cores, introduced with Volta GPUs, support faster conv ops
// with FP16 in the NHWC data format.
const bool compute_in_nhwc =
dtype == CUDNN_DATA_HALF && IsVoltaOrLater(dev_ctx);
// We will only do data format conversion from NHWC to NCHW.
// cudnn will convert NCHW to NHWC automatically on Tensor Core.
auto compute_format =
compute_in_nhwc && channel_last ? DataLayout::kNHWC : DataLayout::kNCHW;
VLOG(3) << "Compute ConvOp with cuDNN:"
<< " data_format=" << data_format << " compute_format="
<< (compute_format == DataLayout::kNHWC ? "NHWC" : "NCHW");
// ------------ transformed tensor -----------
Tensor transformed_input_channel(input->type());
Tensor transformed_output(output->type());
Tensor transformed_filter_channel(filter->type());
T* output_data = nullptr;
if (channel_last && compute_format == DataLayout::kNCHW) {
VLOG(3) << "Transform input tensor from NHWC to NCHW.";
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input, &transformed_input_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input, &transformed_input_channel);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, output,
&transformed_output);
} else {
transformed_input_channel.ShareDataWith(*input);
transformed_output.ShareDataWith(*output);
}
if (compute_format == DataLayout::kNHWC) {
VLOG(3) << "Transform filter tensor from NCHW to NHWC.";
ResizeToChannelLast<platform::CUDADeviceContext, T>(
ctx, filter, &transformed_filter_channel);
TransToChannelLast<platform::CUDADeviceContext, T>(
ctx, filter, &transformed_filter_channel);
} else {
transformed_filter_channel.ShareDataWith(*filter);
}
output_data = transformed_output.data<T>();
// update padding and dilation
auto in_dims = transformed_input_channel.dims();
auto filter_dims = transformed_filter_channel.dims();
framework::DDim in_data_dims;
framework::DDim filter_data_dims;
if (compute_format == DataLayout::kNCHW) {
in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size());
filter_data_dims =
framework::slice_ddim(filter_dims, 2, filter_dims.size());
} else {
in_data_dims = framework::slice_ddim(in_dims, 1, in_dims.size() - 1);
filter_data_dims =
framework::slice_ddim(filter_dims, 1, filter_dims.size() - 1);
}
std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
in_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
Tensor transformed_input;
std::vector<int> padding_common(data_dim, 0);
if (!is_sys_pad) {
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = transformed_input_channel.dims()[0];
if (compute_format == DataLayout::kNCHW) {
new_input_shape_vec[1] = transformed_input_channel.dims()[1];
} else {
new_input_shape_vec[data_dim + 1] =
transformed_input_channel.dims()[data_dim + 1];
}
std::vector<int> input_pad(transformed_input_channel.dims().size() * 2,
0);
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = ::min(paddings[2 * i], paddings[2 * i + 1]);
if (compute_format == DataLayout::kNCHW) {
new_input_shape_vec[i + 2] =
transformed_input_channel.dims()[i + 2] + padding_diff[i];
} else {
new_input_shape_vec[i + 1] =
transformed_input_channel.dims()[i + 1] + padding_diff[i];
}
if (compute_format == DataLayout::kNCHW) {
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
} else {
input_pad[2 * i + 2] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 2 + 1] = paddings[2 * i + 1] - padding_common[i];
}
}
framework::DDim new_input_shape(
framework::make_ddim(new_input_shape_vec));
transformed_input.Resize(new_input_shape);
auto& dev_ctx =
ctx.template device_context<paddle::platform::CUDADeviceContext>();
transformed_input =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
const int rank = transformed_input_channel.dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
case 5: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
default:
PADDLE_THROW(platform::errors::InvalidArgument(
"ConvOp only support tensors with 4 or 5 dimensions."));
}
} else {
transformed_input.ShareDataWith(transformed_input_channel);
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
const T* input_data = transformed_input.data<T>();
const T* filter_data = transformed_filter_channel.data<T>();
// ------------------- cudnn descriptors ---------------------
ConvArgs args{&transformed_input,
&transformed_filter_channel,
&transformed_output,
strides,
padding_common,
dilations,
dtype};
auto handle = dev_ctx.cudnn_handle();
auto workspace_handle = dev_ctx.cudnn_workspace_handle();
DataLayout layout = compute_format == DataLayout::kNHWC ? DataLayout::kNHWC
: DataLayout::kNCHW;
if (transformed_input.dims().size() == 5) {
layout = compute_format == DataLayout::kNHWC ? DataLayout::kNDHWC
: DataLayout::kNCDHW;
}
auto layout_format = GetCudnnTensorFormat(layout);
args.handle = handle;
args.cdesc.set(dtype, padding_common, strides, dilations);
#if CUDNN_VERSION_MIN(7, 0, 1)
// cudnn 7 can support groups, no need to do it manually
// FIXME(typhoonzero): find a better way to disable groups
// rather than setting it to 1.
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnSetConvolutionGroupCount(args.cdesc.desc(),
groups));
groups = 1;
#endif
args.idesc.set(transformed_input, layout_format);
args.wdesc.set(transformed_filter_channel, layout_format, groups);
args.odesc.set(transformed_output, layout_format);
int i_n, i_c, i_d, i_h, i_w;
int o_n, o_c, o_d, o_h, o_w;
if (compute_format == DataLayout::kNHWC) {
GetNCDHW(transformed_input.dims(), DataLayout::kNHWC, &i_n, &i_c, &i_d,
&i_h, &i_w);
GetNCDHW(transformed_output.dims(), DataLayout::kNHWC, &o_n, &o_c, &o_d,
&o_h, &o_w);
} else {
GetNCDHW(transformed_input.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d,
&i_h, &i_w);
GetNCDHW(transformed_output.dims(), DataLayout::kNCHW, &o_n, &o_c, &o_d,
&o_h, &o_w);
}
int group_offset_in = i_c / groups * i_h * i_w * i_d;
int group_offset_out = o_c / groups * o_h * o_w * o_d;
int group_offset_filter = transformed_filter_channel.numel() / groups;
// ------------------- cudnn conv workspace ---------------------
size_t workspace_size = 0; // final workspace to allocate.
// ------------------- cudnn conv algorithm ---------------------
cudnnConvolutionFwdAlgo_t algo{};
using search = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
algo = search::Find<T>(args, exhaustive_search, false, ctx);
workspace_size = search::GetWorkspaceSize(args, algo);
#if CUDNN_VERSION_MIN(7, 0, 1)
// When groups > 1, SearchAlgorithm may find that the algo is
// CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED, but this kind of algorithm
// is unstable in forward computation, so change the algorithm to
// CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM manually.
if (ctx.Attr<int>("groups") > 1) {
algo = static_cast<cudnnConvolutionFwdAlgo_t>(0);
}
#endif
// ------------------- cudnn conv forward ---------------------
ScalingParamType<T> alpha = 1.0f;
ScalingParamType<T> beta = 0.0f;
// NOTE(zhiqiu): inplace addto is not supported in double grad yet.
// ScalingParamType<T> beta = ctx.Attr<bool>("use_addto") ? 1.0f : 0.0f;
// VLOG(4) << "Conv: use_addto = " << ctx.Attr<bool>("use_addto");
for (int i = 0; i < groups; i++) {
workspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionForward(
handle, &alpha, args.idesc.desc(),
input_data + i * group_offset_in, args.wdesc.desc(),
filter_data + i * group_offset_filter, args.cdesc.desc(),
algo, workspace_ptr, workspace_size, &beta,
args.odesc.desc(), output_data + i * group_offset_out));
},
workspace_size);
}
if (channel_last && compute_format == DataLayout::kNCHW) {
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_output, output);
}
}
};
template <typename T>
class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()), true,
paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
auto input = ctx.Input<Tensor>("Input");
auto filter = ctx.Input<Tensor>("Filter");
auto output_grad = ctx.Input<Tensor>(framework::GradVarName("Output"));
auto input_grad = ctx.Output<Tensor>(framework::GradVarName("Input"));
auto filter_grad = ctx.Output<Tensor>(framework::GradVarName("Filter"));
if (input_grad) {
input_grad->mutable_data<T>(ctx.GetPlace());
}
if (filter_grad) {
filter_grad->mutable_data<T>(ctx.GetPlace());
}
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
int groups = ctx.Attr<int>("groups");
bool exhaustive_search =
FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
bool deterministic = FLAGS_cudnn_deterministic;
auto exhaustive_deterministic = exhaustive_search && deterministic;
PADDLE_ENFORCE_EQ(exhaustive_deterministic, false,
platform::errors::InvalidArgument(
"Cann't set exhaustive_search True and "
"FLAGS_cudnn_deterministic True at same time."));
const std::string data_format = ctx.Attr<std::string>("data_format");
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
auto dtype = platform::CudnnDataType<T>::type;
const bool compute_in_nhwc =
dtype == CUDNN_DATA_HALF && IsVoltaOrLater(dev_ctx);
auto compute_format =
compute_in_nhwc && channel_last ? DataLayout::kNHWC : DataLayout::kNCHW;
VLOG(3) << "Compute ConvGradOp with cuDNN:"
<< " data_format=" << data_format << " compute_format="
<< (compute_format == DataLayout::kNHWC ? "NHWC" : "NCHW");
// transform Tensor
Tensor transformed_input_channel(input->type());
Tensor transformed_output_grad_channel(output_grad->type());
Tensor transformed_input_grad_channel(input->type());
Tensor transformed_filter_channel(filter->type());
Tensor transformed_filter_grad_channel(filter->type());
if (channel_last && compute_format == DataLayout::kNCHW) {
VLOG(3) << "Transform input, output_grad, input_grad and tensor from "
"NHWC to NCHW.";
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input, &transformed_input_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input, &transformed_input_channel);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, output_grad, &transformed_output_grad_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, output_grad, &transformed_output_grad_channel);
if (input_grad) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input_grad, &transformed_input_grad_channel);
// NOTE(zhiqiu): If inplace_addto strategy is enabled, we need to copy
// the data of input_grad to transformed_input_grad_channel.
if (ctx.Attr<bool>("use_addto")) {
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input_grad, &transformed_input_grad_channel);
}
}
} else {
transformed_input_channel.ShareDataWith(*input);
transformed_output_grad_channel.ShareDataWith(*output_grad);
if (input_grad) {
transformed_input_grad_channel.ShareDataWith(*input_grad);
}
}
if (compute_format == DataLayout::kNHWC) {
VLOG(3) << "Transform filter and filter_grad tensor from NCHW to NHWC.";
ResizeToChannelLast<platform::CUDADeviceContext, T>(
ctx, filter, &transformed_filter_channel);
TransToChannelLast<platform::CUDADeviceContext, T>(
ctx, filter, &transformed_filter_channel);
if (filter_grad) {
ResizeToChannelLast<platform::CUDADeviceContext, T>(
ctx, filter_grad, &transformed_filter_grad_channel);
}
} else {
transformed_filter_channel.ShareDataWith(*filter);
if (filter_grad) {
transformed_filter_grad_channel.ShareDataWith(*filter_grad);
}
}
// update paddings
auto in_dims = transformed_input_channel.dims();
auto filter_dims = transformed_filter_channel.dims();
framework::DDim in_data_dims;
framework::DDim filter_data_dims;
if (compute_format == DataLayout::kNCHW) {
in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size());
filter_data_dims =
framework::slice_ddim(filter_dims, 2, filter_dims.size());
} else {
in_data_dims = framework::slice_ddim(in_dims, 1, in_dims.size() - 1);
filter_data_dims =
framework::slice_ddim(filter_dims, 1, filter_dims.size() - 1);
}
std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
in_data_dims, strides, ksize);
// cuDNN only supports padding the same amount on every dimension.
// So we create a new padded input tensor.
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
Tensor transformed_input(input->type());
Tensor transformed_input_grad(input->type());
std::vector<int> padding_common(data_dim, 0);
std::vector<int> input_pad(transformed_input_channel.dims().size() * 2, 0);
if (!is_sys_pad) {
// get pad
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = transformed_input_channel.dims()[0];
if (compute_format == DataLayout::kNCHW) {
new_input_shape_vec[1] = transformed_input_channel.dims()[1];
} else {
new_input_shape_vec[data_dim + 1] =
transformed_input_channel.dims()[data_dim + 1];
}
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = ::min(paddings[2 * i], paddings[2 * i + 1]);
if (compute_format == DataLayout::kNCHW) {
new_input_shape_vec[i + 2] =
transformed_input_channel.dims()[i + 2] + padding_diff[i];
} else {
new_input_shape_vec[i + 1] =
transformed_input_channel.dims()[i + 1] + padding_diff[i];
}
if (compute_format == DataLayout::kNCHW) {
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
} else {
input_pad[2 * i + 2] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 2 + 1] = paddings[2 * i + 1] - padding_common[i];
}
}
framework::DDim new_input_shape(
framework::make_ddim(new_input_shape_vec));
transformed_input.Resize(new_input_shape);
transformed_input_grad.Resize(new_input_shape);
auto& dev_ctx =
ctx.template device_context<paddle::platform::CUDADeviceContext>();
transformed_input =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
if (input_grad) {
transformed_input_grad =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
}
// pad for input
const int rank = transformed_input_channel.dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
case 5: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
default:
PADDLE_THROW(platform::errors::InvalidArgument(
"ConvOp only support tensors with 4 or 5 dimensions."));
}
} else {
transformed_input.ShareDataWith(transformed_input_channel);
if (input_grad) {
transformed_input_grad.ShareDataWith(transformed_input_grad_channel);
}
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
const T* input_data = transformed_input.data<T>();
const T* output_grad_data = transformed_output_grad_channel.data<T>();
const T* filter_data = transformed_filter_channel.data<T>();
T* filter_grad_data = nullptr;
T* input_grad_data = nullptr;
T* transformed_input_grad_data = nullptr;
ConvArgs args1{&transformed_input_grad,
&transformed_filter_channel,
&transformed_output_grad_channel,
strides,
padding_common,
dilations,
dtype};
ConvArgs args2{&transformed_input,
&transformed_filter_grad_channel,
&transformed_output_grad_channel,
strides,
padding_common,
dilations,
dtype};
auto handle = dev_ctx.cudnn_handle();
DataLayout layout = compute_format == DataLayout::kNHWC ? DataLayout::kNHWC
: DataLayout::kNCHW;
if (transformed_input.dims().size() == 5) {
layout = compute_format == DataLayout::kNHWC ? DataLayout::kNDHWC
: DataLayout::kNCDHW;
}
auto layout_tensor = GetCudnnTensorFormat(layout);
auto workspace_handle = dev_ctx.cudnn_workspace_handle();
int i_n, i_c, i_d, i_h, i_w;
int o_n, o_c, o_d, o_h, o_w;
if (compute_format == DataLayout::kNHWC) {
GetNCDHW(transformed_input.dims(), DataLayout::kNHWC, &i_n, &i_c, &i_d,
&i_h, &i_w);
GetNCDHW(transformed_output_grad_channel.dims(), DataLayout::kNHWC, &o_n,
&o_c, &o_d, &o_h, &o_w);
} else {
GetNCDHW(transformed_input.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d,
&i_h, &i_w);
GetNCDHW(transformed_output_grad_channel.dims(), DataLayout::kNCHW, &o_n,
&o_c, &o_d, &o_h, &o_w);
}
int group_offset_in = i_c / groups * i_h * i_w * i_d;
int group_offset_out = o_c / groups * o_h * o_w * o_d;
int group_offset_filter = transformed_filter_channel.numel() / groups;
// ------------------- cudnn backward algorithm ---------------------
cudnnConvolutionBwdDataAlgo_t data_algo =
static_cast<cudnnConvolutionBwdDataAlgo_t>(0);
cudnnConvolutionBwdFilterAlgo_t filter_algo =
static_cast<cudnnConvolutionBwdFilterAlgo_t>(0);
size_t workspace_size = 0;
int iwo_groups = groups;
int c_groups = 1;
#if CUDNN_VERSION_MIN(7, 0, 1)
iwo_groups = 1;
c_groups = groups;
groups = 1;
#endif
if (input_grad) {
// ------------------- cudnn descriptors ---------------------
input_grad_data = input_grad->data<T>();
transformed_input_grad_data = transformed_input_grad.data<T>();
args1.handle = handle;
args1.idesc.set(transformed_input_grad, layout_tensor);
args1.wdesc.set(transformed_filter_channel, layout_tensor, iwo_groups);
args1.odesc.set(transformed_output_grad_channel, layout_tensor);
args1.cdesc.set(dtype, padding_common, strides, dilations, c_groups);
using search1 = SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
data_algo =
search1::Find<T>(args1, exhaustive_search, deterministic, ctx);
workspace_size =
::max(workspace_size, search1::GetWorkspaceSize(args1, data_algo));
}
if (filter_grad) {
// ------------------- cudnn descriptors ---------------------
filter_grad_data = transformed_filter_grad_channel.data<T>();
args2.handle = handle;
args2.idesc.set(transformed_input, layout_tensor);
args2.wdesc.set(transformed_filter_grad_channel, layout_tensor,
iwo_groups);
args2.odesc.set(transformed_output_grad_channel, layout_tensor);
args2.cdesc.set(dtype, padding_common, strides, dilations, c_groups);
using search2 = SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
filter_algo =
search2::Find<T>(args2, exhaustive_search, deterministic, ctx);
workspace_size = ::max(workspace_size,
search2::GetWorkspaceSize(args2, filter_algo));
}
// ------------------- cudnn conv backward data ---------------------
ScalingParamType<T> alpha = 1.0f;
ScalingParamType<T> beta = ctx.Attr<bool>("use_addto") ? 1.0f : 0.0f;
VLOG(4) << "Conv_grad: use_addto = " << ctx.Attr<bool>("use_addto");
if (input_grad) {
// When beta is 0, it is unnecessary to reset input_grad.
      // When beta is 1, the output cannot be reset since the addto strategy
      // accumulates into the existing values.
for (int i = 0; i < groups; i++) {
workspace_handle.RunFunc(
[&](void* cudnn_workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionBackwardData(
handle, &alpha, args1.wdesc.desc(),
filter_data + i * group_offset_filter, args1.odesc.desc(),
output_grad_data + i * group_offset_out,
args1.cdesc.desc(), data_algo, cudnn_workspace_ptr,
workspace_size, &beta, args1.idesc.desc(),
transformed_input_grad_data + i * group_offset_in));
},
workspace_size);
}
if (!is_sys_pad) {
std::vector<int> starts(transformed_input_channel.dims().size(), 0);
std::vector<int> axes(transformed_input_channel.dims().size(), 0);
for (size_t i = 0; i < transformed_input_channel.dims().size(); ++i) {
starts[i] = input_pad[2 * i];
axes[i] = i;
}
transformed_input_grad_channel.mutable_data(ctx.GetPlace());
if (transformed_input_channel.dims().size() == 4) {
RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 4>(
ctx, &transformed_input_grad, &transformed_input_grad_channel,
starts, axes);
} else {
RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 5>(
ctx, &transformed_input_grad, &transformed_input_grad_channel,
starts, axes);
}
}
if (channel_last && compute_format == DataLayout::kNCHW) {
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_input_grad_channel, input_grad);
}
}
// filter_grad do not use inplace addto.
ScalingParamType<T> beta_filter = 0.0f;
// ------------------- cudnn conv backward filter ---------------------
if (filter_grad) {
// Because beta is zero, it is unnecessary to reset filter_grad.
for (int i = 0; i < groups; i++) {
workspace_handle.RunFunc(
[&](void* cudnn_workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionBackwardFilter(
handle, &alpha, args2.idesc.desc(),
input_data + i * group_offset_in, args2.odesc.desc(),
output_grad_data + i * group_offset_out,
args2.cdesc.desc(), filter_algo, cudnn_workspace_ptr,
workspace_size, &beta_filter, args2.wdesc.desc(),
filter_grad_data + i * group_offset_filter));
},
workspace_size);
}
if (compute_format == DataLayout::kNHWC) {
TransToChannelFirst<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_filter_grad_channel, filter_grad);
}
}
}
};
/*
* Inputs: I, W, dO, ddI, ddW
* Outputs: ddO, dW, dI
* ddo = conv(ddI, W) + conv(I, ddW)
* dW = conv_bp_filter(ddI, dO)
* dI = conv_bp_data(ddW, dO)
*/
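// The four ConvArgs built below map onto these equations: args1 and args2 are
// the two forward convolutions accumulated into ddO, args3 is the
// backward-filter pass producing dW, and args4 is the backward-data pass
// producing dI.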
template <typename T>
class CUDNNConvDoubleGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()), true,
paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
auto X = ctx.Input<Tensor>("Input");
auto W = ctx.Input<Tensor>("Filter");
auto dO = ctx.Input<Tensor>("DOutput");
auto ddX = ctx.Input<Tensor>("DDInput");
auto ddW = ctx.Input<Tensor>("DDFilter");
auto ddO = ctx.Output<Tensor>("DDOutput");
auto dW = ctx.Output<Tensor>("DFilter");
auto dX = ctx.Output<Tensor>("DInput");
if (ddO) {
ddO->mutable_data<T>(ctx.GetPlace());
math::SetConstant<platform::CUDADeviceContext, T> set_zero;
set_zero(dev_ctx, ddO, static_cast<T>(0));
}
if (dW) {
dW->mutable_data<T>(ctx.GetPlace());
}
if (dX) {
dX->mutable_data<T>(ctx.GetPlace());
}
// const T* x = X->data<T>();
const T* dy = dO->data<T>();
const T* w = W->data<T>();
const T* ddx = nullptr;
const T* ddw = nullptr;
T *dw, *dx, *ddy;
dw = dx = ddy = nullptr;
T* transformed_dx = nullptr;
const std::vector<int>& strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
int groups = ctx.Attr<int>("groups");
bool exhaustive_search =
FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
bool deterministic = FLAGS_cudnn_deterministic;
auto exhaustive_deterministic = exhaustive_search && deterministic;
PADDLE_ENFORCE_EQ(exhaustive_deterministic, false,
platform::errors::InvalidArgument(
"Cann't set exhaustive_search True and "
"FLAGS_cudnn_deterministic True at same time."));
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
const std::string data_format = ctx.Attr<std::string>("data_format");
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
// transform Tensors to channel first-----------
Tensor transformed_X_channel(X->type());
Tensor transformed_dO_channel(dO->type());
Tensor transformed_ddX_channel(X->type());
Tensor transformed_ddO_channel(dO->type());
Tensor transformed_dX_channel(X->type());
if (channel_last) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, X, &transformed_X_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, X, &transformed_X_channel);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, dO, &transformed_dO_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, dO, &transformed_dO_channel);
if (ddX) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, ddX, &transformed_ddX_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, ddX, &transformed_ddX_channel);
}
if (ddO) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, ddO, &transformed_ddO_channel);
}
if (dX) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, dX, &transformed_dX_channel);
transformed_dX_channel.mutable_data<T>(ctx.GetPlace());
}
} else {
transformed_X_channel = *X;
transformed_dO_channel = *dO;
if (ddX) {
transformed_ddX_channel = *ddX;
}
if (ddO) {
transformed_ddO_channel.ShareDataWith(*ddO);
}
if (dX) {
transformed_dX_channel.ShareDataWith(*dX);
}
}
auto in_dims = transformed_X_channel.dims();
auto filter_dims = W->dims();
framework::DDim in_data_dims =
framework::slice_ddim(in_dims, 2, in_dims.size());
framework::DDim filter_data_dims =
framework::slice_ddim(filter_dims, 2, filter_dims.size());
std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
in_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
Tensor transformed_X(X->type());
Tensor transformed_ddX(X->type());
Tensor transformed_dX(X->type());
std::vector<int> padding_common(data_dim, 0);
std::vector<int> input_pad(X->dims().size() * 2, 0);
if (!is_sys_pad) {
// get pad
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = transformed_X_channel.dims()[0];
new_input_shape_vec[1] = transformed_X_channel.dims()[1];
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
        padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
new_input_shape_vec[i + 2] =
transformed_X_channel.dims()[i + 2] + padding_diff[i];
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
}
framework::DDim new_input_shape(
framework::make_ddim(new_input_shape_vec));
transformed_X.Resize(new_input_shape);
transformed_ddX.Resize(new_input_shape);
transformed_dX.Resize(new_input_shape);
transformed_X =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
if (ddX) {
transformed_ddX =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
}
if (dX) {
transformed_dX =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
}
// pad for input
const int rank = X->dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, transformed_X_channel, pad_value, &transformed_X);
if (ddX) {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, transformed_ddX_channel, pad_value,
&transformed_ddX);
}
} break;
case 5: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, transformed_X_channel, pad_value, &transformed_X);
if (ddX) {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, transformed_ddX_channel, pad_value,
&transformed_ddX);
}
} break;
default:
PADDLE_THROW(platform::errors::InvalidArgument(
"ConvOp only support tensors with 4 or 5 dimensions."));
}
} else {
transformed_X.ShareDataWith(transformed_X_channel);
if (ddX) {
transformed_ddX.ShareDataWith(transformed_ddX_channel);
}
if (dX) {
transformed_dX.ShareDataWith(transformed_dX_channel);
}
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
const T* x = transformed_X.data<T>();
int iwo_group = groups;
int c_group = 1;
#if CUDNN_VERSION_MIN(7, 0, 1)
iwo_group = 1;
c_group = groups;
groups = 1;
#endif
auto dtype = platform::CudnnDataType<T>::type;
auto handle = dev_ctx.cudnn_handle();
ConvArgs args1{&transformed_ddX,
W,
&transformed_ddO_channel,
strides,
padding_common,
dilations,
dtype};
ConvArgs args2{
&transformed_X, ddW, &transformed_ddO_channel, strides, padding_common,
dilations, dtype};
ConvArgs args3{&transformed_ddX,
dW,
&transformed_dO_channel,
strides,
padding_common,
dilations,
dtype};
ConvArgs args4{
&transformed_dX, ddW, &transformed_dO_channel, strides, padding_common,
dilations, dtype};
cudnnConvolutionFwdAlgo_t fwd_algo1 =
static_cast<cudnnConvolutionFwdAlgo_t>(0);
cudnnConvolutionFwdAlgo_t fwd_algo2 =
static_cast<cudnnConvolutionFwdAlgo_t>(0);
cudnnConvolutionBwdDataAlgo_t data_algo =
static_cast<cudnnConvolutionBwdDataAlgo_t>(0);
cudnnConvolutionBwdFilterAlgo_t filter_algo =
static_cast<cudnnConvolutionBwdFilterAlgo_t>(0);
auto layout = GetCudnnTensorFormat(DataLayout::kNCHW);
// ddo = conv(ddI, W) + conv(I, ddW)
size_t workspace_size = 0;
T* transformed_ddy_channel = nullptr;
if (ddO) {
ddy = ddO->data<T>();
transformed_ddy_channel = transformed_ddO_channel.data<T>();
if (ddX) {
args1.handle = handle;
args1.idesc.set(transformed_ddX, iwo_group);
args1.wdesc.set(*W, layout, iwo_group);
args1.odesc.set(transformed_ddO_channel, iwo_group);
args1.cdesc.set(dtype, padding_common, strides, dilations, c_group);
using search1 = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
fwd_algo1 = search1::Find<T>(args1, exhaustive_search, false, ctx);
workspace_size = search1::GetWorkspaceSize(args1, fwd_algo1);
}
if (ddW) {
ddw = ddW->data<T>();
args2.handle = handle;
args2.idesc.set(transformed_X, iwo_group);
args2.wdesc.set(*ddW, layout, iwo_group);
args2.odesc.set(transformed_ddO_channel, iwo_group);
args2.cdesc.set(dtype, padding_common, strides, dilations, c_group);
using search2 = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
fwd_algo2 = search2::Find<T>(args2, exhaustive_search, false, ctx);
        workspace_size = std::max(workspace_size,
                                  search2::GetWorkspaceSize(args2, fwd_algo2));
}
}
if (dW && ddX) {
dw = dW->data<T>();
args3.handle = handle;
args3.idesc.set(transformed_ddX, iwo_group);
args3.wdesc.set(*dW, layout, iwo_group);
args3.odesc.set(transformed_dO_channel, iwo_group);
args3.cdesc.set(dtype, padding_common, strides, dilations, c_group);
using search3 = SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
filter_algo =
search3::Find<T>(args3, exhaustive_search, deterministic, ctx);
      workspace_size = std::max(workspace_size,
                                search3::GetWorkspaceSize(args3, filter_algo));
}
if (ddW && dX) {
transformed_dx = transformed_dX.data<T>();
args4.handle = handle;
args4.idesc.set(transformed_dX, iwo_group);
args4.wdesc.set(*ddW, layout, iwo_group);
args4.odesc.set(transformed_dO_channel, iwo_group);
args4.cdesc.set(dtype, padding_common, strides, dilations, c_group);
using search4 = SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
data_algo =
search4::Find<T>(args4, exhaustive_search, deterministic, ctx);
workspace_size =
          std::max(workspace_size, search4::GetWorkspaceSize(args4, data_algo));
}
int i_n, i_c, i_d, i_h, i_w;
GetNCDHW(transformed_X.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d, &i_h,
&i_w);
int o_n, o_c, o_d, o_h, o_w;
GetNCDHW(transformed_dO_channel.dims(), DataLayout::kNCHW, &o_n, &o_c, &o_d,
&o_h, &o_w);
int group_offset_in = i_c / groups * i_h * i_w * i_d;
int group_offset_out = o_c / groups * o_h * o_w * o_d;
int group_offset_filter = W->numel() / groups;
ScalingParamType<T> alpha = 1.0f;
ScalingParamType<T> beta = 0.0f;
    // NOTE(zhiqiu): inplace addto is not supported in double grad yet.
// ScalingParamType<T> beta = ctx.Attr<bool>("use_addto") ? 1.0f :
// 0.0f;
// VLOG(4) << "Conv_grad_grad: use_addto = " << ctx.Attr<bool>("use_addto");
auto wkspace_handle = dev_ctx.cudnn_workspace_handle();
if (ddO) {
if (ddX) {
ddx = transformed_ddX.data<T>();
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionForward(
handle, &alpha, args1.idesc.desc(),
ddx + i * group_offset_in, args1.wdesc.desc(),
w + i * group_offset_filter, args1.cdesc.desc(),
fwd_algo1, workspace_ptr, workspace_size, &beta,
args1.odesc.desc(),
transformed_ddy_channel + i * group_offset_out));
},
workspace_size);
}
}
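      // The second convolution below passes alpha (1.0) as the output scaling
      // factor, so its result is accumulated into ddO on top of conv(ddI, W).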
if (ddW) {
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionForward(
handle, &alpha, args2.idesc.desc(),
x + i * group_offset_in, args2.wdesc.desc(),
ddw + i * group_offset_filter, args2.cdesc.desc(),
fwd_algo2, workspace_ptr, workspace_size, &alpha,
args2.odesc.desc(),
transformed_ddy_channel + i * group_offset_out));
},
workspace_size);
}
}
if (channel_last) {
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_ddO_channel, ddO);
}
}
T* transformed_dy_channel = transformed_dO_channel.data<T>();
if (dW && ddX) {
ddx = transformed_ddX.data<T>();
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionBackwardFilter(
handle, &alpha, args3.idesc.desc(),
ddx + i * group_offset_in, args3.odesc.desc(),
transformed_dy_channel + i * group_offset_out,
args3.cdesc.desc(), filter_algo, workspace_ptr,
workspace_size, &beta, args3.wdesc.desc(),
dw + i * group_offset_filter));
},
workspace_size);
}
}
if (dX && ddW) {
ddw = ddW->data<T>();
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionBackwardData(
handle, &alpha, args4.wdesc.desc(),
ddw + i * group_offset_filter, args4.odesc.desc(),
transformed_dy_channel + i * group_offset_out,
args4.cdesc.desc(), data_algo, workspace_ptr,
workspace_size, &beta, args4.idesc.desc(),
transformed_dx + i * group_offset_in));
},
workspace_size);
}
if (!is_sys_pad) {
        // Slice the explicit padding back off of the computed input gradient.
std::vector<int> starts(X->dims().size(), 0);
std::vector<int> axes(X->dims().size(), 0);
for (size_t i = 0; i < X->dims().size(); ++i) {
starts[i] = input_pad[2 * i];
axes[i] = i;
}
if (X->dims().size() == 4) {
RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 4>(
ctx, &transformed_dX, &transformed_dX_channel, starts, axes);
} else {
RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 5>(
ctx, &transformed_dX, &transformed_dX_channel, starts, axes);
}
}
if (channel_last) {
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_dX_channel, dX);
}
}
}
};
} // namespace operators
} // namespace paddle
namespace plat = paddle::platform;
REGISTER_OP_KERNEL(conv2d, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvOpKernel<float>,
paddle::operators::CUDNNConvOpKernel<double>,
paddle::operators::CUDNNConvOpKernel<plat::float16>);
REGISTER_OP_KERNEL(conv2d_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvGradOpKernel<float>,
paddle::operators::CUDNNConvGradOpKernel<double>,
paddle::operators::CUDNNConvGradOpKernel<plat::float16>);
REGISTER_OP_KERNEL(
conv2d_grad_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvDoubleGradOpKernel<float>,
paddle::operators::CUDNNConvDoubleGradOpKernel<double>,
paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>);
REGISTER_OP_CUDA_KERNEL(
depthwise_conv2d_grad_grad,
paddle::operators::CUDNNConvDoubleGradOpKernel<float>,
paddle::operators::CUDNNConvDoubleGradOpKernel<double>,
paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>);
REGISTER_OP_KERNEL(conv3d, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvOpKernel<float>,
paddle::operators::CUDNNConvOpKernel<double>,
paddle::operators::CUDNNConvOpKernel<plat::float16>);
REGISTER_OP_KERNEL(conv3d_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvGradOpKernel<float>,
paddle::operators::CUDNNConvGradOpKernel<double>);
REGISTER_OP_KERNEL(
conv3d_grad_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvDoubleGradOpKernel<float>,
paddle::operators::CUDNNConvDoubleGradOpKernel<double>,
paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>);
| ae9f7959b69a78f7161a9fd4be5d8ca215cbc4e2.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <utility>
#include <vector>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/operators/conv_cudnn_helper.h"
#include "paddle/fluid/operators/conv_cudnn_op_cache.h"
#include "paddle/fluid/operators/conv_op.h"
#include "paddle/fluid/operators/math/padding.h"
#include "paddle/fluid/platform/cudnn_helper.h"
#include "paddle/fluid/platform/cudnn_workspace_helper.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/fluid/platform/profiler.h"
DECLARE_bool(cudnn_deterministic);
DECLARE_uint64(conv_workspace_size_limit);
DECLARE_bool(cudnn_exhaustive_search);
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using ScopedTensorDescriptor = platform::ScopedTensorDescriptor;
using ScopedFilterDescriptor = platform::ScopedFilterDescriptor;
using ScopedConvolutionDescriptor = platform::ScopedConvolutionDescriptor;
using DataLayout = platform::DataLayout;
static inline bool IsVoltaOrLater(const platform::CUDADeviceContext& dev_ctx) {
return dev_ctx.GetComputeCapability() >= 70;
}
template <typename T>
class CUDNNConvOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()), true,
paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
const Tensor* input = ctx.Input<Tensor>("Input");
auto* filter = ctx.Input<Tensor>("Filter");
auto* output = ctx.Output<Tensor>("Output");
output->mutable_data<T>(ctx.GetPlace());
const std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
int groups = ctx.Attr<int>("groups");
bool exhaustive_search =
FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
bool deterministic = FLAGS_cudnn_deterministic;
auto exhaustive_deterministic = exhaustive_search && deterministic;
PADDLE_ENFORCE_EQ(exhaustive_deterministic, false,
platform::errors::InvalidArgument(
"Cann't set exhaustive_search True and "
"FLAGS_cudnn_deterministic True at same time."));
const std::string padding_algorithm =
ctx.Attr<std::string>("padding_algorithm");
const std::string data_format = ctx.Attr<std::string>("data_format");
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
auto dtype = platform::CudnnDataType<T>::type;
    // Tensor Cores introduced with Volta GPUs support faster convolutions
    // with FP16 in the NHWC data format.
const bool compute_in_nhwc =
dtype == CUDNN_DATA_HALF && IsVoltaOrLater(dev_ctx);
// We will only do data format conversion from NHWC to NCHW.
// cudnn will convert NCHW to NHWC automatically on Tensor Core.
auto compute_format =
compute_in_nhwc && channel_last ? DataLayout::kNHWC : DataLayout::kNCHW;
VLOG(3) << "Compute ConvOp with cuDNN:"
<< " data_format=" << data_format << " compute_format="
<< (compute_format == DataLayout::kNHWC ? "NHWC" : "NCHW");
// ------------ transformed tensor -----------
Tensor transformed_input_channel(input->type());
Tensor transformed_output(output->type());
Tensor transformed_filter_channel(filter->type());
T* output_data = nullptr;
if (channel_last && compute_format == DataLayout::kNCHW) {
VLOG(3) << "Transform input tensor from NHWC to NCHW.";
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input, &transformed_input_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input, &transformed_input_channel);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, output,
&transformed_output);
} else {
transformed_input_channel.ShareDataWith(*input);
transformed_output.ShareDataWith(*output);
}
if (compute_format == DataLayout::kNHWC) {
VLOG(3) << "Transform filter tensor from NCHW to NHWC.";
ResizeToChannelLast<platform::CUDADeviceContext, T>(
ctx, filter, &transformed_filter_channel);
TransToChannelLast<platform::CUDADeviceContext, T>(
ctx, filter, &transformed_filter_channel);
} else {
transformed_filter_channel.ShareDataWith(*filter);
}
output_data = transformed_output.data<T>();
// update padding and dilation
auto in_dims = transformed_input_channel.dims();
auto filter_dims = transformed_filter_channel.dims();
framework::DDim in_data_dims;
framework::DDim filter_data_dims;
if (compute_format == DataLayout::kNCHW) {
in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size());
filter_data_dims =
framework::slice_ddim(filter_dims, 2, filter_dims.size());
} else {
in_data_dims = framework::slice_ddim(in_dims, 1, in_dims.size() - 1);
filter_data_dims =
framework::slice_ddim(filter_dims, 1, filter_dims.size() - 1);
}
std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
in_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
Tensor transformed_input;
std::vector<int> padding_common(data_dim, 0);
if (!is_sys_pad) {
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = transformed_input_channel.dims()[0];
if (compute_format == DataLayout::kNCHW) {
new_input_shape_vec[1] = transformed_input_channel.dims()[1];
} else {
new_input_shape_vec[data_dim + 1] =
transformed_input_channel.dims()[data_dim + 1];
}
std::vector<int> input_pad(transformed_input_channel.dims().size() * 2,
0);
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
if (compute_format == DataLayout::kNCHW) {
new_input_shape_vec[i + 2] =
transformed_input_channel.dims()[i + 2] + padding_diff[i];
} else {
new_input_shape_vec[i + 1] =
transformed_input_channel.dims()[i + 1] + padding_diff[i];
}
if (compute_format == DataLayout::kNCHW) {
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
} else {
input_pad[2 * i + 2] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 2 + 1] = paddings[2 * i + 1] - padding_common[i];
}
}
framework::DDim new_input_shape(
framework::make_ddim(new_input_shape_vec));
transformed_input.Resize(new_input_shape);
auto& dev_ctx =
ctx.template device_context<paddle::platform::CUDADeviceContext>();
transformed_input =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
const int rank = transformed_input_channel.dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
case 5: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
default:
PADDLE_THROW(platform::errors::InvalidArgument(
"ConvOp only support tensors with 4 or 5 dimensions."));
}
} else {
transformed_input.ShareDataWith(transformed_input_channel);
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
const T* input_data = transformed_input.data<T>();
const T* filter_data = transformed_filter_channel.data<T>();
// ------------------- cudnn descriptors ---------------------
ConvArgs args{&transformed_input,
&transformed_filter_channel,
&transformed_output,
strides,
padding_common,
dilations,
dtype};
auto handle = dev_ctx.cudnn_handle();
auto workspace_handle = dev_ctx.cudnn_workspace_handle();
DataLayout layout = compute_format == DataLayout::kNHWC ? DataLayout::kNHWC
: DataLayout::kNCHW;
if (transformed_input.dims().size() == 5) {
layout = compute_format == DataLayout::kNHWC ? DataLayout::kNDHWC
: DataLayout::kNCDHW;
}
auto layout_format = GetCudnnTensorFormat(layout);
args.handle = handle;
args.cdesc.set(dtype, padding_common, strides, dilations);
#if CUDNN_VERSION_MIN(7, 0, 1)
// cudnn 7 can support groups, no need to do it manually
// FIXME(typhoonzero): find a better way to disable groups
// rather than setting it to 1.
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnSetConvolutionGroupCount(args.cdesc.desc(),
groups));
groups = 1;
#endif
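    // When cuDNN >= 7.0.1, the group count is set on the convolution
    // descriptor above and `groups` is reset to 1, so the forward loop below
    // runs a single iteration; otherwise each group is dispatched separately.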
args.idesc.set(transformed_input, layout_format);
args.wdesc.set(transformed_filter_channel, layout_format, groups);
args.odesc.set(transformed_output, layout_format);
int i_n, i_c, i_d, i_h, i_w;
int o_n, o_c, o_d, o_h, o_w;
if (compute_format == DataLayout::kNHWC) {
GetNCDHW(transformed_input.dims(), DataLayout::kNHWC, &i_n, &i_c, &i_d,
&i_h, &i_w);
GetNCDHW(transformed_output.dims(), DataLayout::kNHWC, &o_n, &o_c, &o_d,
&o_h, &o_w);
} else {
GetNCDHW(transformed_input.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d,
&i_h, &i_w);
GetNCDHW(transformed_output.dims(), DataLayout::kNCHW, &o_n, &o_c, &o_d,
&o_h, &o_w);
}
int group_offset_in = i_c / groups * i_h * i_w * i_d;
int group_offset_out = o_c / groups * o_h * o_w * o_d;
int group_offset_filter = transformed_filter_channel.numel() / groups;
// ------------------- cudnn conv workspace ---------------------
size_t workspace_size = 0; // final workspace to allocate.
// ------------------- cudnn conv algorithm ---------------------
cudnnConvolutionFwdAlgo_t algo{};
using search = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
algo = search::Find<T>(args, exhaustive_search, false, ctx);
workspace_size = search::GetWorkspaceSize(args, algo);
#if CUDNN_VERSION_MIN(7, 0, 1)
    // When groups > 1, the algorithm found by SearchAlgorithm may be
    // CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED, which is unstable in
    // forward computation, so fall back to
    // CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM manually.
if (ctx.Attr<int>("groups") > 1) {
algo = static_cast<cudnnConvolutionFwdAlgo_t>(0);
}
#endif
// ------------------- cudnn conv forward ---------------------
ScalingParamType<T> alpha = 1.0f;
ScalingParamType<T> beta = 0.0f;
    // NOTE(zhiqiu): inplace addto is not supported in double grad yet.
// ScalingParamType<T> beta = ctx.Attr<bool>("use_addto") ? 1.0f : 0.0f;
// VLOG(4) << "Conv: use_addto = " << ctx.Attr<bool>("use_addto");
for (int i = 0; i < groups; i++) {
workspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionForward(
handle, &alpha, args.idesc.desc(),
input_data + i * group_offset_in, args.wdesc.desc(),
filter_data + i * group_offset_filter, args.cdesc.desc(),
algo, workspace_ptr, workspace_size, &beta,
args.odesc.desc(), output_data + i * group_offset_out));
},
workspace_size);
}
if (channel_last && compute_format == DataLayout::kNCHW) {
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_output, output);
}
}
};
template <typename T>
class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()), true,
paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
auto input = ctx.Input<Tensor>("Input");
auto filter = ctx.Input<Tensor>("Filter");
auto output_grad = ctx.Input<Tensor>(framework::GradVarName("Output"));
auto input_grad = ctx.Output<Tensor>(framework::GradVarName("Input"));
auto filter_grad = ctx.Output<Tensor>(framework::GradVarName("Filter"));
if (input_grad) {
input_grad->mutable_data<T>(ctx.GetPlace());
}
if (filter_grad) {
filter_grad->mutable_data<T>(ctx.GetPlace());
}
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
int groups = ctx.Attr<int>("groups");
bool exhaustive_search =
FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
bool deterministic = FLAGS_cudnn_deterministic;
auto exhaustive_deterministic = exhaustive_search && deterministic;
PADDLE_ENFORCE_EQ(exhaustive_deterministic, false,
platform::errors::InvalidArgument(
"Cann't set exhaustive_search True and "
"FLAGS_cudnn_deterministic True at same time."));
const std::string data_format = ctx.Attr<std::string>("data_format");
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
auto dtype = platform::CudnnDataType<T>::type;
const bool compute_in_nhwc =
dtype == CUDNN_DATA_HALF && IsVoltaOrLater(dev_ctx);
auto compute_format =
compute_in_nhwc && channel_last ? DataLayout::kNHWC : DataLayout::kNCHW;
VLOG(3) << "Compute ConvGradOp with cuDNN:"
<< " data_format=" << data_format << " compute_format="
<< (compute_format == DataLayout::kNHWC ? "NHWC" : "NCHW");
// transform Tensor
Tensor transformed_input_channel(input->type());
Tensor transformed_output_grad_channel(output_grad->type());
Tensor transformed_input_grad_channel(input->type());
Tensor transformed_filter_channel(filter->type());
Tensor transformed_filter_grad_channel(filter->type());
if (channel_last && compute_format == DataLayout::kNCHW) {
VLOG(3) << "Transform input, output_grad, input_grad and tensor from "
"NHWC to NCHW.";
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input, &transformed_input_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input, &transformed_input_channel);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, output_grad, &transformed_output_grad_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, output_grad, &transformed_output_grad_channel);
if (input_grad) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input_grad, &transformed_input_grad_channel);
// NOTE(zhiqiu): If inplace_addto strategy is enabled, we need to copy
// the data of input_grad to transformed_input_grad_channel.
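        // With use_addto the backward-data call later runs with beta = 1 and
        // accumulates into the gradient, so the current values of input_grad
        // must already be present in the transformed buffer.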
if (ctx.Attr<bool>("use_addto")) {
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input_grad, &transformed_input_grad_channel);
}
}
} else {
transformed_input_channel.ShareDataWith(*input);
transformed_output_grad_channel.ShareDataWith(*output_grad);
if (input_grad) {
transformed_input_grad_channel.ShareDataWith(*input_grad);
}
}
if (compute_format == DataLayout::kNHWC) {
VLOG(3) << "Transform filter and filter_grad tensor from NCHW to NHWC.";
ResizeToChannelLast<platform::CUDADeviceContext, T>(
ctx, filter, &transformed_filter_channel);
TransToChannelLast<platform::CUDADeviceContext, T>(
ctx, filter, &transformed_filter_channel);
if (filter_grad) {
ResizeToChannelLast<platform::CUDADeviceContext, T>(
ctx, filter_grad, &transformed_filter_grad_channel);
}
} else {
transformed_filter_channel.ShareDataWith(*filter);
if (filter_grad) {
transformed_filter_grad_channel.ShareDataWith(*filter_grad);
}
}
// update paddings
auto in_dims = transformed_input_channel.dims();
auto filter_dims = transformed_filter_channel.dims();
framework::DDim in_data_dims;
framework::DDim filter_data_dims;
if (compute_format == DataLayout::kNCHW) {
in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size());
filter_data_dims =
framework::slice_ddim(filter_dims, 2, filter_dims.size());
} else {
in_data_dims = framework::slice_ddim(in_dims, 1, in_dims.size() - 1);
filter_data_dims =
framework::slice_ddim(filter_dims, 1, filter_dims.size() - 1);
}
std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
in_data_dims, strides, ksize);
    // cuDNN only supports symmetric padding (the same amount before and after
    // along each spatial dimension), so when the requested padding is
    // asymmetric we create a new, explicitly padded input tensor.
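    // For example, with paddings {1, 3} on one dimension: padding_common = 1
    // is left to cuDNN, and the remaining asymmetric part (0 before, 2 after)
    // is applied explicitly to the input below.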
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
Tensor transformed_input(input->type());
Tensor transformed_input_grad(input->type());
std::vector<int> padding_common(data_dim, 0);
std::vector<int> input_pad(transformed_input_channel.dims().size() * 2, 0);
if (!is_sys_pad) {
// get pad
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = transformed_input_channel.dims()[0];
if (compute_format == DataLayout::kNCHW) {
new_input_shape_vec[1] = transformed_input_channel.dims()[1];
} else {
new_input_shape_vec[data_dim + 1] =
transformed_input_channel.dims()[data_dim + 1];
}
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
if (compute_format == DataLayout::kNCHW) {
new_input_shape_vec[i + 2] =
transformed_input_channel.dims()[i + 2] + padding_diff[i];
} else {
new_input_shape_vec[i + 1] =
transformed_input_channel.dims()[i + 1] + padding_diff[i];
}
if (compute_format == DataLayout::kNCHW) {
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
} else {
input_pad[2 * i + 2] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 2 + 1] = paddings[2 * i + 1] - padding_common[i];
}
}
framework::DDim new_input_shape(
framework::make_ddim(new_input_shape_vec));
transformed_input.Resize(new_input_shape);
transformed_input_grad.Resize(new_input_shape);
auto& dev_ctx =
ctx.template device_context<paddle::platform::CUDADeviceContext>();
transformed_input =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
if (input_grad) {
transformed_input_grad =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
}
// pad for input
const int rank = transformed_input_channel.dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
case 5: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
default:
PADDLE_THROW(platform::errors::InvalidArgument(
"ConvOp only support tensors with 4 or 5 dimensions."));
}
} else {
transformed_input.ShareDataWith(transformed_input_channel);
if (input_grad) {
transformed_input_grad.ShareDataWith(transformed_input_grad_channel);
}
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
const T* input_data = transformed_input.data<T>();
const T* output_grad_data = transformed_output_grad_channel.data<T>();
const T* filter_data = transformed_filter_channel.data<T>();
T* filter_grad_data = nullptr;
T* input_grad_data = nullptr;
T* transformed_input_grad_data = nullptr;
ConvArgs args1{&transformed_input_grad,
&transformed_filter_channel,
&transformed_output_grad_channel,
strides,
padding_common,
dilations,
dtype};
ConvArgs args2{&transformed_input,
&transformed_filter_grad_channel,
&transformed_output_grad_channel,
strides,
padding_common,
dilations,
dtype};
auto handle = dev_ctx.cudnn_handle();
DataLayout layout = compute_format == DataLayout::kNHWC ? DataLayout::kNHWC
: DataLayout::kNCHW;
if (transformed_input.dims().size() == 5) {
layout = compute_format == DataLayout::kNHWC ? DataLayout::kNDHWC
: DataLayout::kNCDHW;
}
auto layout_tensor = GetCudnnTensorFormat(layout);
auto workspace_handle = dev_ctx.cudnn_workspace_handle();
int i_n, i_c, i_d, i_h, i_w;
int o_n, o_c, o_d, o_h, o_w;
if (compute_format == DataLayout::kNHWC) {
GetNCDHW(transformed_input.dims(), DataLayout::kNHWC, &i_n, &i_c, &i_d,
&i_h, &i_w);
GetNCDHW(transformed_output_grad_channel.dims(), DataLayout::kNHWC, &o_n,
&o_c, &o_d, &o_h, &o_w);
} else {
GetNCDHW(transformed_input.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d,
&i_h, &i_w);
GetNCDHW(transformed_output_grad_channel.dims(), DataLayout::kNCHW, &o_n,
&o_c, &o_d, &o_h, &o_w);
}
int group_offset_in = i_c / groups * i_h * i_w * i_d;
int group_offset_out = o_c / groups * o_h * o_w * o_d;
int group_offset_filter = transformed_filter_channel.numel() / groups;
// ------------------- cudnn backward algorithm ---------------------
cudnnConvolutionBwdDataAlgo_t data_algo =
static_cast<cudnnConvolutionBwdDataAlgo_t>(0);
cudnnConvolutionBwdFilterAlgo_t filter_algo =
static_cast<cudnnConvolutionBwdFilterAlgo_t>(0);
size_t workspace_size = 0;
int iwo_groups = groups;
int c_groups = 1;
#if CUDNN_VERSION_MIN(7, 0, 1)
iwo_groups = 1;
c_groups = groups;
groups = 1;
#endif
if (input_grad) {
// ------------------- cudnn descriptors ---------------------
input_grad_data = input_grad->data<T>();
transformed_input_grad_data = transformed_input_grad.data<T>();
args1.handle = handle;
args1.idesc.set(transformed_input_grad, layout_tensor);
args1.wdesc.set(transformed_filter_channel, layout_tensor, iwo_groups);
args1.odesc.set(transformed_output_grad_channel, layout_tensor);
args1.cdesc.set(dtype, padding_common, strides, dilations, c_groups);
using search1 = SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
data_algo =
search1::Find<T>(args1, exhaustive_search, deterministic, ctx);
workspace_size =
std::max(workspace_size, search1::GetWorkspaceSize(args1, data_algo));
}
if (filter_grad) {
// ------------------- cudnn descriptors ---------------------
filter_grad_data = transformed_filter_grad_channel.data<T>();
args2.handle = handle;
args2.idesc.set(transformed_input, layout_tensor);
args2.wdesc.set(transformed_filter_grad_channel, layout_tensor,
iwo_groups);
args2.odesc.set(transformed_output_grad_channel, layout_tensor);
args2.cdesc.set(dtype, padding_common, strides, dilations, c_groups);
using search2 = SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
filter_algo =
search2::Find<T>(args2, exhaustive_search, deterministic, ctx);
workspace_size = std::max(workspace_size,
search2::GetWorkspaceSize(args2, filter_algo));
}
// ------------------- cudnn conv backward data ---------------------
ScalingParamType<T> alpha = 1.0f;
ScalingParamType<T> beta = ctx.Attr<bool>("use_addto") ? 1.0f : 0.0f;
VLOG(4) << "Conv_grad: use_addto = " << ctx.Attr<bool>("use_addto");
if (input_grad) {
// When beta is 0, it is unnecessary to reset input_grad.
      // When beta is 1, the output cannot be reset since the addto strategy
      // accumulates into the existing values.
for (int i = 0; i < groups; i++) {
workspace_handle.RunFunc(
[&](void* cudnn_workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionBackwardData(
handle, &alpha, args1.wdesc.desc(),
filter_data + i * group_offset_filter, args1.odesc.desc(),
output_grad_data + i * group_offset_out,
args1.cdesc.desc(), data_algo, cudnn_workspace_ptr,
workspace_size, &beta, args1.idesc.desc(),
transformed_input_grad_data + i * group_offset_in));
},
workspace_size);
}
if (!is_sys_pad) {
std::vector<int> starts(transformed_input_channel.dims().size(), 0);
std::vector<int> axes(transformed_input_channel.dims().size(), 0);
for (size_t i = 0; i < transformed_input_channel.dims().size(); ++i) {
starts[i] = input_pad[2 * i];
axes[i] = i;
}
transformed_input_grad_channel.mutable_data(ctx.GetPlace());
if (transformed_input_channel.dims().size() == 4) {
RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 4>(
ctx, &transformed_input_grad, &transformed_input_grad_channel,
starts, axes);
} else {
RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 5>(
ctx, &transformed_input_grad, &transformed_input_grad_channel,
starts, axes);
}
}
if (channel_last && compute_format == DataLayout::kNCHW) {
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_input_grad_channel, input_grad);
}
}
// filter_grad do not use inplace addto.
ScalingParamType<T> beta_filter = 0.0f;
// ------------------- cudnn conv backward filter ---------------------
if (filter_grad) {
// Because beta is zero, it is unnecessary to reset filter_grad.
for (int i = 0; i < groups; i++) {
workspace_handle.RunFunc(
[&](void* cudnn_workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionBackwardFilter(
handle, &alpha, args2.idesc.desc(),
input_data + i * group_offset_in, args2.odesc.desc(),
output_grad_data + i * group_offset_out,
args2.cdesc.desc(), filter_algo, cudnn_workspace_ptr,
workspace_size, &beta_filter, args2.wdesc.desc(),
filter_grad_data + i * group_offset_filter));
},
workspace_size);
}
if (compute_format == DataLayout::kNHWC) {
TransToChannelFirst<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_filter_grad_channel, filter_grad);
}
}
}
};
/*
* Inputs: I, W, dO, ddI, ddW
* Outputs: ddO, dW, dI
* ddo = conv(ddI, W) + conv(I, ddW)
* dW = conv_bp_filter(ddI, dO)
* dI = conv_bp_data(ddW, dO)
*/
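// Each output is produced only when its required inputs are present: ddO needs
// ddI and/or ddW, dW needs ddI, and dI needs ddW (see the guards below).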
template <typename T>
class CUDNNConvDoubleGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()), true,
paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
auto X = ctx.Input<Tensor>("Input");
auto W = ctx.Input<Tensor>("Filter");
auto dO = ctx.Input<Tensor>("DOutput");
auto ddX = ctx.Input<Tensor>("DDInput");
auto ddW = ctx.Input<Tensor>("DDFilter");
auto ddO = ctx.Output<Tensor>("DDOutput");
auto dW = ctx.Output<Tensor>("DFilter");
auto dX = ctx.Output<Tensor>("DInput");
if (ddO) {
ddO->mutable_data<T>(ctx.GetPlace());
math::SetConstant<platform::CUDADeviceContext, T> set_zero;
set_zero(dev_ctx, ddO, static_cast<T>(0));
}
if (dW) {
dW->mutable_data<T>(ctx.GetPlace());
}
if (dX) {
dX->mutable_data<T>(ctx.GetPlace());
}
// const T* x = X->data<T>();
const T* dy = dO->data<T>();
const T* w = W->data<T>();
const T* ddx = nullptr;
const T* ddw = nullptr;
T *dw, *dx, *ddy;
dw = dx = ddy = nullptr;
T* transformed_dx = nullptr;
const std::vector<int>& strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
int groups = ctx.Attr<int>("groups");
bool exhaustive_search =
FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
bool deterministic = FLAGS_cudnn_deterministic;
auto exhaustive_deterministic = exhaustive_search && deterministic;
PADDLE_ENFORCE_EQ(exhaustive_deterministic, false,
platform::errors::InvalidArgument(
"Cann't set exhaustive_search True and "
"FLAGS_cudnn_deterministic True at same time."));
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
const std::string data_format = ctx.Attr<std::string>("data_format");
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
// transform Tensors to channel first-----------
Tensor transformed_X_channel(X->type());
Tensor transformed_dO_channel(dO->type());
Tensor transformed_ddX_channel(X->type());
Tensor transformed_ddO_channel(dO->type());
Tensor transformed_dX_channel(X->type());
if (channel_last) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, X, &transformed_X_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, X, &transformed_X_channel);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, dO, &transformed_dO_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, dO, &transformed_dO_channel);
if (ddX) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, ddX, &transformed_ddX_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, ddX, &transformed_ddX_channel);
}
if (ddO) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, ddO, &transformed_ddO_channel);
}
if (dX) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, dX, &transformed_dX_channel);
transformed_dX_channel.mutable_data<T>(ctx.GetPlace());
}
} else {
transformed_X_channel = *X;
transformed_dO_channel = *dO;
if (ddX) {
transformed_ddX_channel = *ddX;
}
if (ddO) {
transformed_ddO_channel.ShareDataWith(*ddO);
}
if (dX) {
transformed_dX_channel.ShareDataWith(*dX);
}
}
auto in_dims = transformed_X_channel.dims();
auto filter_dims = W->dims();
framework::DDim in_data_dims =
framework::slice_ddim(in_dims, 2, in_dims.size());
framework::DDim filter_data_dims =
framework::slice_ddim(filter_dims, 2, filter_dims.size());
std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
in_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
Tensor transformed_X(X->type());
Tensor transformed_ddX(X->type());
Tensor transformed_dX(X->type());
std::vector<int> padding_common(data_dim, 0);
std::vector<int> input_pad(X->dims().size() * 2, 0);
if (!is_sys_pad) {
// get pad
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = transformed_X_channel.dims()[0];
new_input_shape_vec[1] = transformed_X_channel.dims()[1];
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
new_input_shape_vec[i + 2] =
transformed_X_channel.dims()[i + 2] + padding_diff[i];
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
}
framework::DDim new_input_shape(
framework::make_ddim(new_input_shape_vec));
transformed_X.Resize(new_input_shape);
transformed_ddX.Resize(new_input_shape);
transformed_dX.Resize(new_input_shape);
transformed_X =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
if (ddX) {
transformed_ddX =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
}
if (dX) {
transformed_dX =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
}
// pad for input
const int rank = X->dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, transformed_X_channel, pad_value, &transformed_X);
if (ddX) {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, transformed_ddX_channel, pad_value,
&transformed_ddX);
}
} break;
case 5: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, transformed_X_channel, pad_value, &transformed_X);
if (ddX) {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, transformed_ddX_channel, pad_value,
&transformed_ddX);
}
} break;
default:
PADDLE_THROW(platform::errors::InvalidArgument(
"ConvOp only support tensors with 4 or 5 dimensions."));
}
} else {
transformed_X.ShareDataWith(transformed_X_channel);
if (ddX) {
transformed_ddX.ShareDataWith(transformed_ddX_channel);
}
if (dX) {
transformed_dX.ShareDataWith(transformed_dX_channel);
}
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
const T* x = transformed_X.data<T>();
int iwo_group = groups;
int c_group = 1;
#if CUDNN_VERSION_MIN(7, 0, 1)
iwo_group = 1;
c_group = groups;
groups = 1;
#endif
auto dtype = platform::CudnnDataType<T>::type;
auto handle = dev_ctx.cudnn_handle();
ConvArgs args1{&transformed_ddX,
W,
&transformed_ddO_channel,
strides,
padding_common,
dilations,
dtype};
ConvArgs args2{
&transformed_X, ddW, &transformed_ddO_channel, strides, padding_common,
dilations, dtype};
ConvArgs args3{&transformed_ddX,
dW,
&transformed_dO_channel,
strides,
padding_common,
dilations,
dtype};
ConvArgs args4{
&transformed_dX, ddW, &transformed_dO_channel, strides, padding_common,
dilations, dtype};
cudnnConvolutionFwdAlgo_t fwd_algo1 =
static_cast<cudnnConvolutionFwdAlgo_t>(0);
cudnnConvolutionFwdAlgo_t fwd_algo2 =
static_cast<cudnnConvolutionFwdAlgo_t>(0);
cudnnConvolutionBwdDataAlgo_t data_algo =
static_cast<cudnnConvolutionBwdDataAlgo_t>(0);
cudnnConvolutionBwdFilterAlgo_t filter_algo =
static_cast<cudnnConvolutionBwdFilterAlgo_t>(0);
auto layout = GetCudnnTensorFormat(DataLayout::kNCHW);
// ddo = conv(ddI, W) + conv(I, ddW)
size_t workspace_size = 0;
T* transformed_ddy_channel = nullptr;
if (ddO) {
ddy = ddO->data<T>();
transformed_ddy_channel = transformed_ddO_channel.data<T>();
if (ddX) {
args1.handle = handle;
args1.idesc.set(transformed_ddX, iwo_group);
args1.wdesc.set(*W, layout, iwo_group);
args1.odesc.set(transformed_ddO_channel, iwo_group);
args1.cdesc.set(dtype, padding_common, strides, dilations, c_group);
using search1 = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
fwd_algo1 = search1::Find<T>(args1, exhaustive_search, false, ctx);
workspace_size = search1::GetWorkspaceSize(args1, fwd_algo1);
}
if (ddW) {
ddw = ddW->data<T>();
args2.handle = handle;
args2.idesc.set(transformed_X, iwo_group);
args2.wdesc.set(*ddW, layout, iwo_group);
args2.odesc.set(transformed_ddO_channel, iwo_group);
args2.cdesc.set(dtype, padding_common, strides, dilations, c_group);
using search2 = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
fwd_algo2 = search2::Find<T>(args2, exhaustive_search, false, ctx);
workspace_size = std::max(workspace_size,
search2::GetWorkspaceSize(args2, fwd_algo2));
}
}
if (dW && ddX) {
dw = dW->data<T>();
args3.handle = handle;
args3.idesc.set(transformed_ddX, iwo_group);
args3.wdesc.set(*dW, layout, iwo_group);
args3.odesc.set(transformed_dO_channel, iwo_group);
args3.cdesc.set(dtype, padding_common, strides, dilations, c_group);
using search3 = SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
filter_algo =
search3::Find<T>(args3, exhaustive_search, deterministic, ctx);
workspace_size = std::max(workspace_size,
search3::GetWorkspaceSize(args3, filter_algo));
}
if (ddW && dX) {
transformed_dx = transformed_dX.data<T>();
args4.handle = handle;
args4.idesc.set(transformed_dX, iwo_group);
args4.wdesc.set(*ddW, layout, iwo_group);
args4.odesc.set(transformed_dO_channel, iwo_group);
args4.cdesc.set(dtype, padding_common, strides, dilations, c_group);
using search4 = SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
data_algo =
search4::Find<T>(args4, exhaustive_search, deterministic, ctx);
workspace_size =
std::max(workspace_size, search4::GetWorkspaceSize(args4, data_algo));
}
int i_n, i_c, i_d, i_h, i_w;
GetNCDHW(transformed_X.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d, &i_h,
&i_w);
int o_n, o_c, o_d, o_h, o_w;
GetNCDHW(transformed_dO_channel.dims(), DataLayout::kNCHW, &o_n, &o_c, &o_d,
&o_h, &o_w);
int group_offset_in = i_c / groups * i_h * i_w * i_d;
int group_offset_out = o_c / groups * o_h * o_w * o_d;
int group_offset_filter = W->numel() / groups;
ScalingParamType<T> alpha = 1.0f;
ScalingParamType<T> beta = 0.0f;
    // NOTE(zhiqiu): inplace addto is not supported in double grad yet.
// ScalingParamType<T> beta = ctx.Attr<bool>("use_addto") ? 1.0f :
// 0.0f;
// VLOG(4) << "Conv_grad_grad: use_addto = " << ctx.Attr<bool>("use_addto");
auto wkspace_handle = dev_ctx.cudnn_workspace_handle();
if (ddO) {
if (ddX) {
ddx = transformed_ddX.data<T>();
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionForward(
handle, &alpha, args1.idesc.desc(),
ddx + i * group_offset_in, args1.wdesc.desc(),
w + i * group_offset_filter, args1.cdesc.desc(),
fwd_algo1, workspace_ptr, workspace_size, &beta,
args1.odesc.desc(),
transformed_ddy_channel + i * group_offset_out));
},
workspace_size);
}
}
if (ddW) {
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionForward(
handle, &alpha, args2.idesc.desc(),
x + i * group_offset_in, args2.wdesc.desc(),
ddw + i * group_offset_filter, args2.cdesc.desc(),
fwd_algo2, workspace_ptr, workspace_size, &alpha,
args2.odesc.desc(),
transformed_ddy_channel + i * group_offset_out));
},
workspace_size);
}
}
if (channel_last) {
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_ddO_channel, ddO);
}
}
T* transformed_dy_channel = transformed_dO_channel.data<T>();
if (dW && ddX) {
ddx = transformed_ddX.data<T>();
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionBackwardFilter(
handle, &alpha, args3.idesc.desc(),
ddx + i * group_offset_in, args3.odesc.desc(),
transformed_dy_channel + i * group_offset_out,
args3.cdesc.desc(), filter_algo, workspace_ptr,
workspace_size, &beta, args3.wdesc.desc(),
dw + i * group_offset_filter));
},
workspace_size);
}
}
if (dX && ddW) {
ddw = ddW->data<T>();
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionBackwardData(
handle, &alpha, args4.wdesc.desc(),
ddw + i * group_offset_filter, args4.odesc.desc(),
transformed_dy_channel + i * group_offset_out,
args4.cdesc.desc(), data_algo, workspace_ptr,
workspace_size, &beta, args4.idesc.desc(),
transformed_dx + i * group_offset_in));
},
workspace_size);
}
if (!is_sys_pad) {
        // Slice the explicit padding back off of the computed input gradient.
std::vector<int> starts(X->dims().size(), 0);
std::vector<int> axes(X->dims().size(), 0);
for (size_t i = 0; i < X->dims().size(); ++i) {
starts[i] = input_pad[2 * i];
axes[i] = i;
}
if (X->dims().size() == 4) {
RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 4>(
ctx, &transformed_dX, &transformed_dX_channel, starts, axes);
} else {
RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 5>(
ctx, &transformed_dX, &transformed_dX_channel, starts, axes);
}
}
if (channel_last) {
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_dX_channel, dX);
}
}
}
};
} // namespace operators
} // namespace paddle
namespace plat = paddle::platform;
REGISTER_OP_KERNEL(conv2d, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvOpKernel<float>,
paddle::operators::CUDNNConvOpKernel<double>,
paddle::operators::CUDNNConvOpKernel<plat::float16>);
REGISTER_OP_KERNEL(conv2d_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvGradOpKernel<float>,
paddle::operators::CUDNNConvGradOpKernel<double>,
paddle::operators::CUDNNConvGradOpKernel<plat::float16>);
REGISTER_OP_KERNEL(
conv2d_grad_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvDoubleGradOpKernel<float>,
paddle::operators::CUDNNConvDoubleGradOpKernel<double>,
paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>);
REGISTER_OP_CUDA_KERNEL(
depthwise_conv2d_grad_grad,
paddle::operators::CUDNNConvDoubleGradOpKernel<float>,
paddle::operators::CUDNNConvDoubleGradOpKernel<double>,
paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>);
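// Note: depthwise_conv2d_grad_grad above is registered with the same dense
// cuDNN double-grad kernel used for conv2d_grad_grad.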
REGISTER_OP_KERNEL(conv3d, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvOpKernel<float>,
paddle::operators::CUDNNConvOpKernel<double>,
paddle::operators::CUDNNConvOpKernel<plat::float16>);
REGISTER_OP_KERNEL(conv3d_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvGradOpKernel<float>,
paddle::operators::CUDNNConvGradOpKernel<double>);
REGISTER_OP_KERNEL(
conv3d_grad_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvDoubleGradOpKernel<float>,
paddle::operators::CUDNNConvDoubleGradOpKernel<double>,
paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>);
|
e191cbb8769f24294f7ebff5fa49b1f5891e725d.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "knn_test_helper.cuh"
namespace ML {
namespace KNN {
namespace opg {
template <>
void generate_partitions(float* data,
float* outputs,
size_t n_rows,
int n_cols,
int n_clusters,
int my_rank,
hipStream_t stream)
{
Random::make_blobs<float, int>(data,
(int*)outputs,
(int)n_rows,
(int)n_cols,
n_clusters,
stream,
true,
nullptr,
nullptr,
1.0,
-10.0,
10.0,
my_rank);
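  // make_blobs wrote integer cluster labels into the float output buffer;
  // convert them to float in place so they can serve as regression targets.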
MLCommon::LinAlg::convert_array(outputs, (int*)outputs, n_rows, stream);
}
class KNNRegressTest : public ::testing::TestWithParam<KNNParams> {
public:
bool runTest(const KNNParams& params)
{
KNNTestHelper<float> knn_th;
knn_th.generate_data(params);
/**
* Execute knn_regress()
*/
knn_regress(knn_th.handle,
&(knn_th.out_parts),
&(knn_th.out_i_parts),
&(knn_th.out_d_parts),
knn_th.index_parts,
*(knn_th.idx_desc),
knn_th.query_parts,
*(knn_th.query_desc),
knn_th.y,
false,
false,
params.k,
params.n_outputs,
params.batch_size,
true);
knn_th.display_results();
knn_th.release_ressources(params);
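    // The comparison below is trivially true (1 == 1); this test only checks
    // that the distributed knn_regress pipeline runs end to end without error.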
int actual = 1;
int expected = 1;
return raft::CompareApprox<int>(1)(actual, expected);
}
};
const std::vector<KNNParams> inputs = {{5, 1, 8, 50, 3, 2, 2, 12}};
typedef KNNRegressTest KNNReTest;
TEST_P(KNNReTest, Result) { ASSERT_TRUE(runTest(GetParam())); }
INSTANTIATE_TEST_CASE_P(KNNRegressTest, KNNReTest, ::testing::ValuesIn(inputs));
} // namespace opg
} // namespace KNN
} // namespace ML
| e191cbb8769f24294f7ebff5fa49b1f5891e725d.cu | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "knn_test_helper.cuh"
namespace ML {
namespace KNN {
namespace opg {
template <>
void generate_partitions(float* data,
float* outputs,
size_t n_rows,
int n_cols,
int n_clusters,
int my_rank,
cudaStream_t stream)
{
Random::make_blobs<float, int>(data,
(int*)outputs,
(int)n_rows,
(int)n_cols,
n_clusters,
stream,
true,
nullptr,
nullptr,
1.0,
-10.0,
10.0,
my_rank);
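  // make_blobs wrote integer cluster labels into the float output buffer;
  // convert them to float in place so they can serve as regression targets.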
MLCommon::LinAlg::convert_array(outputs, (int*)outputs, n_rows, stream);
}
class KNNRegressTest : public ::testing::TestWithParam<KNNParams> {
public:
bool runTest(const KNNParams& params)
{
KNNTestHelper<float> knn_th;
knn_th.generate_data(params);
/**
* Execute knn_regress()
*/
knn_regress(knn_th.handle,
&(knn_th.out_parts),
&(knn_th.out_i_parts),
&(knn_th.out_d_parts),
knn_th.index_parts,
*(knn_th.idx_desc),
knn_th.query_parts,
*(knn_th.query_desc),
knn_th.y,
false,
false,
params.k,
params.n_outputs,
params.batch_size,
true);
knn_th.display_results();
knn_th.release_ressources(params);
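    // The comparison below is trivially true (1 == 1); this test only checks
    // that the distributed knn_regress pipeline runs end to end without error.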
int actual = 1;
int expected = 1;
return raft::CompareApprox<int>(1)(actual, expected);
}
};
const std::vector<KNNParams> inputs = {{5, 1, 8, 50, 3, 2, 2, 12}};
typedef KNNRegressTest KNNReTest;
TEST_P(KNNReTest, Result) { ASSERT_TRUE(runTest(GetParam())); }
INSTANTIATE_TEST_CASE_P(KNNRegressTest, KNNReTest, ::testing::ValuesIn(inputs));
} // namespace opg
} // namespace KNN
} // namespace ML
|
0efea4364ad2c3b126d6c198bab18d05e993768f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/impl/PQScanMultiPassPrecomputed.cuh>
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/impl/PQCodeLoad.cuh>
#include <faiss/gpu/impl/IVFUtils.cuh>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/DeviceTensor.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/Float16.cuh>
#include <faiss/gpu/utils/LoadStoreOperators.cuh>
#include <faiss/gpu/utils/MathOperators.cuh>
#include <faiss/gpu/utils/StaticUtils.h>
#include <limits>
namespace faiss { namespace gpu {
// For precomputed codes, this calculates and loads code distances
// into smem
template <typename LookupT, typename LookupVecT>
inline __device__ void
loadPrecomputedTerm(LookupT* smem,
LookupT* term2Start,
LookupT* term3Start,
int numCodes) {
constexpr int kWordSize = sizeof(LookupVecT) / sizeof(LookupT);
// We can only use vector loads if the data is guaranteed to be
// aligned. The codes are innermost, so if it is evenly divisible,
// then any slice will be aligned.
if (numCodes % kWordSize == 0) {
constexpr int kUnroll = 2;
    // Load the data by full vector words (float4 / Half8) for efficiency, and then handle any remainder
// limitVec is the number of whole vec words we can load, in terms
// of whole blocks performing the load
int limitVec = numCodes / (kUnroll * kWordSize * blockDim.x);
limitVec *= kUnroll * blockDim.x;
LookupVecT* smemV = (LookupVecT*) smem;
LookupVecT* term2StartV = (LookupVecT*) term2Start;
LookupVecT* term3StartV = (LookupVecT*) term3Start;
for (int i = threadIdx.x; i < limitVec; i += kUnroll * blockDim.x) {
LookupVecT vals[kUnroll];
#pragma unroll
for (int j = 0; j < kUnroll; ++j) {
vals[j] =
LoadStore<LookupVecT>::load(&term2StartV[i + j * blockDim.x]);
}
#pragma unroll
for (int j = 0; j < kUnroll; ++j) {
LookupVecT q =
LoadStore<LookupVecT>::load(&term3StartV[i + j * blockDim.x]);
vals[j] = Math<LookupVecT>::add(vals[j], q);
}
#pragma unroll
for (int j = 0; j < kUnroll; ++j) {
LoadStore<LookupVecT>::store(&smemV[i + j * blockDim.x], vals[j]);
}
}
// This is where we start loading the remainder that does not evenly
// fit into kUnroll x blockDim.x
int remainder = limitVec * kWordSize;
for (int i = remainder + threadIdx.x; i < numCodes; i += blockDim.x) {
smem[i] = Math<LookupT>::add(term2Start[i], term3Start[i]);
}
} else {
// Potential unaligned load
constexpr int kUnroll = 4;
int limit = utils::roundDown(numCodes, kUnroll * blockDim.x);
int i = threadIdx.x;
for (; i < limit; i += kUnroll * blockDim.x) {
LookupT vals[kUnroll];
#pragma unroll
for (int j = 0; j < kUnroll; ++j) {
vals[j] = term2Start[i + j * blockDim.x];
}
#pragma unroll
for (int j = 0; j < kUnroll; ++j) {
vals[j] = Math<LookupT>::add(vals[j], term3Start[i + j * blockDim.x]);
}
#pragma unroll
for (int j = 0; j < kUnroll; ++j) {
smem[i + j * blockDim.x] = vals[j];
}
}
for (; i < numCodes; i += blockDim.x) {
smem[i] = Math<LookupT>::add(term2Start[i], term3Start[i]);
}
}
}
template <int NumSubQuantizers, typename LookupT, typename LookupVecT>
__global__ void
pqScanPrecomputedMultiPass(Tensor<float, 2, true> queries,
Tensor<float, 2, true> precompTerm1,
Tensor<LookupT, 3, true> precompTerm2,
Tensor<LookupT, 3, true> precompTerm3,
Tensor<int, 2, true> topQueryToCentroid,
void** listCodes,
int* listLengths,
Tensor<int, 2, true> prefixSumOffsets,
Tensor<float, 1, true> distance) {
// precomputed term 2 + 3 storage
// (sub q)(code id)
extern __shared__ char smemTerm23[];
LookupT* term23 = (LookupT*) smemTerm23;
// Each block handles a single query
auto queryId = blockIdx.y;
auto probeId = blockIdx.x;
auto codesPerSubQuantizer = precompTerm2.getSize(2);
auto precompTermSize = precompTerm2.getSize(1) * codesPerSubQuantizer;
// This is where we start writing out data
// We ensure that before the array (at offset -1), there is a 0 value
int outBase = *(prefixSumOffsets[queryId][probeId].data() - 1);
float* distanceOut = distance[outBase].data();
auto listId = topQueryToCentroid[queryId][probeId];
// Safety guard in case NaNs in input cause no list ID to be generated
if (listId == -1) {
return;
}
unsigned char* codeList = (unsigned char*) listCodes[listId];
int limit = listLengths[listId];
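  // PQ codes store one byte per sub-quantizer, packed four to a 32-bit word;
  // kNumCode32 is the number of such words per encoded vector.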
constexpr int kNumCode32 = NumSubQuantizers <= 4 ? 1 :
(NumSubQuantizers / 4);
unsigned int code32[kNumCode32];
unsigned int nextCode32[kNumCode32];
// We double-buffer the code loading, which improves memory utilization
if (threadIdx.x < limit) {
LoadCode32<NumSubQuantizers>::load(code32, codeList, threadIdx.x);
}
// Load precomputed terms 1, 2, 3
float term1 = precompTerm1[queryId][probeId];
loadPrecomputedTerm<LookupT, LookupVecT>(term23,
precompTerm2[listId].data(),
precompTerm3[queryId].data(),
precompTermSize);
// Prevent WAR dependencies
__syncthreads();
// Each thread handles one code element in the list, with a
// block-wide stride
for (int codeIndex = threadIdx.x;
codeIndex < limit;
codeIndex += blockDim.x) {
// Prefetch next codes
if (codeIndex + blockDim.x < limit) {
LoadCode32<NumSubQuantizers>::load(
nextCode32, codeList, codeIndex + blockDim.x);
}
float dist = term1;
#pragma unroll
for (int word = 0; word < kNumCode32; ++word) {
constexpr int kBytesPerCode32 =
NumSubQuantizers < 4 ? NumSubQuantizers : 4;
if (kBytesPerCode32 == 1) {
auto code = code32[0];
dist = ConvertTo<float>::to(term23[code]);
} else {
#pragma unroll
for (int byte = 0; byte < kBytesPerCode32; ++byte) {
auto code = getByte(code32[word], byte * 8, 8);
auto offset =
codesPerSubQuantizer * (word * kBytesPerCode32 + byte);
dist += ConvertTo<float>::to(term23[offset + code]);
}
}
}
// Write out intermediate distance result
// We do not maintain indices here, in order to reduce global
// memory traffic. Those are recovered in the final selection step.
distanceOut[codeIndex] = dist;
// Rotate buffers
#pragma unroll
for (int word = 0; word < kNumCode32; ++word) {
code32[word] = nextCode32[word];
}
}
}
void
runMultiPassTile(GpuResources* res,
Tensor<float, 2, true>& queries,
Tensor<float, 2, true>& precompTerm1,
NoTypeTensor<3, true>& precompTerm2,
NoTypeTensor<3, true>& precompTerm3,
Tensor<int, 2, true>& topQueryToCentroid,
bool useFloat16Lookup,
int bytesPerCode,
int numSubQuantizers,
int numSubQuantizerCodes,
thrust::device_vector<void*>& listCodes,
thrust::device_vector<void*>& listIndices,
IndicesOptions indicesOptions,
thrust::device_vector<int>& listLengths,
Tensor<char, 1, true>& thrustMem,
Tensor<int, 2, true>& prefixSumOffsets,
Tensor<float, 1, true>& allDistances,
Tensor<float, 3, true>& heapDistances,
Tensor<int, 3, true>& heapIndices,
int k,
Tensor<float, 2, true>& outDistances,
Tensor<long, 2, true>& outIndices,
hipStream_t stream) {
// Calculate offset lengths, so we know where to write out
// intermediate results
runCalcListOffsets(res, topQueryToCentroid, listLengths, prefixSumOffsets,
thrustMem, stream);
// Convert all codes to a distance, and write out (distance,
// index) values for all intermediate results
{
auto kThreadsPerBlock = 256;
auto grid = dim3(topQueryToCentroid.getSize(1),
topQueryToCentroid.getSize(0));
auto block = dim3(kThreadsPerBlock);
// pq precomputed terms (2 + 3)
auto smem = useFloat16Lookup ? sizeof(half) : sizeof(float);
smem *= numSubQuantizers * numSubQuantizerCodes;
FAISS_ASSERT(smem <= getMaxSharedMemPerBlockCurrentDevice());
#define RUN_PQ_OPT(NUM_SUB_Q, LOOKUP_T, LOOKUP_VEC_T) \
do { \
auto precompTerm2T = precompTerm2.toTensor<LOOKUP_T>(); \
auto precompTerm3T = precompTerm3.toTensor<LOOKUP_T>(); \
\
hipLaunchKernelGGL(( pqScanPrecomputedMultiPass<NUM_SUB_Q, LOOKUP_T, LOOKUP_VEC_T>) \
, dim3(grid), dim3(block), smem, stream, \
queries, \
precompTerm1, \
precompTerm2T, \
precompTerm3T, \
topQueryToCentroid, \
listCodes.data().get(), \
listLengths.data().get(), \
prefixSumOffsets, \
allDistances); \
} while (0)
#define RUN_PQ(NUM_SUB_Q) \
do { \
if (useFloat16Lookup) { \
RUN_PQ_OPT(NUM_SUB_Q, half, Half8); \
} else { \
RUN_PQ_OPT(NUM_SUB_Q, float, float4); \
} \
} while (0)
switch (bytesPerCode) {
case 1:
RUN_PQ(1);
break;
case 2:
RUN_PQ(2);
break;
case 3:
RUN_PQ(3);
break;
case 4:
RUN_PQ(4);
break;
case 8:
RUN_PQ(8);
break;
case 12:
RUN_PQ(12);
break;
case 16:
RUN_PQ(16);
break;
case 20:
RUN_PQ(20);
break;
case 24:
RUN_PQ(24);
break;
case 28:
RUN_PQ(28);
break;
case 32:
RUN_PQ(32);
break;
case 40:
RUN_PQ(40);
break;
case 48:
RUN_PQ(48);
break;
case 56:
RUN_PQ(56);
break;
case 64:
RUN_PQ(64);
break;
case 96:
RUN_PQ(96);
break;
default:
FAISS_ASSERT(false);
break;
}
CUDA_TEST_ERROR();
#undef RUN_PQ
#undef RUN_PQ_OPT
}
// k-select the output in chunks, to increase parallelism
runPass1SelectLists(prefixSumOffsets,
allDistances,
topQueryToCentroid.getSize(1),
k,
false, // L2 distance chooses smallest
heapDistances,
heapIndices,
stream);
// k-select final output
auto flatHeapDistances = heapDistances.downcastInner<2>();
auto flatHeapIndices = heapIndices.downcastInner<2>();
runPass2SelectLists(flatHeapDistances,
flatHeapIndices,
listIndices,
indicesOptions,
prefixSumOffsets,
topQueryToCentroid,
k,
false, // L2 distance chooses smallest
outDistances,
outIndices,
stream);
CUDA_TEST_ERROR();
}
void runPQScanMultiPassPrecomputed(Tensor<float, 2, true>& queries,
Tensor<float, 2, true>& precompTerm1,
NoTypeTensor<3, true>& precompTerm2,
NoTypeTensor<3, true>& precompTerm3,
Tensor<int, 2, true>& topQueryToCentroid,
bool useFloat16Lookup,
int bytesPerCode,
int numSubQuantizers,
int numSubQuantizerCodes,
thrust::device_vector<void*>& listCodes,
thrust::device_vector<void*>& listIndices,
IndicesOptions indicesOptions,
thrust::device_vector<int>& listLengths,
int maxListLength,
int k,
// output
Tensor<float, 2, true>& outDistances,
// output
Tensor<long, 2, true>& outIndices,
GpuResources* res) {
constexpr int kMinQueryTileSize = 8;
constexpr int kMaxQueryTileSize = 128;
constexpr int kThrustMemSize = 16384;
int nprobe = topQueryToCentroid.getSize(1);
auto stream = res->getDefaultStreamCurrentDevice();
// Make a reservation for Thrust to do its dirty work (global memory
// cross-block reduction space); hopefully this is large enough.
DeviceTensor<char, 1, true> thrustMem1(
res, makeTempAlloc(AllocType::Other, stream),
{kThrustMemSize});
DeviceTensor<char, 1, true> thrustMem2(
res, makeTempAlloc(AllocType::Other, stream),
{kThrustMemSize});
DeviceTensor<char, 1, true>* thrustMem[2] =
{&thrustMem1, &thrustMem2};
// How much temporary storage is available?
// If possible, we'd like to fit within the space available.
size_t sizeAvailable = res->getTempMemoryAvailableCurrentDevice();
// We run two passes of heap selection
// This is the size of the first-level heap passes
constexpr int kNProbeSplit = 8;
int pass2Chunks = ::min(nprobe, kNProbeSplit);
size_t sizeForFirstSelectPass =
pass2Chunks * k * (sizeof(float) + sizeof(int));
// How much temporary storage we need per each query
size_t sizePerQuery =
2 * // # streams
((nprobe * sizeof(int) + sizeof(int)) + // prefixSumOffsets
nprobe * maxListLength * sizeof(float) + // allDistances
sizeForFirstSelectPass);
int queryTileSize = (int) (sizeAvailable / sizePerQuery);
if (queryTileSize < kMinQueryTileSize) {
queryTileSize = kMinQueryTileSize;
} else if (queryTileSize > kMaxQueryTileSize) {
queryTileSize = kMaxQueryTileSize;
}
// FIXME: we should adjust queryTileSize to deal with this, since
// indexing is in int32
FAISS_ASSERT(queryTileSize * nprobe * maxListLength <=
std::numeric_limits<int>::max());
// Temporary memory buffers
// Make sure there is space prior to the start which will be 0, and
// will handle the boundary condition without branches
DeviceTensor<int, 1, true> prefixSumOffsetSpace1(
res, makeTempAlloc(AllocType::Other, stream),
{queryTileSize * nprobe + 1});
DeviceTensor<int, 1, true> prefixSumOffsetSpace2(
res, makeTempAlloc(AllocType::Other, stream),
{queryTileSize * nprobe + 1});
DeviceTensor<int, 2, true> prefixSumOffsets1(
prefixSumOffsetSpace1[1].data(),
{queryTileSize, nprobe});
DeviceTensor<int, 2, true> prefixSumOffsets2(
prefixSumOffsetSpace2[1].data(),
{queryTileSize, nprobe});
DeviceTensor<int, 2, true>* prefixSumOffsets[2] =
{&prefixSumOffsets1, &prefixSumOffsets2};
// Make sure the element before prefixSumOffsets is 0, since we
// depend upon simple, boundary-less indexing to get proper results
CUDA_VERIFY(hipMemsetAsync(prefixSumOffsetSpace1.data(),
0,
sizeof(int),
stream));
CUDA_VERIFY(hipMemsetAsync(prefixSumOffsetSpace2.data(),
0,
sizeof(int),
stream));
DeviceTensor<float, 1, true> allDistances1(
res, makeTempAlloc(AllocType::Other, stream),
{queryTileSize * nprobe * maxListLength});
DeviceTensor<float, 1, true> allDistances2(
res, makeTempAlloc(AllocType::Other, stream),
{queryTileSize * nprobe * maxListLength});
DeviceTensor<float, 1, true>* allDistances[2] =
{&allDistances1, &allDistances2};
DeviceTensor<float, 3, true> heapDistances1(
res, makeTempAlloc(AllocType::Other, stream),
{queryTileSize, pass2Chunks, k});
DeviceTensor<float, 3, true> heapDistances2(
res, makeTempAlloc(AllocType::Other, stream),
{queryTileSize, pass2Chunks, k});
DeviceTensor<float, 3, true>* heapDistances[2] =
{&heapDistances1, &heapDistances2};
DeviceTensor<int, 3, true> heapIndices1(
res, makeTempAlloc(AllocType::Other, stream),
{queryTileSize, pass2Chunks, k});
DeviceTensor<int, 3, true> heapIndices2(
res, makeTempAlloc(AllocType::Other, stream),
{queryTileSize, pass2Chunks, k});
DeviceTensor<int, 3, true>* heapIndices[2] =
{&heapIndices1, &heapIndices2};
auto streams = res->getAlternateStreamsCurrentDevice();
streamWait(streams, {stream});
int curStream = 0;
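  // Tile over the queries, alternating between the two alternate streams and
  // their temporary buffers so that successive tiles can overlap on the GPU.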
for (int query = 0; query < queries.getSize(0); query += queryTileSize) {
int numQueriesInTile =
::min(queryTileSize, queries.getSize(0) - query);
auto prefixSumOffsetsView =
prefixSumOffsets[curStream]->narrowOutermost(0, numQueriesInTile);
auto coarseIndicesView =
topQueryToCentroid.narrowOutermost(query, numQueriesInTile);
auto queryView =
queries.narrowOutermost(query, numQueriesInTile);
auto term1View =
precompTerm1.narrowOutermost(query, numQueriesInTile);
auto term3View =
precompTerm3.narrowOutermost(query, numQueriesInTile);
auto heapDistancesView =
heapDistances[curStream]->narrowOutermost(0, numQueriesInTile);
auto heapIndicesView =
heapIndices[curStream]->narrowOutermost(0, numQueriesInTile);
auto outDistanceView =
outDistances.narrowOutermost(query, numQueriesInTile);
auto outIndicesView =
outIndices.narrowOutermost(query, numQueriesInTile);
runMultiPassTile(res,
queryView,
term1View,
precompTerm2,
term3View,
coarseIndicesView,
useFloat16Lookup,
bytesPerCode,
numSubQuantizers,
numSubQuantizerCodes,
listCodes,
listIndices,
indicesOptions,
listLengths,
*thrustMem[curStream],
prefixSumOffsetsView,
*allDistances[curStream],
heapDistancesView,
heapIndicesView,
k,
outDistanceView,
outIndicesView,
streams[curStream]);
curStream = (curStream + 1) % 2;
}
streamWait({stream}, streams);
}
} } // namespace
| 0efea4364ad2c3b126d6c198bab18d05e993768f.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/impl/PQScanMultiPassPrecomputed.cuh>
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/impl/PQCodeLoad.cuh>
#include <faiss/gpu/impl/IVFUtils.cuh>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/DeviceTensor.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/Float16.cuh>
#include <faiss/gpu/utils/LoadStoreOperators.cuh>
#include <faiss/gpu/utils/MathOperators.cuh>
#include <faiss/gpu/utils/StaticUtils.h>
#include <limits>
namespace faiss { namespace gpu {
// For precomputed codes, this calculates and loads code distances
// into smem
template <typename LookupT, typename LookupVecT>
inline __device__ void
loadPrecomputedTerm(LookupT* smem,
LookupT* term2Start,
LookupT* term3Start,
int numCodes) {
constexpr int kWordSize = sizeof(LookupVecT) / sizeof(LookupT);
// We can only use vector loads if the data is guaranteed to be
// aligned. The codes are innermost, so if it is evenly divisible,
// then any slice will be aligned.
if (numCodes % kWordSize == 0) {
constexpr int kUnroll = 2;
    // Load the data by full vector words (float4 / Half8) for efficiency, and then handle any remainder
// limitVec is the number of whole vec words we can load, in terms
// of whole blocks performing the load
int limitVec = numCodes / (kUnroll * kWordSize * blockDim.x);
limitVec *= kUnroll * blockDim.x;
LookupVecT* smemV = (LookupVecT*) smem;
LookupVecT* term2StartV = (LookupVecT*) term2Start;
LookupVecT* term3StartV = (LookupVecT*) term3Start;
for (int i = threadIdx.x; i < limitVec; i += kUnroll * blockDim.x) {
LookupVecT vals[kUnroll];
#pragma unroll
for (int j = 0; j < kUnroll; ++j) {
vals[j] =
LoadStore<LookupVecT>::load(&term2StartV[i + j * blockDim.x]);
}
#pragma unroll
for (int j = 0; j < kUnroll; ++j) {
LookupVecT q =
LoadStore<LookupVecT>::load(&term3StartV[i + j * blockDim.x]);
vals[j] = Math<LookupVecT>::add(vals[j], q);
}
#pragma unroll
for (int j = 0; j < kUnroll; ++j) {
LoadStore<LookupVecT>::store(&smemV[i + j * blockDim.x], vals[j]);
}
}
// This is where we start loading the remainder that does not evenly
// fit into kUnroll x blockDim.x
int remainder = limitVec * kWordSize;
for (int i = remainder + threadIdx.x; i < numCodes; i += blockDim.x) {
smem[i] = Math<LookupT>::add(term2Start[i], term3Start[i]);
}
} else {
// Potential unaligned load
constexpr int kUnroll = 4;
int limit = utils::roundDown(numCodes, kUnroll * blockDim.x);
int i = threadIdx.x;
for (; i < limit; i += kUnroll * blockDim.x) {
LookupT vals[kUnroll];
#pragma unroll
for (int j = 0; j < kUnroll; ++j) {
vals[j] = term2Start[i + j * blockDim.x];
}
#pragma unroll
for (int j = 0; j < kUnroll; ++j) {
vals[j] = Math<LookupT>::add(vals[j], term3Start[i + j * blockDim.x]);
}
#pragma unroll
for (int j = 0; j < kUnroll; ++j) {
smem[i + j * blockDim.x] = vals[j];
}
}
for (; i < numCodes; i += blockDim.x) {
smem[i] = Math<LookupT>::add(term2Start[i], term3Start[i]);
}
}
}
template <int NumSubQuantizers, typename LookupT, typename LookupVecT>
__global__ void
pqScanPrecomputedMultiPass(Tensor<float, 2, true> queries,
Tensor<float, 2, true> precompTerm1,
Tensor<LookupT, 3, true> precompTerm2,
Tensor<LookupT, 3, true> precompTerm3,
Tensor<int, 2, true> topQueryToCentroid,
void** listCodes,
int* listLengths,
Tensor<int, 2, true> prefixSumOffsets,
Tensor<float, 1, true> distance) {
// precomputed term 2 + 3 storage
// (sub q)(code id)
extern __shared__ char smemTerm23[];
LookupT* term23 = (LookupT*) smemTerm23;
// Each block handles a single query
auto queryId = blockIdx.y;
auto probeId = blockIdx.x;
auto codesPerSubQuantizer = precompTerm2.getSize(2);
auto precompTermSize = precompTerm2.getSize(1) * codesPerSubQuantizer;
// This is where we start writing out data
// We ensure that before the array (at offset -1), there is a 0 value
int outBase = *(prefixSumOffsets[queryId][probeId].data() - 1);
float* distanceOut = distance[outBase].data();
auto listId = topQueryToCentroid[queryId][probeId];
// Safety guard in case NaNs in input cause no list ID to be generated
if (listId == -1) {
return;
}
unsigned char* codeList = (unsigned char*) listCodes[listId];
int limit = listLengths[listId];
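  // PQ codes store one byte per sub-quantizer, packed four to a 32-bit word;
  // kNumCode32 is the number of such words per encoded vector.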
constexpr int kNumCode32 = NumSubQuantizers <= 4 ? 1 :
(NumSubQuantizers / 4);
unsigned int code32[kNumCode32];
unsigned int nextCode32[kNumCode32];
// We double-buffer the code loading, which improves memory utilization
if (threadIdx.x < limit) {
LoadCode32<NumSubQuantizers>::load(code32, codeList, threadIdx.x);
}
// Load precomputed terms 1, 2, 3
float term1 = precompTerm1[queryId][probeId];
loadPrecomputedTerm<LookupT, LookupVecT>(term23,
precompTerm2[listId].data(),
precompTerm3[queryId].data(),
precompTermSize);
// Prevent WAR dependencies
__syncthreads();
// Each thread handles one code element in the list, with a
// block-wide stride
for (int codeIndex = threadIdx.x;
codeIndex < limit;
codeIndex += blockDim.x) {
// Prefetch next codes
if (codeIndex + blockDim.x < limit) {
LoadCode32<NumSubQuantizers>::load(
nextCode32, codeList, codeIndex + blockDim.x);
}
float dist = term1;
#pragma unroll
for (int word = 0; word < kNumCode32; ++word) {
constexpr int kBytesPerCode32 =
NumSubQuantizers < 4 ? NumSubQuantizers : 4;
if (kBytesPerCode32 == 1) {
auto code = code32[0];
dist = ConvertTo<float>::to(term23[code]);
} else {
#pragma unroll
for (int byte = 0; byte < kBytesPerCode32; ++byte) {
auto code = getByte(code32[word], byte * 8, 8);
auto offset =
codesPerSubQuantizer * (word * kBytesPerCode32 + byte);
dist += ConvertTo<float>::to(term23[offset + code]);
}
}
}
// Write out intermediate distance result
// We do not maintain indices here, in order to reduce global
// memory traffic. Those are recovered in the final selection step.
distanceOut[codeIndex] = dist;
// Rotate buffers
#pragma unroll
for (int word = 0; word < kNumCode32; ++word) {
code32[word] = nextCode32[word];
}
}
}
void
runMultiPassTile(GpuResources* res,
Tensor<float, 2, true>& queries,
Tensor<float, 2, true>& precompTerm1,
NoTypeTensor<3, true>& precompTerm2,
NoTypeTensor<3, true>& precompTerm3,
Tensor<int, 2, true>& topQueryToCentroid,
bool useFloat16Lookup,
int bytesPerCode,
int numSubQuantizers,
int numSubQuantizerCodes,
thrust::device_vector<void*>& listCodes,
thrust::device_vector<void*>& listIndices,
IndicesOptions indicesOptions,
thrust::device_vector<int>& listLengths,
Tensor<char, 1, true>& thrustMem,
Tensor<int, 2, true>& prefixSumOffsets,
Tensor<float, 1, true>& allDistances,
Tensor<float, 3, true>& heapDistances,
Tensor<int, 3, true>& heapIndices,
int k,
Tensor<float, 2, true>& outDistances,
Tensor<long, 2, true>& outIndices,
cudaStream_t stream) {
// Calculate offset lengths, so we know where to write out
// intermediate results
runCalcListOffsets(res, topQueryToCentroid, listLengths, prefixSumOffsets,
thrustMem, stream);
// Convert all codes to a distance, and write out (distance,
// index) values for all intermediate results
{
auto kThreadsPerBlock = 256;
auto grid = dim3(topQueryToCentroid.getSize(1),
topQueryToCentroid.getSize(0));
auto block = dim3(kThreadsPerBlock);
// pq precomputed terms (2 + 3)
auto smem = useFloat16Lookup ? sizeof(half) : sizeof(float);
smem *= numSubQuantizers * numSubQuantizerCodes;
FAISS_ASSERT(smem <= getMaxSharedMemPerBlockCurrentDevice());
#define RUN_PQ_OPT(NUM_SUB_Q, LOOKUP_T, LOOKUP_VEC_T) \
do { \
auto precompTerm2T = precompTerm2.toTensor<LOOKUP_T>(); \
auto precompTerm3T = precompTerm3.toTensor<LOOKUP_T>(); \
\
pqScanPrecomputedMultiPass<NUM_SUB_Q, LOOKUP_T, LOOKUP_VEC_T> \
<<<grid, block, smem, stream>>>( \
queries, \
precompTerm1, \
precompTerm2T, \
precompTerm3T, \
topQueryToCentroid, \
listCodes.data().get(), \
listLengths.data().get(), \
prefixSumOffsets, \
allDistances); \
} while (0)
#define RUN_PQ(NUM_SUB_Q) \
do { \
if (useFloat16Lookup) { \
RUN_PQ_OPT(NUM_SUB_Q, half, Half8); \
} else { \
RUN_PQ_OPT(NUM_SUB_Q, float, float4); \
} \
} while (0)
switch (bytesPerCode) {
case 1:
RUN_PQ(1);
break;
case 2:
RUN_PQ(2);
break;
case 3:
RUN_PQ(3);
break;
case 4:
RUN_PQ(4);
break;
case 8:
RUN_PQ(8);
break;
case 12:
RUN_PQ(12);
break;
case 16:
RUN_PQ(16);
break;
case 20:
RUN_PQ(20);
break;
case 24:
RUN_PQ(24);
break;
case 28:
RUN_PQ(28);
break;
case 32:
RUN_PQ(32);
break;
case 40:
RUN_PQ(40);
break;
case 48:
RUN_PQ(48);
break;
case 56:
RUN_PQ(56);
break;
case 64:
RUN_PQ(64);
break;
case 96:
RUN_PQ(96);
break;
default:
FAISS_ASSERT(false);
break;
}
CUDA_TEST_ERROR();
#undef RUN_PQ
#undef RUN_PQ_OPT
}
// k-select the output in chunks, to increase parallelism
runPass1SelectLists(prefixSumOffsets,
allDistances,
topQueryToCentroid.getSize(1),
k,
false, // L2 distance chooses smallest
heapDistances,
heapIndices,
stream);
// k-select final output
auto flatHeapDistances = heapDistances.downcastInner<2>();
auto flatHeapIndices = heapIndices.downcastInner<2>();
runPass2SelectLists(flatHeapDistances,
flatHeapIndices,
listIndices,
indicesOptions,
prefixSumOffsets,
topQueryToCentroid,
k,
false, // L2 distance chooses smallest
outDistances,
outIndices,
stream);
CUDA_TEST_ERROR();
}
void runPQScanMultiPassPrecomputed(Tensor<float, 2, true>& queries,
Tensor<float, 2, true>& precompTerm1,
NoTypeTensor<3, true>& precompTerm2,
NoTypeTensor<3, true>& precompTerm3,
Tensor<int, 2, true>& topQueryToCentroid,
bool useFloat16Lookup,
int bytesPerCode,
int numSubQuantizers,
int numSubQuantizerCodes,
thrust::device_vector<void*>& listCodes,
thrust::device_vector<void*>& listIndices,
IndicesOptions indicesOptions,
thrust::device_vector<int>& listLengths,
int maxListLength,
int k,
// output
Tensor<float, 2, true>& outDistances,
// output
Tensor<long, 2, true>& outIndices,
GpuResources* res) {
constexpr int kMinQueryTileSize = 8;
constexpr int kMaxQueryTileSize = 128;
constexpr int kThrustMemSize = 16384;
int nprobe = topQueryToCentroid.getSize(1);
auto stream = res->getDefaultStreamCurrentDevice();
// Make a reservation for Thrust to do its dirty work (global memory
// cross-block reduction space); hopefully this is large enough.
DeviceTensor<char, 1, true> thrustMem1(
res, makeTempAlloc(AllocType::Other, stream),
{kThrustMemSize});
DeviceTensor<char, 1, true> thrustMem2(
res, makeTempAlloc(AllocType::Other, stream),
{kThrustMemSize});
DeviceTensor<char, 1, true>* thrustMem[2] =
{&thrustMem1, &thrustMem2};
// How much temporary storage is available?
// If possible, we'd like to fit within the space available.
size_t sizeAvailable = res->getTempMemoryAvailableCurrentDevice();
// We run two passes of heap selection
// This is the size of the first-level heap passes
constexpr int kNProbeSplit = 8;
int pass2Chunks = std::min(nprobe, kNProbeSplit);
size_t sizeForFirstSelectPass =
pass2Chunks * k * (sizeof(float) + sizeof(int));
// How much temporary storage we need per each query
size_t sizePerQuery =
2 * // # streams
((nprobe * sizeof(int) + sizeof(int)) + // prefixSumOffsets
nprobe * maxListLength * sizeof(float) + // allDistances
sizeForFirstSelectPass);
int queryTileSize = (int) (sizeAvailable / sizePerQuery);
if (queryTileSize < kMinQueryTileSize) {
queryTileSize = kMinQueryTileSize;
} else if (queryTileSize > kMaxQueryTileSize) {
queryTileSize = kMaxQueryTileSize;
}
// FIXME: we should adjust queryTileSize to deal with this, since
// indexing is in int32
FAISS_ASSERT(queryTileSize * nprobe * maxListLength <=
std::numeric_limits<int>::max());
// Temporary memory buffers
// Make sure there is space prior to the start which will be 0, and
// will handle the boundary condition without branches
DeviceTensor<int, 1, true> prefixSumOffsetSpace1(
res, makeTempAlloc(AllocType::Other, stream),
{queryTileSize * nprobe + 1});
DeviceTensor<int, 1, true> prefixSumOffsetSpace2(
res, makeTempAlloc(AllocType::Other, stream),
{queryTileSize * nprobe + 1});
DeviceTensor<int, 2, true> prefixSumOffsets1(
prefixSumOffsetSpace1[1].data(),
{queryTileSize, nprobe});
DeviceTensor<int, 2, true> prefixSumOffsets2(
prefixSumOffsetSpace2[1].data(),
{queryTileSize, nprobe});
DeviceTensor<int, 2, true>* prefixSumOffsets[2] =
{&prefixSumOffsets1, &prefixSumOffsets2};
// Make sure the element before prefixSumOffsets is 0, since we
// depend upon simple, boundary-less indexing to get proper results
CUDA_VERIFY(cudaMemsetAsync(prefixSumOffsetSpace1.data(),
0,
sizeof(int),
stream));
CUDA_VERIFY(cudaMemsetAsync(prefixSumOffsetSpace2.data(),
0,
sizeof(int),
stream));
DeviceTensor<float, 1, true> allDistances1(
res, makeTempAlloc(AllocType::Other, stream),
{queryTileSize * nprobe * maxListLength});
DeviceTensor<float, 1, true> allDistances2(
res, makeTempAlloc(AllocType::Other, stream),
{queryTileSize * nprobe * maxListLength});
DeviceTensor<float, 1, true>* allDistances[2] =
{&allDistances1, &allDistances2};
DeviceTensor<float, 3, true> heapDistances1(
res, makeTempAlloc(AllocType::Other, stream),
{queryTileSize, pass2Chunks, k});
DeviceTensor<float, 3, true> heapDistances2(
res, makeTempAlloc(AllocType::Other, stream),
{queryTileSize, pass2Chunks, k});
DeviceTensor<float, 3, true>* heapDistances[2] =
{&heapDistances1, &heapDistances2};
DeviceTensor<int, 3, true> heapIndices1(
res, makeTempAlloc(AllocType::Other, stream),
{queryTileSize, pass2Chunks, k});
DeviceTensor<int, 3, true> heapIndices2(
res, makeTempAlloc(AllocType::Other, stream),
{queryTileSize, pass2Chunks, k});
DeviceTensor<int, 3, true>* heapIndices[2] =
{&heapIndices1, &heapIndices2};
auto streams = res->getAlternateStreamsCurrentDevice();
streamWait(streams, {stream});
int curStream = 0;
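  // Tile over the queries, alternating between the two alternate streams and
  // their temporary buffers so that successive tiles can overlap on the GPU.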
for (int query = 0; query < queries.getSize(0); query += queryTileSize) {
int numQueriesInTile =
std::min(queryTileSize, queries.getSize(0) - query);
auto prefixSumOffsetsView =
prefixSumOffsets[curStream]->narrowOutermost(0, numQueriesInTile);
auto coarseIndicesView =
topQueryToCentroid.narrowOutermost(query, numQueriesInTile);
auto queryView =
queries.narrowOutermost(query, numQueriesInTile);
auto term1View =
precompTerm1.narrowOutermost(query, numQueriesInTile);
auto term3View =
precompTerm3.narrowOutermost(query, numQueriesInTile);
auto heapDistancesView =
heapDistances[curStream]->narrowOutermost(0, numQueriesInTile);
auto heapIndicesView =
heapIndices[curStream]->narrowOutermost(0, numQueriesInTile);
auto outDistanceView =
outDistances.narrowOutermost(query, numQueriesInTile);
auto outIndicesView =
outIndices.narrowOutermost(query, numQueriesInTile);
runMultiPassTile(res,
queryView,
term1View,
precompTerm2,
term3View,
coarseIndicesView,
useFloat16Lookup,
bytesPerCode,
numSubQuantizers,
numSubQuantizerCodes,
listCodes,
listIndices,
indicesOptions,
listLengths,
*thrustMem[curStream],
prefixSumOffsetsView,
*allDistances[curStream],
heapDistancesView,
heapIndicesView,
k,
outDistanceView,
outIndicesView,
streams[curStream]);
curStream = (curStream + 1) % 2;
}
streamWait({stream}, streams);
}
} } // namespace
|
337ce4e2151ef5ba087f9801b608a88ac7534e16.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* readvalidatePSF_control.cu
*
* Created on: Apr 19, 2017
* Author: gabriel
*/
#include "0_Mainparameters.h"
string PSFDATA = "/lambda_488/Calib/system_PSF.bin";
double *double_PSF; // on host
float MaxPSF=0.0f, SumPSF = 0.0f;
void PSFprepare(void) {
char * memblock;
int size;
// XMLDocument doc;
// MaxPSF = 0.; // also used as extern
string PSFraw = resourcesdirectory + PSFDATA;
const char * PSFImagefile = "results/A_PSF.pgm";
unsigned char *i_PSF = (unsigned char *) calloc(TA.PSF_size, sizeof(unsigned char)); // on host
    double* double_PSF = nullptr; // set below to reinterpret the raw bytes read from the PSF file
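    // Managed (unified) memory: these PSF buffers are accessible from both
    // host code and device kernels without explicit copies.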
hipMallocManaged(&original_PSF, PSFZoom * PSFZoom * sizeof(float));
hipMallocManaged(&test2_psf, PSFZoom * PSFZoom * sizeof(float));
hipMallocManaged(&PSFARRAY, PSFZoom * PSFZoom * sizeof(float));
//read pPSF bin file
std::ifstream PSFile(PSFraw.c_str(), ios::in | ios::binary | ios::ate);
size = (PSFile.tellg()); // the data is stored in doubles of 8 bytes in the file
size -= byte_skipped; // removes byte_skipped
memblock = new char[size];
PSFile.seekg(byte_skipped, ios::beg); // 4 first bytes are offset
PSFile.read(memblock, size);
PSFile.close();
double_PSF = (double*) memblock; //reinterpret the chars stored in the file as double
for (int i = 0; i < TA.Nb_Rows_PSF*TA.Nb_Cols_PSF; i++) {
*(original_PSF + i) = *(double_PSF + i)+0.000001; // change to float
SumPSF += *(original_PSF+i);
if (MaxPSF < *(original_PSF + i))
MaxPSF = *(original_PSF + i); // sanity check, check max
}
verbosefile << " PSF \u24F5 Nb_Rows: " << TA.Nb_Rows_PSF << " Nb_Cols " << TA.Nb_Cols_PSF;
verbosefile << " size " << size << " Max: " << MaxPSF << " Sum " << SumPSF << std::endl;
verbosefile << " PSF \u24F5 Nb_Rows: " << TA.Nb_Rows_PSF << " Nb_Cols " << TA.Nb_Cols_PSF;
verbosefile << " size " << size << " Max: " << MaxPSF << " Sum " << SumPSF << std::endl;
// write pPSF original image to disk
/////////////////////////////////
    for (int i = 0; i < TA.PSF_size; i++)
i_PSF[i] = 255.0*original_PSF[i]/MaxPSF; // image value
verbosefile << " PSF \u24F5 function read: Path to pPSF original" << PSFImagefile << endl;
sdkSavePGM(PSFImagefile, i_PSF, TA.Nb_Rows_PSF, TA.Nb_Cols_PSF);
free(i_PSF);
}
bool PSFvalidateonhost(void) {
bool testPSF;
double MaxPSF;
double Sum3PSF = 0, max3PSF =0;
hipMallocManaged(&PSF_valid, TA.PSF_size * sizeof(float)); // representation of pPSF available in global memory
unsigned char *i_PSF = (unsigned char *) calloc(TA.PSF_size, sizeof(unsigned char)); // on host
const char * PSFValidationimage = "results/A_PSFDevice.pgm";
dim3 dimBlock(1, 1, 1);
dim3 dimGrid(1,1, 1);
// Execute the pPSF kernel
hipLaunchKernelGGL(( PSFvalidateondevice), dim3(dimGrid), dim3(dimBlock), 0, 0, TA.Nb_Rows_PSF, TA.Nb_Cols_PSF);
hipDeviceSynchronize();
for(int row = 0; row < TA.Nb_Rows_PSF; row++)
for( int col = 0; col < TA.Nb_Cols_PSF; col++){
Sum3PSF += *(PSF_valid + row*TA.Nb_Cols_PSF + col);
max3PSF = max(*(PSF_valid + row*TA.Nb_Cols_PSF + col), max3PSF);
}
verbosefile << " PSF \u24F5 Sum3PSF " << Sum3PSF << " max3PSF " << max3PSF << endl;
// write pPSF image validation to disk
/////////////////////////////////
MaxPSF = 0.0f;
    for (int i = 0; i < TA.PSF_size; i++) {
MaxPSF = max(MaxPSF, PSF_valid[i]); // sanity check, check max
}
    for (int i = 0; i < TA.PSF_size; i++)
i_PSF[i] = 255.0*PSF_valid[i]/MaxPSF; // Validation image value
verbosefile << " PSF \u24F5 Path to pPSF validation ..." << PSFValidationimage << endl;
sdkSavePGM(PSFValidationimage, i_PSF, TA.Nb_Rows_PSF, TA.Nb_Cols_PSF);
verbosefile << " PSF \u24F5 Comparing files ... \n";
testPSF = compareData(PSF_valid,
original_PSF,
TA.Nb_Cols_PSF*TA.Nb_Rows_PSF,
MAX_EPSILON_ERROR/1000,
0.15f);
for (int jPSF = 0; jPSF < TA.PSF_size; jPSF++)
Sumdel[1] += fabsf(*(PSF_valid+jPSF)- *(original_PSF+jPSF));
hipFree(PSF_valid);
return(testPSF);
}
| 337ce4e2151ef5ba087f9801b608a88ac7534e16.cu | /*
* readvalidatePSF_control.cu
*
* Created on: Apr 19, 2017
* Author: gabriel
*/
#include "0_Mainparameters.h"
string PSFDATA = "/lambda_488/Calib/system_PSF.bin";
double *double_PSF; // on host
float MaxPSF=0.0f, SumPSF = 0.0f;
void PSFprepare(void) {
char * memblock;
int size;
// XMLDocument doc;
// MaxPSF = 0.; // also used as extern
string PSFraw = resourcesdirectory + PSFDATA;
const char * PSFImagefile = "results/A_PSF.pgm";
unsigned char *i_PSF = (unsigned char *) calloc(TA.PSF_size, sizeof(unsigned char)); // on host
    double* double_PSF = nullptr; // set below to reinterpret the raw bytes read from the PSF file
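    // Managed (unified) memory: these PSF buffers are accessible from both
    // host code and device kernels without explicit copies.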
cudaMallocManaged(&original_PSF, PSFZoom * PSFZoom * sizeof(float));
cudaMallocManaged(&test2_psf, PSFZoom * PSFZoom * sizeof(float));
cudaMallocManaged(&PSFARRAY, PSFZoom * PSFZoom * sizeof(float));
//read pPSF bin file
std::ifstream PSFile(PSFraw.c_str(), ios::in | ios::binary | ios::ate);
size = (PSFile.tellg()); // the data is stored in doubles of 8 bytes in the file
size -= byte_skipped; // removes byte_skipped
memblock = new char[size];
PSFile.seekg(byte_skipped, ios::beg); // 4 first bytes are offset
PSFile.read(memblock, size);
PSFile.close();
double_PSF = (double*) memblock; //reinterpret the chars stored in the file as double
for (int i = 0; i < TA.Nb_Rows_PSF*TA.Nb_Cols_PSF; i++) {
*(original_PSF + i) = *(double_PSF + i)+0.000001; // change to float
SumPSF += *(original_PSF+i);
if (MaxPSF < *(original_PSF + i))
MaxPSF = *(original_PSF + i); // sanity check, check max
}
verbosefile << " PSF \u24F5 Nb_Rows: " << TA.Nb_Rows_PSF << " Nb_Cols " << TA.Nb_Cols_PSF;
verbosefile << " size " << size << " Max: " << MaxPSF << " Sum " << SumPSF << std::endl;
verbosefile << " PSF \u24F5 Nb_Rows: " << TA.Nb_Rows_PSF << " Nb_Cols " << TA.Nb_Cols_PSF;
verbosefile << " size " << size << " Max: " << MaxPSF << " Sum " << SumPSF << std::endl;
// write pPSF original image to disk
/////////////////////////////////
    for (int i = 0; i < TA.PSF_size; i++)
i_PSF[i] = 255.0*original_PSF[i]/MaxPSF; // image value
verbosefile << " PSF \u24F5 function read: Path to pPSF original" << PSFImagefile << endl;
sdkSavePGM(PSFImagefile, i_PSF, TA.Nb_Rows_PSF, TA.Nb_Cols_PSF);
free(i_PSF);
}
bool PSFvalidateonhost(void) {
bool testPSF;
double MaxPSF;
double Sum3PSF = 0, max3PSF =0;
cudaMallocManaged(&PSF_valid, TA.PSF_size * sizeof(float)); // representation of pPSF available in global memory
unsigned char *i_PSF = (unsigned char *) calloc(TA.PSF_size, sizeof(unsigned char)); // on host
const char * PSFValidationimage = "results/A_PSFDevice.pgm";
dim3 dimBlock(1, 1, 1);
dim3 dimGrid(1,1, 1);
// Execute the pPSF kernel
PSFvalidateondevice<<<dimGrid, dimBlock, 0>>>( TA.Nb_Rows_PSF, TA.Nb_Cols_PSF);
cudaDeviceSynchronize();
for(int row = 0; row < TA.Nb_Rows_PSF; row++)
for( int col = 0; col < TA.Nb_Cols_PSF; col++){
Sum3PSF += *(PSF_valid + row*TA.Nb_Cols_PSF + col);
max3PSF = max(*(PSF_valid + row*TA.Nb_Cols_PSF + col), max3PSF);
}
verbosefile << " PSF \u24F5 Sum3PSF " << Sum3PSF << " max3PSF " << max3PSF << endl;
// write pPSF image validation to disk
/////////////////////////////////
MaxPSF = 0.0f;
    for (int i = 0; i < TA.PSF_size; i++) {
MaxPSF = max(MaxPSF, PSF_valid[i]); // sanity check, check max
}
    for (int i = 0; i < TA.PSF_size; i++)
i_PSF[i] = 255.0*PSF_valid[i]/MaxPSF; // Validation image value
verbosefile << " PSF \u24F5 Path to pPSF validation ..." << PSFValidationimage << endl;
sdkSavePGM(PSFValidationimage, i_PSF, TA.Nb_Rows_PSF, TA.Nb_Cols_PSF);
verbosefile << " PSF \u24F5 Comparing files ... \n";
testPSF = compareData(PSF_valid,
original_PSF,
TA.Nb_Cols_PSF*TA.Nb_Rows_PSF,
MAX_EPSILON_ERROR/1000,
0.15f);
for (int jPSF = 0; jPSF < TA.PSF_size; jPSF++)
Sumdel[1] += fabsf(*(PSF_valid+jPSF)- *(original_PSF+jPSF));
cudaFree(PSF_valid);
return(testPSF);
}
|
aec1a3092c27a4f5f52ec348d1c19bace322fd31.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
__global__
void solve_jit(double *rateConst, double *state, double *deriv, int numcell)
{
size_t tid;
double rate;
tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < numcell) {
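    // One grid cell per thread. For each reaction the rate is its rate
    // constant times the product of its reactant concentrations; that rate is
    // subtracted from each reactant's derivative and added to each product's.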
rate = rateConst[tid*50+0];
rate *= state[tid*20+8];
rate *= state[tid*20+8];
rate *= state[tid*20+18];
deriv[tid*20+8] -= rate;
deriv[tid*20+8] -= rate;
deriv[tid*20+18] -= rate;
deriv[tid*20+14] += rate;
rate = rateConst[tid*50+1];
rate *= state[tid*20+5];
rate *= state[tid*20+15];
rate *= state[tid*20+4];
deriv[tid*20+5] -= rate;
deriv[tid*20+15] -= rate;
deriv[tid*20+4] -= rate;
deriv[tid*20+12] += rate;
deriv[tid*20+16] += rate;
deriv[tid*20+12] += rate;
deriv[tid*20+4] += rate;
deriv[tid*20+13] += rate;
deriv[tid*20+18] += rate;
deriv[tid*20+12] += rate;
deriv[tid*20+12] += rate;
rate = rateConst[tid*50+2];
rate *= state[tid*20+10];
rate *= state[tid*20+14];
rate *= state[tid*20+10];
deriv[tid*20+10] -= rate;
deriv[tid*20+14] -= rate;
deriv[tid*20+10] -= rate;
deriv[tid*20+12] += rate;
deriv[tid*20+14] += rate;
deriv[tid*20+13] += rate;
deriv[tid*20+1] += rate;
deriv[tid*20+18] += rate;
deriv[tid*20+5] += rate;
deriv[tid*20+11] += rate;
deriv[tid*20+9] += rate;
deriv[tid*20+7] += rate;
deriv[tid*20+0] += rate;
rate = rateConst[tid*50+3];
rate *= state[tid*20+14];
rate *= state[tid*20+7];
deriv[tid*20+14] -= rate;
deriv[tid*20+7] -= rate;
deriv[tid*20+14] += rate;
deriv[tid*20+12] += rate;
deriv[tid*20+2] += rate;
deriv[tid*20+10] += rate;
deriv[tid*20+16] += rate;
deriv[tid*20+11] += rate;
deriv[tid*20+2] += rate;
deriv[tid*20+13] += rate;
rate = rateConst[tid*50+4];
rate *= state[tid*20+5];
rate *= state[tid*20+16];
deriv[tid*20+5] -= rate;
deriv[tid*20+16] -= rate;
deriv[tid*20+5] += rate;
rate = rateConst[tid*50+5];
rate *= state[tid*20+11];
rate *= state[tid*20+8];
deriv[tid*20+11] -= rate;
deriv[tid*20+8] -= rate;
deriv[tid*20+2] += rate;
deriv[tid*20+5] += rate;
deriv[tid*20+2] += rate;
deriv[tid*20+3] += rate;
deriv[tid*20+16] += rate;
deriv[tid*20+19] += rate;
deriv[tid*20+15] += rate;
deriv[tid*20+5] += rate;
deriv[tid*20+19] += rate;
rate = rateConst[tid*50+6];
rate *= state[tid*20+16];
rate *= state[tid*20+1];
rate *= state[tid*20+14];
deriv[tid*20+16] -= rate;
deriv[tid*20+1] -= rate;
deriv[tid*20+14] -= rate;
deriv[tid*20+3] += rate;
deriv[tid*20+13] += rate;
deriv[tid*20+17] += rate;
deriv[tid*20+13] += rate;
deriv[tid*20+10] += rate;
deriv[tid*20+8] += rate;
rate = rateConst[tid*50+7];
rate *= state[tid*20+12];
rate *= state[tid*20+8];
deriv[tid*20+12] -= rate;
deriv[tid*20+8] -= rate;
deriv[tid*20+0] += rate;
deriv[tid*20+0] += rate;
deriv[tid*20+6] += rate;
deriv[tid*20+19] += rate;
rate = rateConst[tid*50+8];
rate *= state[tid*20+7];
rate *= state[tid*20+12];
deriv[tid*20+7] -= rate;
deriv[tid*20+12] -= rate;
deriv[tid*20+15] += rate;
deriv[tid*20+1] += rate;
deriv[tid*20+8] += rate;
deriv[tid*20+11] += rate;
deriv[tid*20+13] += rate;
deriv[tid*20+15] += rate;
deriv[tid*20+8] += rate;
deriv[tid*20+12] += rate;
rate = rateConst[tid*50+9];
rate *= state[tid*20+0];
rate *= state[tid*20+3];
deriv[tid*20+0] -= rate;
deriv[tid*20+3] -= rate;
deriv[tid*20+10] += rate;
deriv[tid*20+3] += rate;
deriv[tid*20+9] += rate;
deriv[tid*20+19] += rate;
deriv[tid*20+9] += rate;
deriv[tid*20+11] += rate;
deriv[tid*20+19] += rate;
rate = rateConst[tid*50+10];
rate *= state[tid*20+3];
rate *= state[tid*20+5];
rate *= state[tid*20+18];
deriv[tid*20+3] -= rate;
deriv[tid*20+5] -= rate;
deriv[tid*20+18] -= rate;
deriv[tid*20+3] += rate;
deriv[tid*20+11] += rate;
deriv[tid*20+17] += rate;
deriv[tid*20+5] += rate;
deriv[tid*20+1] += rate;
deriv[tid*20+17] += rate;
deriv[tid*20+10] += rate;
rate = rateConst[tid*50+11];
rate *= state[tid*20+18];
rate *= state[tid*20+19];
deriv[tid*20+18] -= rate;
deriv[tid*20+19] -= rate;
deriv[tid*20+11] += rate;
deriv[tid*20+13] += rate;
deriv[tid*20+7] += rate;
deriv[tid*20+15] += rate;
deriv[tid*20+15] += rate;
deriv[tid*20+15] += rate;
deriv[tid*20+16] += rate;
deriv[tid*20+10] += rate;
deriv[tid*20+6] += rate;
rate = rateConst[tid*50+12];
rate *= state[tid*20+5];
rate *= state[tid*20+0];
rate *= state[tid*20+14];
deriv[tid*20+5] -= rate;
deriv[tid*20+0] -= rate;
deriv[tid*20+14] -= rate;
deriv[tid*20+5] += rate;
deriv[tid*20+9] += rate;
rate = rateConst[tid*50+13];
rate *= state[tid*20+15];
rate *= state[tid*20+11];
deriv[tid*20+15] -= rate;
deriv[tid*20+11] -= rate;
deriv[tid*20+4] += rate;
deriv[tid*20+18] += rate;
deriv[tid*20+8] += rate;
deriv[tid*20+9] += rate;
deriv[tid*20+11] += rate;
deriv[tid*20+5] += rate;
deriv[tid*20+19] += rate;
deriv[tid*20+7] += rate;
deriv[tid*20+16] += rate;
rate = rateConst[tid*50+14];
rate *= state[tid*20+7];
rate *= state[tid*20+14];
rate *= state[tid*20+6];
deriv[tid*20+7] -= rate;
deriv[tid*20+14] -= rate;
deriv[tid*20+6] -= rate;
deriv[tid*20+3] += rate;
deriv[tid*20+9] += rate;
deriv[tid*20+1] += rate;
deriv[tid*20+11] += rate;
deriv[tid*20+12] += rate;
deriv[tid*20+19] += rate;
deriv[tid*20+2] += rate;
deriv[tid*20+15] += rate;
deriv[tid*20+5] += rate;
rate = rateConst[tid*50+15];
rate *= state[tid*20+2];
rate *= state[tid*20+4];
rate *= state[tid*20+10];
deriv[tid*20+2] -= rate;
deriv[tid*20+4] -= rate;
deriv[tid*20+10] -= rate;
deriv[tid*20+2] += rate;
rate = rateConst[tid*50+16];
rate *= state[tid*20+6];
rate *= state[tid*20+2];
rate *= state[tid*20+2];
deriv[tid*20+6] -= rate;
deriv[tid*20+2] -= rate;
deriv[tid*20+2] -= rate;
deriv[tid*20+8] += rate;
deriv[tid*20+13] += rate;
rate = rateConst[tid*50+17];
rate *= state[tid*20+13];
rate *= state[tid*20+15];
rate *= state[tid*20+1];
deriv[tid*20+13] -= rate;
deriv[tid*20+15] -= rate;
deriv[tid*20+1] -= rate;
deriv[tid*20+11] += rate;
deriv[tid*20+15] += rate;
deriv[tid*20+15] += rate;
deriv[tid*20+9] += rate;
deriv[tid*20+10] += rate;
deriv[tid*20+17] += rate;
deriv[tid*20+11] += rate;
deriv[tid*20+1] += rate;
deriv[tid*20+9] += rate;
deriv[tid*20+2] += rate;
rate = rateConst[tid*50+18];
rate *= state[tid*20+19];
rate *= state[tid*20+10];
rate *= state[tid*20+6];
deriv[tid*20+19] -= rate;
deriv[tid*20+10] -= rate;
deriv[tid*20+6] -= rate;
deriv[tid*20+1] += rate;
deriv[tid*20+7] += rate;
deriv[tid*20+17] += rate;
deriv[tid*20+16] += rate;
deriv[tid*20+18] += rate;
rate = rateConst[tid*50+19];
rate *= state[tid*20+12];
rate *= state[tid*20+4];
deriv[tid*20+12] -= rate;
deriv[tid*20+4] -= rate;
deriv[tid*20+2] += rate;
deriv[tid*20+6] += rate;
deriv[tid*20+16] += rate;
deriv[tid*20+14] += rate;
deriv[tid*20+11] += rate;
rate = rateConst[tid*50+20];
rate *= state[tid*20+2];
rate *= state[tid*20+18];
rate *= state[tid*20+2];
deriv[tid*20+2] -= rate;
deriv[tid*20+18] -= rate;
deriv[tid*20+2] -= rate;
deriv[tid*20+12] += rate;
deriv[tid*20+8] += rate;
deriv[tid*20+19] += rate;
deriv[tid*20+15] += rate;
deriv[tid*20+1] += rate;
deriv[tid*20+0] += rate;
rate = rateConst[tid*50+21];
rate *= state[tid*20+17];
rate *= state[tid*20+17];
rate *= state[tid*20+7];
deriv[tid*20+17] -= rate;
deriv[tid*20+17] -= rate;
deriv[tid*20+7] -= rate;
deriv[tid*20+3] += rate;
deriv[tid*20+10] += rate;
deriv[tid*20+14] += rate;
deriv[tid*20+12] += rate;
deriv[tid*20+6] += rate;
deriv[tid*20+5] += rate;
deriv[tid*20+3] += rate;
deriv[tid*20+1] += rate;
rate = rateConst[tid*50+22];
rate *= state[tid*20+15];
rate *= state[tid*20+15];
rate *= state[tid*20+15];
deriv[tid*20+15] -= rate;
deriv[tid*20+15] -= rate;
deriv[tid*20+15] -= rate;
deriv[tid*20+1] += rate;
deriv[tid*20+6] += rate;
deriv[tid*20+19] += rate;
deriv[tid*20+8] += rate;
deriv[tid*20+9] += rate;
deriv[tid*20+9] += rate;
deriv[tid*20+11] += rate;
deriv[tid*20+13] += rate;
deriv[tid*20+18] += rate;
deriv[tid*20+10] += rate;
rate = rateConst[tid*50+23];
rate *= state[tid*20+3];
rate *= state[tid*20+17];
deriv[tid*20+3] -= rate;
deriv[tid*20+17] -= rate;
deriv[tid*20+8] += rate;
deriv[tid*20+0] += rate;
rate = rateConst[tid*50+24];
rate *= state[tid*20+3];
rate *= state[tid*20+17];
deriv[tid*20+3] -= rate;
deriv[tid*20+17] -= rate;
deriv[tid*20+14] += rate;
deriv[tid*20+8] += rate;
deriv[tid*20+3] += rate;
deriv[tid*20+19] += rate;
deriv[tid*20+11] += rate;
deriv[tid*20+16] += rate;
deriv[tid*20+9] += rate;
deriv[tid*20+2] += rate;
deriv[tid*20+4] += rate;
rate = rateConst[tid*50+25];
rate *= state[tid*20+5];
rate *= state[tid*20+3];
deriv[tid*20+5] -= rate;
deriv[tid*20+3] -= rate;
deriv[tid*20+9] += rate;
deriv[tid*20+14] += rate;
deriv[tid*20+12] += rate;
deriv[tid*20+18] += rate;
deriv[tid*20+5] += rate;
deriv[tid*20+5] += rate;
deriv[tid*20+8] += rate;
deriv[tid*20+7] += rate;
deriv[tid*20+5] += rate;
rate = rateConst[tid*50+26];
rate *= state[tid*20+2];
rate *= state[tid*20+8];
deriv[tid*20+2] -= rate;
deriv[tid*20+8] -= rate;
deriv[tid*20+2] += rate;
rate = rateConst[tid*50+27];
rate *= state[tid*20+6];
rate *= state[tid*20+18];
rate *= state[tid*20+3];
deriv[tid*20+6] -= rate;
deriv[tid*20+18] -= rate;
deriv[tid*20+3] -= rate;
deriv[tid*20+14] += rate;
deriv[tid*20+13] += rate;
deriv[tid*20+14] += rate;
deriv[tid*20+17] += rate;
deriv[tid*20+10] += rate;
deriv[tid*20+15] += rate;
deriv[tid*20+11] += rate;
deriv[tid*20+6] += rate;
deriv[tid*20+12] += rate;
rate = rateConst[tid*50+28];
rate *= state[tid*20+15];
rate *= state[tid*20+10];
rate *= state[tid*20+17];
deriv[tid*20+15] -= rate;
deriv[tid*20+10] -= rate;
deriv[tid*20+17] -= rate;
deriv[tid*20+7] += rate;
deriv[tid*20+9] += rate;
rate = rateConst[tid*50+29];
rate *= state[tid*20+9];
rate *= state[tid*20+2];
deriv[tid*20+9] -= rate;
deriv[tid*20+2] -= rate;
deriv[tid*20+1] += rate;
deriv[tid*20+9] += rate;
deriv[tid*20+4] += rate;
deriv[tid*20+15] += rate;
deriv[tid*20+10] += rate;
rate = rateConst[tid*50+30];
rate *= state[tid*20+10];
rate *= state[tid*20+5];
rate *= state[tid*20+14];
deriv[tid*20+10] -= rate;
deriv[tid*20+5] -= rate;
deriv[tid*20+14] -= rate;
deriv[tid*20+13] += rate;
deriv[tid*20+11] += rate;
deriv[tid*20+0] += rate;
deriv[tid*20+0] += rate;
deriv[tid*20+0] += rate;
deriv[tid*20+2] += rate;
deriv[tid*20+15] += rate;
rate = rateConst[tid*50+31];
rate *= state[tid*20+19];
rate *= state[tid*20+5];
rate *= state[tid*20+19];
deriv[tid*20+19] -= rate;
deriv[tid*20+5] -= rate;
deriv[tid*20+19] -= rate;
deriv[tid*20+14] += rate;
deriv[tid*20+15] += rate;
deriv[tid*20+9] += rate;
deriv[tid*20+13] += rate;
deriv[tid*20+16] += rate;
deriv[tid*20+3] += rate;
deriv[tid*20+9] += rate;
deriv[tid*20+6] += rate;
deriv[tid*20+17] += rate;
rate = rateConst[tid*50+32];
rate *= state[tid*20+14];
rate *= state[tid*20+18];
deriv[tid*20+14] -= rate;
deriv[tid*20+18] -= rate;
deriv[tid*20+9] += rate;
deriv[tid*20+13] += rate;
deriv[tid*20+14] += rate;
deriv[tid*20+0] += rate;
deriv[tid*20+18] += rate;
deriv[tid*20+1] += rate;
deriv[tid*20+5] += rate;
deriv[tid*20+1] += rate;
rate = rateConst[tid*50+33];
rate *= state[tid*20+13];
rate *= state[tid*20+3];
rate *= state[tid*20+13];
deriv[tid*20+13] -= rate;
deriv[tid*20+3] -= rate;
deriv[tid*20+13] -= rate;
deriv[tid*20+16] += rate;
deriv[tid*20+3] += rate;
deriv[tid*20+12] += rate;
deriv[tid*20+1] += rate;
deriv[tid*20+14] += rate;
deriv[tid*20+19] += rate;
rate = rateConst[tid*50+34];
rate *= state[tid*20+12];
rate *= state[tid*20+17];
rate *= state[tid*20+7];
deriv[tid*20+12] -= rate;
deriv[tid*20+17] -= rate;
deriv[tid*20+7] -= rate;
deriv[tid*20+2] += rate;
deriv[tid*20+15] += rate;
deriv[tid*20+16] += rate;
deriv[tid*20+4] += rate;
rate = rateConst[tid*50+35];
rate *= state[tid*20+2];
rate *= state[tid*20+12];
rate *= state[tid*20+3];
deriv[tid*20+2] -= rate;
deriv[tid*20+12] -= rate;
deriv[tid*20+3] -= rate;
deriv[tid*20+9] += rate;
rate = rateConst[tid*50+36];
rate *= state[tid*20+10];
rate *= state[tid*20+10];
deriv[tid*20+10] -= rate;
deriv[tid*20+10] -= rate;
deriv[tid*20+6] += rate;
deriv[tid*20+3] += rate;
deriv[tid*20+7] += rate;
deriv[tid*20+11] += rate;
rate = rateConst[tid*50+37];
rate *= state[tid*20+8];
rate *= state[tid*20+1];
deriv[tid*20+8] -= rate;
deriv[tid*20+1] -= rate;
deriv[tid*20+17] += rate;
deriv[tid*20+1] += rate;
deriv[tid*20+15] += rate;
rate = rateConst[tid*50+38];
rate *= state[tid*20+19];
rate *= state[tid*20+8];
deriv[tid*20+19] -= rate;
deriv[tid*20+8] -= rate;
deriv[tid*20+0] += rate;
deriv[tid*20+18] += rate;
deriv[tid*20+2] += rate;
rate = rateConst[tid*50+39];
rate *= state[tid*20+7];
rate *= state[tid*20+10];
rate *= state[tid*20+17];
deriv[tid*20+7] -= rate;
deriv[tid*20+10] -= rate;
deriv[tid*20+17] -= rate;
deriv[tid*20+9] += rate;
deriv[tid*20+15] += rate;
deriv[tid*20+0] += rate;
deriv[tid*20+18] += rate;
deriv[tid*20+19] += rate;
rate = rateConst[tid*50+40];
rate *= state[tid*20+1];
rate *= state[tid*20+12];
deriv[tid*20+1] -= rate;
deriv[tid*20+12] -= rate;
deriv[tid*20+3] += rate;
rate = rateConst[tid*50+41];
rate *= state[tid*20+1];
rate *= state[tid*20+1];
rate *= state[tid*20+4];
deriv[tid*20+1] -= rate;
deriv[tid*20+1] -= rate;
deriv[tid*20+4] -= rate;
deriv[tid*20+2] += rate;
deriv[tid*20+10] += rate;
deriv[tid*20+17] += rate;
deriv[tid*20+18] += rate;
deriv[tid*20+6] += rate;
deriv[tid*20+0] += rate;
deriv[tid*20+9] += rate;
rate = rateConst[tid*50+42];
rate *= state[tid*20+7];
rate *= state[tid*20+9];
rate *= state[tid*20+7];
deriv[tid*20+7] -= rate;
deriv[tid*20+9] -= rate;
deriv[tid*20+7] -= rate;
deriv[tid*20+13] += rate;
deriv[tid*20+8] += rate;
deriv[tid*20+18] += rate;
rate = rateConst[tid*50+43];
rate *= state[tid*20+13];
rate *= state[tid*20+3];
deriv[tid*20+13] -= rate;
deriv[tid*20+3] -= rate;
deriv[tid*20+0] += rate;
rate = rateConst[tid*50+44];
rate *= state[tid*20+1];
rate *= state[tid*20+6];
deriv[tid*20+1] -= rate;
deriv[tid*20+6] -= rate;
deriv[tid*20+13] += rate;
deriv[tid*20+16] += rate;
deriv[tid*20+15] += rate;
deriv[tid*20+9] += rate;
deriv[tid*20+18] += rate;
deriv[tid*20+8] += rate;
deriv[tid*20+5] += rate;
deriv[tid*20+12] += rate;
deriv[tid*20+11] += rate;
deriv[tid*20+15] += rate;
rate = rateConst[tid*50+45];
rate *= state[tid*20+7];
rate *= state[tid*20+11];
deriv[tid*20+7] -= rate;
deriv[tid*20+11] -= rate;
deriv[tid*20+16] += rate;
deriv[tid*20+19] += rate;
deriv[tid*20+10] += rate;
deriv[tid*20+18] += rate;
deriv[tid*20+18] += rate;
deriv[tid*20+4] += rate;
deriv[tid*20+6] += rate;
deriv[tid*20+16] += rate;
rate = rateConst[tid*50+46];
rate *= state[tid*20+1];
rate *= state[tid*20+9];
deriv[tid*20+1] -= rate;
deriv[tid*20+9] -= rate;
deriv[tid*20+9] += rate;
deriv[tid*20+18] += rate;
deriv[tid*20+10] += rate;
deriv[tid*20+10] += rate;
deriv[tid*20+4] += rate;
deriv[tid*20+16] += rate;
deriv[tid*20+6] += rate;
deriv[tid*20+11] += rate;
deriv[tid*20+5] += rate;
rate = rateConst[tid*50+47];
rate *= state[tid*20+3];
rate *= state[tid*20+9];
deriv[tid*20+3] -= rate;
deriv[tid*20+9] -= rate;
deriv[tid*20+2] += rate;
deriv[tid*20+18] += rate;
rate = rateConst[tid*50+48];
rate *= state[tid*20+5];
rate *= state[tid*20+15];
rate *= state[tid*20+8];
deriv[tid*20+5] -= rate;
deriv[tid*20+15] -= rate;
deriv[tid*20+8] -= rate;
deriv[tid*20+4] += rate;
deriv[tid*20+5] += rate;
deriv[tid*20+18] += rate;
rate = rateConst[tid*50+49];
rate *= state[tid*20+17];
rate *= state[tid*20+11];
deriv[tid*20+17] -= rate;
deriv[tid*20+11] -= rate;
deriv[tid*20+8] += rate;
deriv[tid*20+5] += rate;
}
}
| aec1a3092c27a4f5f52ec348d1c19bace322fd31.cu |
extern "C"
__global__
void solve_jit(double *rateConst, double *state, double *deriv, int numcell)
{
size_t tid;
double rate;
tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < numcell) {
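// Each thread evaluates the chemistry right-hand side for one grid cell:
// `state` holds 20 species concentrations and `rateConst` holds 50 rate
// constants per cell. Each group of statements below computes one
// mass-action reaction rate (constant times the reactant concentrations)
// and accumulates its contribution into the species derivatives in `deriv`.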
rate = rateConst[tid*50+0];
rate *= state[tid*20+8];
rate *= state[tid*20+8];
rate *= state[tid*20+18];
deriv[tid*20+8] -= rate;
deriv[tid*20+8] -= rate;
deriv[tid*20+18] -= rate;
deriv[tid*20+14] += rate;
rate = rateConst[tid*50+1];
rate *= state[tid*20+5];
rate *= state[tid*20+15];
rate *= state[tid*20+4];
deriv[tid*20+5] -= rate;
deriv[tid*20+15] -= rate;
deriv[tid*20+4] -= rate;
deriv[tid*20+12] += rate;
deriv[tid*20+16] += rate;
deriv[tid*20+12] += rate;
deriv[tid*20+4] += rate;
deriv[tid*20+13] += rate;
deriv[tid*20+18] += rate;
deriv[tid*20+12] += rate;
deriv[tid*20+12] += rate;
rate = rateConst[tid*50+2];
rate *= state[tid*20+10];
rate *= state[tid*20+14];
rate *= state[tid*20+10];
deriv[tid*20+10] -= rate;
deriv[tid*20+14] -= rate;
deriv[tid*20+10] -= rate;
deriv[tid*20+12] += rate;
deriv[tid*20+14] += rate;
deriv[tid*20+13] += rate;
deriv[tid*20+1] += rate;
deriv[tid*20+18] += rate;
deriv[tid*20+5] += rate;
deriv[tid*20+11] += rate;
deriv[tid*20+9] += rate;
deriv[tid*20+7] += rate;
deriv[tid*20+0] += rate;
rate = rateConst[tid*50+3];
rate *= state[tid*20+14];
rate *= state[tid*20+7];
deriv[tid*20+14] -= rate;
deriv[tid*20+7] -= rate;
deriv[tid*20+14] += rate;
deriv[tid*20+12] += rate;
deriv[tid*20+2] += rate;
deriv[tid*20+10] += rate;
deriv[tid*20+16] += rate;
deriv[tid*20+11] += rate;
deriv[tid*20+2] += rate;
deriv[tid*20+13] += rate;
rate = rateConst[tid*50+4];
rate *= state[tid*20+5];
rate *= state[tid*20+16];
deriv[tid*20+5] -= rate;
deriv[tid*20+16] -= rate;
deriv[tid*20+5] += rate;
rate = rateConst[tid*50+5];
rate *= state[tid*20+11];
rate *= state[tid*20+8];
deriv[tid*20+11] -= rate;
deriv[tid*20+8] -= rate;
deriv[tid*20+2] += rate;
deriv[tid*20+5] += rate;
deriv[tid*20+2] += rate;
deriv[tid*20+3] += rate;
deriv[tid*20+16] += rate;
deriv[tid*20+19] += rate;
deriv[tid*20+15] += rate;
deriv[tid*20+5] += rate;
deriv[tid*20+19] += rate;
rate = rateConst[tid*50+6];
rate *= state[tid*20+16];
rate *= state[tid*20+1];
rate *= state[tid*20+14];
deriv[tid*20+16] -= rate;
deriv[tid*20+1] -= rate;
deriv[tid*20+14] -= rate;
deriv[tid*20+3] += rate;
deriv[tid*20+13] += rate;
deriv[tid*20+17] += rate;
deriv[tid*20+13] += rate;
deriv[tid*20+10] += rate;
deriv[tid*20+8] += rate;
rate = rateConst[tid*50+7];
rate *= state[tid*20+12];
rate *= state[tid*20+8];
deriv[tid*20+12] -= rate;
deriv[tid*20+8] -= rate;
deriv[tid*20+0] += rate;
deriv[tid*20+0] += rate;
deriv[tid*20+6] += rate;
deriv[tid*20+19] += rate;
rate = rateConst[tid*50+8];
rate *= state[tid*20+7];
rate *= state[tid*20+12];
deriv[tid*20+7] -= rate;
deriv[tid*20+12] -= rate;
deriv[tid*20+15] += rate;
deriv[tid*20+1] += rate;
deriv[tid*20+8] += rate;
deriv[tid*20+11] += rate;
deriv[tid*20+13] += rate;
deriv[tid*20+15] += rate;
deriv[tid*20+8] += rate;
deriv[tid*20+12] += rate;
rate = rateConst[tid*50+9];
rate *= state[tid*20+0];
rate *= state[tid*20+3];
deriv[tid*20+0] -= rate;
deriv[tid*20+3] -= rate;
deriv[tid*20+10] += rate;
deriv[tid*20+3] += rate;
deriv[tid*20+9] += rate;
deriv[tid*20+19] += rate;
deriv[tid*20+9] += rate;
deriv[tid*20+11] += rate;
deriv[tid*20+19] += rate;
rate = rateConst[tid*50+10];
rate *= state[tid*20+3];
rate *= state[tid*20+5];
rate *= state[tid*20+18];
deriv[tid*20+3] -= rate;
deriv[tid*20+5] -= rate;
deriv[tid*20+18] -= rate;
deriv[tid*20+3] += rate;
deriv[tid*20+11] += rate;
deriv[tid*20+17] += rate;
deriv[tid*20+5] += rate;
deriv[tid*20+1] += rate;
deriv[tid*20+17] += rate;
deriv[tid*20+10] += rate;
rate = rateConst[tid*50+11];
rate *= state[tid*20+18];
rate *= state[tid*20+19];
deriv[tid*20+18] -= rate;
deriv[tid*20+19] -= rate;
deriv[tid*20+11] += rate;
deriv[tid*20+13] += rate;
deriv[tid*20+7] += rate;
deriv[tid*20+15] += rate;
deriv[tid*20+15] += rate;
deriv[tid*20+15] += rate;
deriv[tid*20+16] += rate;
deriv[tid*20+10] += rate;
deriv[tid*20+6] += rate;
rate = rateConst[tid*50+12];
rate *= state[tid*20+5];
rate *= state[tid*20+0];
rate *= state[tid*20+14];
deriv[tid*20+5] -= rate;
deriv[tid*20+0] -= rate;
deriv[tid*20+14] -= rate;
deriv[tid*20+5] += rate;
deriv[tid*20+9] += rate;
rate = rateConst[tid*50+13];
rate *= state[tid*20+15];
rate *= state[tid*20+11];
deriv[tid*20+15] -= rate;
deriv[tid*20+11] -= rate;
deriv[tid*20+4] += rate;
deriv[tid*20+18] += rate;
deriv[tid*20+8] += rate;
deriv[tid*20+9] += rate;
deriv[tid*20+11] += rate;
deriv[tid*20+5] += rate;
deriv[tid*20+19] += rate;
deriv[tid*20+7] += rate;
deriv[tid*20+16] += rate;
rate = rateConst[tid*50+14];
rate *= state[tid*20+7];
rate *= state[tid*20+14];
rate *= state[tid*20+6];
deriv[tid*20+7] -= rate;
deriv[tid*20+14] -= rate;
deriv[tid*20+6] -= rate;
deriv[tid*20+3] += rate;
deriv[tid*20+9] += rate;
deriv[tid*20+1] += rate;
deriv[tid*20+11] += rate;
deriv[tid*20+12] += rate;
deriv[tid*20+19] += rate;
deriv[tid*20+2] += rate;
deriv[tid*20+15] += rate;
deriv[tid*20+5] += rate;
rate = rateConst[tid*50+15];
rate *= state[tid*20+2];
rate *= state[tid*20+4];
rate *= state[tid*20+10];
deriv[tid*20+2] -= rate;
deriv[tid*20+4] -= rate;
deriv[tid*20+10] -= rate;
deriv[tid*20+2] += rate;
rate = rateConst[tid*50+16];
rate *= state[tid*20+6];
rate *= state[tid*20+2];
rate *= state[tid*20+2];
deriv[tid*20+6] -= rate;
deriv[tid*20+2] -= rate;
deriv[tid*20+2] -= rate;
deriv[tid*20+8] += rate;
deriv[tid*20+13] += rate;
rate = rateConst[tid*50+17];
rate *= state[tid*20+13];
rate *= state[tid*20+15];
rate *= state[tid*20+1];
deriv[tid*20+13] -= rate;
deriv[tid*20+15] -= rate;
deriv[tid*20+1] -= rate;
deriv[tid*20+11] += rate;
deriv[tid*20+15] += rate;
deriv[tid*20+15] += rate;
deriv[tid*20+9] += rate;
deriv[tid*20+10] += rate;
deriv[tid*20+17] += rate;
deriv[tid*20+11] += rate;
deriv[tid*20+1] += rate;
deriv[tid*20+9] += rate;
deriv[tid*20+2] += rate;
rate = rateConst[tid*50+18];
rate *= state[tid*20+19];
rate *= state[tid*20+10];
rate *= state[tid*20+6];
deriv[tid*20+19] -= rate;
deriv[tid*20+10] -= rate;
deriv[tid*20+6] -= rate;
deriv[tid*20+1] += rate;
deriv[tid*20+7] += rate;
deriv[tid*20+17] += rate;
deriv[tid*20+16] += rate;
deriv[tid*20+18] += rate;
rate = rateConst[tid*50+19];
rate *= state[tid*20+12];
rate *= state[tid*20+4];
deriv[tid*20+12] -= rate;
deriv[tid*20+4] -= rate;
deriv[tid*20+2] += rate;
deriv[tid*20+6] += rate;
deriv[tid*20+16] += rate;
deriv[tid*20+14] += rate;
deriv[tid*20+11] += rate;
rate = rateConst[tid*50+20];
rate *= state[tid*20+2];
rate *= state[tid*20+18];
rate *= state[tid*20+2];
deriv[tid*20+2] -= rate;
deriv[tid*20+18] -= rate;
deriv[tid*20+2] -= rate;
deriv[tid*20+12] += rate;
deriv[tid*20+8] += rate;
deriv[tid*20+19] += rate;
deriv[tid*20+15] += rate;
deriv[tid*20+1] += rate;
deriv[tid*20+0] += rate;
rate = rateConst[tid*50+21];
rate *= state[tid*20+17];
rate *= state[tid*20+17];
rate *= state[tid*20+7];
deriv[tid*20+17] -= rate;
deriv[tid*20+17] -= rate;
deriv[tid*20+7] -= rate;
deriv[tid*20+3] += rate;
deriv[tid*20+10] += rate;
deriv[tid*20+14] += rate;
deriv[tid*20+12] += rate;
deriv[tid*20+6] += rate;
deriv[tid*20+5] += rate;
deriv[tid*20+3] += rate;
deriv[tid*20+1] += rate;
rate = rateConst[tid*50+22];
rate *= state[tid*20+15];
rate *= state[tid*20+15];
rate *= state[tid*20+15];
deriv[tid*20+15] -= rate;
deriv[tid*20+15] -= rate;
deriv[tid*20+15] -= rate;
deriv[tid*20+1] += rate;
deriv[tid*20+6] += rate;
deriv[tid*20+19] += rate;
deriv[tid*20+8] += rate;
deriv[tid*20+9] += rate;
deriv[tid*20+9] += rate;
deriv[tid*20+11] += rate;
deriv[tid*20+13] += rate;
deriv[tid*20+18] += rate;
deriv[tid*20+10] += rate;
rate = rateConst[tid*50+23];
rate *= state[tid*20+3];
rate *= state[tid*20+17];
deriv[tid*20+3] -= rate;
deriv[tid*20+17] -= rate;
deriv[tid*20+8] += rate;
deriv[tid*20+0] += rate;
rate = rateConst[tid*50+24];
rate *= state[tid*20+3];
rate *= state[tid*20+17];
deriv[tid*20+3] -= rate;
deriv[tid*20+17] -= rate;
deriv[tid*20+14] += rate;
deriv[tid*20+8] += rate;
deriv[tid*20+3] += rate;
deriv[tid*20+19] += rate;
deriv[tid*20+11] += rate;
deriv[tid*20+16] += rate;
deriv[tid*20+9] += rate;
deriv[tid*20+2] += rate;
deriv[tid*20+4] += rate;
rate = rateConst[tid*50+25];
rate *= state[tid*20+5];
rate *= state[tid*20+3];
deriv[tid*20+5] -= rate;
deriv[tid*20+3] -= rate;
deriv[tid*20+9] += rate;
deriv[tid*20+14] += rate;
deriv[tid*20+12] += rate;
deriv[tid*20+18] += rate;
deriv[tid*20+5] += rate;
deriv[tid*20+5] += rate;
deriv[tid*20+8] += rate;
deriv[tid*20+7] += rate;
deriv[tid*20+5] += rate;
rate = rateConst[tid*50+26];
rate *= state[tid*20+2];
rate *= state[tid*20+8];
deriv[tid*20+2] -= rate;
deriv[tid*20+8] -= rate;
deriv[tid*20+2] += rate;
rate = rateConst[tid*50+27];
rate *= state[tid*20+6];
rate *= state[tid*20+18];
rate *= state[tid*20+3];
deriv[tid*20+6] -= rate;
deriv[tid*20+18] -= rate;
deriv[tid*20+3] -= rate;
deriv[tid*20+14] += rate;
deriv[tid*20+13] += rate;
deriv[tid*20+14] += rate;
deriv[tid*20+17] += rate;
deriv[tid*20+10] += rate;
deriv[tid*20+15] += rate;
deriv[tid*20+11] += rate;
deriv[tid*20+6] += rate;
deriv[tid*20+12] += rate;
rate = rateConst[tid*50+28];
rate *= state[tid*20+15];
rate *= state[tid*20+10];
rate *= state[tid*20+17];
deriv[tid*20+15] -= rate;
deriv[tid*20+10] -= rate;
deriv[tid*20+17] -= rate;
deriv[tid*20+7] += rate;
deriv[tid*20+9] += rate;
rate = rateConst[tid*50+29];
rate *= state[tid*20+9];
rate *= state[tid*20+2];
deriv[tid*20+9] -= rate;
deriv[tid*20+2] -= rate;
deriv[tid*20+1] += rate;
deriv[tid*20+9] += rate;
deriv[tid*20+4] += rate;
deriv[tid*20+15] += rate;
deriv[tid*20+10] += rate;
rate = rateConst[tid*50+30];
rate *= state[tid*20+10];
rate *= state[tid*20+5];
rate *= state[tid*20+14];
deriv[tid*20+10] -= rate;
deriv[tid*20+5] -= rate;
deriv[tid*20+14] -= rate;
deriv[tid*20+13] += rate;
deriv[tid*20+11] += rate;
deriv[tid*20+0] += rate;
deriv[tid*20+0] += rate;
deriv[tid*20+0] += rate;
deriv[tid*20+2] += rate;
deriv[tid*20+15] += rate;
rate = rateConst[tid*50+31];
rate *= state[tid*20+19];
rate *= state[tid*20+5];
rate *= state[tid*20+19];
deriv[tid*20+19] -= rate;
deriv[tid*20+5] -= rate;
deriv[tid*20+19] -= rate;
deriv[tid*20+14] += rate;
deriv[tid*20+15] += rate;
deriv[tid*20+9] += rate;
deriv[tid*20+13] += rate;
deriv[tid*20+16] += rate;
deriv[tid*20+3] += rate;
deriv[tid*20+9] += rate;
deriv[tid*20+6] += rate;
deriv[tid*20+17] += rate;
rate = rateConst[tid*50+32];
rate *= state[tid*20+14];
rate *= state[tid*20+18];
deriv[tid*20+14] -= rate;
deriv[tid*20+18] -= rate;
deriv[tid*20+9] += rate;
deriv[tid*20+13] += rate;
deriv[tid*20+14] += rate;
deriv[tid*20+0] += rate;
deriv[tid*20+18] += rate;
deriv[tid*20+1] += rate;
deriv[tid*20+5] += rate;
deriv[tid*20+1] += rate;
rate = rateConst[tid*50+33];
rate *= state[tid*20+13];
rate *= state[tid*20+3];
rate *= state[tid*20+13];
deriv[tid*20+13] -= rate;
deriv[tid*20+3] -= rate;
deriv[tid*20+13] -= rate;
deriv[tid*20+16] += rate;
deriv[tid*20+3] += rate;
deriv[tid*20+12] += rate;
deriv[tid*20+1] += rate;
deriv[tid*20+14] += rate;
deriv[tid*20+19] += rate;
rate = rateConst[tid*50+34];
rate *= state[tid*20+12];
rate *= state[tid*20+17];
rate *= state[tid*20+7];
deriv[tid*20+12] -= rate;
deriv[tid*20+17] -= rate;
deriv[tid*20+7] -= rate;
deriv[tid*20+2] += rate;
deriv[tid*20+15] += rate;
deriv[tid*20+16] += rate;
deriv[tid*20+4] += rate;
rate = rateConst[tid*50+35];
rate *= state[tid*20+2];
rate *= state[tid*20+12];
rate *= state[tid*20+3];
deriv[tid*20+2] -= rate;
deriv[tid*20+12] -= rate;
deriv[tid*20+3] -= rate;
deriv[tid*20+9] += rate;
rate = rateConst[tid*50+36];
rate *= state[tid*20+10];
rate *= state[tid*20+10];
deriv[tid*20+10] -= rate;
deriv[tid*20+10] -= rate;
deriv[tid*20+6] += rate;
deriv[tid*20+3] += rate;
deriv[tid*20+7] += rate;
deriv[tid*20+11] += rate;
rate = rateConst[tid*50+37];
rate *= state[tid*20+8];
rate *= state[tid*20+1];
deriv[tid*20+8] -= rate;
deriv[tid*20+1] -= rate;
deriv[tid*20+17] += rate;
deriv[tid*20+1] += rate;
deriv[tid*20+15] += rate;
rate = rateConst[tid*50+38];
rate *= state[tid*20+19];
rate *= state[tid*20+8];
deriv[tid*20+19] -= rate;
deriv[tid*20+8] -= rate;
deriv[tid*20+0] += rate;
deriv[tid*20+18] += rate;
deriv[tid*20+2] += rate;
rate = rateConst[tid*50+39];
rate *= state[tid*20+7];
rate *= state[tid*20+10];
rate *= state[tid*20+17];
deriv[tid*20+7] -= rate;
deriv[tid*20+10] -= rate;
deriv[tid*20+17] -= rate;
deriv[tid*20+9] += rate;
deriv[tid*20+15] += rate;
deriv[tid*20+0] += rate;
deriv[tid*20+18] += rate;
deriv[tid*20+19] += rate;
rate = rateConst[tid*50+40];
rate *= state[tid*20+1];
rate *= state[tid*20+12];
deriv[tid*20+1] -= rate;
deriv[tid*20+12] -= rate;
deriv[tid*20+3] += rate;
rate = rateConst[tid*50+41];
rate *= state[tid*20+1];
rate *= state[tid*20+1];
rate *= state[tid*20+4];
deriv[tid*20+1] -= rate;
deriv[tid*20+1] -= rate;
deriv[tid*20+4] -= rate;
deriv[tid*20+2] += rate;
deriv[tid*20+10] += rate;
deriv[tid*20+17] += rate;
deriv[tid*20+18] += rate;
deriv[tid*20+6] += rate;
deriv[tid*20+0] += rate;
deriv[tid*20+9] += rate;
rate = rateConst[tid*50+42];
rate *= state[tid*20+7];
rate *= state[tid*20+9];
rate *= state[tid*20+7];
deriv[tid*20+7] -= rate;
deriv[tid*20+9] -= rate;
deriv[tid*20+7] -= rate;
deriv[tid*20+13] += rate;
deriv[tid*20+8] += rate;
deriv[tid*20+18] += rate;
rate = rateConst[tid*50+43];
rate *= state[tid*20+13];
rate *= state[tid*20+3];
deriv[tid*20+13] -= rate;
deriv[tid*20+3] -= rate;
deriv[tid*20+0] += rate;
rate = rateConst[tid*50+44];
rate *= state[tid*20+1];
rate *= state[tid*20+6];
deriv[tid*20+1] -= rate;
deriv[tid*20+6] -= rate;
deriv[tid*20+13] += rate;
deriv[tid*20+16] += rate;
deriv[tid*20+15] += rate;
deriv[tid*20+9] += rate;
deriv[tid*20+18] += rate;
deriv[tid*20+8] += rate;
deriv[tid*20+5] += rate;
deriv[tid*20+12] += rate;
deriv[tid*20+11] += rate;
deriv[tid*20+15] += rate;
rate = rateConst[tid*50+45];
rate *= state[tid*20+7];
rate *= state[tid*20+11];
deriv[tid*20+7] -= rate;
deriv[tid*20+11] -= rate;
deriv[tid*20+16] += rate;
deriv[tid*20+19] += rate;
deriv[tid*20+10] += rate;
deriv[tid*20+18] += rate;
deriv[tid*20+18] += rate;
deriv[tid*20+4] += rate;
deriv[tid*20+6] += rate;
deriv[tid*20+16] += rate;
rate = rateConst[tid*50+46];
rate *= state[tid*20+1];
rate *= state[tid*20+9];
deriv[tid*20+1] -= rate;
deriv[tid*20+9] -= rate;
deriv[tid*20+9] += rate;
deriv[tid*20+18] += rate;
deriv[tid*20+10] += rate;
deriv[tid*20+10] += rate;
deriv[tid*20+4] += rate;
deriv[tid*20+16] += rate;
deriv[tid*20+6] += rate;
deriv[tid*20+11] += rate;
deriv[tid*20+5] += rate;
rate = rateConst[tid*50+47];
rate *= state[tid*20+3];
rate *= state[tid*20+9];
deriv[tid*20+3] -= rate;
deriv[tid*20+9] -= rate;
deriv[tid*20+2] += rate;
deriv[tid*20+18] += rate;
rate = rateConst[tid*50+48];
rate *= state[tid*20+5];
rate *= state[tid*20+15];
rate *= state[tid*20+8];
deriv[tid*20+5] -= rate;
deriv[tid*20+15] -= rate;
deriv[tid*20+8] -= rate;
deriv[tid*20+4] += rate;
deriv[tid*20+5] += rate;
deriv[tid*20+18] += rate;
rate = rateConst[tid*50+49];
rate *= state[tid*20+17];
rate *= state[tid*20+11];
deriv[tid*20+17] -= rate;
deriv[tid*20+11] -= rate;
deriv[tid*20+8] += rate;
deriv[tid*20+5] += rate;
}
}
|
c7cdf031c40d6351848948a96887d3e6f89a4f7c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "mDivergence_TwoDim.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
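// Benchmark harness: for the first argv[1] matrix sizes in matrices_ and all 20 block
// shapes in blocks_, allocate device buffers, warm the kernel up, then time 1000 launches
// and print [microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].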
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *div = NULL;
hipMalloc(&div, XSIZE*YSIZE*sizeof(float));
float *u_dimX = NULL;
hipMalloc(&u_dimX, XSIZE*YSIZE*sizeof(float));
float *u_dimY = NULL;
hipMalloc(&u_dimY, XSIZE*YSIZE*sizeof(float));
float r_sStep = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((mDivergence_TwoDim), dim3(gridBlock),dim3(threadBlock), 0, 0, div,u_dimX,u_dimY,r_sStep);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((mDivergence_TwoDim), dim3(gridBlock),dim3(threadBlock), 0, 0, div,u_dimX,u_dimY,r_sStep);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((mDivergence_TwoDim), dim3(gridBlock),dim3(threadBlock), 0, 0, div,u_dimX,u_dimY,r_sStep);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | c7cdf031c40d6351848948a96887d3e6f89a4f7c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "mDivergence_TwoDim.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *div = NULL;
cudaMalloc(&div, XSIZE*YSIZE*sizeof(float));
float *u_dimX = NULL;
cudaMalloc(&u_dimX, XSIZE*YSIZE*sizeof(float));
float *u_dimY = NULL;
cudaMalloc(&u_dimY, XSIZE*YSIZE*sizeof(float));
float r_sStep = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
mDivergence_TwoDim<<<gridBlock,threadBlock>>>(div,u_dimX,u_dimY,r_sStep);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
mDivergence_TwoDim<<<gridBlock,threadBlock>>>(div,u_dimX,u_dimY,r_sStep);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
mDivergence_TwoDim<<<gridBlock,threadBlock>>>(div,u_dimX,u_dimY,r_sStep);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
341f19c3f236cf3f087c88f1f824ebf1f9a9d1f7.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "negative_prob_multiply_dense_matrix_vector_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
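// Timing harness for the dense matrix-vector multiply kernel: sweep block shapes and
// matrix sizes, warm the kernel up, then report microseconds for 1000 launches.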
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *matrix = NULL;
hipMalloc(&matrix, XSIZE*YSIZE*sizeof(float));
float *in_vector = NULL;
hipMalloc(&in_vector, XSIZE*YSIZE*sizeof(float));
float *out_vector = NULL;
hipMalloc(&out_vector, XSIZE*YSIZE*sizeof(float));
unsigned int outerdim = 1;
unsigned int innerdim = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((negative_prob_multiply_dense_matrix_vector_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, matrix,in_vector,out_vector,outerdim,innerdim);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((negative_prob_multiply_dense_matrix_vector_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, matrix,in_vector,out_vector,outerdim,innerdim);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((negative_prob_multiply_dense_matrix_vector_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, matrix,in_vector,out_vector,outerdim,innerdim);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 341f19c3f236cf3f087c88f1f824ebf1f9a9d1f7.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "negative_prob_multiply_dense_matrix_vector_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *matrix = NULL;
cudaMalloc(&matrix, XSIZE*YSIZE);
float *in_vector = NULL;
cudaMalloc(&in_vector, XSIZE*YSIZE);
float *out_vector = NULL;
cudaMalloc(&out_vector, XSIZE*YSIZE);
unsigned int outerdim = 1;
unsigned int innerdim = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
negative_prob_multiply_dense_matrix_vector_kernel<<<gridBlock,threadBlock>>>(matrix,in_vector,out_vector,outerdim,innerdim);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
negative_prob_multiply_dense_matrix_vector_kernel<<<gridBlock,threadBlock>>>(matrix,in_vector,out_vector,outerdim,innerdim);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
negative_prob_multiply_dense_matrix_vector_kernel<<<gridBlock,threadBlock>>>(matrix,in_vector,out_vector,outerdim,innerdim);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
bdaca14770063a2382b65ef7aeeff1a3fbf1cee7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/cu_inc/common.cuh"
#include "transpose_impl.h"
namespace onnxruntime {
namespace cuda {
constexpr unsigned int TILE_DIM = 16;
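// Batched transpose of the last two dimensions: each block stages one TILE_DIM x TILE_DIM
// tile in shared memory and writes it back with x/y swapped; blockIdx.z walks the
// outermost (batch) dimension.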
template <typename T>
__global__ void Transpose3DKernel(const TArray<int64_t> input_shape,
const TArray<int64_t> input_strides,
const T* input_data, T* output_data) {
__shared__ T tile[TILE_DIM * (TILE_DIM + 1)];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
tile[threadIdx.y * TILE_DIM + threadIdx.x] = input_data[blockIdx.z * input_strides[0] + y * input_shape[2] + x];
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x;
y = blockIdx.x * TILE_DIM + threadIdx.y;
output_data[blockIdx.z * input_strides[0] + y * input_shape[1] + x] = tile[threadIdx.x * TILE_DIM + threadIdx.y];
}
bool CanDoTranspose3D(int32_t rank,
const std::vector<int64_t>& input_dims,
const std::vector<size_t>& permutations) {
if (rank == 3 &&
// permutation is done in the last two dimensions.
permutations[rank - 2] == (rank - 1) && permutations[rank - 1] == (rank - 2) &&
// the last two dimensions are aligned with TILE_DIM.
input_dims[rank - 2] % TILE_DIM == 0 && input_dims[rank - 1] % TILE_DIM == 0) {
return true;
}
return false;
}
Status Transpose3DImpl(hipStream_t stream, size_t element_size,
const TArray<int64_t>& input_shape, const TArray<int64_t>& input_strides,
const void* input_data, void* output_data, int64_t N) {
dim3 block_size(TILE_DIM, TILE_DIM);
dim3 grid_size(static_cast<unsigned int>(input_shape[2] / TILE_DIM), static_cast<unsigned int>(input_shape[1] / TILE_DIM), static_cast<unsigned int>(input_shape[0]));
switch (element_size) {
case sizeof(int8_t):
hipLaunchKernelGGL(( Transpose3DKernel<int8_t>), dim3(grid_size), dim3(block_size), 0, stream,
input_shape, input_strides,
reinterpret_cast<const ToCudaType<int8_t>::MappedType*>(input_data),
reinterpret_cast<ToCudaType<int8_t>::MappedType*>(output_data));
break;
case sizeof(int16_t):
hipLaunchKernelGGL(( Transpose3DKernel<int16_t>), dim3(grid_size), dim3(block_size), 0, stream,
input_shape, input_strides,
reinterpret_cast<const ToCudaType<int16_t>::MappedType*>(input_data),
reinterpret_cast<ToCudaType<int16_t>::MappedType*>(output_data));
break;
case sizeof(int32_t):
hipLaunchKernelGGL(( Transpose3DKernel<int32_t>), dim3(grid_size), dim3(block_size), 0, stream,
input_shape, input_strides,
reinterpret_cast<const ToCudaType<int32_t>::MappedType*>(input_data),
reinterpret_cast<ToCudaType<int32_t>::MappedType*>(output_data));
break;
case sizeof(int64_t):
hipLaunchKernelGGL(( Transpose3DKernel<int64_t>), dim3(grid_size), dim3(block_size), 0, stream,
input_shape, input_strides,
reinterpret_cast<const ToCudaType<int64_t>::MappedType*>(input_data),
reinterpret_cast<ToCudaType<int64_t>::MappedType*>(output_data));
break;
default:
return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type not supported for transpose on CUDA. Element size was ",
element_size);
}
return Status::OK();
}
template <int element_size>
__global__ void Transpose4DKernelParallelizeMultipleElementsPerThreadInInnermostDim(
const TArray<int64_t> input_strides, const void* input_data,
const TArray<int64_t> output_strides, void* output_data,
CUDA_LONG N) {
// output coordinates will be: blockIdx.y, blockIdx.x, threadIdx.y, threadIdx.x
CUDA_LONG input_index = (blockIdx.y * input_strides[0] +
blockIdx.x * input_strides[1] +
threadIdx.y * input_strides[2]) /
(4 * sizeof(int) / element_size) +
threadIdx.x * input_strides[3];
CUDA_LONG output_index = (blockIdx.y * output_strides[0] +
blockIdx.x * output_strides[1] +
threadIdx.y * output_strides[2]) /
(4 * sizeof(int) / element_size) +
threadIdx.x * output_strides[3];
const int4* v_input = reinterpret_cast<const int4*>(input_data);
int4* v_output = reinterpret_cast<int4*>(output_data);
if (input_index < N && output_index < N) {
v_output[output_index] = v_input[input_index];
}
}
bool CanDoTranspose4DParallelizeMultipleElementsPerThreadInInnermostDim(const hipDeviceProp_t& prop,
size_t element_size,
int32_t rank,
const std::vector<int64_t>& input_dims,
const std::vector<size_t>& permutations) {
if (rank == 4 &&
// the permutation leaves the last dimension in place.
permutations[3] == 3) {
// The block size will be set based on the outer-most two dimensions of 4D tensor.
// the number threads per block will be calculated as below.
unsigned int num_elements_per_thread = 4 * sizeof(int) / static_cast<unsigned int>(element_size); // int4 is used in the kernel to access data.
int64_t num_elements_in_last_two_dimensions = input_dims[2] * input_dims[3];
int64_t num_threads_per_block = num_elements_in_last_two_dimensions / num_elements_per_thread;
if (((num_elements_in_last_two_dimensions & (num_elements_per_thread - 1)) == 0) &&
num_threads_per_block <= prop.maxThreadsPerBlock &&
num_threads_per_block >= prop.warpSize &&
// num_threads_per_block must be a multiple of warp size (32)
((num_threads_per_block & (prop.warpSize - 1)) == 0) &&
// input_dims[3] must be a multiple of `num_elements_per_thread`
((input_dims[3] % num_elements_per_thread) == 0)) {
return true;
}
}
return false;
}
Status Transpose4DParallelizeMultipleElementsPerThreadInInnermostDim(
hipStream_t stream, size_t element_size,
const TArray<int64_t>& input_shape, const TArray<int64_t>& input_strides,
const void* input_data, const TArray<int64_t>& output_strides,
void* output_data, int N) {
unsigned int num_elements_per_thread = 4 * sizeof(int) / static_cast<unsigned int>(element_size); // int4 is used in the kernel to access data.
dim3 block_size(static_cast<unsigned int>(input_shape[3] / num_elements_per_thread), static_cast<unsigned int>(input_shape[2]));
dim3 grid_size(static_cast<unsigned int>(input_shape[1]), static_cast<unsigned int>(input_shape[0]));
switch (element_size) {
case sizeof(int8_t):
hipLaunchKernelGGL(( Transpose4DKernelParallelizeMultipleElementsPerThreadInInnermostDim<sizeof(int8_t)>)
, dim3(grid_size), dim3(block_size), 0, stream,
input_strides, input_data,
output_strides, output_data, N / num_elements_per_thread);
break;
case sizeof(int16_t):
hipLaunchKernelGGL(( Transpose4DKernelParallelizeMultipleElementsPerThreadInInnermostDim<sizeof(int16_t)>)
, dim3(grid_size), dim3(block_size), 0, stream,
input_strides, input_data,
output_strides, output_data, N / num_elements_per_thread);
break;
case sizeof(int32_t):
hipLaunchKernelGGL(( Transpose4DKernelParallelizeMultipleElementsPerThreadInInnermostDim<sizeof(int32_t)>)
, dim3(grid_size), dim3(block_size), 0, stream,
input_strides, input_data,
output_strides, output_data, N / num_elements_per_thread);
break;
case sizeof(int64_t):
hipLaunchKernelGGL(( Transpose4DKernelParallelizeMultipleElementsPerThreadInInnermostDim<sizeof(int64_t)>)
, dim3(grid_size), dim3(block_size), 0, stream,
input_strides, input_data,
output_strides, output_data, N / num_elements_per_thread);
break;
default:
// User will not hit this as this kernel is for fixed element size tensors only
return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type not supported for transpose on CUDA. Element size was ",
element_size);
}
return Status::OK();
}
__global__ void Transpose4DKernelParallelizeOneElementPerThread(
const TArray<int64_t> input_strides, const int8_t* input_data,
const TArray<int64_t> output_strides, int8_t* output_data,
size_t element_size,
CUDA_LONG N) {
CUDA_LONG input_index = blockIdx.y * input_strides[0] +
blockIdx.x * input_strides[1] +
threadIdx.y * input_strides[2] +
threadIdx.x * input_strides[3];
CUDA_LONG output_index = blockIdx.y * output_strides[0] +
blockIdx.x * output_strides[1] +
threadIdx.y * output_strides[2] +
threadIdx.x * output_strides[3];
if (input_index < N && output_index < N) {
const int8_t* input_data_to_be_copied = input_data + (input_index * element_size);
int8_t* output_data_to_be_copied = output_data + (output_index * element_size);
// copy over the bytes
for (size_t iter = 0; iter < element_size; ++iter) {
*output_data_to_be_copied++ = *input_data_to_be_copied++;
}
}
}
bool CanDoTranspose4DParallelizeOneElementPerThread(const hipDeviceProp_t& prop,
size_t element_size,
int32_t rank,
const std::vector<int64_t>& input_dims,
const std::vector<size_t>& permutations) {
if (rank == 4) {
// The block size will be set based on the outer-most two dimensions of 4D tensor.
// the number threads per block will be calculated as below.
int64_t number_of_threads_per_block = input_dims[2] * input_dims[3];
if (number_of_threads_per_block <= prop.maxThreadsPerBlock &&
number_of_threads_per_block >= prop.warpSize &&
// num_threads_per_block must be a multiple of warp size (32)
((number_of_threads_per_block & (prop.warpSize - 1)) == 0)) {
return true;
}
}
return false;
}
Status Transpose4DParallelizeOneElementPerThread(
hipStream_t stream, size_t element_size,
const TArray<int64_t>& input_shape, const TArray<int64_t>& input_strides,
const void* input_data, const TArray<int64_t>& output_strides,
void* output_data, int N) {
if (element_size != sizeof(int8_t) &&
element_size != sizeof(int16_t) &&
element_size != sizeof(int32_t) &&
element_size != sizeof(int64_t)) {
// User will not hit this as this kernel is for fixed element size tensors only
return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type not supported for transpose on CUDA. Element size was ",
element_size);
}
dim3 block_size(static_cast<unsigned int>(input_shape[3]), static_cast<unsigned int>(input_shape[2]));
dim3 grid_size(static_cast<unsigned int>(input_shape[1]), static_cast<unsigned int>(input_shape[0]));
hipLaunchKernelGGL(( Transpose4DKernelParallelizeOneElementPerThread), dim3(grid_size), dim3(block_size), 0, stream,
input_strides, reinterpret_cast<const int8_t*>(input_data),
output_strides, reinterpret_cast<int8_t*>(output_data),
element_size, N);
return Status::OK();
}
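// Generic fallback: each thread decomposes its flat output index into coordinates with
// fast_divmod of the output strides and accumulates the matching input offset from the
// (permuted) input strides.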
template <typename T>
__global__ void TransposeKernel(int32_t shape_rank, const TArray<int64_t> input_strides,
const T* input_data, const TArray<fast_divmod> output_strides, T* output_data, CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
CUDA_LONG input_index = 0;
CUDA_LONG output_index = id;
#pragma unroll
for (auto dim = 0; dim < input_strides.Capacity(); ++dim) {
if (dim >= shape_rank) {
break;
}
int out_coord, r;
output_strides[dim].divmod(output_index, out_coord, r);
output_index = r;
input_index += input_strides[dim] * out_coord;
}
output_data[id] = input_data[input_index];
}
Status TransposeImpl(hipStream_t stream, size_t element_size, int32_t shape_rank, const TArray<int64_t>& input_strides,
const void* input_data, const TArray<fast_divmod>& fdm_output_strides, void* output_data, int N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
switch (element_size) {
case sizeof(int8_t):
hipLaunchKernelGGL(( TransposeKernel<int8_t>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream,
shape_rank, input_strides,
reinterpret_cast<const ToCudaType<int8_t>::MappedType*>(input_data),
fdm_output_strides,
reinterpret_cast<ToCudaType<int8_t>::MappedType*>(output_data),
N);
break;
case sizeof(int16_t):
hipLaunchKernelGGL(( TransposeKernel<int16_t>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream,
shape_rank, input_strides,
reinterpret_cast<const ToCudaType<int16_t>::MappedType*>(input_data),
fdm_output_strides,
reinterpret_cast<ToCudaType<int16_t>::MappedType*>(output_data),
N);
break;
case sizeof(int32_t):
hipLaunchKernelGGL(( TransposeKernel<int32_t>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream,
shape_rank, input_strides,
reinterpret_cast<const ToCudaType<int32_t>::MappedType*>(input_data),
fdm_output_strides,
reinterpret_cast<ToCudaType<int32_t>::MappedType*>(output_data),
N);
break;
case sizeof(int64_t):
hipLaunchKernelGGL(( TransposeKernel<int64_t>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream,
shape_rank, input_strides,
reinterpret_cast<const ToCudaType<int64_t>::MappedType*>(input_data),
fdm_output_strides,
reinterpret_cast<ToCudaType<int64_t>::MappedType*>(output_data),
N);
break;
default:
return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type not supported for transpose on CUDA. Element size was ",
element_size);
}
return Status::OK();
}
} // namespace cuda
} // namespace onnxruntime
| bdaca14770063a2382b65ef7aeeff1a3fbf1cee7.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/cu_inc/common.cuh"
#include "transpose_impl.h"
namespace onnxruntime {
namespace cuda {
constexpr unsigned int TILE_DIM = 16;
template <typename T>
__global__ void Transpose3DKernel(const TArray<int64_t> input_shape,
const TArray<int64_t> input_strides,
const T* input_data, T* output_data) {
__shared__ T tile[TILE_DIM * (TILE_DIM + 1)];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
tile[threadIdx.y * TILE_DIM + threadIdx.x] = input_data[blockIdx.z * input_strides[0] + y * input_shape[2] + x];
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x;
y = blockIdx.x * TILE_DIM + threadIdx.y;
output_data[blockIdx.z * input_strides[0] + y * input_shape[1] + x] = tile[threadIdx.x * TILE_DIM + threadIdx.y];
}
bool CanDoTranspose3D(int32_t rank,
const std::vector<int64_t>& input_dims,
const std::vector<size_t>& permutations) {
if (rank == 3 &&
// permutation is done in the last two dimensions.
permutations[rank - 2] == (rank - 1) && permutations[rank - 1] == (rank - 2) &&
// the last two dimensions are aligned with TILE_DIM.
input_dims[rank - 2] % TILE_DIM == 0 && input_dims[rank - 1] % TILE_DIM == 0) {
return true;
}
return false;
}
Status Transpose3DImpl(cudaStream_t stream, size_t element_size,
const TArray<int64_t>& input_shape, const TArray<int64_t>& input_strides,
const void* input_data, void* output_data, int64_t N) {
dim3 block_size(TILE_DIM, TILE_DIM);
dim3 grid_size(static_cast<unsigned int>(input_shape[2] / TILE_DIM), static_cast<unsigned int>(input_shape[1] / TILE_DIM), static_cast<unsigned int>(input_shape[0]));
switch (element_size) {
case sizeof(int8_t):
Transpose3DKernel<int8_t><<<grid_size, block_size, 0, stream>>>(
input_shape, input_strides,
reinterpret_cast<const ToCudaType<int8_t>::MappedType*>(input_data),
reinterpret_cast<ToCudaType<int8_t>::MappedType*>(output_data));
break;
case sizeof(int16_t):
Transpose3DKernel<int16_t><<<grid_size, block_size, 0, stream>>>(
input_shape, input_strides,
reinterpret_cast<const ToCudaType<int16_t>::MappedType*>(input_data),
reinterpret_cast<ToCudaType<int16_t>::MappedType*>(output_data));
break;
case sizeof(int32_t):
Transpose3DKernel<int32_t><<<grid_size, block_size, 0, stream>>>(
input_shape, input_strides,
reinterpret_cast<const ToCudaType<int32_t>::MappedType*>(input_data),
reinterpret_cast<ToCudaType<int32_t>::MappedType*>(output_data));
break;
case sizeof(int64_t):
Transpose3DKernel<int64_t><<<grid_size, block_size, 0, stream>>>(
input_shape, input_strides,
reinterpret_cast<const ToCudaType<int64_t>::MappedType*>(input_data),
reinterpret_cast<ToCudaType<int64_t>::MappedType*>(output_data));
break;
default:
return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type not supported for transpose on CUDA. Element size was ",
element_size);
}
return Status::OK();
}
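// Specialized 4-D transpose for permutations that keep the innermost dimension in place:
// each thread copies one int4 (16 bytes, i.e. 16/element_size elements), so the offsets
// contributed by the three outer dimensions are converted to int4 units before indexing.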
template <int element_size>
__global__ void Transpose4DKernelParallelizeMultipleElementsPerThreadInInnermostDim(
const TArray<int64_t> input_strides, const void* input_data,
const TArray<int64_t> output_strides, void* output_data,
CUDA_LONG N) {
// output coordinates will be: blockIdx.y, blockIdx.x, threadIdx.y, threadIdx.x
CUDA_LONG input_index = (blockIdx.y * input_strides[0] +
blockIdx.x * input_strides[1] +
threadIdx.y * input_strides[2]) /
(4 * sizeof(int) / element_size) +
threadIdx.x * input_strides[3];
CUDA_LONG output_index = (blockIdx.y * output_strides[0] +
blockIdx.x * output_strides[1] +
threadIdx.y * output_strides[2]) /
(4 * sizeof(int) / element_size) +
threadIdx.x * output_strides[3];
const int4* v_input = reinterpret_cast<const int4*>(input_data);
int4* v_output = reinterpret_cast<int4*>(output_data);
if (input_index < N && output_index < N) {
v_output[output_index] = v_input[input_index];
}
}
bool CanDoTranspose4DParallelizeMultipleElementsPerThreadInInnermostDim(const cudaDeviceProp& prop,
size_t element_size,
int32_t rank,
const std::vector<int64_t>& input_dims,
const std::vector<size_t>& permutations) {
if (rank == 4 &&
// the permutation leaves the last dimension in place.
permutations[3] == 3) {
// The block size will be set based on the outer-most two dimensions of 4D tensor.
// the number threads per block will be calculated as below.
unsigned int num_elements_per_thread = 4 * sizeof(int) / static_cast<unsigned int>(element_size); // int4 is used in the kernel to access data.
int64_t num_elements_in_last_two_dimensions = input_dims[2] * input_dims[3];
int64_t num_threads_per_block = num_elements_in_last_two_dimensions / num_elements_per_thread;
if (((num_elements_in_last_two_dimensions & (num_elements_per_thread - 1)) == 0) &&
num_threads_per_block <= prop.maxThreadsPerBlock &&
num_threads_per_block >= prop.warpSize &&
// num_threads_per_block must be a multiple of warp size (32)
((num_threads_per_block & (prop.warpSize - 1)) == 0) &&
// input_dims[3] must be a multiple of `num_elements_per_thread`
((input_dims[3] % num_elements_per_thread) == 0)) {
return true;
}
}
return false;
}
Status Transpose4DParallelizeMultipleElementsPerThreadInInnermostDim(
cudaStream_t stream, size_t element_size,
const TArray<int64_t>& input_shape, const TArray<int64_t>& input_strides,
const void* input_data, const TArray<int64_t>& output_strides,
void* output_data, int N) {
unsigned int num_elements_per_thread = 4 * sizeof(int) / static_cast<unsigned int>(element_size); // int4 is used in the kernel to access data.
dim3 block_size(static_cast<unsigned int>(input_shape[3] / num_elements_per_thread), static_cast<unsigned int>(input_shape[2]));
dim3 grid_size(static_cast<unsigned int>(input_shape[1]), static_cast<unsigned int>(input_shape[0]));
switch (element_size) {
case sizeof(int8_t):
Transpose4DKernelParallelizeMultipleElementsPerThreadInInnermostDim<sizeof(int8_t)>
<<<grid_size, block_size, 0, stream>>>(
input_strides, input_data,
output_strides, output_data, N / num_elements_per_thread);
break;
case sizeof(int16_t):
Transpose4DKernelParallelizeMultipleElementsPerThreadInInnermostDim<sizeof(int16_t)>
<<<grid_size, block_size, 0, stream>>>(
input_strides, input_data,
output_strides, output_data, N / num_elements_per_thread);
break;
case sizeof(int32_t):
Transpose4DKernelParallelizeMultipleElementsPerThreadInInnermostDim<sizeof(int32_t)>
<<<grid_size, block_size, 0, stream>>>(
input_strides, input_data,
output_strides, output_data, N / num_elements_per_thread);
break;
case sizeof(int64_t):
Transpose4DKernelParallelizeMultipleElementsPerThreadInInnermostDim<sizeof(int64_t)>
<<<grid_size, block_size, 0, stream>>>(
input_strides, input_data,
output_strides, output_data, N / num_elements_per_thread);
break;
default:
// User will not hit this as this kernel is for fixed element size tensors only
return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type not supported for transpose on CUDA. Element size was ",
element_size);
}
return Status::OK();
}
__global__ void Transpose4DKernelParallelizeOneElementPerThread(
const TArray<int64_t> input_strides, const int8_t* input_data,
const TArray<int64_t> output_strides, int8_t* output_data,
size_t element_size,
CUDA_LONG N) {
CUDA_LONG input_index = blockIdx.y * input_strides[0] +
blockIdx.x * input_strides[1] +
threadIdx.y * input_strides[2] +
threadIdx.x * input_strides[3];
CUDA_LONG output_index = blockIdx.y * output_strides[0] +
blockIdx.x * output_strides[1] +
threadIdx.y * output_strides[2] +
threadIdx.x * output_strides[3];
if (input_index < N && output_index < N) {
const int8_t* input_data_to_be_copied = input_data + (input_index * element_size);
int8_t* output_data_to_be_copied = output_data + (output_index * element_size);
// copy over the bytes
for (size_t iter = 0; iter < element_size; ++iter) {
*output_data_to_be_copied++ = *input_data_to_be_copied++;
}
}
}
bool CanDoTranspose4DParallelizeOneElementPerThread(const cudaDeviceProp& prop,
size_t element_size,
int32_t rank,
const std::vector<int64_t>& input_dims,
const std::vector<size_t>& permutations) {
if (rank == 4) {
// The block size will be set based on the outer-most two dimensions of 4D tensor.
// the number threads per block will be calculated as below.
int64_t number_of_threads_per_block = input_dims[2] * input_dims[3];
if (number_of_threads_per_block <= prop.maxThreadsPerBlock &&
number_of_threads_per_block >= prop.warpSize &&
// num_threads_per_block must be a multiple of warp size (32)
((number_of_threads_per_block & (prop.warpSize - 1)) == 0)) {
return true;
}
}
return false;
}
Status Transpose4DParallelizeOneElementPerThread(
cudaStream_t stream, size_t element_size,
const TArray<int64_t>& input_shape, const TArray<int64_t>& input_strides,
const void* input_data, const TArray<int64_t>& output_strides,
void* output_data, int N) {
if (element_size != sizeof(int8_t) &&
element_size != sizeof(int16_t) &&
element_size != sizeof(int32_t) &&
element_size != sizeof(int64_t)) {
// User will not hit this as this kernel is for fixed element size tensors only
return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type not supported for transpose on CUDA. Element size was ",
element_size);
}
dim3 block_size(static_cast<unsigned int>(input_shape[3]), static_cast<unsigned int>(input_shape[2]));
dim3 grid_size(static_cast<unsigned int>(input_shape[1]), static_cast<unsigned int>(input_shape[0]));
Transpose4DKernelParallelizeOneElementPerThread<<<grid_size, block_size, 0, stream>>>(
input_strides, reinterpret_cast<const int8_t*>(input_data),
output_strides, reinterpret_cast<int8_t*>(output_data),
element_size, N);
return Status::OK();
}
template <typename T>
__global__ void TransposeKernel(int32_t shape_rank, const TArray<int64_t> input_strides,
const T* input_data, const TArray<fast_divmod> output_strides, T* output_data, CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
CUDA_LONG input_index = 0;
CUDA_LONG output_index = id;
#pragma unroll
for (auto dim = 0; dim < input_strides.Capacity(); ++dim) {
if (dim >= shape_rank) {
break;
}
int out_coord, r;
output_strides[dim].divmod(output_index, out_coord, r);
output_index = r;
input_index += input_strides[dim] * out_coord;
}
output_data[id] = input_data[input_index];
}
Status TransposeImpl(cudaStream_t stream, size_t element_size, int32_t shape_rank, const TArray<int64_t>& input_strides,
const void* input_data, const TArray<fast_divmod>& fdm_output_strides, void* output_data, int N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
switch (element_size) {
case sizeof(int8_t):
TransposeKernel<int8_t><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(
shape_rank, input_strides,
reinterpret_cast<const ToCudaType<int8_t>::MappedType*>(input_data),
fdm_output_strides,
reinterpret_cast<ToCudaType<int8_t>::MappedType*>(output_data),
N);
break;
case sizeof(int16_t):
TransposeKernel<int16_t><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(
shape_rank, input_strides,
reinterpret_cast<const ToCudaType<int16_t>::MappedType*>(input_data),
fdm_output_strides,
reinterpret_cast<ToCudaType<int16_t>::MappedType*>(output_data),
N);
break;
case sizeof(int32_t):
TransposeKernel<int32_t><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(
shape_rank, input_strides,
reinterpret_cast<const ToCudaType<int32_t>::MappedType*>(input_data),
fdm_output_strides,
reinterpret_cast<ToCudaType<int32_t>::MappedType*>(output_data),
N);
break;
case sizeof(int64_t):
TransposeKernel<int64_t><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(
shape_rank, input_strides,
reinterpret_cast<const ToCudaType<int64_t>::MappedType*>(input_data),
fdm_output_strides,
reinterpret_cast<ToCudaType<int64_t>::MappedType*>(output_data),
N);
break;
default:
return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type not supported for transpose on CUDA. Element size was ",
element_size);
}
return Status::OK();
}
} // namespace cuda
} // namespace onnxruntime
|
01aa58d41bdbf4f21c1aeb830bc445b14372c671.hip | // !!! This is a file automatically generated by hipify!!!
#include "./c_runtime_api.h"
#include <cassert>
#include <cstdio>
#include <rocblas.h>
#include <hip/hip_runtime.h>
int divupround(int a, int b) {
if(a % b == 0) return a / b;
return a / b + 1;
}
int64_t totallength(DLArrayHandle array) {
int64_t length = 1;
for(int i = 0; i < array->ndim; i++) {
length *= array->shape[i];
}
return length;
}
/* TODO: Your code here */
/* all your GPU kernel code, e.g. matrix_softmax_cross_entropy_kernel */
// y = inputs[0], y_ = inputs[1]
// np.mean(-np.sum(y_ * np.log(softmax(y)), axis=1), keepdims=True)
__global__ void matrix_softmax_cross_entropy_kernel(int nrow, int ncol,
const float *input_a,
const float *input_b,
float *output) {
// Dynamic shared memory, size provided at kernel launch.
extern __shared__ float loss_per_row[];
// Two dimensional thread blocks.
int y = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x +
threadIdx.x;
if (y >= nrow) {
return;
}
input_a += y * ncol;
input_b += y * ncol;
float maxval = *input_a;
// Find max for a row.
for (int x = 1; x < ncol; ++x) {
maxval = max(maxval, input_a[x]);
}
// Deduct by max for a row, and raise to exp.
float sum = 0;
for (int x = 0; x < ncol; ++x) {
sum += exp(input_a[x] - maxval);
}
// Compute per-row loss.
float loss = 0;
for (int x = 0; x < ncol; ++x) {
loss -= input_b[x] * log(exp(input_a[x] - maxval) / sum);
}
loss_per_row[y] = loss;
__syncthreads();
// Compute reduce_mean across rows.
float mean_loss = 0;
// Use a single thread to reduce mean across rows.
if ((threadIdx.x == 0) && (threadIdx.y == 0)) {
for (int i = 0; i < nrow; ++i) {
mean_loss += loss_per_row[i];
}
mean_loss /= nrow;
output[0] = mean_loss;
}
}
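// Row-wise softmax, one thread per row; the row maximum is subtracted before
// exponentiation for numerical stability.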
__global__ void matrix_softmax_kernel(int nrow,
int ncol,
const float *input_data,
float *output_data){
// Two dimensional thread blocks.
int y = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x +
threadIdx.x;
if (y >= nrow) {
return;
}
input_data += y * ncol;
output_data += y * ncol;
float maxval = *input_data;
// Find max for a row.
for (int x = 1; x < ncol; ++x) {
maxval = max(maxval, input_data[x]);
}
// Deduct by max for a row, and raise to exp.
float sum = 0;
for (int x = 0; x < ncol; ++x) {
sum += exp(input_data[x] - maxval);
}
for (int x = 0; x < ncol; ++x) {
output_data[x] = exp(input_data[x] - maxval) / sum;
}
}
__global__ void relu_kernel(int64_t length,
const float *input_data,
float *output_data){
int y = blockIdx.x * blockDim.x + threadIdx.x;
if (y >= length){
return;
}
output_data[y] = max(0.0f, input_data[y]);
}
__global__ void relu_gradient_kernel(int64_t length,
const float *input_data,
const float *in_grad_data,
float *output_data){
int y = blockIdx.x * blockDim.x + threadIdx.x;
if (y >= length){
return;
}
output_data[y] = input_data[y]>0.0f? in_grad_data[y]:0.0f;
}
__global__ void array_set_kernel(int64_t length,
float *array_data,
float value){
int y = blockIdx.x * blockDim.x + threadIdx.x;
if (y >= length){
return;
}
array_data[y] = value;
}
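// Broadcast the input across a new leading dimension: blockIdx.y selects which output
// copy is written, blockIdx.x/threadIdx.x cover the input elements.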
__global__ void broadcast_kernel(int64_t length,
const float *input_data,
float *output_data){
output_data += length * blockIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
if(x < length) {
output_data[x] = input_data[x];
}
}
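// Sum over axis 0: each thread accumulates a partial sum over the reduced dimension with
// stride blockDim.y and atomically adds it into the output element, so the output must be
// zeroed before launch (DLGpuReduceSumAxisZero does this).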
__global__ void reduce_sum_axis_zero_kernel(int64_t output_length, int reduce_size, const float* input, float *output) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
if(x >= output_length) return;
float value = 0;
for(int i = threadIdx.y; i < reduce_size; i+= blockDim.y) {
value += input[i * output_length + x];
}
atomicAdd(output + x, value);
}
__global__ void matrix_add_kernel(int64_t length,
const float *matA_data,
const float *matB_data,
float *output_data){
int y = blockIdx.x * blockDim.x + threadIdx.x;
if (y >= length){
return;
}
output_data[y] = matA_data[y] + matB_data[y];
}
__global__ void matrix_add_by_const_kernel(int64_t length,
const float *input_data,
float val,
float *output_data){
int y = blockIdx.x * blockDim.x + threadIdx.x;
if (y >= length){
return;
}
output_data[y] = input_data[y] + val;
}
__global__ void matrix_mul_kernel(int64_t length,
const float *matA_data,
const float *matB_data,
float *output_data){
int y = blockIdx.x * blockDim.x + threadIdx.x;
if (y >= length){
return;
}
output_data[y] = matA_data[y] * matB_data[y];
}
__global__ void matrix_mul_by_const_kernel(int64_t length,
const float *input_data,
float val,
float *output_data){
int y = blockIdx.x * blockDim.x + threadIdx.x;
if (y >= length){
return;
}
output_data[y] = input_data[y] * val;
}
hipblasHandle_t cublas_handle = NULL;
int DLGpuArraySet(DLArrayHandle arr, float value) { /* TODO: Your code here */
int64_t length = totallength(arr);
float *array_data = (float *)arr->data;
dim3 DimGrid((length-1)/1024+1, 1, 1);
dim3 DimBlock(1024, 1, 1);
hipLaunchKernelGGL(( array_set_kernel), dim3(DimGrid), dim3(DimBlock), 0, 0, length, array_data, value);
return 0;
}
int DLGpuBroadcastTo(const DLArrayHandle input, DLArrayHandle output) {
/* TODO: Your code here */
int64_t length = totallength(input);
const float *input_data = (float *)input->data;
float *output_data = (float *)output->data;
dim3 threads;
if (length < 1024){
threads.x = (int)length;
}else{
threads.x = 1024;
threads.y = (int)((length + 1023)/1024);
}
hipLaunchKernelGGL(( broadcast_kernel), dim3(dim3(divupround(length, 1024), output->shape[0])), dim3(1024), 0, 0, length, input_data, output_data);
return 0;
}
int DLGpuReduceSumAxisZero(const DLArrayHandle input, DLArrayHandle output) {
/* TODO: Your code here */
DLGpuArraySet(output, 0);
int output_length = totallength(output);
hipLaunchKernelGGL(( reduce_sum_axis_zero_kernel), dim3(divupround(output_length, 64)), dim3(dim3(min(64, output_length), 16)), 0, 0, output_length, input->shape[0], (float*)input->data, (float*)output->data);
return 0;
}
int DLGpuMatrixElementwiseAdd(const DLArrayHandle matA,
const DLArrayHandle matB, DLArrayHandle output) {
/* TODO: Your code here */
int64_t length = totallength(output);
const float *matA_data = (const float *)matA->data;
const float *matB_data = (const float *)matB->data;
float *output_data = (float *)output->data;
dim3 DimGrid((length-1)/1024+1, 1, 1);
dim3 DimBlock(1024, 1, 1);
hipLaunchKernelGGL(( matrix_add_kernel), dim3(DimGrid), dim3(DimBlock), 0, 0, length, matA_data, matB_data, output_data);
return 0;
}
int DLGpuMatrixElementwiseAddByConst(const DLArrayHandle input, float val,
DLArrayHandle output) {
/* TODO: Your code here */
int64_t length = totallength(output);
const float *input_data = (const float *)input->data;
float *output_data = (float *)output->data;
dim3 DimGrid((length-1)/1024+1, 1, 1);
dim3 DimBlock(1024, 1, 1);
hipLaunchKernelGGL(( matrix_add_by_const_kernel), dim3(DimGrid), dim3(DimBlock), 0, 0, length, input_data, val, output_data);
return 0;
}
int DLGpuMatrixElementwiseMultiply(const DLArrayHandle matA,
const DLArrayHandle matB,
DLArrayHandle output) {
/* TODO: Your code here */
int64_t length = totallength(output);
const float *matA_data = (const float *)matA->data;
const float *matB_data = (const float *)matB->data;
float *output_data = (float *)output->data;
dim3 DimGrid((length-1)/1024+1, 1, 1);
dim3 DimBlock(1024, 1, 1);
hipLaunchKernelGGL(( matrix_mul_kernel), dim3(DimGrid), dim3(DimBlock), 0, 0, length, matA_data, matB_data, output_data);
return 0;
}
int DLGpuMatrixMultiplyByConst(const DLArrayHandle input, float val,
DLArrayHandle output) {
/* TODO: Your code here */
int64_t length = totallength(output);
const float *input_data = (const float *)input->data;
float *output_data = (float *)output->data;
dim3 DimGrid((length-1)/1024+1, 1, 1);
dim3 DimBlock(1024, 1, 1);
hipLaunchKernelGGL(( matrix_mul_by_const_kernel), dim3(DimGrid), dim3(DimBlock), 0, 0, length, input_data, val, output_data);
return 0;
}
int DLGpuMatrixMultiply(const DLArrayHandle matA, bool transposeA,
const DLArrayHandle matB, bool transposeB,
DLArrayHandle matC) {
/* TODO: Your code here */
// Hint: use cublas
// cublas assume matrix is column major
if(!cublas_handle) {
hipblasCreate(&cublas_handle);
}
float one = 1.0f;
float zero = 0.0f;
int m = matC->shape[1];
int n = matC->shape[0];
int k = transposeA ? matA->shape[0] : matA->shape[1];
hipblasSgemm(cublas_handle,
transposeB ? HIPBLAS_OP_T : HIPBLAS_OP_N,
transposeA ? HIPBLAS_OP_T : HIPBLAS_OP_N,
m, n, k,
&one,
(const float*)matB->data, !transposeB ? m : k,
(const float*)matA->data, !transposeA ? k : n,
&zero,
(float*)matC->data, m
);
return 0;
}
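/*
Note on the operand order above: cuBLAS/hipBLAS assumes column-major storage, while the
DLArray buffers here are row-major. A row-major matrix reinterpreted as column-major data
is its transpose, so computing C = op(A) * op(B) in row-major terms is the same as
computing C^T = op(B)^T * op(A)^T in column-major terms. Passing B first and A second,
with m = matC->shape[1] and n = matC->shape[0], therefore writes C directly in row-major
order; the leading dimensions are the stored row widths of B, A and C respectively.
*/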
int DLGpuRelu(const DLArrayHandle input, DLArrayHandle output) {
/* TODO: Your code here */
int64_t length = totallength(output);
const float *input_data = (const float *)input->data;
float *output_data = (float *)output->data;
dim3 DimGrid((length-1)/1024+1, 1, 1);
dim3 DimBlock(1024, 1, 1);
hipLaunchKernelGGL(( relu_kernel), dim3(DimGrid), dim3(DimBlock), 0, 0, length, input_data, output_data);
return 0;
}
int DLGpuReluGradient(const DLArrayHandle input, const DLArrayHandle in_grad,
DLArrayHandle output) {
/* TODO: Your code here */
int64_t length = totallength(output);
const float *input_data = (const float *)input->data;
const float *in_grad_data = (const float *)in_grad->data;
float *output_data = (float *)output->data;
dim3 DimGrid((length-1)/1024+1, 1, 1);
dim3 DimBlock(1024, 1, 1);
hipLaunchKernelGGL(( relu_gradient_kernel), dim3(DimGrid), dim3(DimBlock), 0, 0, length, input_data, in_grad_data, output_data);
return 0;
}
int DLGpuSoftmax(const DLArrayHandle input, DLArrayHandle output) {
/* TODO: Your code here */
//assert(input->ndim == 2);
//assert(output->ndim == 1);
int nrow = input->shape[0];
assert(nrow <= 1024 * 4);
int ncol = input->shape[1];
const float *input_data = (const float *)input->data;
float *output_data = (float *)output->data;
dim3 threads;
if (nrow < 1024){
threads.x = nrow;
}else{
threads.x = 1024;
threads.y = (nrow + 1023)/1024;
}
hipLaunchKernelGGL(( matrix_softmax_kernel), dim3(1), dim3(threads), nrow * sizeof(float), 0, nrow, ncol, input_data, output_data);
return 0;
}
int DLGpuSoftmaxCrossEntropy(const DLArrayHandle input_a,
const DLArrayHandle input_b,
DLArrayHandle output) {
assert(input_a->ndim == 2);
assert(input_b->ndim == 2);
assert(output->ndim == 1);
assert(input_a->shape[0] == input_b->shape[0] &&
input_a->shape[1] == input_b->shape[1]);
int nrow = input_a->shape[0];
// Maximum x- or y-dimension of a block = 1024
// But we need 'nrow' shared memory, and max shared memory is 48KB.
// Conservatively allow max 16KB shared memory.
assert(nrow <= 1024 * 4);
int ncol = input_a->shape[1];
const float *input_data_a = (const float *)input_a->data;
const float *input_data_b = (const float *)input_b->data;
float *output_data = (float *)output->data;
dim3 threads;
if (nrow <= 1024) {
threads.x = nrow;
} else {
threads.x = 1024;
threads.y = (nrow + 1023) / 1024;
}
// 1 block, each block with 'threads' number of threads with 'nrow' shared
// memory size
hipLaunchKernelGGL(( matrix_softmax_cross_entropy_kernel), dim3(1), dim3(threads), nrow * sizeof(float), 0,
nrow, ncol, input_data_a, input_data_b, output_data);
return 0;
}
| 01aa58d41bdbf4f21c1aeb830bc445b14372c671.cu | #include "./c_runtime_api.h"
#include <cassert>
#include <cstdio>
#include <cublas_v2.h>
#include <cuda_runtime.h>
int divupround(int a, int b) {
if(a % b == 0) return a / b;
return a / b + 1;
}
int64_t totallength(DLArrayHandle array) {
int64_t length = 1;
for(int i = 0; i < array->ndim; i++) {
length *= array->shape[i];
}
return length;
}
/* TODO: Your code here */
/* all your GPU kernel code, e.g. matrix_softmax_cross_entropy_kernel */
// y = inputs[0], y_ = inputs[1]
// np.mean(-np.sum(y_ * np.log(softmax(y)), axis=1), keepdims=True)
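// Numerical-stability note: the kernel below subtracts the per-row max before
// exponentiating, and log(exp(a - maxval) / sum) == (a - maxval) - log(sum).
// A sketch of an equivalent (and slightly cheaper) inner update, assuming the same
// maxval and sum as computed below:
// loss -= input_b[x] * ((input_a[x] - maxval) - log(sum));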
__global__ void matrix_softmax_cross_entropy_kernel(int nrow, int ncol,
const float *input_a,
const float *input_b,
float *output) {
// Dynamic shared memory, size provided at kernel launch.
extern __shared__ float loss_per_row[];
// Two dimensional thread blocks.
int y = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x +
threadIdx.x;
if (y >= nrow) {
return;
}
input_a += y * ncol;
input_b += y * ncol;
float maxval = *input_a;
// Find max for a row.
for (int x = 1; x < ncol; ++x) {
maxval = max(maxval, input_a[x]);
}
// Deduct by max for a row, and raise to exp.
float sum = 0;
for (int x = 0; x < ncol; ++x) {
sum += exp(input_a[x] - maxval);
}
// Compute per-row loss.
float loss = 0;
for (int x = 0; x < ncol; ++x) {
loss -= input_b[x] * log(exp(input_a[x] - maxval) / sum);
}
loss_per_row[y] = loss;
__syncthreads();
// Compute reduce_mean across rows.
float mean_loss = 0;
// Use a single thread to reduce mean across rows.
if ((threadIdx.x == 0) && (threadIdx.y == 0)) {
for (int i = 0; i < nrow; ++i) {
mean_loss += loss_per_row[i];
}
mean_loss /= nrow;
output[0] = mean_loss;
}
}
__global__ void matrix_softmax_kernel(int nrow,
int ncol,
const float *input_data,
float *output_data){
// Two dimensional thread blocks.
int y = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x +
threadIdx.x;
if (y >= nrow) {
return;
}
input_data += y * ncol;
output_data += y * ncol;
float maxval = *input_data;
// Find max for a row.
for (int x = 1; x < ncol; ++x) {
maxval = max(maxval, input_data[x]);
}
// Deduct by max for a row, and raise to exp.
float sum = 0;
for (int x = 0; x < ncol; ++x) {
sum += exp(input_data[x] - maxval);
}
for (int x = 0; x < ncol; ++x) {
output_data[x] = exp(input_data[x] - maxval) / sum;
}
}
__global__ void relu_kernel(int64_t length,
const float *input_data,
float *output_data){
int y = blockIdx.x * blockDim.x + threadIdx.x;
if (y >= length){
return;
}
output_data[y] = max(0.0f, input_data[y]);
}
__global__ void relu_gradient_kernel(int64_t length,
const float *input_data,
const float *in_grad_data,
float *output_data){
int y = blockIdx.x * blockDim.x + threadIdx.x;
if (y >= length){
return;
}
output_data[y] = input_data[y]>0.0f? in_grad_data[y]:0.0f;
}
__global__ void array_set_kernel(int64_t length,
float *array_data,
float value){
int y = blockIdx.x * blockDim.x + threadIdx.x;
if (y >= length){
return;
}
array_data[y] = value;
}
__global__ void broadcast_kernel(int64_t length,
const float *input_data,
float *output_data){
output_data += length * blockIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
if(x < length) {
output_data[x] = input_data[x];
}
}
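// Reduces the leading axis of a (reduce_size x output_length) row-major array: each
// thread handles one output column, the block's y-dimension threads each accumulate a
// strided partial sum over the rows, and the partials are combined into the
// zero-initialised output via atomicAdd.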
__global__ void reduce_sum_axis_zero_kernel(int64_t output_length, int reduce_size, const float* input, float *output) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
if(x >= output_length) return;
float value = 0;
for(int i = threadIdx.y; i < reduce_size; i+= blockDim.y) {
value += input[i * output_length + x];
}
atomicAdd(output + x, value);
}
__global__ void matrix_add_kernel(int64_t length,
const float *matA_data,
const float *matB_data,
float *output_data){
int y = blockIdx.x * blockDim.x + threadIdx.x;
if (y >= length){
return;
}
output_data[y] = matA_data[y] + matB_data[y];
}
__global__ void matrix_add_by_const_kernel(int64_t length,
const float *input_data,
float val,
float *output_data){
int y = blockIdx.x * blockDim.x + threadIdx.x;
if (y >= length){
return;
}
output_data[y] = input_data[y] + val;
}
__global__ void matrix_mul_kernel(int64_t length,
const float *matA_data,
const float *matB_data,
float *output_data){
int y = blockIdx.x * blockDim.x + threadIdx.x;
if (y >= length){
return;
}
output_data[y] = matA_data[y] * matB_data[y];
}
__global__ void matrix_mul_by_const_kernel(int64_t length,
const float *input_data,
float val,
float *output_data){
int y = blockIdx.x * blockDim.x + threadIdx.x;
if (y >= length){
return;
}
output_data[y] = input_data[y] * val;
}
cublasHandle_t cublas_handle = NULL;
int DLGpuArraySet(DLArrayHandle arr, float value) { /* TODO: Your code here */
int64_t length = totallength(arr);
float *array_data = (float *)arr->data;
dim3 DimGrid((length-1)/1024+1, 1, 1);
dim3 DimBlock(1024, 1, 1);
array_set_kernel<<<DimGrid, DimBlock>>>(length, array_data, value);
return 0;
}
int DLGpuBroadcastTo(const DLArrayHandle input, DLArrayHandle output) {
/* TODO: Your code here */
int64_t length = totallength(input);
const float *input_data = (float *)input->data;
float *output_data = (float *)output->data;
dim3 threads;
if (length < 1024){
threads.x = (int)length;
}else{
threads.x = 1024;
threads.y = (int)((length + 1023)/1024);
}
broadcast_kernel<<<dim3(divupround(length, 1024), output->shape[0]), 1024>>>(length, input_data, output_data);
return 0;
}
int DLGpuReduceSumAxisZero(const DLArrayHandle input, DLArrayHandle output) {
/* TODO: Your code here */
DLGpuArraySet(output, 0);
int output_length = totallength(output);
reduce_sum_axis_zero_kernel<<<divupround(output_length, 64), dim3(min(64, output_length), 16)>>>(output_length, input->shape[0], (float*)input->data, (float*)output->data);
return 0;
}
int DLGpuMatrixElementwiseAdd(const DLArrayHandle matA,
const DLArrayHandle matB, DLArrayHandle output) {
/* TODO: Your code here */
int64_t length = totallength(output);
const float *matA_data = (const float *)matA->data;
const float *matB_data = (const float *)matB->data;
float *output_data = (float *)output->data;
dim3 DimGrid((length-1)/1024+1, 1, 1);
dim3 DimBlock(1024, 1, 1);
matrix_add_kernel<<<DimGrid, DimBlock>>>(length, matA_data, matB_data, output_data);
return 0;
}
int DLGpuMatrixElementwiseAddByConst(const DLArrayHandle input, float val,
DLArrayHandle output) {
/* TODO: Your code here */
int64_t length = totallength(output);
const float *input_data = (const float *)input->data;
float *output_data = (float *)output->data;
dim3 DimGrid((length-1)/1024+1, 1, 1);
dim3 DimBlock(1024, 1, 1);
matrix_add_by_const_kernel<<<DimGrid, DimBlock>>>(length, input_data, val, output_data);
return 0;
}
int DLGpuMatrixElementwiseMultiply(const DLArrayHandle matA,
const DLArrayHandle matB,
DLArrayHandle output) {
/* TODO: Your code here */
int64_t length = totallength(output);
const float *matA_data = (const float *)matA->data;
const float *matB_data = (const float *)matB->data;
float *output_data = (float *)output->data;
dim3 DimGrid((length-1)/1024+1, 1, 1);
dim3 DimBlock(1024, 1, 1);
matrix_mul_kernel<<<DimGrid, DimBlock>>>(length, matA_data, matB_data, output_data);
return 0;
}
int DLGpuMatrixMultiplyByConst(const DLArrayHandle input, float val,
DLArrayHandle output) {
/* TODO: Your code here */
int64_t length = totallength(output);
const float *input_data = (const float *)input->data;
float *output_data = (float *)output->data;
dim3 DimGrid((length-1)/1024+1, 1, 1);
dim3 DimBlock(1024, 1, 1);
matrix_mul_by_const_kernel<<<DimGrid, DimBlock>>>(length, input_data, val, output_data);
return 0;
}
int DLGpuMatrixMultiply(const DLArrayHandle matA, bool transposeA,
const DLArrayHandle matB, bool transposeB,
DLArrayHandle matC) {
/* TODO: Your code here */
// Hint: use cublas
// cublas assume matrix is column major
if(!cublas_handle) {
cublasCreate(&cublas_handle);
}
float one = 1.0f;
float zero = 0.0f;
int m = matC->shape[1];
int n = matC->shape[0];
int k = transposeA ? matA->shape[0] : matA->shape[1];
cublasSgemm(cublas_handle,
transposeB ? CUBLAS_OP_T : CUBLAS_OP_N,
transposeA ? CUBLAS_OP_T : CUBLAS_OP_N,
m, n, k,
&one,
(const float*)matB->data, !transposeB ? m : k,
(const float*)matA->data, !transposeA ? k : n,
&zero,
(float*)matC->data, m
);
return 0;
}
int DLGpuRelu(const DLArrayHandle input, DLArrayHandle output) {
/* TODO: Your code here */
int64_t length = totallength(output);
const float *input_data = (const float *)input->data;
float *output_data = (float *)output->data;
dim3 DimGrid((length-1)/1024+1, 1, 1);
dim3 DimBlock(1024, 1, 1);
relu_kernel<<<DimGrid, DimBlock>>>(length, input_data, output_data);
return 0;
}
int DLGpuReluGradient(const DLArrayHandle input, const DLArrayHandle in_grad,
DLArrayHandle output) {
/* TODO: Your code here */
int64_t length = totallength(output);
const float *input_data = (const float *)input->data;
const float *in_grad_data = (const float *)in_grad->data;
float *output_data = (float *)output->data;
dim3 DimGrid((length-1)/1024+1, 1, 1);
dim3 DimBlock(1024, 1, 1);
relu_gradient_kernel<<<DimGrid, DimBlock>>>(length, input_data, in_grad_data, output_data);
return 0;
}
int DLGpuSoftmax(const DLArrayHandle input, DLArrayHandle output) {
/* TODO: Your code here */
//assert(input->ndim == 2);
//assert(output->ndim == 1);
int nrow = input->shape[0];
assert(nrow <= 1024 * 4);
int ncol = input->shape[1];
const float *input_data = (const float *)input->data;
float *output_data = (float *)output->data;
dim3 threads;
if (nrow < 1024){
threads.x = nrow;
}else{
threads.x = 1024;
threads.y = (nrow + 1023)/1024;
}
matrix_softmax_kernel<<<1, threads, nrow * sizeof(float)>>>(nrow, ncol, input_data, output_data);
return 0;
}
int DLGpuSoftmaxCrossEntropy(const DLArrayHandle input_a,
const DLArrayHandle input_b,
DLArrayHandle output) {
assert(input_a->ndim == 2);
assert(input_b->ndim == 2);
assert(output->ndim == 1);
assert(input_a->shape[0] == input_b->shape[0] &&
input_a->shape[1] == input_b->shape[1]);
int nrow = input_a->shape[0];
// Maximum x- or y-dimension of a block = 1024
// But we need 'nrow' shared memory, and max shared memory is 48KB.
// Conservatively allow max 16KB shared memory.
assert(nrow <= 1024 * 4);
int ncol = input_a->shape[1];
const float *input_data_a = (const float *)input_a->data;
const float *input_data_b = (const float *)input_b->data;
float *output_data = (float *)output->data;
dim3 threads;
if (nrow <= 1024) {
threads.x = nrow;
} else {
threads.x = 1024;
threads.y = (nrow + 1023) / 1024;
}
// 1 block, each block with 'threads' number of threads with 'nrow' shared
// memory size
matrix_softmax_cross_entropy_kernel<<<1, threads, nrow * sizeof(float)>>>(
nrow, ncol, input_data_a, input_data_b, output_data);
return 0;
}
|
ba7ab1eba52cbe5e86b9fa57885a734bcbfd21d6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// We assume that NUM_ELEMENTS is divisible by BLOCK_SIZE
#define RADIUS 3
#define BLOCK_SIZE 256
#define NUM_ELEMENTS (4096*2)
// CUDA API error checking macro
static void handleError(hipError_t err,
const char *file,
int line ) {
if (err != hipSuccess) {
printf("%s in %s at line %d\n", hipGetErrorString(err),
file, line );
exit(EXIT_FAILURE);
}
}
#define cudaCheck( err ) (handleError(err, __FILE__, __LINE__ ))
__global__ void stencil_1d(int *in, int *out)
{
// blockDim is a built-in 3-component vector holding the dimensions of a thread block
// global index of this thread across all blocks, offset by RADIUS to skip the left halo
int gindex = threadIdx.x + (blockIdx.x * blockDim.x) + RADIUS;
int result = 0;
for (int offset = -RADIUS ; offset <= RADIUS ; offset++)
result += in[gindex + offset];
// Store the result
out[gindex - RADIUS] = result;
}
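// Sketch of a shared-memory variant of the same stencil, shown for comparison only.
// It assumes the same halo layout (RADIUS padding on both ends of the input), a launch
// with exactly BLOCK_SIZE threads per block as in main() below, and that NUM_ELEMENTS
// is a multiple of BLOCK_SIZE, as stated at the top of the file.
__global__ void stencil_1d_shared(int *in, int *out)
{
// Stage this block's elements plus halos into shared memory once.
__shared__ int tile[BLOCK_SIZE + 2 * RADIUS];
int gindex = threadIdx.x + (blockIdx.x * blockDim.x) + RADIUS;
int lindex = threadIdx.x + RADIUS;
tile[lindex] = in[gindex];
// The first RADIUS threads also stage the left and right halo elements.
if (threadIdx.x < RADIUS) {
tile[lindex - RADIUS] = in[gindex - RADIUS];
tile[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE];
}
__syncthreads();
// Same sum as stencil_1d, but reading from the shared tile.
int result = 0;
for (int offset = -RADIUS ; offset <= RADIUS ; offset++)
result += tile[lindex + offset];
out[gindex - RADIUS] = result;
}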
int main()
{
unsigned int i;
// vectors stored in the CPU memory - can be used from host code only
int h_in[NUM_ELEMENTS + 2 * RADIUS], h_out[NUM_ELEMENTS];
// vectors that will be stored in the device memory - can be dereferenced
// only in kernel code
int *d_in, *d_out;
// Initialize host data
for( i = 0; i < (NUM_ELEMENTS + 2*RADIUS); ++i )
h_in[i] = 1; // With a value of 1 and RADIUS of 3, all output values should be 7
// Allocate space on the device
// hipMalloc is equivalent of malloc
cudaCheck( hipMalloc( &d_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int)) );
cudaCheck( hipMalloc( &d_out, NUM_ELEMENTS * sizeof(int)) );
// Copy input data to device
cudaCheck( hipMemcpy( d_in, h_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int),
hipMemcpyHostToDevice) );
// Call kernels
hipLaunchKernelGGL(( stencil_1d), dim3((NUM_ELEMENTS + BLOCK_SIZE - 1)/BLOCK_SIZE), dim3(BLOCK_SIZE) , 0, 0, d_in, d_out);
// Check errors from launching the kernel
cudaCheck(hipPeekAtLastError());
// Copy results from device memory to host
cudaCheck( hipMemcpy( h_out, d_out, NUM_ELEMENTS * sizeof(int),
hipMemcpyDeviceToHost) );
// Verify every out value is 7
for( i = 0; i < NUM_ELEMENTS; ++i )
if (h_out[i] != 7)
{
printf("Element h_out[%d] == %d != 7\n", i, h_out[i]);
break;
}
if (i == NUM_ELEMENTS)
printf("SUCCESS!\n");
// Free device memory
hipFree(d_in);
hipFree(d_out);
return 0;
}
| ba7ab1eba52cbe5e86b9fa57885a734bcbfd21d6.cu | #include <stdio.h>
// We assume that NUM_ELEMENTS is divisible by BLOCK_SIZE
#define RADIUS 3
#define BLOCK_SIZE 256
#define NUM_ELEMENTS (4096*2)
// CUDA API error checking macro
static void handleError(cudaError_t err,
const char *file,
int line ) {
if (err != cudaSuccess) {
printf("%s in %s at line %d\n", cudaGetErrorString(err),
file, line );
exit(EXIT_FAILURE);
}
}
#define cudaCheck( err ) (handleError(err, __FILE__, __LINE__ ))
__global__ void stencil_1d(int *in, int *out)
{
// blockDim is a built-in 3-component vector holding the dimensions of a thread block
// global index of this thread across all blocks, offset by RADIUS to skip the left halo
int gindex = threadIdx.x + (blockIdx.x * blockDim.x) + RADIUS;
int result = 0;
for (int offset = -RADIUS ; offset <= RADIUS ; offset++)
result += in[gindex + offset];
// Store the result
out[gindex - RADIUS] = result;
}
int main()
{
unsigned int i;
// vectors stored in the CPU memory - can be used from host code only
int h_in[NUM_ELEMENTS + 2 * RADIUS], h_out[NUM_ELEMENTS];
// vectors that will be stored in the device memory - can be dereferenced
// only in kernel code
int *d_in, *d_out;
// Initialize host data
for( i = 0; i < (NUM_ELEMENTS + 2*RADIUS); ++i )
h_in[i] = 1; // With a value of 1 and RADIUS of 3, all output values should be 7
// Allocate space on the device
// cudaMalloc is equivalent of malloc
cudaCheck( cudaMalloc( &d_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int)) );
cudaCheck( cudaMalloc( &d_out, NUM_ELEMENTS * sizeof(int)) );
// Copy input data to device
cudaCheck( cudaMemcpy( d_in, h_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int),
cudaMemcpyHostToDevice) );
// Call kernels
stencil_1d<<< (NUM_ELEMENTS + BLOCK_SIZE - 1)/BLOCK_SIZE, BLOCK_SIZE >>> (d_in, d_out);
// Check errors from launching the kernel
cudaCheck(cudaPeekAtLastError());
// Copy results from device memory to host
cudaCheck( cudaMemcpy( h_out, d_out, NUM_ELEMENTS * sizeof(int),
cudaMemcpyDeviceToHost) );
// Verify every out value is 7
for( i = 0; i < NUM_ELEMENTS; ++i )
if (h_out[i] != 7)
{
printf("Element h_out[%d] == %d != 7\n", i, h_out[i]);
break;
}
if (i == NUM_ELEMENTS)
printf("SUCCESS!\n");
// Free device memory
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
87efecfc1081042f49d009509be1ebc0e7f7ae3e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHDeviceUtils.cuh>
#include <vector>
#include <iostream>
#include <algorithm>
__global__ void assign_box_kernel(const long *label_cls, const float *label_box,
const float *locs, float *output,
const int im_h, const int im_w, const int ph, const int pw,
const int tlbr_max_min, const int tlbr_max_max, const int r,
const int n_max)
{
int i;
int b_i = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int phXpw_i = by*1024 + tx;
if (phXpw_i >= (ph*pw)) return;
int ph_i = phXpw_i / pw;
int pw_i = phXpw_i % pw;
int hs = (im_h-1) / (ph-1);
int ws = (im_w-1) / (pw-1);
int out_base = b_i*ph*pw*5 + ph_i*pw*5 + pw_i*5;
// to cyxyx
__shared__ float cyxyx[200][5];
__shared__ float shared_locs[4];
int b_i_X_4 = b_i*4;
int b_i_X_n_max = b_i*n_max;
int b_i_X_n_max_X_4 = b_i_X_n_max*4;
if(tx==0) {
for (i=0; i<n_max; i++) {
cyxyx[i][0] = (float)label_cls[b_i_X_n_max + i];
cyxyx[i][1] = label_box[b_i_X_n_max_X_4 + i*4 + 0];//ymin
cyxyx[i][2] = label_box[b_i_X_n_max_X_4 + i*4 + 1];//xmin
cyxyx[i][3] = label_box[b_i_X_n_max_X_4 + i*4 + 2];//ymax
cyxyx[i][4] = label_box[b_i_X_n_max_X_4 + i*4 + 3];//xmax
}
shared_locs[0] = locs[b_i_X_4 + 0];
shared_locs[1] = locs[b_i_X_4 + 1];
shared_locs[2] = locs[b_i_X_4 + 2];
shared_locs[3] = locs[b_i_X_4 + 3];
}
__syncthreads();
float center_y = ph_i * hs;
float center_x = pw_i * ws;
if ((center_y<shared_locs[0]) || (center_y>shared_locs[2]) ||
(center_x<shared_locs[1]) || (center_x>shared_locs[3])) {
output[out_base + 0] = -1;
return;
}
float center_offset_max_2 = r*r;
float ymin, xmin, ymax, xmax, top, left, bottom, right;
float cy, cx, dist2, max_tlbr, bxa;
float cls, pred_ymin, pred_xmin, pred_ymax, pred_xmax;
float pred_c=-10, pred_area=99999999;
for (i=0; i<n_max; i++) {
cls = cyxyx[i][0];
ymin = cyxyx[i][1];
xmin = cyxyx[i][2];
ymax = cyxyx[i][3];
xmax = cyxyx[i][4];
top = center_y - ymin;
bottom = ymax - center_y;
left = center_x - xmin;
right = xmax - center_x;
cy = (ymin + ymax) / 2.0;
cx = (xmin + xmax) / 2.0;
bxa = (ymax - ymin)*(xmax - xmin);
dist2 = (center_y - cy) * (center_y - cy) + (center_x - cx) * (center_x - cx);
max_tlbr = max(top, max(left, max(bottom, right)));
if ((cls>0) && (top>0) && (bottom>0) && (left>0) && (right>0) &&
(dist2<center_offset_max_2) &&
(max_tlbr>tlbr_max_min) && (max_tlbr<tlbr_max_max) &&
(bxa<=pred_area)) {
pred_area = bxa;
pred_c = cls;
pred_ymin = ymin;
pred_xmin = xmin;
pred_ymax = ymax;
pred_xmax = xmax;
}
}
if (pred_c > -1) {
output[out_base + 0] = pred_c;
output[out_base + 1] = pred_ymin;
output[out_base + 2] = pred_xmin;
output[out_base + 3] = pred_ymax;
output[out_base + 4] = pred_xmax;
}
}
at::Tensor assign_box_cuda(const at::Tensor &label_cls, const at::Tensor &label_box,
const at::Tensor &locs,
const int im_h, const int im_w, const int ph, const int pw,
const int tlbr_max_min, const int tlbr_max_max, const int r)
{
/*
GPU >= 6.1
Param:
label_cls: L(b, n_max) 0:bg 1~:fg, 0pad
label_box: F(b, n_max, 4) ymin, xmin, ymax, xmax, 0:pad
locs: F(b, 4) ymin, xmin, ymax, xmax
im_h = 1025
im_w = 1025
ph = 129
pw = 129
tlbr_max_min = 5
tlbr_max_max = 65
r = 12
Return:
target_cls: L(b, ph, pw) -1:ign 0:bg 1~:fg
target_box: F(b, ph, pw, 4) ymin, xmin, ymax, xmax
-> F(b, ph, pw, 1 + 4)
Note:
n_max <= 200
*/
const int b = label_cls.size(0);
const int n_max = label_cls.size(1);
auto output = at::zeros({b, ph, pw, 1 + 4}, label_box.options());
if (output.numel() == 0) {
THCudaCheck(hipGetLastError());
return output;
}
dim3 grid(b, ph*pw/1024+1), block(1024);
hipLaunchKernelGGL(( assign_box_kernel), dim3(grid), dim3(block), 0, 0,
static_cast<long*>(label_cls.contiguous().data_ptr()),
label_box.contiguous().data<float>(),
locs.contiguous().data<float>(),
output.contiguous().data<float>(),
im_h, im_w, ph, pw,
tlbr_max_min, tlbr_max_max, r, n_max);
THCudaCheck(hipGetLastError());
return output;
}
__global__ void smooth_kernel(const float *target_cls, float *output,
const int ph, const int pw)
{
int i, j;
int b_i = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int phXpw_i = by*1024 + tx;
if (phXpw_i >= (ph*pw)) return;
int ph_i = phXpw_i / pw;
int pw_i = phXpw_i % pw;
int base = b_i*ph*pw + ph_i*pw + pw_i;
int b_base = b_i*ph*pw;
int ptr;
float val = target_cls[base];
if (val == 0) {
for(i=ph_i-1; i<=ph_i+1; i++) {
for(j=pw_i-1; j<=pw_i+1; j++) {
ptr = b_base + i*pw + j;
if ((i>=0) && (j>=0) && (i<ph) && (j<pw)) {
if(target_cls[ptr] > 0) val = -1;
}
}
}
}
output[base] = val;
}
at::Tensor smooth_cuda(const at::Tensor &target_cls)
{
// target_cls: F(b, ph, pw) -1:ign 0:bg 1~:fg
const int b = target_cls.size(0);
const int ph = target_cls.size(1);
const int pw = target_cls.size(2);
auto output = at::zeros({b, ph, pw}, target_cls.options());
if (output.numel() == 0) {
THCudaCheck(hipGetLastError());
return output;
}
dim3 grid(b, ph*pw/1024+1), block(1024);
hipLaunchKernelGGL(( smooth_kernel), dim3(grid), dim3(block), 0, 0,
target_cls.contiguous().data<float>(),
output.contiguous().data<float>(),
ph, pw);
THCudaCheck(hipGetLastError());
return output;
}
| 87efecfc1081042f49d009509be1ebc0e7f7ae3e.cu | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCDeviceUtils.cuh>
#include <vector>
#include <iostream>
#include <algorithm>
__global__ void assign_box_kernel(const long *label_cls, const float *label_box,
const float *locs, float *output,
const int im_h, const int im_w, const int ph, const int pw,
const int tlbr_max_min, const int tlbr_max_max, const int r,
const int n_max)
{
int i;
int b_i = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int phXpw_i = by*1024 + tx;
if (phXpw_i >= (ph*pw)) return;
int ph_i = phXpw_i / pw;
int pw_i = phXpw_i % pw;
int hs = (im_h-1) / (ph-1);
int ws = (im_w-1) / (pw-1);
int out_base = b_i*ph*pw*5 + ph_i*pw*5 + pw_i*5;
// to cyxyx
__shared__ float cyxyx[200][5];
__shared__ float shared_locs[4];
int b_i_X_4 = b_i*4;
int b_i_X_n_max = b_i*n_max;
int b_i_X_n_max_X_4 = b_i_X_n_max*4;
if(tx==0) {
for (i=0; i<n_max; i++) {
cyxyx[i][0] = (float)label_cls[b_i_X_n_max + i];
cyxyx[i][1] = label_box[b_i_X_n_max_X_4 + i*4 + 0];//ymin
cyxyx[i][2] = label_box[b_i_X_n_max_X_4 + i*4 + 1];//xmin
cyxyx[i][3] = label_box[b_i_X_n_max_X_4 + i*4 + 2];//ymax
cyxyx[i][4] = label_box[b_i_X_n_max_X_4 + i*4 + 3];//xmax
}
shared_locs[0] = locs[b_i_X_4 + 0];
shared_locs[1] = locs[b_i_X_4 + 1];
shared_locs[2] = locs[b_i_X_4 + 2];
shared_locs[3] = locs[b_i_X_4 + 3];
}
__syncthreads();
float center_y = ph_i * hs;
float center_x = pw_i * ws;
if ((center_y<shared_locs[0]) || (center_y>shared_locs[2]) ||
(center_x<shared_locs[1]) || (center_x>shared_locs[3])) {
output[out_base + 0] = -1;
return;
}
float center_offset_max_2 = r*r;
float ymin, xmin, ymax, xmax, top, left, bottom, right;
float cy, cx, dist2, max_tlbr, bxa;
float cls, pred_ymin, pred_xmin, pred_ymax, pred_xmax;
float pred_c=-10, pred_area=99999999;
for (i=0; i<n_max; i++) {
cls = cyxyx[i][0];
ymin = cyxyx[i][1];
xmin = cyxyx[i][2];
ymax = cyxyx[i][3];
xmax = cyxyx[i][4];
top = center_y - ymin;
bottom = ymax - center_y;
left = center_x - xmin;
right = xmax - center_x;
cy = (ymin + ymax) / 2.0;
cx = (xmin + xmax) / 2.0;
bxa = (ymax - ymin)*(xmax - xmin);
dist2 = (center_y - cy) * (center_y - cy) + (center_x - cx) * (center_x - cx);
max_tlbr = max(top, max(left, max(bottom, right)));
if ((cls>0) && (top>0) && (bottom>0) && (left>0) && (right>0) &&
(dist2<center_offset_max_2) &&
(max_tlbr>tlbr_max_min) && (max_tlbr<tlbr_max_max) &&
(bxa<=pred_area)) {
pred_area = bxa;
pred_c = cls;
pred_ymin = ymin;
pred_xmin = xmin;
pred_ymax = ymax;
pred_xmax = xmax;
}
}
if (pred_c > -1) {
output[out_base + 0] = pred_c;
output[out_base + 1] = pred_ymin;
output[out_base + 2] = pred_xmin;
output[out_base + 3] = pred_ymax;
output[out_base + 4] = pred_xmax;
}
}
at::Tensor assign_box_cuda(const at::Tensor &label_cls, const at::Tensor &label_box,
const at::Tensor &locs,
const int im_h, const int im_w, const int ph, const int pw,
const int tlbr_max_min, const int tlbr_max_max, const int r)
{
/*
GPU >= 6.1
Param:
label_cls: L(b, n_max) 0:bg 1~:fg, 0pad
label_box: F(b, n_max, 4) ymin, xmin, ymax, xmax, 0:pad
locs: F(b, 4) ymin, xmin, ymax, xmax
im_h = 1025
im_w = 1025
ph = 129
pw = 129
tlbr_max_min = 5
tlbr_max_max = 65
r = 12
Return:
target_cls: L(b, ph, pw) -1:ign 0:bg 1~:fg
target_box: F(b, ph, pw, 4) ymin, xmin, ymax, xmax
-> F(b, ph, pw, 1 + 4)
Note:
n_max <= 200
*/
const int b = label_cls.size(0);
const int n_max = label_cls.size(1);
auto output = at::zeros({b, ph, pw, 1 + 4}, label_box.options());
if (output.numel() == 0) {
THCudaCheck(cudaGetLastError());
return output;
}
dim3 grid(b, ph*pw/1024+1), block(1024);
assign_box_kernel<<<grid, block>>>(
static_cast<long*>(label_cls.contiguous().data_ptr()),
label_box.contiguous().data<float>(),
locs.contiguous().data<float>(),
output.contiguous().data<float>(),
im_h, im_w, ph, pw,
tlbr_max_min, tlbr_max_max, r, n_max);
THCudaCheck(cudaGetLastError());
return output;
}
__global__ void smooth_kernel(const float *target_cls, float *output,
const int ph, const int pw)
{
int i, j;
int b_i = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int phXpw_i = by*1024 + tx;
if (phXpw_i >= (ph*pw)) return;
int ph_i = phXpw_i / pw;
int pw_i = phXpw_i % pw;
int base = b_i*ph*pw + ph_i*pw + pw_i;
int b_base = b_i*ph*pw;
int ptr;
float val = target_cls[base];
if (val == 0) {
for(i=ph_i-1; i<=ph_i+1; i++) {
for(j=pw_i-1; j<=pw_i+1; j++) {
ptr = b_base + i*pw + j;
if ((i>=0) && (j>=0) && (i<ph) && (j<pw)) {
if(target_cls[ptr] > 0) val = -1;
}
}
}
}
output[base] = val;
}
at::Tensor smooth_cuda(const at::Tensor &target_cls)
{
// target_cls: F(b, ph, pw) -1:ign 0:bg 1~:fg
const int b = target_cls.size(0);
const int ph = target_cls.size(1);
const int pw = target_cls.size(2);
auto output = at::zeros({b, ph, pw}, target_cls.options());
if (output.numel() == 0) {
THCudaCheck(cudaGetLastError());
return output;
}
dim3 grid(b, ph*pw/1024+1), block(1024);
smooth_kernel<<<grid, block>>>(
target_cls.contiguous().data<float>(),
output.contiguous().data<float>(),
ph, pw);
THCudaCheck(cudaGetLastError());
return output;
}
|
b06ebc025244d23bb439effb5b9175f7f7e74f01.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
% Function: deinterleaver
% Description: Deinterleaves ULSCH data from RI and ACK control information
% Inputs: input_h Input bits
% N_ri_bits Number of RI control bits to deinterleave
% N_ack_bits Number of ACK control bits to deinterleave
% N_l Number of layers
% Qm Number of bits per modulation symbol
% ri_h RI control bits to interleave
% ack_h ACK control bits to interleave
% Outputs: *output_h Output bits
% *ri_h Deinterleaved RI control bits
% *ack_h Deinterleaved ACK control bits
By: Ahmad Nour
*/
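// Note on the scratch buffers y_idx_d and y_mat_d passed to deinterleaver() below: they
// are not listed above. From the kernel indexing they need to hold at least
// N / (Qm * N_l) bytes (C_mux * R_prime_mux entries) and N bytes (C_mux * R_mux entries)
// respectively; these sizes are inferred from the code rather than documented.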
#include "deinterleaver_hip.cuh"
__global__ void initializeMatricies(Byte* y_idx_d, Byte* y_mat_d, int N_idx, int N_mat)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
//initialize Matricies
//Not to run more threads than available data
if (idx >= N_mat)
return;
if (idx < N_idx)
{
y_idx_d[idx] = 100;
y_mat_d[idx] = 0;
}
else
{
y_mat_d[idx] = 0;
}
}
__global__ void deinterleaveRI(Byte* y_idx_d, Byte* y_mat_d, Byte* ri_d, int R_prime_mux, int N_ri_bits)
{
int col = threadIdx.x;
int row = blockIdx.y;
int idx = row * blockDim.x + col;
int C_mux = 12;
int Ncol = blockDim.x;
//Not to run more threads than available data
if (row >= N_ri_bits)
return;
Byte ri_column_set[4] = { 1, 10, 7, 4 };
//Byte ack_column_set[4] = { 2, 9, 8, 3 };
int r = R_prime_mux - 1 - (row / 4);
int C_ri = ri_column_set[(row % 4)];
y_idx_d[r*C_mux + C_ri] = 1;
ri_d[row * Ncol + col] = y_mat_d[C_mux*r*Ncol + C_ri*Ncol + col];
}
__global__ void deinterleaveData(Byte* y_idx_d, Byte* y_mat_d, Byte* output_d, int numThreads, int H_prime_total, int N_ri_bits, int Qm, int N_l)
{
const int Ncol = blockDim.x; //Total number of columns
int col = threadIdx.x;
int row = blockDim.y * blockIdx.y + threadIdx.y;
int idx = row * Ncol + col;
const int C_mux = 12;
//printf("if %d > %d", row, numThreads);
//Not to run more threads than available data
if (row >= numThreads)
return;
int firstRI_row = H_prime_total - (N_ri_bits * 3); // The original eqn:
// firstRI_row = ((H_prime_total/12) - (N_ri_bits / 4))*12
if (row < firstRI_row) //No RI bits in this range
{
y_idx_d[row] = 1;
output_d[row * (Qm*N_l) + col] = y_mat_d[row*(Qm*N_l) + col];
}
else
{
/*
Now, we reshape the matrix to be of (12 cols):
idx 0 1 2 3 4 5 6 7 8 9 10 11
Data can be put? (No RI) yes no yes yes no yes yes no yes yes no yes
So, to map the data to indices where no RI bits exist, this equation is applied:
col = col + (col / 2) + (col % 2);
*/
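// For example, old_mapping values 0,1,2,3,4,5 map to new_mapping 0,2,3,5,6,8: indices
// 1, 4, 7, 10, ... are skipped because they hold RI bits, matching the table above.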
int old_mapping = (row - firstRI_row);
int new_mapping = old_mapping + (old_mapping / 2) + (old_mapping % 2);
int new_row = row + (new_mapping - old_mapping);
y_idx_d[new_row] = 1;
output_d[row * (Qm*N_l) + col] = y_mat_d[new_row*(Qm*N_l) + col];
}
//printf("output_d[%d] = %d\n", row * (Qm*N_l) + col, output_d[row * (Qm*N_l) + col]);
}
__global__ void serialOut(Byte* input_d, Byte* input_d2, Byte* y_mat_d,const int N ,int Nrows, int Qm, int N_l) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
int z = blockDim.z * blockIdx.z + threadIdx.z;
int idx = y * blockDim.x + x + z * (Nrows * blockDim.x);
const int C_mux = 12;
//Not to run more threads than available data
if (y >= Nrows)
return;
if (idx <( N / N_l))
y_mat_d[y*C_mux*Qm*N_l + z*Qm*N_l + x] = input_d[idx];
else
y_mat_d[y*C_mux*Qm*N_l + z*Qm*N_l + x] = input_d2[idx - (N / N_l)];
//printf("y_mat_d[%d] = %d\n", y*C_mux*Qm*N_l + z*Qm*N_l + x, y_mat_d[y*C_mux*Qm*N_l + z*Qm*N_l + x]);
}
void deinterleaver(Byte* input_d, Byte* input_d2, Byte** ri_d, Byte** output_d, const int N, const int N_ri, const int Qm, const int N_l, Byte* y_idx_d, Byte* y_mat_d)
{
// Step 1: Define C_mux
int C_mux = N_pusch_symbs;
// Step 2: Define R_mux and R_prime_mux
int H_prime_total = N / (Qm*N_l);
int H_prime = H_prime_total - N_ri;
int R_mux = N / C_mux;
int R_prime_mux = R_mux / (Qm*N_l);
// Initialize the matricies
//Calc. number of needed threads for calling kernel(s)
int numThreads = (C_mux*R_mux);
int blockDim = (numThreads < 1024) ? numThreads : 1024; //block size in threads (max 1024 threads)
int gridDim = numThreads / (blockDim)+(numThreads % blockDim == 0 ? 0 : 1); //grid size in blocks (min 1)
//Calling the kernel(s)
initializeMatricies << <gridDim, blockDim >> > (y_idx_d, y_mat_d, (C_mux*R_prime_mux), (C_mux*R_mux));
// Step 6: Construct matrix
//Calc. number of needed threads for calling kernel(s)
numThreads = C_mux * R_prime_mux * (Qm*N_l);
int rows = (numThreads < (1024)) ? numThreads : (1024 / (C_mux*(Qm*N_l)));
int gridY = numThreads / (rows*(C_mux*(Qm*N_l))) + (numThreads % (rows*(C_mux*(Qm*N_l))) == 0 ? 0 : 1); //grid size in blocks (min 1)
dim3 blockDim_3((Qm*N_l), rows, C_mux);
dim3 gridDim_3(1, gridY);
serialOut << <gridDim_3, blockDim_3 >> >(input_d, input_d2 , y_mat_d, N, R_prime_mux, Qm, N_l);
// Step 3: Deinterleave the RI control bits
if (N_ri != 0)
{
//Calc. number of needed threads for calling kernel(s)
numThreads = N_ri;
rows = (numThreads < 1024) ? numThreads : 1024; //block size in threads (max 1024 threads)
dim3 blockDim( Qm*N_l,1 );
dim3 gridDim( 1,rows);
deinterleaveRI << <gridDim, blockDim >> > (y_idx_d, y_mat_d, *ri_d, R_prime_mux, numThreads);
}
// Step 4: Deinterleave the data bits
//Calc. number of needed threads for calling kernel(s)
numThreads = H_prime; //Actually, it's number of required rows or it's total_threads / (Qm*N_l)
rows = (numThreads < (1024/ (Qm*N_l))) ? numThreads : (1024/ (Qm*N_l));
gridY = numThreads / (rows)+(numThreads % rows == 0 ? 0 : 1); //grid size in blocks (min 1)
dim3 blockDim_2(Qm*N_l, rows);
dim3 gridDim_2(1, gridY);
//printf("kh\n");
deinterleaveData << <gridDim_2, blockDim_2 >> >(y_idx_d, y_mat_d, *output_d, numThreads, H_prime_total, N_ri, Qm, N_l);
//printf("kh\n");
}
| b06ebc025244d23bb439effb5b9175f7f7e74f01.cu | /*
% Function: deinterleaver
% Description: Deinterleaves ULSCH data from RI and ACK control information
% Inputs: input_h Input bits
% N_ri_bits Number of RI control bits to deinterleave
% N_ack_bits Number of ACK control bits to deinterleave
% N_l Number of layers
% Qm Number of bits per modulation symbol
% ri_h RI control bits to interleave
% ack_h ACK control bits to interleave
% Outputs: *output_h Output bits
% *ri_h Deinterleaved RI control bits
% *ack_h Deinterleaved ACK control bits
By: Ahmad Nour
*/
#include "deinterleaver.cuh"
__global__ void initializeMatricies(Byte* y_idx_d, Byte* y_mat_d, int N_idx, int N_mat)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
//initialize Matricies
//Not to run more threads than available data
if (idx >= N_mat)
return;
if (idx < N_idx)
{
y_idx_d[idx] = 100;
y_mat_d[idx] = 0;
}
else
{
y_mat_d[idx] = 0;
}
}
__global__ void deinterleaveRI(Byte* y_idx_d, Byte* y_mat_d, Byte* ri_d, int R_prime_mux, int N_ri_bits)
{
int col = threadIdx.x;
int row = blockIdx.y;
int idx = row * blockDim.x + col;
int C_mux = 12;
int Ncol = blockDim.x;
//Not to run more threads than available data
if (row >= N_ri_bits)
return;
Byte ri_column_set[4] = { 1, 10, 7, 4 };
//Byte ack_column_set[4] = { 2, 9, 8, 3 };
int r = R_prime_mux - 1 - (row / 4);
int C_ri = ri_column_set[(row % 4)];
y_idx_d[r*C_mux + C_ri] = 1;
ri_d[row * Ncol + col] = y_mat_d[C_mux*r*Ncol + C_ri*Ncol + col];
}
__global__ void deinterleaveData(Byte* y_idx_d, Byte* y_mat_d, Byte* output_d, int numThreads, int H_prime_total, int N_ri_bits, int Qm, int N_l)
{
const int Ncol = blockDim.x; //Total number of columns
int col = threadIdx.x;
int row = blockDim.y * blockIdx.y + threadIdx.y;
int idx = row * Ncol + col;
const int C_mux = 12;
//printf("if %d > %d", row, numThreads);
//Not to run more threads than available data
if (row >= numThreads)
return;
int firstRI_row = H_prime_total - (N_ri_bits * 3); // The original eqn:
// firstRI_row = ((H_prime_total/12) - (N_ri_bits / 4))*12
if (row < firstRI_row) //No RI bits in this range
{
y_idx_d[row] = 1;
output_d[row * (Qm*N_l) + col] = y_mat_d[row*(Qm*N_l) + col];
}
else
{
/*
Now, we reshape the matrix to be of (12 cols):
idx 0 1 2 3 4 5 6 7 8 9 10 11
Data can be put? (No RI) yes no yes yes no yes yes no yes yes no yes
So, to map the data to indices where no RI bits exist, this equation is applied:
col = col + (col / 2) + (col % 2);
*/
int old_mapping = (row - firstRI_row);
int new_mapping = old_mapping + (old_mapping / 2) + (old_mapping % 2);
int new_row = row + (new_mapping - old_mapping);
y_idx_d[new_row] = 1;
output_d[row * (Qm*N_l) + col] = y_mat_d[new_row*(Qm*N_l) + col];
}
//printf("output_d[%d] = %d\n", row * (Qm*N_l) + col, output_d[row * (Qm*N_l) + col]);
}
__global__ void serialOut(Byte* input_d, Byte* input_d2, Byte* y_mat_d,const int N ,int Nrows, int Qm, int N_l) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
int z = blockDim.z * blockIdx.z + threadIdx.z;
int idx = y * blockDim.x + x + z * (Nrows * blockDim.x);
const int C_mux = 12;
//Not to run more threads than available data
if (y >= Nrows)
return;
if (idx <( N / N_l))
y_mat_d[y*C_mux*Qm*N_l + z*Qm*N_l + x] = input_d[idx];
else
y_mat_d[y*C_mux*Qm*N_l + z*Qm*N_l + x] = input_d2[idx - (N / N_l)];
//printf("y_mat_d[%d] = %d\n", y*C_mux*Qm*N_l + z*Qm*N_l + x, y_mat_d[y*C_mux*Qm*N_l + z*Qm*N_l + x]);
}
void deinterleaver(Byte* input_d, Byte* input_d2, Byte** ri_d, Byte** output_d, const int N, const int N_ri, const int Qm, const int N_l, Byte* y_idx_d, Byte* y_mat_d)
{
// Step 1: Define C_mux
int C_mux = N_pusch_symbs;
// Step 2: Define R_mux and R_prime_mux
int H_prime_total = N / (Qm*N_l);
int H_prime = H_prime_total - N_ri;
int R_mux = N / C_mux;
int R_prime_mux = R_mux / (Qm*N_l);
// Initialize the matricies
//Calc. number of needed threads for calling kernel(s)
int numThreads = (C_mux*R_mux);
int blockDim = (numThreads < 1024) ? numThreads : 1024; //block size in threads (max 1024 threads)
int gridDim = numThreads / (blockDim)+(numThreads % blockDim == 0 ? 0 : 1); //grid size in blocks (min 1)
//Calling the kernel(s)
initializeMatricies << <gridDim, blockDim >> > (y_idx_d, y_mat_d, (C_mux*R_prime_mux), (C_mux*R_mux));
// Step 6: Construct matrix
//Calc. number of needed threads for calling kernel(s)
numThreads = C_mux * R_prime_mux * (Qm*N_l);
int rows = (numThreads < (1024)) ? numThreads : (1024 / (C_mux*(Qm*N_l)));
int gridY = numThreads / (rows*(C_mux*(Qm*N_l))) + (numThreads % (rows*(C_mux*(Qm*N_l))) == 0 ? 0 : 1); //grid size in blocks (min 1)
dim3 blockDim_3((Qm*N_l), rows, C_mux);
dim3 gridDim_3(1, gridY);
serialOut << <gridDim_3, blockDim_3 >> >(input_d, input_d2 , y_mat_d, N, R_prime_mux, Qm, N_l);
// Step 3: Deinterleave the RI control bits
if (N_ri != 0)
{
//Calc. number of needed threads for calling kernel(s)
numThreads = N_ri;
rows = (numThreads < 1024) ? numThreads : 1024; //block size in threads (max 1024 threads)
dim3 blockDim( Qm*N_l,1 );
dim3 gridDim( 1,rows);
deinterleaveRI << <gridDim, blockDim >> > (y_idx_d, y_mat_d, *ri_d, R_prime_mux, numThreads);
}
// Step 4: Deinterleave the data bits
//Calc. number of needed threads for calling kernel(s)
numThreads = H_prime; //Actually, it's number of required rows or it's total_threads / (Qm*N_l)
rows = (numThreads < (1024/ (Qm*N_l))) ? numThreads : (1024/ (Qm*N_l));
gridY = numThreads / (rows)+(numThreads % rows == 0 ? 0 : 1); //grid size in bloack (min 1)
dim3 blockDim_2(Qm*N_l, rows);
dim3 gridDim_2(1, gridY);
//printf("kh\n");
deinterleaveData << <gridDim_2, blockDim_2 >> >(y_idx_d, y_mat_d, *output_d, numThreads, H_prime_total, N_ri, Qm, N_l);
//printf("kh\n");
}
|
dafb02a0a2803730b51dc607629242cf5a0f5197.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_advec_mom_kernel_z1 [6][2];
static int dims_advec_mom_kernel_z1_h [6][2] = {0};
//user function
__device__
inline void advec_mom_kernel_z1_gpu(ACC<double> &pre_vol,
ACC<double> &post_vol,
const ACC<double> &volume,
const ACC<double> &vol_flux_x,
const ACC<double> &vol_flux_y,
const ACC<double> &vol_flux_z) {
post_vol(0,0,0) = volume(0,0,0) + vol_flux_x(1,0,0) - vol_flux_x(0,0,0)
+ vol_flux_y(0,1,0) - vol_flux_y(0,0,0);
pre_vol(0,0,0) = post_vol(0,0,0) + vol_flux_z(0,0,1) - vol_flux_z(0,0,0);
}
__global__ void ops_advec_mom_kernel_z1(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_z1[0][0] + idx_z * 1*1 * dims_advec_mom_kernel_z1[0][0] * dims_advec_mom_kernel_z1[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_z1[1][0] + idx_z * 1*1 * dims_advec_mom_kernel_z1[1][0] * dims_advec_mom_kernel_z1[1][1];
arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_z1[2][0] + idx_z * 1*1 * dims_advec_mom_kernel_z1[2][0] * dims_advec_mom_kernel_z1[2][1];
arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_z1[3][0] + idx_z * 1*1 * dims_advec_mom_kernel_z1[3][0] * dims_advec_mom_kernel_z1[3][1];
arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_z1[4][0] + idx_z * 1*1 * dims_advec_mom_kernel_z1[4][0] * dims_advec_mom_kernel_z1[4][1];
arg5 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_z1[5][0] + idx_z * 1*1 * dims_advec_mom_kernel_z1[5][0] * dims_advec_mom_kernel_z1[5][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_advec_mom_kernel_z1[0][0], dims_advec_mom_kernel_z1[0][1], arg0);
ACC<double> argp1(dims_advec_mom_kernel_z1[1][0], dims_advec_mom_kernel_z1[1][1], arg1);
const ACC<double> argp2(dims_advec_mom_kernel_z1[2][0], dims_advec_mom_kernel_z1[2][1], arg2);
const ACC<double> argp3(dims_advec_mom_kernel_z1[3][0], dims_advec_mom_kernel_z1[3][1], arg3);
const ACC<double> argp4(dims_advec_mom_kernel_z1[4][0], dims_advec_mom_kernel_z1[4][1], arg4);
const ACC<double> argp5(dims_advec_mom_kernel_z1[5][0], dims_advec_mom_kernel_z1[5][1], arg5);
advec_mom_kernel_z1_gpu(argp0, argp1, argp2, argp3,
argp4, argp5);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_advec_mom_kernel_z1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5) {
#else
void ops_par_loop_advec_mom_kernel_z1_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[6] = { arg0, arg1, arg2, arg3, arg4, arg5};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,6,range,121)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(121,"advec_mom_kernel_z1");
OPS_kernels[121].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 6,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
if (xdim0 != dims_advec_mom_kernel_z1_h[0][0] || ydim0 != dims_advec_mom_kernel_z1_h[0][1] || xdim1 != dims_advec_mom_kernel_z1_h[1][0] || ydim1 != dims_advec_mom_kernel_z1_h[1][1] || xdim2 != dims_advec_mom_kernel_z1_h[2][0] || ydim2 != dims_advec_mom_kernel_z1_h[2][1] || xdim3 != dims_advec_mom_kernel_z1_h[3][0] || ydim3 != dims_advec_mom_kernel_z1_h[3][1] || xdim4 != dims_advec_mom_kernel_z1_h[4][0] || ydim4 != dims_advec_mom_kernel_z1_h[4][1] || xdim5 != dims_advec_mom_kernel_z1_h[5][0] || ydim5 != dims_advec_mom_kernel_z1_h[5][1]) {
dims_advec_mom_kernel_z1_h[0][0] = xdim0;
dims_advec_mom_kernel_z1_h[0][1] = ydim0;
dims_advec_mom_kernel_z1_h[1][0] = xdim1;
dims_advec_mom_kernel_z1_h[1][1] = ydim1;
dims_advec_mom_kernel_z1_h[2][0] = xdim2;
dims_advec_mom_kernel_z1_h[2][1] = ydim2;
dims_advec_mom_kernel_z1_h[3][0] = xdim3;
dims_advec_mom_kernel_z1_h[3][1] = ydim3;
dims_advec_mom_kernel_z1_h[4][0] = xdim4;
dims_advec_mom_kernel_z1_h[4][1] = ydim4;
dims_advec_mom_kernel_z1_h[5][0] = xdim5;
dims_advec_mom_kernel_z1_h[5][1] = ydim5;
cutilSafeCall(hipMemcpyToSymbol( dims_advec_mom_kernel_z1, dims_advec_mom_kernel_z1_h, sizeof(dims_advec_mom_kernel_z1)));
}
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
char *p_a[6];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 6);
ops_halo_exchanges(args,6,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[121].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_advec_mom_kernel_z1), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[121].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 6);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[121].mpi_time += t2-t1;
OPS_kernels[121].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[121].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[121].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[121].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[121].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[121].transfer += ops_compute_transfer(dim, start, end, &arg5);
}
}
#ifdef OPS_LAZY
void ops_par_loop_advec_mom_kernel_z1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 121;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 121;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 6;
desc->args = (ops_arg*)malloc(6*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->function = ops_par_loop_advec_mom_kernel_z1_execute;
if (OPS_diags > 1) {
ops_timing_realloc(121,"advec_mom_kernel_z1");
}
ops_enqueue_kernel(desc);
}
#endif
| dafb02a0a2803730b51dc607629242cf5a0f5197.cu | //
// auto-generated by ops.py
//
__constant__ int dims_advec_mom_kernel_z1 [6][2];
static int dims_advec_mom_kernel_z1_h [6][2] = {0};
//user function
__device__
inline void advec_mom_kernel_z1_gpu(ACC<double> &pre_vol,
ACC<double> &post_vol,
const ACC<double> &volume,
const ACC<double> &vol_flux_x,
const ACC<double> &vol_flux_y,
const ACC<double> &vol_flux_z) {
post_vol(0,0,0) = volume(0,0,0) + vol_flux_x(1,0,0) - vol_flux_x(0,0,0)
+ vol_flux_y(0,1,0) - vol_flux_y(0,0,0);
pre_vol(0,0,0) = post_vol(0,0,0) + vol_flux_z(0,0,1) - vol_flux_z(0,0,0);
}
__global__ void ops_advec_mom_kernel_z1(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_z1[0][0] + idx_z * 1*1 * dims_advec_mom_kernel_z1[0][0] * dims_advec_mom_kernel_z1[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_z1[1][0] + idx_z * 1*1 * dims_advec_mom_kernel_z1[1][0] * dims_advec_mom_kernel_z1[1][1];
arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_z1[2][0] + idx_z * 1*1 * dims_advec_mom_kernel_z1[2][0] * dims_advec_mom_kernel_z1[2][1];
arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_z1[3][0] + idx_z * 1*1 * dims_advec_mom_kernel_z1[3][0] * dims_advec_mom_kernel_z1[3][1];
arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_z1[4][0] + idx_z * 1*1 * dims_advec_mom_kernel_z1[4][0] * dims_advec_mom_kernel_z1[4][1];
arg5 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_z1[5][0] + idx_z * 1*1 * dims_advec_mom_kernel_z1[5][0] * dims_advec_mom_kernel_z1[5][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_advec_mom_kernel_z1[0][0], dims_advec_mom_kernel_z1[0][1], arg0);
ACC<double> argp1(dims_advec_mom_kernel_z1[1][0], dims_advec_mom_kernel_z1[1][1], arg1);
const ACC<double> argp2(dims_advec_mom_kernel_z1[2][0], dims_advec_mom_kernel_z1[2][1], arg2);
const ACC<double> argp3(dims_advec_mom_kernel_z1[3][0], dims_advec_mom_kernel_z1[3][1], arg3);
const ACC<double> argp4(dims_advec_mom_kernel_z1[4][0], dims_advec_mom_kernel_z1[4][1], arg4);
const ACC<double> argp5(dims_advec_mom_kernel_z1[5][0], dims_advec_mom_kernel_z1[5][1], arg5);
advec_mom_kernel_z1_gpu(argp0, argp1, argp2, argp3,
argp4, argp5);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_advec_mom_kernel_z1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5) {
#else
void ops_par_loop_advec_mom_kernel_z1_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[6] = { arg0, arg1, arg2, arg3, arg4, arg5};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,6,range,121)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(121,"advec_mom_kernel_z1");
OPS_kernels[121].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 6,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
if (xdim0 != dims_advec_mom_kernel_z1_h[0][0] || ydim0 != dims_advec_mom_kernel_z1_h[0][1] || xdim1 != dims_advec_mom_kernel_z1_h[1][0] || ydim1 != dims_advec_mom_kernel_z1_h[1][1] || xdim2 != dims_advec_mom_kernel_z1_h[2][0] || ydim2 != dims_advec_mom_kernel_z1_h[2][1] || xdim3 != dims_advec_mom_kernel_z1_h[3][0] || ydim3 != dims_advec_mom_kernel_z1_h[3][1] || xdim4 != dims_advec_mom_kernel_z1_h[4][0] || ydim4 != dims_advec_mom_kernel_z1_h[4][1] || xdim5 != dims_advec_mom_kernel_z1_h[5][0] || ydim5 != dims_advec_mom_kernel_z1_h[5][1]) {
dims_advec_mom_kernel_z1_h[0][0] = xdim0;
dims_advec_mom_kernel_z1_h[0][1] = ydim0;
dims_advec_mom_kernel_z1_h[1][0] = xdim1;
dims_advec_mom_kernel_z1_h[1][1] = ydim1;
dims_advec_mom_kernel_z1_h[2][0] = xdim2;
dims_advec_mom_kernel_z1_h[2][1] = ydim2;
dims_advec_mom_kernel_z1_h[3][0] = xdim3;
dims_advec_mom_kernel_z1_h[3][1] = ydim3;
dims_advec_mom_kernel_z1_h[4][0] = xdim4;
dims_advec_mom_kernel_z1_h[4][1] = ydim4;
dims_advec_mom_kernel_z1_h[5][0] = xdim5;
dims_advec_mom_kernel_z1_h[5][1] = ydim5;
cutilSafeCall(cudaMemcpyToSymbol( dims_advec_mom_kernel_z1, dims_advec_mom_kernel_z1_h, sizeof(dims_advec_mom_kernel_z1)));
}
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
char *p_a[6];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 6);
ops_halo_exchanges(args,6,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[121].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_advec_mom_kernel_z1<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[121].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 6);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[121].mpi_time += t2-t1;
OPS_kernels[121].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[121].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[121].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[121].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[121].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[121].transfer += ops_compute_transfer(dim, start, end, &arg5);
}
}
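// The OPS_LAZY variant below does not run the loop immediately: it packs the
// arguments into an ops_kernel_descriptor (keyed by a djb2-style hash of the
// kernel index, iteration range and dat indices) and enqueues it so the runtime
// can call ops_par_loop_advec_mom_kernel_z1_execute later.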
#ifdef OPS_LAZY
void ops_par_loop_advec_mom_kernel_z1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 121;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 121;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 6;
desc->args = (ops_arg*)malloc(6*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->function = ops_par_loop_advec_mom_kernel_z1_execute;
if (OPS_diags > 1) {
ops_timing_realloc(121,"advec_mom_kernel_z1");
}
ops_enqueue_kernel(desc);
}
#endif
|
c739eb40fb936fd97861753e86e0137258a8baef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->usingdata2 = this->usingdata2;
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data;
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data;
if (this->usingdata2) {
prob_data = prob_.gpu_data2();
loss_data = bottom[0]->mutable_gpu_diff2();
} else {
prob_data = prob_.gpu_data();
loss_data = bottom[0]->mutable_gpu_diff();
}
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts;
if (this->usingdata2) {
counts = prob_.mutable_gpu_diff2();
} else {
counts = prob_.mutable_gpu_diff();
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
if (normalize_) {
Dtype count;
caffe_gpu_asum(nthreads, counts, &count);
loss /= count;
} else {
loss /= outer_num_;
}
if (this->usingdata2) {
top[0]->mutable_cpu_data2()[0] = loss;
top[0]->mutable_cpu_diff2()[0] = (Dtype) 1.0; // TAEHOON LEE : I don't know why but it must be done
} else {
top[0]->mutable_cpu_data()[0] = loss;
}
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
}
}
}
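// SoftmaxLossBackwardGPU2 is a variant used on the adversarial path: it shifts
// each provided label to the next class modulo 10 before computing the usual
// softmax-loss gradient, so it is only meaningful for a 10-class problem (the
// modulus is hard-coded below, as is the 100-label loop in Backward_gpu).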
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU2(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>( ( (int)label[n * spatial_dim + s] + 1 ) % 10 );
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff;
const Dtype* prob_data;
const Dtype* top_data;
if (this->adversarial || this->usingdata2) {
bottom_diff = bottom[0]->mutable_gpu_diff2();
} else {
bottom_diff = bottom[0]->mutable_gpu_diff();
}
if (this->usingdata2) {
prob_data = prob_.gpu_data2();
top_data = top[0]->gpu_data2();
} else {
prob_data = prob_.gpu_data();
top_data = top[0]->gpu_data();
}
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory.
Dtype* counts;
if (this->usingdata2) {
counts = prob_.mutable_gpu_diff2();
} else {
counts = prob_.mutable_gpu_diff();
}
// NOLINT_NEXT_LINE(whitespace/operators)
if (this->adversarial) {
for (int i=0; i<100; ++i)
bottom[1]->mutable_cpu_data()[i] = caffe_rng_rand() % 10;
hipLaunchKernelGGL(( SoftmaxLossBackwardGPU2<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
} else {
hipLaunchKernelGGL(( SoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
}
//LOG(INFO) << this->adversarial << this->usingdata2 << this->manifold;
//LOG(INFO) << bottom[1]->cpu_data()[0] << " " << bottom[1]->cpu_data()[1] << " " << bottom[1]->cpu_data()[2] << " " << bottom[1]->cpu_data()[3] << " " << bottom[1]->cpu_data()[4] << " ";
Dtype loss_weight;
if (this->usingdata2) {
loss_weight = top[0]->cpu_diff2()[0];
} else {
loss_weight = top[0]->cpu_diff()[0];
}
if (normalize_) {
Dtype count;
caffe_gpu_asum(nthreads, counts, &count);
caffe_gpu_scal(prob_.count(), loss_weight / count, bottom_diff);
} else {
caffe_gpu_scal(prob_.count(), loss_weight / outer_num_, bottom_diff);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer);
} // namespace caffe
| c739eb40fb936fd97861753e86e0137258a8baef.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->usingdata2 = this->usingdata2;
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data;
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data;
if (this->usingdata2) {
prob_data = prob_.gpu_data2();
loss_data = bottom[0]->mutable_gpu_diff2();
} else {
prob_data = prob_.gpu_data();
loss_data = bottom[0]->mutable_gpu_diff();
}
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts;
if (this->usingdata2) {
counts = prob_.mutable_gpu_diff2();
} else {
counts = prob_.mutable_gpu_diff();
}
// NOLINT_NEXT_LINE(whitespace/operators)
SoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
if (normalize_) {
Dtype count;
caffe_gpu_asum(nthreads, counts, &count);
loss /= count;
} else {
loss /= outer_num_;
}
if (this->usingdata2) {
top[0]->mutable_cpu_data2()[0] = loss;
top[0]->mutable_cpu_diff2()[0] = (Dtype) 1.0; // TAEHOON LEE : I don't know why but it must be done
} else {
top[0]->mutable_cpu_data()[0] = loss;
}
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
}
}
}
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU2(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>( ( (int)label[n * spatial_dim + s] + 1 ) % 10 );
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff;
const Dtype* prob_data;
const Dtype* top_data;
if (this->adversarial || this->usingdata2) {
bottom_diff = bottom[0]->mutable_gpu_diff2();
} else {
bottom_diff = bottom[0]->mutable_gpu_diff();
}
if (this->usingdata2) {
prob_data = prob_.gpu_data2();
top_data = top[0]->gpu_data2();
} else {
prob_data = prob_.gpu_data();
top_data = top[0]->gpu_data();
}
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory.
Dtype* counts;
if (this->usingdata2) {
counts = prob_.mutable_gpu_diff2();
} else {
counts = prob_.mutable_gpu_diff();
}
// NOLINT_NEXT_LINE(whitespace/operators)
if (this->adversarial) {
for (int i=0; i<100; ++i)
bottom[1]->mutable_cpu_data()[i] = caffe_rng_rand() % 10;
SoftmaxLossBackwardGPU2<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
} else {
SoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
}
//LOG(INFO) << this->adversarial << this->usingdata2 << this->manifold;
//LOG(INFO) << bottom[1]->cpu_data()[0] << " " << bottom[1]->cpu_data()[1] << " " << bottom[1]->cpu_data()[2] << " " << bottom[1]->cpu_data()[3] << " " << bottom[1]->cpu_data()[4] << " ";
Dtype loss_weight;
if (this->usingdata2) {
loss_weight = top[0]->cpu_diff2()[0];
} else {
loss_weight = top[0]->cpu_diff()[0];
}
if (normalize_) {
Dtype count;
caffe_gpu_asum(nthreads, counts, &count);
caffe_gpu_scal(prob_.count(), loss_weight / count, bottom_diff);
} else {
caffe_gpu_scal(prob_.count(), loss_weight / outer_num_, bottom_diff);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer);
} // namespace caffe
|
84ef6ae737106639de3f281a44f777bb30e7d655.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "../include/common.hpp"
#include "../include/math_functions.hpp"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
} // namespace caffe
| 84ef6ae737106639de3f281a44f777bb30e7d655.cu | #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "../include/common.hpp"
#include "../include/math_functions.hpp"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
} // namespace caffe
|
ff5ea555ff8ea26be846fe28e508b3e506ee935a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel_utils.cuh"
#include <ATen/ATen.h>
#ifdef VERSION_LE_04
#include "ATen/hip/AccumulateType.cuh"
#else
#include "ATen/AccumulateType.h"
#endif
#include "ATen/hip/HIPTensorMethods.cuh"
// #include "ATen/hip/HIPTypeConversion.cuh"
// #include <THH/THHTensorMathReduce.cuh>
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_fwd_first_dim_kernel
(scalar_t* __restrict__ w,
accscalar_t* __restrict__ norms,
const scalar_t* __restrict__ v,
const scalar_t* __restrict__ g,
const int rowSize)
{
// We are norming each slowest-dim row of the tensor separately.
// For now, assign one block to each row.
const int tid = threadIdx.x;
const int row = blockIdx.x;
const int stride = blockDim.x;
// Logical index offset for this flattened row
const int rowStart = row*rowSize;
// Hack to get around nvcc complaining when an smem array is declared with the same name
// but different types in different kernels (in this case different instantiations)
// extern __shared__ accscalar_t s[]; // error: declaration is incompatible with previous "s"
extern __shared__ char buf[];
accscalar_t* s = (accscalar_t*)buf;
accscalar_t thread_sum = 0.f;
for(int i = tid; i < rowSize; i += stride )
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[i+rowStart]);
thread_sum += val_f*val_f; // AccumOp, could do Kahan here
}
reduce_block_into_lanes(s, thread_sum, 1, ReduceAdd<accscalar_t>());
accscalar_t result = s[0];
result = sqrtf(result);
if(tid == 0)
norms[row] = result;
// Broadcast load, could use shared memory instead.
accscalar_t g_this_row = scalar_cast<accscalar_t>(g[row]);
accscalar_t rnorm = 1.f/result; // for consistency with backward kernel
// Write data to output
for(int i = tid; i < rowSize; i += stride )
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[i+rowStart]);
w[i+rowStart] = scalar_cast<scalar_t>(g_this_row*val_f*rnorm);
}
}
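// Net effect per slowest-dim row: norms[row] = ||v_row||_2 and
// w_row = g[row] * v_row / ||v_row||, i.e. the standard weight-norm
// reparameterization w = g * v / ||v|| applied along dim 0.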
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_fwd_last_dim_kernel
(
scalar_t* __restrict__ w,
accscalar_t* __restrict__ norms,
const scalar_t* __restrict__ v,
const scalar_t* __restrict__ g,
const int fast_dim_size,
const int slower_dims_size
)
{
const int fast_dim_location = threadIdx.x + blockIdx.x*blockDim.x;
extern __shared__ char buf[];
accscalar_t* alloc = (accscalar_t*)buf;
accscalar_t* s = &alloc[0];
accscalar_t* rnorms_this_block = &alloc[blockDim.x*blockDim.y];
accscalar_t thread_sum = 0.f;
int slower_dims_location = threadIdx.y;
int currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
if(fast_dim_location < fast_dim_size)
while(slower_dims_location < slower_dims_size)
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[currentIdx]);
thread_sum += val_f*val_f; // AccumOp, could do Kahan here
currentIdx += blockDim.y*fast_dim_size;
slower_dims_location += blockDim.y;
}
reduce_block_into_lanes(s, thread_sum, blockDim.x, ReduceAdd<accscalar_t>());
// Better to pass an EpilogueOp to reduce_block_into_lanes, implement later
if(threadIdx.y == 0)
{
accscalar_t result = s[threadIdx.x];
accscalar_t norm_this_col = sqrtf(result);
norms[fast_dim_location] = norm_this_col;
rnorms_this_block[threadIdx.x] = 1.f/norm_this_col;
}
__syncthreads();
accscalar_t g_this_col = scalar_cast<accscalar_t>(g[fast_dim_location]);
accscalar_t rnorm = rnorms_this_block[threadIdx.x];
slower_dims_location = threadIdx.y;
currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
if(fast_dim_location < fast_dim_size)
while(slower_dims_location < slower_dims_size)
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[currentIdx]);
w[currentIdx] = scalar_cast<scalar_t>(g_this_col*val_f*rnorm);
currentIdx += blockDim.y*fast_dim_size;
slower_dims_location += blockDim.y;
}
}
void weight_norm_fwd_cuda
(const at::Tensor& w,
const at::Tensor& norms,
const at::Tensor& v,
const at::Tensor& g,
int dim)
{
#ifdef DEBUG_ANY
using namespace std;
cout << "hello from send_to_fwd with v.type() = " << v.type() << endl;
#endif
const int ndims = v.ndimension();
if(dim == 0)
{
// Find logical size of each flattened slowest-dim row
int rowSize = 1;
for(int i = ndims - 1; i > 0; i--)
rowSize *= v.size(i);
using namespace at;
hipStream_t stream = globalContext().getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(v.type(),
"weight_norm_fwd_first_dim_kernel",
[&]
{
using cuda_scalar_t = apex::cuda::type<scalar_t>;
USING_ACCSCALAR_T
hipLaunchKernelGGL(( weight_norm_fwd_first_dim_kernel)
, dim3(v.size(0)),
dim3(BLOCK),
BLOCK*sizeof(accscalar_t),
stream,
w.data<cuda_scalar_t>(),
norms.data<accscalar_t>(),
v.data<cuda_scalar_t>(),
g.data<cuda_scalar_t>(),
rowSize);
});
}
else if(dim == ndims - 1)
{
// Precompute slower_dims_size and fast_dim_size
int slower_dims_size = 1;
for(int i = 0; i < ndims - 1; i++)
slower_dims_size *= v.size(i);
int fast_dim_size = v.size(ndims-1);
using namespace at;
hipStream_t stream = globalContext().getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(v.type(),
"weight_norm_fwd_last_dim_kernel",
[&]
{
using cuda_scalar_t = apex::cuda::type<scalar_t>;
USING_ACCSCALAR_T
// just trying this formatting out to see how it feels...
hipLaunchKernelGGL(( weight_norm_fwd_last_dim_kernel)
, dim3((fast_dim_size+TILE_W-1)/TILE_W),
dim3(dim3(TILE_W,TILE_H)),
(TILE_W*TILE_H + TILE_W)*sizeof(accscalar_t),
stream,
w.data<cuda_scalar_t>(),
norms.data<accscalar_t>(),
v.data<cuda_scalar_t>(),
g.data<cuda_scalar_t>(),
fast_dim_size,
slower_dims_size);
});
}
// else
// {
// intermediate dim kernel. Error checking on the dim was already done in
// Module.cpp:weight_norm_fwd. Could put that logic here instead, if we include
// <python.h> in both files.
// }
// The kernel execution is asynchronous, so this will only catch errors on the kernel launch,
// not the kernel's execution. Errors in kernel execution aren't guaranteed to be caught
// until a later error check on a synchronizing CUDA call. Unfortunately, without manually
// synchronizing here, this is the best we can do.
THCudaCheck(hipGetLastError());
#ifdef DEBUG_PROFILE
THCudaCheck(hipDeviceSynchronize());
#endif
}
| ff5ea555ff8ea26be846fe28e508b3e506ee935a.cu | #include "kernel_utils.cuh"
#include <ATen/ATen.h>
#ifdef VERSION_LE_04
#include "ATen/cuda/AccumulateType.cuh"
#else
#include "ATen/AccumulateType.h"
#endif
#include "ATen/cuda/CUDATensorMethods.cuh"
// #include "ATen/cuda/CUDATypeConversion.cuh"
// #include <THC/THCTensorMathReduce.cuh>
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_fwd_first_dim_kernel
(scalar_t* __restrict__ w,
accscalar_t* __restrict__ norms,
const scalar_t* __restrict__ v,
const scalar_t* __restrict__ g,
const int rowSize)
{
// We are norming each slowest-dim row of the tensor separately.
// For now, assign one block to each row.
const int tid = threadIdx.x;
const int row = blockIdx.x;
const int stride = blockDim.x;
// Logical index offset for this flattened row
const int rowStart = row*rowSize;
// Hack to get around nvcc complaining when an smem array is declared with the same name
// but different types in different kernels (in this case different instantiations)
// extern __shared__ accscalar_t s[]; // error: declaration is incompatible with previous "s"
extern __shared__ char buf[];
accscalar_t* s = (accscalar_t*)buf;
accscalar_t thread_sum = 0.f;
for(int i = tid; i < rowSize; i += stride )
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[i+rowStart]);
thread_sum += val_f*val_f; // AccumOp, could do Kahan here
}
reduce_block_into_lanes(s, thread_sum, 1, ReduceAdd<accscalar_t>());
accscalar_t result = s[0];
result = sqrtf(result);
if(tid == 0)
norms[row] = result;
// Broadcast load, could use shared memory instead.
accscalar_t g_this_row = scalar_cast<accscalar_t>(g[row]);
accscalar_t rnorm = 1.f/result; // for consistency with backward kernel
// Write data to output
for(int i = tid; i < rowSize; i += stride )
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[i+rowStart]);
w[i+rowStart] = scalar_cast<scalar_t>(g_this_row*val_f*rnorm);
}
}
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_fwd_last_dim_kernel
(
scalar_t* __restrict__ w,
accscalar_t* __restrict__ norms,
const scalar_t* __restrict__ v,
const scalar_t* __restrict__ g,
const int fast_dim_size,
const int slower_dims_size
)
{
const int fast_dim_location = threadIdx.x + blockIdx.x*blockDim.x;
extern __shared__ char buf[];
accscalar_t* alloc = (accscalar_t*)buf;
accscalar_t* s = &alloc[0];
accscalar_t* rnorms_this_block = &alloc[blockDim.x*blockDim.y];
accscalar_t thread_sum = 0.f;
int slower_dims_location = threadIdx.y;
int currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
if(fast_dim_location < fast_dim_size)
while(slower_dims_location < slower_dims_size)
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[currentIdx]);
thread_sum += val_f*val_f; // AccumOp, could do Kahan here
currentIdx += blockDim.y*fast_dim_size;
slower_dims_location += blockDim.y;
}
reduce_block_into_lanes(s, thread_sum, blockDim.x, ReduceAdd<accscalar_t>());
// Better to pass an EpilogueOp to reduce_block_into_lanes, implement later
if(threadIdx.y == 0)
{
accscalar_t result = s[threadIdx.x];
accscalar_t norm_this_col = sqrtf(result);
norms[fast_dim_location] = norm_this_col;
rnorms_this_block[threadIdx.x] = 1.f/norm_this_col;
}
__syncthreads();
accscalar_t g_this_col = scalar_cast<accscalar_t>(g[fast_dim_location]);
accscalar_t rnorm = rnorms_this_block[threadIdx.x];
slower_dims_location = threadIdx.y;
currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
if(fast_dim_location < fast_dim_size)
while(slower_dims_location < slower_dims_size)
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[currentIdx]);
w[currentIdx] = scalar_cast<scalar_t>(g_this_col*val_f*rnorm);
currentIdx += blockDim.y*fast_dim_size;
slower_dims_location += blockDim.y;
}
}
void weight_norm_fwd_cuda
(const at::Tensor& w,
const at::Tensor& norms,
const at::Tensor& v,
const at::Tensor& g,
int dim)
{
#ifdef DEBUG_ANY
using namespace std;
cout << "hello from send_to_fwd with v.type() = " << v.type() << endl;
#endif
const int ndims = v.ndimension();
if(dim == 0)
{
// Find logical size of each flattened slowest-dim row
int rowSize = 1;
for(int i = ndims - 1; i > 0; i--)
rowSize *= v.size(i);
using namespace at;
cudaStream_t stream = globalContext().getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(v.type(),
"weight_norm_fwd_first_dim_kernel",
[&]
{
using cuda_scalar_t = apex::cuda::type<scalar_t>;
USING_ACCSCALAR_T
weight_norm_fwd_first_dim_kernel
<<<v.size(0),
BLOCK,
BLOCK*sizeof(accscalar_t),
stream>>>
(w.data<cuda_scalar_t>(),
norms.data<accscalar_t>(),
v.data<cuda_scalar_t>(),
g.data<cuda_scalar_t>(),
rowSize);
});
}
else if(dim == ndims - 1)
{
// Precompute slower_dims_size and fast_dim_size
int slower_dims_size = 1;
for(int i = 0; i < ndims - 1; i++)
slower_dims_size *= v.size(i);
int fast_dim_size = v.size(ndims-1);
using namespace at;
cudaStream_t stream = globalContext().getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(v.type(),
"weight_norm_fwd_last_dim_kernel",
[&]
{
using cuda_scalar_t = apex::cuda::type<scalar_t>;
USING_ACCSCALAR_T
// just trying this formatting out to see how it feels...
weight_norm_fwd_last_dim_kernel
<<<(fast_dim_size+TILE_W-1)/TILE_W,
dim3(TILE_W,TILE_H),
(TILE_W*TILE_H + TILE_W)*sizeof(accscalar_t),
stream>>>
(w.data<cuda_scalar_t>(),
norms.data<accscalar_t>(),
v.data<cuda_scalar_t>(),
g.data<cuda_scalar_t>(),
fast_dim_size,
slower_dims_size);
});
}
// else
// {
// intermediate dim kernel. Error checking on the dim was already done in
// Module.cpp:weight_norm_fwd. Could put that logic here instead, if we include
// <python.h> in both files.
// }
// The kernel execution is asynchronous, so this will only catch errors on the kernel launch,
// not the kernel's execution. Errors in kernel execution aren't guaranteed to be caught
// until a later error check on a synchronizing CUDA call. Unfortunately, without manually
// synchronizing here, this is the best we can do.
THCudaCheck(cudaGetLastError());
#ifdef DEBUG_PROFILE
THCudaCheck(cudaDeviceSynchronize());
#endif
}
|
6c522a80cfa7634c345ef8df665cefd255e4e1ac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h> // time() is used to seed rand() in fillMatrix()
typedef struct{
float *matrix;
unsigned long long int rows;
unsigned long long int *position;
unsigned long long int columns;
unsigned long long int sparseCount;
size_t pitch;
}Matrix;
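/*
 * Minimal COO (coordinate) sparse-matrix layout used throughout this file:
 *   matrix   - the sparseCount nonzero values, in the order they were found
 *   position - a 2 x sparseCount index table: entries [0 .. sparseCount-1]
 *              hold each value's row index, entries
 *              [sparseCount .. 2*sparseCount-1] the matching column index
 *   pitch    - byte width of one row of position (set by hipMallocPitch for
 *              device copies, computed by hand for host copies)
 * Example: the dense 2x2 matrix {{0, 7}, {3, 0}} is stored as
 *   matrix = {7, 3}, position = {0, 1,  1, 0}  (row indices, then columns).
 */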
void fillMatrix(Matrix*);
void COOTheMatrix(Matrix*);
void printMatrix(Matrix*);
void prepareGPUCopy(Matrix**);
void prepareHostGPUCopy(Matrix**, Matrix*);
void prepareResultHGPUCopy(Matrix**, Matrix**, Matrix**);
void retrieveResult(Matrix**);
void kronProd(Matrix*, Matrix*, Matrix**);
__global__ void multiply(Matrix*, Matrix*, Matrix*, unsigned long long int*);
#define checkError(call) { checkGPUError((call), __LINE__); }
inline void checkGPUError(hipError_t errCode, int line){
if(errCode != hipSuccess){
printf("\n");
printf("Error at %d: %s\n", line, hipGetErrorString(errCode));
}
return;
}
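/*
 * Program flow in main(): build two small dense matrices with random values,
 * convert them to COO on the host (fillMatrix / COOTheMatrix), stage host-side
 * Matrix structs whose data pointers live on the GPU (prepareHostGPUCopy),
 * compute their Kronecker product on the device (kronProd), copy the COO
 * result back (retrieveResult) and print it.
 */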
int main(){
Matrix *a, *b, *c;
Matrix *a_hgpu, *b_hgpu;
a = (Matrix*)malloc(sizeof(Matrix));
b = (Matrix*)malloc(sizeof(Matrix));
c = (Matrix*)malloc(sizeof(Matrix));
if(a == NULL || b == NULL || c == NULL){
fprintf(stderr, "Failed to allocate memory on CPU");
}
/*
* Initialilizing the matrix parameters
*/
a -> rows = b -> columns = 2;
a -> columns = b -> rows = 3;
a -> matrix = (float*)malloc(sizeof(float) * a -> rows * a -> columns);
b -> matrix = (float*)malloc(sizeof(float) * b -> rows * b -> columns);
fillMatrix(a);
fillMatrix(b);
printMatrix(a);
printMatrix(b);
prepareHostGPUCopy(&a_hgpu, a);
prepareHostGPUCopy(&b_hgpu, b);
kronProd(a_hgpu, b_hgpu, &c);
retrieveResult(&c);
printMatrix(c);
hipFree(a_hgpu -> matrix);
hipFree(a_hgpu -> position);
hipFree(b_hgpu -> matrix);
hipFree(b_hgpu -> position);
hipFree(a_hgpu);
hipFree(b_hgpu);
free(a);
free(b);
free(c);
return(0);
}
void fillMatrix(Matrix *m){
unsigned long long int i, elements, count = 0;
float a = 5.0;
elements = m -> rows * m -> columns;
srand((unsigned int)time(NULL));
for(i = 0; i < elements;i++){
printf("%f\t", m -> matrix[i] = ((float)rand()/(float)(RAND_MAX)) * a);
if(m -> matrix[i] != 0)
count++;
if((i + 1) % m -> columns == 0)
printf("\n");
}
printf("\n");
m -> sparseCount = count;
COOTheMatrix(m);
return;
}
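/*
 * COOTheMatrix converts the dense row-major buffer filled above into the COO
 * layout described near the top of the file. It assumes sparseCount was set
 * by fillMatrix, writes row indices into position[0..sparseCount-1] and column
 * indices into position[sparseCount..2*sparseCount-1], then frees the dense
 * buffer and keeps only the packed nonzero values.
 */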
void COOTheMatrix(Matrix *m){
unsigned long long int i, elements, count;
float *holding;
count = m -> sparseCount;
elements = m -> rows * m -> columns;
m -> position = (unsigned long long int*)malloc(sizeof(unsigned long long int) * count * 2);
m -> pitch = count * sizeof(unsigned long long int);
holding = (float*)malloc(sizeof(float) * count);
for(i = 0, count = 0;i < elements;i++){
if(m -> matrix[i] != 0){
m -> position[count] = i / (m -> columns);
m -> position[m -> sparseCount + count] = i % (m -> columns);
holding[count++] = m -> matrix[i];
}
}
free(m -> matrix);
m -> matrix = holding;
return;
}
void printMatrix(Matrix *m){
unsigned long long int i, elements, count;
unsigned long long int columns = m -> sparseCount;
elements = m -> rows * m -> columns;
printf("%llu\n", m -> columns);
for(i = 0, count = 0;i < elements && count < m -> sparseCount;i++){ // '<' : avoid reading one past the last stored entry
// if(m -> position[count] == (i / m -> columns) && m -> position[columns + count] == (i % m -> columns)){
printf("%f[%llu, %llu]\t", m -> matrix[count], m -> position[count], m -> position[columns + count]);
count++;
// }
// else
// printf("0\t");
if((i + 1) % m -> columns == 0)
printf("\n");
}
return;
}
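/*
 * Builds a host-resident Matrix whose matrix/position members point to device
 * memory. The 2 x sparseCount position table is allocated with hipMallocPitch
 * and transferred with hipMemcpy2D, so each row of indices starts on an
 * aligned pitch boundary.
 */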
void prepareHostGPUCopy(Matrix **m_hgpu, Matrix *m){
(*m_hgpu) = (Matrix*)malloc(sizeof(Matrix));
(*m_hgpu) -> rows = m -> rows;
(*m_hgpu) -> columns = m -> columns;
(*m_hgpu) -> sparseCount = m -> sparseCount;
checkError(hipMalloc(&((*m_hgpu) -> matrix), m -> sparseCount * sizeof(float)));
checkError(hipMemcpy((*m_hgpu) -> matrix, m -> matrix, m -> sparseCount * sizeof(float), hipMemcpyHostToDevice));
checkError(hipMallocPitch(&((*m_hgpu) -> position), &((*m_hgpu) -> pitch), m -> sparseCount * sizeof(unsigned long long int), 2));
checkError(hipMemcpy2D((*m_hgpu) -> position, (*m_hgpu) -> pitch, m -> position, m -> pitch, m -> sparseCount * sizeof(unsigned long long int), 2, hipMemcpyHostToDevice));
return;
}
void prepareGPUCopy(Matrix **m_gpu, Matrix **m_hgpu){
checkError(hipMalloc(m_gpu, sizeof(Matrix)));
checkError(hipMemcpy(*m_gpu, *m_hgpu, sizeof(Matrix), hipMemcpyHostToDevice));
return;
}
void prepareResultHGPUCopy(Matrix **c, Matrix **m1, Matrix **m2){
*c = (Matrix*)malloc(sizeof(Matrix));
(*c) -> sparseCount = (*m1) -> sparseCount * (*m2) -> sparseCount;
(*c) -> rows = (*m1) -> rows * (*m2) -> rows;
(*c) -> columns = (*m1) -> columns * (*m2) -> columns;
//Pitch doesn't have to be declared here as this method returns Host GPU copy
//Which means that the pitch will be extracted from the hipMallocPitch call
return;
}
void retrieveResult(Matrix **m){
Matrix *retVal;
retVal = (Matrix*)malloc(sizeof(Matrix));
retVal -> columns = (*m) -> columns;
retVal -> rows = (*m) -> rows;
retVal -> sparseCount = (*m) -> sparseCount;
retVal -> pitch = (*m) -> sparseCount * sizeof(unsigned long long int);
retVal -> matrix = (float*)malloc(sizeof(float) * (*m) -> sparseCount);
retVal -> position = (unsigned long long int*)malloc(sizeof(unsigned long long int) * (*m) -> sparseCount * 2);
checkError(hipMemcpy(retVal -> matrix, (*m) -> matrix, (*m) -> sparseCount * sizeof(float), hipMemcpyDeviceToHost));
checkError(hipMemcpy2D(retVal -> position, retVal -> pitch, (*m) -> position, (*m) -> pitch, (*m) -> sparseCount * sizeof(unsigned long long int), 2, hipMemcpyDeviceToHost));
//hipFree((*m) -> matrix);
//hipFree((*m) -> position);
//free(*m);
printf("Jio %llu\n", (*m) -> sparseCount);
*m = retVal;
return;
}
void kronProd(Matrix *a_hgpu, Matrix *b_hgpu, Matrix **c_hgpu){
Matrix *c_gpu, *a_gpu, *b_gpu;
prepareGPUCopy(&a_gpu, &a_hgpu);
prepareGPUCopy(&b_gpu, &b_hgpu);
//Initializing C_host
prepareResultHGPUCopy(c_hgpu, &a_hgpu, &b_hgpu);
checkError(hipMalloc(&((*c_hgpu) -> matrix), (*c_hgpu) -> sparseCount * sizeof(float)));
checkError(hipMallocPitch(&((*c_hgpu) -> position), &((*c_hgpu) -> pitch), (*c_hgpu) -> sparseCount * sizeof(unsigned long long int), 2));
prepareGPUCopy(&c_gpu, c_hgpu);
int cSparse = (*c_hgpu) -> sparseCount;
float blocks = ceil((float)cSparse / 512);
unsigned int numBlocks = ((unsigned int)blocks);
unsigned int threadsPerBlock = 512;
unsigned long long int *aH = (unsigned long long int*)malloc(sizeof(unsigned long long int) * cSparse);
unsigned long long int *aG;
checkError(hipMalloc(&aG, sizeof(unsigned long long int) * cSparse));
hipLaunchKernelGGL(( multiply), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, a_gpu, b_gpu, c_gpu, aG);
checkError(hipMemcpy(aH, aG, sizeof(unsigned long long int) * cSparse, hipMemcpyDeviceToHost));
int i;
for(i = 0;i < cSparse;i++)
printf("aH[%d]: %llu\n", i, aH[i]);
hipFree(c_gpu);
hipFree(a_gpu);
hipFree(b_gpu);
return;
}
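/*
 * COO Kronecker product: output nonzero i pairs nonzero aIndex of A with
 * nonzero bIndex of B. Its value is the product of the two entries and its
 * coordinates are (rowA * b->rows + rowB, colA * b->columns + colB), the
 * standard block layout of the Kronecker product A (x) B.
 */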
__global__ void multiply(Matrix *a, Matrix *b, Matrix *c, unsigned long long int *aG){
unsigned long long int i, cDim;
i = blockIdx.x * blockDim.x + threadIdx.x;
cDim = a -> sparseCount * b -> sparseCount;
if(i < cDim){
unsigned long long int aIndex = (unsigned int)((double)i / b -> sparseCount);
unsigned long long int bIndex = i % b -> sparseCount;
c -> matrix[i] = a -> matrix[aIndex] * b -> matrix[bIndex];
unsigned long long int *cRow0 = (unsigned long long int*)((char*)c -> position); // + (0 * c -> pitch)
unsigned long long int *cRow1 = (unsigned long long int*)((char*)c -> position + (1 * c -> pitch));
unsigned long long int *aRow0 = (unsigned long long int*)((char*)a -> position); // + (0 * a -> pitch)
unsigned long long int *aRow1 = (unsigned long long int*)((char*)a -> position + (1 * a -> pitch));
unsigned long long int *bRow0 = (unsigned long long int*)((char*)b -> position); // + (0 * b -> pitch)
unsigned long long int *bRow1 = (unsigned long long int*)((char*)b -> position + (1 * b -> pitch));
cRow0[i] = aRow0[aIndex] * (b -> rows) + bRow0[bIndex];
cRow1[i] = aRow1[aIndex] * (b -> columns) + bRow1[bIndex];
aG[i] = aRow1[aIndex];
}
return;
}
| 6c522a80cfa7634c345ef8df665cefd255e4e1ac.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
typedef struct{
float *matrix;
unsigned long long int rows;
unsigned long long int *position;
unsigned long long int columns;
unsigned long long int sparseCount;
size_t pitch;
}Matrix;
void fillMatrix(Matrix*);
void COOTheMatrix(Matrix*);
void printMatrix(Matrix*);
void prepareGPUCopy(Matrix**);
void prepareHostGPUCopy(Matrix**, Matrix*);
void prepareResultHGPUCopy(Matrix**, Matrix**, Matrix**);
void retrieveResult(Matrix**);
void kronProd(Matrix*, Matrix*, Matrix**);
__global__ void multiply(Matrix*, Matrix*, Matrix*, unsigned long long int*);
#define checkError(call) { checkGPUError((call), __LINE__); }
inline void checkGPUError(cudaError_t errCode, int line){
if(errCode != cudaSuccess){
printf("\n");
printf("Error at %d: %s\n", line, cudaGetErrorString(errCode));
}
return;
}
int main(){
Matrix *a, *b, *c;
Matrix *a_hgpu, *b_hgpu;
a = (Matrix*)malloc(sizeof(Matrix));
b = (Matrix*)malloc(sizeof(Matrix));
c = (Matrix*)malloc(sizeof(Matrix));
if(a == NULL || b == NULL || c == NULL){
fprintf(stderr, "Failed to allocate memory on CPU\n");
return(1);
}
/*
* Initializing the matrix parameters
*/
a -> rows = b -> columns = 2;
a -> columns = b -> rows = 3;
a -> matrix = (float*)malloc(sizeof(float) * a -> rows * a -> columns);
b -> matrix = (float*)malloc(sizeof(float) * b -> rows * b -> columns);
fillMatrix(a);
fillMatrix(b);
printMatrix(a);
printMatrix(b);
prepareHostGPUCopy(&a_hgpu, a);
prepareHostGPUCopy(&b_hgpu, b);
kronProd(a_hgpu, b_hgpu, &c);
retrieveResult(&c);
printMatrix(c);
cudaFree(a_hgpu -> matrix);
cudaFree(a_hgpu -> position);
cudaFree(b_hgpu -> matrix);
cudaFree(b_hgpu -> position);
cudaFree(a_hgpu);
cudaFree(b_hgpu);
free(a);
free(b);
free(c);
return(0);
}
void fillMatrix(Matrix *m){
unsigned long long int i, elements, count = 0;
float a = 5.0;
elements = m -> rows * m -> columns;
srand((unsigned int)time(NULL));
for(i = 0; i < elements;i++){
printf("%f\t", m -> matrix[i] = ((float)rand()/(float)(RAND_MAX)) * a);
if(m -> matrix[i] != 0)
count++;
if((i + 1) % m -> columns == 0)
printf("\n");
}
printf("\n");
m -> sparseCount = count;
COOTheMatrix(m);
return;
}
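/*
 * Converts the dense matrix to COO form: the nonzero values are compacted into
 * m->matrix, and m->position stores two rows of length sparseCount -- row
 * indices first, then column indices -- so (position[k], position[sparseCount + k])
 * gives the coordinates of matrix[k].
 */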
void COOTheMatrix(Matrix *m){
unsigned long long int i, elements, count;
float *holding;
count = m -> sparseCount;
elements = m -> rows * m -> columns;
m -> position = (unsigned long long int*)malloc(sizeof(unsigned long long int) * count * 2);
m -> pitch = count * sizeof(unsigned long long int);
holding = (float*)malloc(sizeof(float) * count);
for(i = 0, count = 0;i < elements;i++){
if(m -> matrix[i] != 0){
m -> position[count] = i / (m -> columns);
m -> position[m -> sparseCount + count] = i % (m -> columns);
holding[count++] = m -> matrix[i];
}
}
free(m -> matrix);
m -> matrix = holding;
return;
}
void printMatrix(Matrix *m){
unsigned long long int i, elements, count;
unsigned long long int columns = m -> sparseCount;
elements = m -> rows * m -> columns;
printf("%llu\n", m -> columns);
for(i = 0, count = 0;i < elements && count < m -> sparseCount;i++){
// if(m -> position[count] == (i / m -> columns) && m -> position[columns + count] == (i % m -> columns)){
printf("%f[%llu, %llu]\t", m -> matrix[count], m -> position[count], m -> position[columns + count]);
count++;
// }
// else
// printf("0\t");
if((i + 1) % m -> columns == 0)
printf("\n");
}
return;
}
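/*
 * Builds a host-resident Matrix whose matrix/position members point to device
 * memory. The 2 x sparseCount position table is allocated with cudaMallocPitch
 * and transferred with cudaMemcpy2D, so each row of indices starts on an
 * aligned pitch boundary.
 */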
void prepareHostGPUCopy(Matrix **m_hgpu, Matrix *m){
(*m_hgpu) = (Matrix*)malloc(sizeof(Matrix));
(*m_hgpu) -> rows = m -> rows;
(*m_hgpu) -> columns = m -> columns;
(*m_hgpu) -> sparseCount = m -> sparseCount;
checkError(cudaMalloc(&((*m_hgpu) -> matrix), m -> sparseCount * sizeof(float)));
checkError(cudaMemcpy((*m_hgpu) -> matrix, m -> matrix, m -> sparseCount * sizeof(float), cudaMemcpyHostToDevice));
checkError(cudaMallocPitch(&((*m_hgpu) -> position), &((*m_hgpu) -> pitch), m -> sparseCount * sizeof(unsigned long long int), 2));
checkError(cudaMemcpy2D((*m_hgpu) -> position, (*m_hgpu) -> pitch, m -> position, m -> pitch, m -> sparseCount * sizeof(unsigned long long int), 2, cudaMemcpyHostToDevice));
return;
}
void prepareGPUCopy(Matrix **m_gpu, Matrix **m_hgpu){
checkError(cudaMalloc(m_gpu, sizeof(Matrix)));
checkError(cudaMemcpy(*m_gpu, *m_hgpu, sizeof(Matrix), cudaMemcpyHostToDevice));
return;
}
void prepareResultHGPUCopy(Matrix **c, Matrix **m1, Matrix **m2){
*c = (Matrix*)malloc(sizeof(Matrix));
(*c) -> sparseCount = (*m1) -> sparseCount * (*m2) -> sparseCount;
(*c) -> rows = (*m1) -> rows * (*m2) -> rows;
(*c) -> columns = (*m1) -> columns * (*m2) -> columns;
//Pitch doesn't have to be declared here as this method returns Host GPU copy
//Which means that the pitch will be extracted from the cudaMallocPitch call
return;
}
void retrieveResult(Matrix **m){
Matrix *retVal;
retVal = (Matrix*)malloc(sizeof(Matrix));
retVal -> columns = (*m) -> columns;
retVal -> rows = (*m) -> rows;
retVal -> sparseCount = (*m) -> sparseCount;
retVal -> pitch = (*m) -> sparseCount * sizeof(unsigned long long int);
retVal -> matrix = (float*)malloc(sizeof(float) * (*m) -> sparseCount);
retVal -> position = (unsigned long long int*)malloc(sizeof(unsigned long long int) * (*m) -> sparseCount * 2);
checkError(cudaMemcpy(retVal -> matrix, (*m) -> matrix, (*m) -> sparseCount * sizeof(float), cudaMemcpyDeviceToHost));
checkError(cudaMemcpy2D(retVal -> position, retVal -> pitch, (*m) -> position, (*m) -> pitch, (*m) -> sparseCount * sizeof(unsigned long long int), 2, cudaMemcpyDeviceToHost));
//cudaFree((*m) -> matrix);
//cudaFree((*m) -> position);
//free(*m);
printf("Jio %llu\n", (*m) -> sparseCount);
*m = retVal;
return;
}
void kronProd(Matrix *a_hgpu, Matrix *b_hgpu, Matrix **c_hgpu){
Matrix *c_gpu, *a_gpu, *b_gpu;
prepareGPUCopy(&a_gpu, &a_hgpu);
prepareGPUCopy(&b_gpu, &b_hgpu);
//Initializing C_host
prepareResultHGPUCopy(c_hgpu, &a_hgpu, &b_hgpu);
checkError(cudaMalloc(&((*c_hgpu) -> matrix), (*c_hgpu) -> sparseCount * sizeof(float)));
checkError(cudaMallocPitch(&((*c_hgpu) -> position), &((*c_hgpu) -> pitch), (*c_hgpu) -> sparseCount * sizeof(unsigned long long int), 2));
prepareGPUCopy(&c_gpu, c_hgpu);
int cSparse = (*c_hgpu) -> sparseCount;
float blocks = ceil((float)cSparse / 512);
unsigned int numBlocks = ((unsigned int)blocks);
unsigned int threadsPerBlock = 512;
unsigned long long int *aH = (unsigned long long int*)malloc(sizeof(unsigned long long int) * cSparse);
unsigned long long int *aG;
checkError(cudaMalloc(&aG, sizeof(unsigned long long int) * cSparse));
multiply<<<numBlocks, threadsPerBlock>>>(a_gpu, b_gpu, c_gpu, aG);
checkError(cudaMemcpy(aH, aG, sizeof(unsigned long long int) * cSparse, cudaMemcpyDeviceToHost));
int i;
for(i = 0;i < cSparse;i++)
printf("aH[%d]: %llu\n", i, aH[i]);
cudaFree(c_gpu);
cudaFree(a_gpu);
cudaFree(b_gpu);
return;
}
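/*
 * COO Kronecker product: output nonzero i pairs nonzero aIndex of A with
 * nonzero bIndex of B. Its value is the product of the two entries and its
 * coordinates are (rowA * b->rows + rowB, colA * b->columns + colB), the
 * standard block layout of the Kronecker product A (x) B.
 */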
__global__ void multiply(Matrix *a, Matrix *b, Matrix *c, unsigned long long int *aG){
unsigned long long int i, cDim;
i = blockIdx.x * blockDim.x + threadIdx.x;
cDim = a -> sparseCount * b -> sparseCount;
if(i < cDim){
unsigned long long int aIndex = (unsigned int)((double)i / b -> sparseCount);
unsigned long long int bIndex = i % b -> sparseCount;
c -> matrix[i] = a -> matrix[aIndex] * b -> matrix[bIndex];
unsigned long long int *cRow0 = (unsigned long long int*)((char*)c -> position); // + (0 * c -> pitch)
unsigned long long int *cRow1 = (unsigned long long int*)((char*)c -> position + (1 * c -> pitch));
unsigned long long int *aRow0 = (unsigned long long int*)((char*)a -> position); // + (0 * a -> pitch)
unsigned long long int *aRow1 = (unsigned long long int*)((char*)a -> position + (1 * a -> pitch));
unsigned long long int *bRow0 = (unsigned long long int*)((char*)b -> position); // + (0 * b -> pitch)
unsigned long long int *bRow1 = (unsigned long long int*)((char*)b -> position + (1 * b -> pitch));
cRow0[i] = aRow0[aIndex] * (b -> rows) + bRow0[bIndex];
cRow1[i] = aRow1[aIndex] * (b -> columns) + bRow1[bIndex];
aG[i] = aRow1[aIndex];
}
return;
}
|
cb8a4d87bc161eb7890d7273b9f15a4ac75aed00.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
__global__ void add(float * x, float *y, float * z, int n){
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
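// Grid-stride loop: each thread handles elements index, index + stride, ...,
// so the kernel stays correct for any n, even when the grid is smaller than n.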
for (int i = index; i < n; i += stride){
z[i] = x[i] + y[i];
}
}
int main(){
int deviceCount;
hipGetDeviceCount(&deviceCount);
for(int i=0;i<deviceCount;i++)
{
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, i);
std::cout << "GPU device " << i << ": " << devProp.name << std::endl;
std::cout << " " << devProp.totalGlobalMem / 1024 / 1024 << "MB" << std::endl;
std::cout << "SM" << devProp.multiProcessorCount << std::endl;
std::cout << "" << devProp.sharedMemPerBlock / 1024.0 << " KB" << std::endl;
std::cout << "" << devProp.maxThreadsPerBlock << std::endl;
std::cout << "Block32 " << devProp.regsPerBlock << std::endl;
std::cout << "EM" << devProp.maxThreadsPerMultiProcessor << std::endl;
std::cout << "EM" << devProp.maxThreadsPerMultiProcessor / 32 << std::endl;
std::cout << " " << devProp.multiProcessorCount << std::endl;
std::cout << "======================================================" << std::endl;
}
int N = 1 << 20;
int nBytes = N * sizeof (float);
float *x, *y, *z;
// Allocate host (CPU) memory
x = (float*)malloc(nBytes);
y = (float*)malloc(nBytes);
z = (float*)malloc(nBytes);
// Initialize the input arrays
for (int i = 0; i < N; i++){
x[i] = 10.0;
y[i] = 20.0;
}
float *d_x, *d_y, *d_z;
// Allocate device (GPU) memory
hipMalloc((void**)&d_x, nBytes);
hipMalloc((void**)&d_y, nBytes);
hipMalloc((void**)&d_z, nBytes);
// Copy the input data from host to device
hipMemcpy((void*)d_x, (void*)x, nBytes, hipMemcpyHostToDevice);
hipMemcpy((void*)d_y, (void*)y, nBytes, hipMemcpyHostToDevice);
// Kernel launch configuration: N/256 blocks, each with 256 threads
dim3 blockSize(256);
// 4096
dim3 gridSize((N + blockSize.x - 1) / blockSize.x);
// Launch the kernel
add << < gridSize, blockSize >> >(d_x, d_y, d_z, N);
// Copy the result from device back to host
hipMemcpy((void*)z, (void*)d_z, nBytes, hipMemcpyDeviceToHost);
// Check the maximum error
float maxError = 0.0;
for (int i = 0; i < N; i++){
maxError = fmax(maxError, (float)(fabs(z[i] - 30.0)));
}
printf ("test max default: %.4f\n", maxError);
// Free host and device memory
hipFree(d_x);
hipFree(d_y);
hipFree(d_z);
free(x);
free(y);
free(z);
return 0;
} | cb8a4d87bc161eb7890d7273b9f15a4ac75aed00.cu | #include <stdio.h>
#include <iostream>
__global__ void add(float * x, float *y, float * z, int n){
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
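// Grid-stride loop: each thread handles elements index, index + stride, ...,
// so the kernel stays correct for any n, even when the grid is smaller than n.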
for (int i = index; i < n; i += stride){
z[i] = x[i] + y[i];
}
}
int main(){
int deviceCount;
cudaGetDeviceCount(&deviceCount);
for(int i=0;i<deviceCount;i++)
{
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, i);
std::cout << "使用GPU device " << i << ": " << devProp.name << std::endl;
std::cout << "设备全局内存总量: " << devProp.totalGlobalMem / 1024 / 1024 << "MB" << std::endl;
std::cout << "SM的数量:" << devProp.multiProcessorCount << std::endl;
std::cout << "每个线程块的共享内存大小:" << devProp.sharedMemPerBlock / 1024.0 << " KB" << std::endl;
std::cout << "每个线程块的最大线程数:" << devProp.maxThreadsPerBlock << std::endl;
std::cout << "设备上一个线程块(Block)种可用的32位寄存器数量: " << devProp.regsPerBlock << std::endl;
std::cout << "每个EM的最大线程数:" << devProp.maxThreadsPerMultiProcessor << std::endl;
std::cout << "每个EM的最大线程束数:" << devProp.maxThreadsPerMultiProcessor / 32 << std::endl;
std::cout << "设备上多处理器的数量: " << devProp.multiProcessorCount << std::endl;
std::cout << "======================================================" << std::endl;
}
int N = 1 << 20;
int nBytes = N * sizeof (float);
float *x, *y, *z;
// Allocate host (CPU) memory
x = (float*)malloc(nBytes);
y = (float*)malloc(nBytes);
z = (float*)malloc(nBytes);
// Initialize the input arrays
for (int i = 0; i < N; i++){
x[i] = 10.0;
y[i] = 20.0;
}
float *d_x, *d_y, *d_z;
// Allocate device (GPU) memory
cudaMalloc((void**)&d_x, nBytes);
cudaMalloc((void**)&d_y, nBytes);
cudaMalloc((void**)&d_z, nBytes);
// Copy the input data from host to device
cudaMemcpy((void*)d_x, (void*)x, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy((void*)d_y, (void*)y, nBytes, cudaMemcpyHostToDevice);
// Kernel launch configuration: N/256 blocks, each with 256 threads
dim3 blockSize(256);
// 4096
dim3 gridSize((N + blockSize.x - 1) / blockSize.x);
// Launch the kernel
add << < gridSize, blockSize >> >(d_x, d_y, d_z, N);
// Copy the result from device back to host
cudaMemcpy((void*)z, (void*)d_z, nBytes, cudaMemcpyDeviceToHost);
// Check the maximum error
float maxError = 0.0;
for (int i = 0; i < N; i++){
maxError = fmax(maxError, (float)(fabs(z[i] - 30.0)));
}
printf ("test max default: %.4f\n", maxError);
// Free host and device memory
cudaFree(d_x);
cudaFree(d_y);
cudaFree(d_z);
free(x);
free(y);
free(z);
return 0;
} |
5dc6a368707f395a886f2f7cae25fdf032114841.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Defines the basic matrix operations for the AIJ (compressed row)
matrix storage format using the CUSPARSE library,
*/
#define PETSC_SKIP_SPINLOCK
#define PETSC_SKIP_CXX_COMPLEX_FIX
#define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1
#include <petscconf.h>
#include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/
#include <../src/mat/impls/sbaij/seq/sbaij.h>
#include <../src/vec/vec/impls/dvecimpl.h>
#include <petsc/private/vecimpl.h>
#undef VecType
#include <../src/mat/impls/aij/seq/seqcusparse/cusparsematimpl.h>
#include <thrust/async/for_each.h>
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
#include <hip/hip_cooperative_groups.h>
#endif
const char *const MatCUSPARSEStorageFormats[] = {"CSR","ELL","HYB","MatCUSPARSEStorageFormat","MAT_CUSPARSE_",0};
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
/* The following are copied from hipsparse.h in CUDA-11.0. In MatCUSPARSESpMVAlgorithms[] etc, we copy them in
0-based integer value order, since we want to use PetscOptionsEnum() to parse user command line options for them.
typedef enum {
HIPSPARSE_MV_ALG_DEFAULT = 0,
HIPSPARSE_COOMV_ALG = 1,
HIPSPARSE_CSRMV_ALG1 = 2,
HIPSPARSE_CSRMV_ALG2 = 3
} hipsparseSpMVAlg_t;
typedef enum {
HIPSPARSE_MM_ALG_DEFAULT CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_ALG_DEFAULT) = 0,
HIPSPARSE_COOMM_ALG1 CUSPARSE_DEPRECATED_ENUM(HIPSPARSE_SPMM_COO_ALG1) = 1,
HIPSPARSE_COOMM_ALG2 CUSPARSE_DEPRECATED_ENUM(HIPSPARSE_SPMM_COO_ALG2) = 2,
HIPSPARSE_COOMM_ALG3 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_COO_ALG3) = 3,
HIPSPARSE_CSRMM_ALG1 CUSPARSE_DEPRECATED_ENUM(HIPSPARSE_CSRMM_ALG1) = 4,
CUSPARSE_SPMM_ALG_DEFAULT = 0,
HIPSPARSE_SPMM_COO_ALG1 = 1,
HIPSPARSE_SPMM_COO_ALG2 = 2,
CUSPARSE_SPMM_COO_ALG3 = 3,
CUSPARSE_SPMM_COO_ALG4 = 5,
HIPSPARSE_CSRMM_ALG1 = 4,
CUSPARSE_SPMM_CSR_ALG2 = 6,
} hipsparseSpMMAlg_t;
typedef enum {
HIPSPARSE_CSR2CSC_ALG1 = 1, // faster than V2 (in general), deterministc
HIPSPARSE_CSR2CSC_ALG2 = 2 // low memory requirement, non-deterministc
} hipsparseCsr2CscAlg_t;
*/
const char *const MatCUSPARSESpMVAlgorithms[] = {"MV_ALG_DEFAULT","COOMV_ALG", "CSRMV_ALG1","CSRMV_ALG2", "hipsparseSpMVAlg_t","CUSPARSE_",0};
const char *const MatCUSPARSESpMMAlgorithms[] = {"ALG_DEFAULT","COO_ALG1","COO_ALG2","COO_ALG3","CSR_ALG1","COO_ALG4","CSR_ALG2","hipsparseSpMMAlg_t","CUSPARSE_SPMM_",0};
const char *const MatCUSPARSECsr2CscAlgorithms[] = {"INVALID"/*cusparse does not have enum 0! We created one*/,"ALG1","ALG2","hipsparseCsr2CscAlg_t","CUSPARSE_CSR2CSC_",0};
#endif
static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,const MatFactorInfo*);
static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,const MatFactorInfo*);
static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat,Mat,const MatFactorInfo*);
static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSEBAND(Mat,Mat,IS,IS,const MatFactorInfo*);
static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSEBAND(Mat,Mat,const MatFactorInfo*);
static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,IS,const MatFactorInfo*);
static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,IS,const MatFactorInfo*);
static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat,Mat,const MatFactorInfo*);
static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat,Vec,Vec);
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat,Vec,Vec);
static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat);
static PetscErrorCode MatAXPY_SeqAIJCUSPARSE(Mat,PetscScalar,Mat,MatStructure);
static PetscErrorCode MatScale_SeqAIJCUSPARSE(Mat,PetscScalar);
static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec);
static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec);
static PetscErrorCode MatMultHermitianTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatMultHermitianTransposeAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec);
static PetscErrorCode MatMultAddKernel_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec,PetscBool,PetscBool);
static PetscErrorCode CsrMatrix_Destroy(CsrMatrix**);
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct**);
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct**,MatCUSPARSEStorageFormat);
static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Reset(Mat_SeqAIJCUSPARSETriFactors**);
static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors**);
static PetscErrorCode MatSeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE**);
static PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat);
static PetscErrorCode MatSeqAIJCUSPARSECopyFromGPU(Mat);
static PetscErrorCode MatSeqAIJCUSPARSEInvalidateTranspose(Mat,PetscBool);
PETSC_INTERN PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE(Mat,PetscInt,const PetscInt[],const PetscInt[]);
PETSC_INTERN PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE(Mat,const PetscScalar[],InsertMode);
static PetscErrorCode MatSeqAIJCopySubArray_SeqAIJCUSPARSE(Mat,PetscInt,const PetscInt[],PetscScalar[]);
PetscErrorCode MatCUSPARSESetStream(Mat A,const hipStream_t stream)
{
hipsparseStatus_t stat;
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
PetscFunctionBegin;
if (!cusparsestruct) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing spptr");
cusparsestruct->stream = stream;
stat = hipsparseSetStream(cusparsestruct->handle,cusparsestruct->stream);CHKERRCUSPARSE(stat);
PetscFunctionReturn(0);
}
PetscErrorCode MatCUSPARSESetHandle(Mat A,const hipsparseHandle_t handle)
{
hipsparseStatus_t stat;
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
PetscFunctionBegin;
if (!cusparsestruct) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing spptr");
if (cusparsestruct->handle != handle) {
if (cusparsestruct->handle) {
stat = hipsparseDestroy(cusparsestruct->handle);CHKERRCUSPARSE(stat);
}
cusparsestruct->handle = handle;
}
stat = hipsparseSetPointerMode(cusparsestruct->handle, HIPSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
PetscFunctionReturn(0);
}
PetscErrorCode MatCUSPARSEClearHandle(Mat A)
{
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
PetscBool flg;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg || !cusparsestruct) PetscFunctionReturn(0);
if (cusparsestruct->handle) cusparsestruct->handle = 0;
PetscFunctionReturn(0);
}
PetscErrorCode MatFactorGetSolverType_seqaij_cusparse(Mat A,MatSolverType *type)
{
PetscFunctionBegin;
*type = MATSOLVERCUSPARSE;
PetscFunctionReturn(0);
}
/*MC
MATSOLVERCUSPARSE = "cusparse" - A matrix type providing triangular solvers for seq matrices
on a single GPU of type, seqaijcusparse, aijcusparse, or seqaijcusp, aijcusp. Currently supported
algorithms are ILU(k) and ICC(k). Typically, deeper factorizations (larger k) results in poorer
performance in the triangular solves. Full LU, and Cholesky decompositions can be solved through the
CUSPARSE triangular solve algorithm. However, the performance can be quite poor and thus these
algorithms are not recommended. This class does NOT support direct solver operations.
Level: beginner
.seealso: PCFactorSetMatSolverType(), MatSolverType, MatCreateSeqAIJCUSPARSE(), MATAIJCUSPARSE, MatCreateAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation
M*/
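/*
   Illustrative usage via the options database (assuming the standard
   PCFactorSetMatSolverType()/MatSetType() option names):
     -mat_type aijcusparse -pc_type ilu -pc_factor_mat_solver_type cusparse
*/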
PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse(Mat A,MatFactorType ftype,Mat *B)
{
PetscErrorCode ierr;
PetscInt n = A->rmap->n;
PetscFunctionBegin;
ierr = MatCreate(PetscObjectComm((PetscObject)A),B);CHKERRQ(ierr);
ierr = MatSetSizes(*B,n,n,n,n);CHKERRQ(ierr);
(*B)->factortype = ftype;
(*B)->useordering = PETSC_TRUE;
ierr = MatSetType(*B,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
if (ftype == MAT_FACTOR_LU || ftype == MAT_FACTOR_ILU || ftype == MAT_FACTOR_ILUDT) {
ierr = MatSetBlockSizesFromMats(*B,A,A);CHKERRQ(ierr);
(*B)->ops->ilufactorsymbolic = MatILUFactorSymbolic_SeqAIJCUSPARSE;
(*B)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqAIJCUSPARSE;
} else if (ftype == MAT_FACTOR_CHOLESKY || ftype == MAT_FACTOR_ICC) {
(*B)->ops->iccfactorsymbolic = MatICCFactorSymbolic_SeqAIJCUSPARSE;
(*B)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqAIJCUSPARSE;
} else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Factor type not supported for CUSPARSE Matrix Types");
ierr = MatSeqAIJSetPreallocation(*B,MAT_SKIP_ALLOCATION,NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)(*B),"MatFactorGetSolverType_C",MatFactorGetSolverType_seqaij_cusparse);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PETSC_INTERN PetscErrorCode MatCUSPARSESetFormat_SeqAIJCUSPARSE(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format)
{
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
PetscFunctionBegin;
switch (op) {
case MAT_CUSPARSE_MULT:
cusparsestruct->format = format;
break;
case MAT_CUSPARSE_ALL:
cusparsestruct->format = format;
break;
default:
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unsupported operation %d for MatCUSPARSEFormatOperation. MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL are currently supported.",op);
}
PetscFunctionReturn(0);
}
/*@
MatCUSPARSESetFormat - Sets the storage format of CUSPARSE matrices for a particular
operation. Only the MatMult operation can use different GPU storage formats
for MPIAIJCUSPARSE matrices.
Not Collective
Input Parameters:
+ A - Matrix of type SEQAIJCUSPARSE
. op - MatCUSPARSEFormatOperation. SEQAIJCUSPARSE matrices support MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL. MPIAIJCUSPARSE matrices support MAT_CUSPARSE_MULT_DIAG, MAT_CUSPARSE_MULT_OFFDIAG, and MAT_CUSPARSE_ALL.
- format - MatCUSPARSEStorageFormat (one of MAT_CUSPARSE_CSR, MAT_CUSPARSE_ELL, MAT_CUSPARSE_HYB. The latter two require CUDA 4.2)
Output Parameter:
Level: intermediate
.seealso: MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation
@*/
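/*
   Illustrative call (assuming A is a SEQAIJCUSPARSE matrix): select ELL
   storage for MatMult only:
     MatCUSPARSESetFormat(A, MAT_CUSPARSE_MULT, MAT_CUSPARSE_ELL);
*/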
PetscErrorCode MatCUSPARSESetFormat(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format)
{
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A, MAT_CLASSID,1);
ierr = PetscTryMethod(A,"MatCUSPARSESetFormat_C",(Mat,MatCUSPARSEFormatOperation,MatCUSPARSEStorageFormat),(A,op,format));CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatSetOption_SeqAIJCUSPARSE(Mat A,MatOption op,PetscBool flg)
{
PetscErrorCode ierr;
PetscFunctionBegin;
switch (op) {
case MAT_FORM_EXPLICIT_TRANSPOSE:
/* need to destroy the transpose matrix if present to prevent from logic errors if flg is set to true later */
if (A->form_explicit_transpose && !flg) {ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_TRUE);CHKERRQ(ierr);}
A->form_explicit_transpose = flg;
break;
default:
ierr = MatSetOption_SeqAIJ(A,op,flg);CHKERRQ(ierr);
break;
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(Mat A);
static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat B,Mat A,const MatFactorInfo *info)
{
Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data;
IS isrow = b->row,iscol = b->col;
PetscBool row_identity,col_identity;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr);
ierr = MatLUFactorNumeric_SeqAIJ(B,A,info);CHKERRQ(ierr);
B->offloadmask = PETSC_OFFLOAD_CPU;
/* determine which version of MatSolve needs to be used. */
ierr = ISIdentity(isrow,&row_identity);CHKERRQ(ierr);
ierr = ISIdentity(iscol,&col_identity);CHKERRQ(ierr);
if (row_identity && col_identity) {
B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering;
B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering;
B->ops->matsolve = NULL;
B->ops->matsolvetranspose = NULL;
} else {
B->ops->solve = MatSolve_SeqAIJCUSPARSE;
B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE;
B->ops->matsolve = NULL;
B->ops->matsolvetranspose = NULL;
}
/* get the triangular factors */
ierr = MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(B);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat A)
{
PetscErrorCode ierr;
MatCUSPARSEStorageFormat format;
PetscBool flg;
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
PetscFunctionBegin;
ierr = PetscOptionsHead(PetscOptionsObject,"SeqAIJCUSPARSE options");CHKERRQ(ierr);
if (A->factortype == MAT_FACTOR_NONE) {
ierr = PetscOptionsEnum("-mat_cusparse_mult_storage_format","sets storage format of (seq)aijcusparse gpu matrices for SpMV",
"MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparsestruct->format,(PetscEnum*)&format,&flg);CHKERRQ(ierr);
if (flg) {ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_MULT,format);CHKERRQ(ierr);}
ierr = PetscOptionsEnum("-mat_cusparse_storage_format","sets storage format of (seq)aijcusparse gpu matrices for SpMV and TriSolve",
"MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparsestruct->format,(PetscEnum*)&format,&flg);CHKERRQ(ierr);
if (flg) {ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_ALL,format);CHKERRQ(ierr);}
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
ierr = PetscOptionsEnum("-mat_cusparse_spmv_alg","sets cuSPARSE algorithm used in sparse-mat dense-vector multiplication (SpMV)",
"hipsparseSpMVAlg_t",MatCUSPARSESpMVAlgorithms,(PetscEnum)cusparsestruct->spmvAlg,(PetscEnum*)&cusparsestruct->spmvAlg,&flg);CHKERRQ(ierr);
/* If user did use this option, check its consistency with cuSPARSE, since PetscOptionsEnum() sets enum values based on their position in MatCUSPARSESpMVAlgorithms[] */
if (flg && HIPSPARSE_CSRMV_ALG1 != 2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE enum hipsparseSpMVAlg_t has been changed but PETSc has not been updated accordingly");
ierr = PetscOptionsEnum("-mat_cusparse_spmm_alg","sets cuSPARSE algorithm used in sparse-mat dense-mat multiplication (SpMM)",
"hipsparseSpMMAlg_t",MatCUSPARSESpMMAlgorithms,(PetscEnum)cusparsestruct->spmmAlg,(PetscEnum*)&cusparsestruct->spmmAlg,&flg);CHKERRQ(ierr);
if (flg && HIPSPARSE_CSRMM_ALG1 != 4) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE enum hipsparseSpMMAlg_t has been changed but PETSc has not been updated accordingly");
ierr = PetscOptionsEnum("-mat_cusparse_csr2csc_alg","sets cuSPARSE algorithm used in converting CSR matrices to CSC matrices",
"hipsparseCsr2CscAlg_t",MatCUSPARSECsr2CscAlgorithms,(PetscEnum)cusparsestruct->csr2cscAlg,(PetscEnum*)&cusparsestruct->csr2cscAlg,&flg);CHKERRQ(ierr);
if (flg && HIPSPARSE_CSR2CSC_ALG1 != 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE enum hipsparseCsr2CscAlg_t has been changed but PETSc has not been updated accordingly");
#endif
}
ierr = PetscOptionsTail();CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS isrow,IS iscol,const MatFactorInfo *info)
{
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr);
ierr = MatILUFactorSymbolic_SeqAIJ(B,A,isrow,iscol,info);CHKERRQ(ierr);
B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE;
PetscFunctionReturn(0);
}
static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS isrow,IS iscol,const MatFactorInfo *info)
{
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr);
ierr = MatLUFactorSymbolic_SeqAIJ(B,A,isrow,iscol,info);CHKERRQ(ierr);
B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE;
PetscFunctionReturn(0);
}
static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS perm,const MatFactorInfo *info)
{
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr);
ierr = MatICCFactorSymbolic_SeqAIJ(B,A,perm,info);CHKERRQ(ierr);
B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE;
PetscFunctionReturn(0);
}
static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS perm,const MatFactorInfo *info)
{
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr);
ierr = MatCholeskyFactorSymbolic_SeqAIJ(B,A,perm,info);CHKERRQ(ierr);
B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE;
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEBuildILULowerTriMatrix(Mat A)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
PetscInt n = A->rmap->n;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
hipsparseStatus_t stat;
const PetscInt *ai = a->i,*aj = a->j,*vi;
const MatScalar *aa = a->a,*v;
PetscInt *AiLo, *AjLo;
PetscInt i,nz, nzLower, offset, rowOffset;
PetscErrorCode ierr;
hipError_t cerr;
PetscFunctionBegin;
if (!n) PetscFunctionReturn(0);
if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
try {
/* first figure out the number of nonzeros in the lower triangular matrix including 1's on the diagonal. */
nzLower=n+ai[n]-ai[1];
if (!loTriFactor) {
PetscScalar *AALo;
cerr = hipHostMalloc((void**) &AALo, nzLower*sizeof(PetscScalar));CHKERRCUDA(cerr);
/* Allocate Space for the lower triangular matrix */
cerr = hipHostMalloc((void**) &AiLo, (n+1)*sizeof(PetscInt));CHKERRCUDA(cerr);
cerr = hipHostMalloc((void**) &AjLo, nzLower*sizeof(PetscInt));CHKERRCUDA(cerr);
/* Fill the lower triangular matrix */
AiLo[0] = (PetscInt) 0;
AiLo[n] = nzLower;
AjLo[0] = (PetscInt) 0;
AALo[0] = (MatScalar) 1.0;
v = aa;
vi = aj;
offset = 1;
rowOffset= 1;
for (i=1; i<n; i++) {
nz = ai[i+1] - ai[i];
/* additional 1 for the term on the diagonal */
AiLo[i] = rowOffset;
rowOffset += nz+1;
ierr = PetscArraycpy(&(AjLo[offset]), vi, nz);CHKERRQ(ierr);
ierr = PetscArraycpy(&(AALo[offset]), v, nz);CHKERRQ(ierr);
offset += nz;
AjLo[offset] = (PetscInt) i;
AALo[offset] = (MatScalar) 1.0;
offset += 1;
v += nz;
vi += nz;
}
/* allocate space for the triangular factor information */
ierr = PetscNew(&loTriFactor);CHKERRQ(ierr);
loTriFactor->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;
/* Create the matrix description */
stat = hipsparseCreateMatDescr(&loTriFactor->descr);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatIndexBase(loTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = hipsparseSetMatType(loTriFactor->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
#else
stat = hipsparseSetMatType(loTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat);
#endif
stat = hipsparseSetMatFillMode(loTriFactor->descr, HIPSPARSE_FILL_MODE_LOWER);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatDiagType(loTriFactor->descr, HIPSPARSE_DIAG_TYPE_UNIT);CHKERRCUSPARSE(stat);
/* set the operation */
loTriFactor->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE;
/* set the matrix */
loTriFactor->csrMat = new CsrMatrix;
loTriFactor->csrMat->num_rows = n;
loTriFactor->csrMat->num_cols = n;
loTriFactor->csrMat->num_entries = nzLower;
loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n+1);
loTriFactor->csrMat->row_offsets->assign(AiLo, AiLo+n+1);
loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzLower);
loTriFactor->csrMat->column_indices->assign(AjLo, AjLo+nzLower);
loTriFactor->csrMat->values = new THRUSTARRAY(nzLower);
loTriFactor->csrMat->values->assign(AALo, AALo+nzLower);
/* Create the solve analysis information */
ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_create_analysis_info(&loTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, loTriFactor->solveOp,
loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr,
loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo,
&loTriFactor->solveBufferSize);CHKERRCUSPARSE(stat);
cerr = hipMalloc(&loTriFactor->solveBuffer,loTriFactor->solveBufferSize);CHKERRCUDA(cerr);
#endif
/* perform the solve analysis */
stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactor->solveOp,
loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr,
loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,loTriFactor->solvePolicy, loTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtr = loTriFactor;
loTriFactor->AA_h = AALo;
cerr = hipHostFree(AiLo);CHKERRCUDA(cerr);
cerr = hipHostFree(AjLo);CHKERRCUDA(cerr);
ierr = PetscLogCpuToGpu((n+1+nzLower)*sizeof(int)+nzLower*sizeof(PetscScalar));CHKERRQ(ierr);
} else { /* update values only */
if (!loTriFactor->AA_h) {
cerr = hipHostMalloc((void**) &loTriFactor->AA_h, nzLower*sizeof(PetscScalar));CHKERRCUDA(cerr);
}
/* Fill the lower triangular matrix */
loTriFactor->AA_h[0] = 1.0;
v = aa;
vi = aj;
offset = 1;
for (i=1; i<n; i++) {
nz = ai[i+1] - ai[i];
ierr = PetscArraycpy(&(loTriFactor->AA_h[offset]), v, nz);CHKERRQ(ierr);
offset += nz;
loTriFactor->AA_h[offset] = 1.0;
offset += 1;
v += nz;
}
loTriFactor->csrMat->values->assign(loTriFactor->AA_h, loTriFactor->AA_h+nzLower);
ierr = PetscLogCpuToGpu(nzLower*sizeof(PetscScalar));CHKERRQ(ierr);
}
} catch(char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
}
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(Mat A)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
PetscInt n = A->rmap->n;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
hipsparseStatus_t stat;
const PetscInt *aj = a->j,*adiag = a->diag,*vi;
const MatScalar *aa = a->a,*v;
PetscInt *AiUp, *AjUp;
PetscInt i,nz, nzUpper, offset;
PetscErrorCode ierr;
hipError_t cerr;
PetscFunctionBegin;
if (!n) PetscFunctionReturn(0);
if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
try {
/* next, figure out the number of nonzeros in the upper triangular matrix. */
nzUpper = adiag[0]-adiag[n];
if (!upTriFactor) {
PetscScalar *AAUp;
cerr = hipHostMalloc((void**) &AAUp, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr);
/* Allocate Space for the upper triangular matrix */
cerr = hipHostMalloc((void**) &AiUp, (n+1)*sizeof(PetscInt));CHKERRCUDA(cerr);
cerr = hipHostMalloc((void**) &AjUp, nzUpper*sizeof(PetscInt));CHKERRCUDA(cerr);
/* Fill the upper triangular matrix */
AiUp[0]=(PetscInt) 0;
AiUp[n]=nzUpper;
offset = nzUpper;
for (i=n-1; i>=0; i--) {
v = aa + adiag[i+1] + 1;
vi = aj + adiag[i+1] + 1;
/* number of elements NOT on the diagonal */
nz = adiag[i] - adiag[i+1]-1;
/* decrement the offset */
offset -= (nz+1);
/* first, set the diagonal elements */
AjUp[offset] = (PetscInt) i;
AAUp[offset] = (MatScalar)1./v[nz];
AiUp[i] = AiUp[i+1] - (nz+1);
ierr = PetscArraycpy(&(AjUp[offset+1]), vi, nz);CHKERRQ(ierr);
ierr = PetscArraycpy(&(AAUp[offset+1]), v, nz);CHKERRQ(ierr);
}
/* allocate space for the triangular factor information */
ierr = PetscNew(&upTriFactor);CHKERRQ(ierr);
upTriFactor->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;
/* Create the matrix description */
stat = hipsparseCreateMatDescr(&upTriFactor->descr);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatIndexBase(upTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = hipsparseSetMatType(upTriFactor->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
#else
stat = hipsparseSetMatType(upTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat);
#endif
stat = hipsparseSetMatFillMode(upTriFactor->descr, HIPSPARSE_FILL_MODE_UPPER);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatDiagType(upTriFactor->descr, HIPSPARSE_DIAG_TYPE_NON_UNIT);CHKERRCUSPARSE(stat);
/* set the operation */
upTriFactor->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE;
/* set the matrix */
upTriFactor->csrMat = new CsrMatrix;
upTriFactor->csrMat->num_rows = n;
upTriFactor->csrMat->num_cols = n;
upTriFactor->csrMat->num_entries = nzUpper;
upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n+1);
upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+n+1);
upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzUpper);
upTriFactor->csrMat->column_indices->assign(AjUp, AjUp+nzUpper);
upTriFactor->csrMat->values = new THRUSTARRAY(nzUpper);
upTriFactor->csrMat->values->assign(AAUp, AAUp+nzUpper);
/* Create the solve analysis information */
ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_create_analysis_info(&upTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, upTriFactor->solveOp,
upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr,
upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo,
&upTriFactor->solveBufferSize);CHKERRCUSPARSE(stat);
cerr = hipMalloc(&upTriFactor->solveBuffer,upTriFactor->solveBufferSize);CHKERRCUDA(cerr);
#endif
/* perform the solve analysis */
stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactor->solveOp,
upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr,
upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,upTriFactor->solvePolicy, upTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtr = upTriFactor;
upTriFactor->AA_h = AAUp;
cerr = hipHostFree(AiUp);CHKERRCUDA(cerr);
cerr = hipHostFree(AjUp);CHKERRCUDA(cerr);
ierr = PetscLogCpuToGpu((n+1+nzUpper)*sizeof(int)+nzUpper*sizeof(PetscScalar));CHKERRQ(ierr);
} else {
if (!upTriFactor->AA_h) {
cerr = hipHostMalloc((void**) &upTriFactor->AA_h, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr);
}
/* Fill the upper triangular matrix */
offset = nzUpper;
for (i=n-1; i>=0; i--) {
v = aa + adiag[i+1] + 1;
/* number of elements NOT on the diagonal */
nz = adiag[i] - adiag[i+1]-1;
/* decrement the offset */
offset -= (nz+1);
/* first, set the diagonal elements */
upTriFactor->AA_h[offset] = 1./v[nz];
ierr = PetscArraycpy(&(upTriFactor->AA_h[offset+1]), v, nz);CHKERRQ(ierr);
}
upTriFactor->csrMat->values->assign(upTriFactor->AA_h, upTriFactor->AA_h+nzUpper);
ierr = PetscLogCpuToGpu(nzUpper*sizeof(PetscScalar));CHKERRQ(ierr);
}
} catch(char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
}
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(Mat A)
{
PetscErrorCode ierr;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
IS isrow = a->row,iscol = a->icol;
PetscBool row_identity,col_identity;
PetscInt n = A->rmap->n;
PetscFunctionBegin;
if (!cusparseTriFactors) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors");
ierr = MatSeqAIJCUSPARSEBuildILULowerTriMatrix(A);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(A);CHKERRQ(ierr);
if (!cusparseTriFactors->workVector) { cusparseTriFactors->workVector = new THRUSTARRAY(n); }
cusparseTriFactors->nnz=a->nz;
A->offloadmask = PETSC_OFFLOAD_BOTH;
/* lower triangular indices */
ierr = ISIdentity(isrow,&row_identity);CHKERRQ(ierr);
if (!row_identity && !cusparseTriFactors->rpermIndices) {
const PetscInt *r;
ierr = ISGetIndices(isrow,&r);CHKERRQ(ierr);
cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n);
cusparseTriFactors->rpermIndices->assign(r, r+n);
ierr = ISRestoreIndices(isrow,&r);CHKERRQ(ierr);
ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr);
}
/* upper triangular indices */
ierr = ISIdentity(iscol,&col_identity);CHKERRQ(ierr);
if (!col_identity && !cusparseTriFactors->cpermIndices) {
const PetscInt *c;
ierr = ISGetIndices(iscol,&c);CHKERRQ(ierr);
cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n);
cusparseTriFactors->cpermIndices->assign(c, c+n);
ierr = ISRestoreIndices(iscol,&c);CHKERRQ(ierr);
ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEBuildICCTriMatrices(Mat A)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
hipsparseStatus_t stat;
PetscErrorCode ierr;
hipError_t cerr;
PetscInt *AiUp, *AjUp;
PetscScalar *AAUp;
PetscScalar *AALo;
PetscInt nzUpper = a->nz,n = A->rmap->n,i,offset,nz,j;
Mat_SeqSBAIJ *b = (Mat_SeqSBAIJ*)A->data;
const PetscInt *ai = b->i,*aj = b->j,*vj;
const MatScalar *aa = b->a,*v;
PetscFunctionBegin;
if (!n) PetscFunctionReturn(0);
if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
try {
cerr = hipHostMalloc((void**) &AAUp, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = hipHostMalloc((void**) &AALo, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr);
if (!upTriFactor && !loTriFactor) {
/* Allocate Space for the upper triangular matrix */
cerr = hipHostMalloc((void**) &AiUp, (n+1)*sizeof(PetscInt));CHKERRCUDA(cerr);
cerr = hipHostMalloc((void**) &AjUp, nzUpper*sizeof(PetscInt));CHKERRCUDA(cerr);
/* Fill the upper triangular matrix */
AiUp[0]=(PetscInt) 0;
AiUp[n]=nzUpper;
offset = 0;
for (i=0; i<n; i++) {
/* set the pointers */
v = aa + ai[i];
vj = aj + ai[i];
nz = ai[i+1] - ai[i] - 1; /* exclude diag[i] */
/* first, set the diagonal elements */
AjUp[offset] = (PetscInt) i;
AAUp[offset] = (MatScalar)1.0/v[nz];
AiUp[i] = offset;
AALo[offset] = (MatScalar)1.0/v[nz];
offset+=1;
if (nz>0) {
ierr = PetscArraycpy(&(AjUp[offset]), vj, nz);CHKERRQ(ierr);
ierr = PetscArraycpy(&(AAUp[offset]), v, nz);CHKERRQ(ierr);
for (j=offset; j<offset+nz; j++) {
AAUp[j] = -AAUp[j];
AALo[j] = AAUp[j]/v[nz];
}
offset+=nz;
}
}
/* allocate space for the triangular factor information */
ierr = PetscNew(&upTriFactor);CHKERRQ(ierr);
upTriFactor->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;
/* Create the matrix description */
stat = hipsparseCreateMatDescr(&upTriFactor->descr);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatIndexBase(upTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = hipsparseSetMatType(upTriFactor->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
#else
stat = hipsparseSetMatType(upTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat);
#endif
stat = hipsparseSetMatFillMode(upTriFactor->descr, HIPSPARSE_FILL_MODE_UPPER);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatDiagType(upTriFactor->descr, HIPSPARSE_DIAG_TYPE_UNIT);CHKERRCUSPARSE(stat);
/* set the matrix */
upTriFactor->csrMat = new CsrMatrix;
upTriFactor->csrMat->num_rows = A->rmap->n;
upTriFactor->csrMat->num_cols = A->cmap->n;
upTriFactor->csrMat->num_entries = a->nz;
upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1);
upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+A->rmap->n+1);
upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz);
upTriFactor->csrMat->column_indices->assign(AjUp, AjUp+a->nz);
upTriFactor->csrMat->values = new THRUSTARRAY(a->nz);
upTriFactor->csrMat->values->assign(AAUp, AAUp+a->nz);
/* set the operation */
upTriFactor->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE;
/* Create the solve analysis information */
ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_create_analysis_info(&upTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, upTriFactor->solveOp,
upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr,
upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo,
&upTriFactor->solveBufferSize);CHKERRCUSPARSE(stat);
cerr = hipMalloc(&upTriFactor->solveBuffer,upTriFactor->solveBufferSize);CHKERRCUDA(cerr);
#endif
/* perform the solve analysis */
stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactor->solveOp,
upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr,
upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,upTriFactor->solvePolicy, upTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtr = upTriFactor;
/* allocate space for the triangular factor information */
ierr = PetscNew(&loTriFactor);CHKERRQ(ierr);
loTriFactor->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;
/* Create the matrix description */
stat = hipsparseCreateMatDescr(&loTriFactor->descr);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatIndexBase(loTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = hipsparseSetMatType(loTriFactor->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
#else
stat = hipsparseSetMatType(loTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat);
#endif
stat = hipsparseSetMatFillMode(loTriFactor->descr, HIPSPARSE_FILL_MODE_UPPER);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatDiagType(loTriFactor->descr, HIPSPARSE_DIAG_TYPE_NON_UNIT);CHKERRCUSPARSE(stat);
/* set the operation */
loTriFactor->solveOp = HIPSPARSE_OPERATION_TRANSPOSE;
/* set the matrix */
loTriFactor->csrMat = new CsrMatrix;
loTriFactor->csrMat->num_rows = A->rmap->n;
loTriFactor->csrMat->num_cols = A->cmap->n;
loTriFactor->csrMat->num_entries = a->nz;
loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1);
loTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+A->rmap->n+1);
loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz);
loTriFactor->csrMat->column_indices->assign(AjUp, AjUp+a->nz);
loTriFactor->csrMat->values = new THRUSTARRAY(a->nz);
loTriFactor->csrMat->values->assign(AALo, AALo+a->nz);
/* Create the solve analysis information */
ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_create_analysis_info(&loTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, loTriFactor->solveOp,
loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr,
loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo,
&loTriFactor->solveBufferSize);CHKERRCUSPARSE(stat);
cerr = hipMalloc(&loTriFactor->solveBuffer,loTriFactor->solveBufferSize);CHKERRCUDA(cerr);
#endif
/* perform the solve analysis */
stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactor->solveOp,
loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr,
loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,loTriFactor->solvePolicy, loTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtr = loTriFactor;
ierr = PetscLogCpuToGpu(2*(((A->rmap->n+1)+(a->nz))*sizeof(int)+(a->nz)*sizeof(PetscScalar)));CHKERRQ(ierr);
cerr = hipHostFree(AiUp);CHKERRCUDA(cerr);
cerr = hipHostFree(AjUp);CHKERRCUDA(cerr);
} else {
/* Fill the upper triangular matrix */
offset = 0;
for (i=0; i<n; i++) {
/* set the pointers */
v = aa + ai[i];
nz = ai[i+1] - ai[i] - 1; /* exclude diag[i] */
/* first, set the diagonal elements */
AAUp[offset] = 1.0/v[nz];
AALo[offset] = 1.0/v[nz];
offset+=1;
if (nz>0) {
ierr = PetscArraycpy(&(AAUp[offset]), v, nz);CHKERRQ(ierr);
for (j=offset; j<offset+nz; j++) {
AAUp[j] = -AAUp[j];
AALo[j] = AAUp[j]/v[nz];
}
offset+=nz;
}
}
if (!upTriFactor) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors");
if (!loTriFactor) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors");
upTriFactor->csrMat->values->assign(AAUp, AAUp+a->nz);
loTriFactor->csrMat->values->assign(AALo, AALo+a->nz);
ierr = PetscLogCpuToGpu(2*(a->nz)*sizeof(PetscScalar));CHKERRQ(ierr);
}
cerr = hipHostFree(AAUp);CHKERRCUDA(cerr);
cerr = hipHostFree(AALo);CHKERRCUDA(cerr);
} catch(char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
}
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(Mat A)
{
PetscErrorCode ierr;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
IS ip = a->row;
PetscBool perm_identity;
PetscInt n = A->rmap->n;
PetscFunctionBegin;
if (!cusparseTriFactors) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors");
ierr = MatSeqAIJCUSPARSEBuildICCTriMatrices(A);CHKERRQ(ierr);
if (!cusparseTriFactors->workVector) { cusparseTriFactors->workVector = new THRUSTARRAY(n); }
cusparseTriFactors->nnz=(a->nz-n)*2 + n;
A->offloadmask = PETSC_OFFLOAD_BOTH;
/* lower triangular indices */
ierr = ISIdentity(ip,&perm_identity);CHKERRQ(ierr);
if (!perm_identity) {
IS iip;
const PetscInt *irip,*rip;
ierr = ISInvertPermutation(ip,PETSC_DECIDE,&iip);CHKERRQ(ierr);
ierr = ISGetIndices(iip,&irip);CHKERRQ(ierr);
ierr = ISGetIndices(ip,&rip);CHKERRQ(ierr);
cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n);
cusparseTriFactors->rpermIndices->assign(rip, rip+n);
cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n);
cusparseTriFactors->cpermIndices->assign(irip, irip+n);
ierr = ISRestoreIndices(iip,&irip);CHKERRQ(ierr);
ierr = ISDestroy(&iip);CHKERRQ(ierr);
ierr = ISRestoreIndices(ip,&rip);CHKERRQ(ierr);
ierr = PetscLogCpuToGpu(2.*n*sizeof(PetscInt));CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
#define CHECK_LAUNCH_ERROR() \
do { \
/* Check synchronous errors, i.e. pre-launch */ \
hipError_t err = hipGetLastError(); \
if (hipSuccess != err) { \
SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Cuda error: %s",hipGetErrorString(err)); \
} \
/* Check asynchronous errors, i.e. kernel failed (ULF) */ \
err = hipDeviceSynchronize(); \
if (hipSuccess != err) { \
SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Cuda error: %s",hipGetErrorString(err)); \
} \
} while (0)
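/* Numeric Cholesky factorization: factor on the CPU via MatCholeskyFactorNumeric_SeqAIJ, select the (natural-ordering) solve kernels, then copy the triangular factors to the GPU */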
static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat B,Mat A,const MatFactorInfo *info)
{
Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data;
IS ip = b->row;
PetscBool perm_identity;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr);
ierr = MatCholeskyFactorNumeric_SeqAIJ(B,A,info);CHKERRQ(ierr);
B->offloadmask = PETSC_OFFLOAD_CPU;
/* determine which version of MatSolve needs to be used. */
ierr = ISIdentity(ip,&perm_identity);CHKERRQ(ierr);
if (perm_identity) {
B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering;
B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering;
B->ops->matsolve = NULL;
B->ops->matsolvetranspose = NULL;
} else {
B->ops->solve = MatSolve_SeqAIJCUSPARSE;
B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE;
B->ops->matsolve = NULL;
B->ops->matsolvetranspose = NULL;
}
/* get the triangular factors */
ierr = MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(B);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
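/* Form the transposes (CSC storage) of the lower and upper triangular factors on the GPU and run the cusparse solve analysis on them; needed by the MatSolveTranspose routines */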
static PetscErrorCode MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(Mat A)
{
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT;
hipsparseStatus_t stat;
hipsparseIndexBase_t indexBase;
hipsparseMatrixType_t matrixType;
hipsparseFillMode_t fillMode;
hipsparseDiagType_t diagType;
hipError_t cerr;
PetscErrorCode ierr;
PetscFunctionBegin;
/* allocate space for the transpose of the lower triangular factor */
ierr = PetscNew(&loTriFactorT);CHKERRQ(ierr);
loTriFactorT->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;
/* set the matrix descriptors of the lower triangular factor */
matrixType = cusparseGetMatType(loTriFactor->descr);
indexBase = cusparseGetMatIndexBase(loTriFactor->descr);
fillMode = cusparseGetMatFillMode(loTriFactor->descr)==HIPSPARSE_FILL_MODE_UPPER ?
HIPSPARSE_FILL_MODE_LOWER : HIPSPARSE_FILL_MODE_UPPER;
diagType = cusparseGetMatDiagType(loTriFactor->descr);
/* Create the matrix description */
stat = hipsparseCreateMatDescr(&loTriFactorT->descr);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatIndexBase(loTriFactorT->descr, indexBase);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatType(loTriFactorT->descr, matrixType);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatFillMode(loTriFactorT->descr, fillMode);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatDiagType(loTriFactorT->descr, diagType);CHKERRCUSPARSE(stat);
/* set the operation */
loTriFactorT->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE;
/* allocate GPU space for the CSC of the lower triangular factor */
loTriFactorT->csrMat = new CsrMatrix;
loTriFactorT->csrMat->num_rows = loTriFactor->csrMat->num_cols;
loTriFactorT->csrMat->num_cols = loTriFactor->csrMat->num_rows;
loTriFactorT->csrMat->num_entries = loTriFactor->csrMat->num_entries;
loTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(loTriFactorT->csrMat->num_rows+1);
loTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(loTriFactorT->csrMat->num_entries);
loTriFactorT->csrMat->values = new THRUSTARRAY(loTriFactorT->csrMat->num_entries);
/* compute the transpose of the lower triangular factor, i.e. the CSC */
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = hipsparseCsr2cscEx2_bufferSize(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows,
loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries,
loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(),
loTriFactorT->csrMat->values->data().get(),
loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype,
HIPSPARSE_ACTION_NUMERIC,indexBase,
HIPSPARSE_CSR2CSC_ALG1, &loTriFactor->csr2cscBufferSize);CHKERRCUSPARSE(stat);
cerr = hipMalloc(&loTriFactor->csr2cscBuffer,loTriFactor->csr2cscBufferSize);CHKERRCUDA(cerr);
#endif
ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_csr2csc(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows,
loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries,
loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(),
loTriFactorT->csrMat->values->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype,
HIPSPARSE_ACTION_NUMERIC, indexBase,
HIPSPARSE_CSR2CSC_ALG1, loTriFactor->csr2cscBuffer
#else
loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->csrMat->row_offsets->data().get(),
HIPSPARSE_ACTION_NUMERIC, indexBase
#endif
);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
/* Create the solve analysis information */
ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_create_analysis_info(&loTriFactorT->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, loTriFactorT->solveOp,
loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, loTriFactorT->descr,
loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(),
loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo,
&loTriFactorT->solveBufferSize);CHKERRCUSPARSE(stat);
cerr = hipMalloc(&loTriFactorT->solveBuffer,loTriFactorT->solveBufferSize);CHKERRCUDA(cerr);
#endif
/* perform the solve analysis */
stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactorT->solveOp,
loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, loTriFactorT->descr,
loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(),
loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,loTriFactorT->solvePolicy, loTriFactorT->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtrTranspose = loTriFactorT;
/*********************************************/
/* Now the Transpose of the Upper Tri Factor */
/*********************************************/
/* allocate space for the transpose of the upper triangular factor */
ierr = PetscNew(&upTriFactorT);CHKERRQ(ierr);
upTriFactorT->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;
/* set the matrix descriptors of the upper triangular factor */
matrixType = cusparseGetMatType(upTriFactor->descr);
indexBase = cusparseGetMatIndexBase(upTriFactor->descr);
fillMode = cusparseGetMatFillMode(upTriFactor->descr)==HIPSPARSE_FILL_MODE_UPPER ?
HIPSPARSE_FILL_MODE_LOWER : HIPSPARSE_FILL_MODE_UPPER;
diagType = cusparseGetMatDiagType(upTriFactor->descr);
/* Create the matrix description */
stat = hipsparseCreateMatDescr(&upTriFactorT->descr);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatIndexBase(upTriFactorT->descr, indexBase);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatType(upTriFactorT->descr, matrixType);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatFillMode(upTriFactorT->descr, fillMode);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatDiagType(upTriFactorT->descr, diagType);CHKERRCUSPARSE(stat);
/* set the operation */
upTriFactorT->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE;
/* allocate GPU space for the CSC of the upper triangular factor */
upTriFactorT->csrMat = new CsrMatrix;
upTriFactorT->csrMat->num_rows = upTriFactor->csrMat->num_cols;
upTriFactorT->csrMat->num_cols = upTriFactor->csrMat->num_rows;
upTriFactorT->csrMat->num_entries = upTriFactor->csrMat->num_entries;
upTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(upTriFactorT->csrMat->num_rows+1);
upTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(upTriFactorT->csrMat->num_entries);
upTriFactorT->csrMat->values = new THRUSTARRAY(upTriFactorT->csrMat->num_entries);
/* compute the transpose of the upper triangular factor, i.e. the CSC */
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = hipsparseCsr2cscEx2_bufferSize(cusparseTriFactors->handle,upTriFactor->csrMat->num_rows,
upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries,
upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(),
upTriFactorT->csrMat->values->data().get(),
upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype,
HIPSPARSE_ACTION_NUMERIC,indexBase,
HIPSPARSE_CSR2CSC_ALG1, &upTriFactor->csr2cscBufferSize);CHKERRCUSPARSE(stat);
cerr = hipMalloc(&upTriFactor->csr2cscBuffer,upTriFactor->csr2cscBufferSize);CHKERRCUDA(cerr);
#endif
ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_csr2csc(cusparseTriFactors->handle, upTriFactor->csrMat->num_rows,
upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries,
upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(),
upTriFactorT->csrMat->values->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype,
HIPSPARSE_ACTION_NUMERIC, indexBase,
HIPSPARSE_CSR2CSC_ALG1, upTriFactor->csr2cscBuffer
#else
upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->csrMat->row_offsets->data().get(),
HIPSPARSE_ACTION_NUMERIC, indexBase
#endif
);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
/* Create the solve analysis information */
ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_create_analysis_info(&upTriFactorT->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, upTriFactorT->solveOp,
upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, upTriFactorT->descr,
upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(),
upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo,
&upTriFactorT->solveBufferSize);CHKERRCUSPARSE(stat);
cerr = hipMalloc(&upTriFactorT->solveBuffer,upTriFactorT->solveBufferSize);CHKERRCUDA(cerr);
#endif
/* perform the solve analysis */
stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactorT->solveOp,
upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, upTriFactorT->descr,
upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(),
upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,upTriFactorT->solvePolicy, upTriFactorT->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtrTranspose = upTriFactorT;
PetscFunctionReturn(0);
}
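/* Thrust functor that converts the PetscScalar permutation values produced by csr2csc back to PetscInt indices (stored in csr2csc_i) */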
struct PetscScalarToPetscInt
{
__host__ __device__
PetscInt operator()(PetscScalar s)
{
return (PetscInt)PetscRealPart(s);
}
};
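/* Form (or update) an explicit transpose of the matrix on the GPU, cached in matTranspose, so that products with A^T can use a non-transposed cusparse operation */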
static PetscErrorCode MatSeqAIJCUSPARSEFormExplicitTransposeForMult(Mat A)
{
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
Mat_SeqAIJCUSPARSEMultStruct *matstruct, *matstructT;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
hipsparseStatus_t stat;
hipsparseIndexBase_t indexBase;
hipError_t err;
PetscErrorCode ierr;
PetscFunctionBegin;
if (!A->form_explicit_transpose || !A->rmap->n || !A->cmap->n) PetscFunctionReturn(0);
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat;
if (!matstruct) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing mat struct");
matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose;
if (A->transupdated && !matstructT) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing matTranspose struct");
if (A->transupdated) PetscFunctionReturn(0);
ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
if (cusparsestruct->format != MAT_CUSPARSE_CSR) {
ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_TRUE);CHKERRQ(ierr);
}
if (!cusparsestruct->matTranspose) { /* create cusparse matrix */
matstructT = new Mat_SeqAIJCUSPARSEMultStruct;
stat = hipsparseCreateMatDescr(&matstructT->descr);CHKERRCUSPARSE(stat);
indexBase = cusparseGetMatIndexBase(matstruct->descr);
stat = hipsparseSetMatIndexBase(matstructT->descr, indexBase);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatType(matstructT->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
/* set alpha and beta */
err = hipMalloc((void **)&(matstructT->alpha_one),sizeof(PetscScalar));CHKERRCUDA(err);
err = hipMalloc((void **)&(matstructT->beta_zero),sizeof(PetscScalar));CHKERRCUDA(err);
err = hipMalloc((void **)&(matstructT->beta_one), sizeof(PetscScalar));CHKERRCUDA(err);
err = hipMemcpy(matstructT->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err);
err = hipMemcpy(matstructT->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err);
err = hipMemcpy(matstructT->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err);
if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
CsrMatrix *matrixT = new CsrMatrix;
matstructT->mat = matrixT;
matrixT->num_rows = A->cmap->n;
matrixT->num_cols = A->rmap->n;
matrixT->num_entries = a->nz;
matrixT->row_offsets = new THRUSTINTARRAY32(matrixT->num_rows+1);
matrixT->column_indices = new THRUSTINTARRAY32(a->nz);
matrixT->values = new THRUSTARRAY(a->nz);
if (!cusparsestruct->rowoffsets_gpu) { cusparsestruct->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n+1); }
cusparsestruct->rowoffsets_gpu->assign(a->i,a->i+A->rmap->n+1);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = hipsparseCreateCsr(&matstructT->matDescr,
matrixT->num_rows, matrixT->num_cols, matrixT->num_entries,
matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(),
matrixT->values->data().get(),
HIPSPARSE_INDEX_32I,HIPSPARSE_INDEX_32I, /* row offset, col idx type due to THRUSTINTARRAY32 */
indexBase,cusparse_scalartype);CHKERRCUSPARSE(stat);
#endif
} else if (cusparsestruct->format == MAT_CUSPARSE_ELL || cusparsestruct->format == MAT_CUSPARSE_HYB) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
CsrMatrix *temp = new CsrMatrix;
CsrMatrix *tempT = new CsrMatrix;
/* First convert HYB to CSR */
temp->num_rows = A->rmap->n;
temp->num_cols = A->cmap->n;
temp->num_entries = a->nz;
temp->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1);
temp->column_indices = new THRUSTINTARRAY32(a->nz);
temp->values = new THRUSTARRAY(a->nz);
stat = cusparse_hyb2csr(cusparsestruct->handle,
matstruct->descr, (cusparseHybMat_t)matstruct->mat,
temp->values->data().get(),
temp->row_offsets->data().get(),
temp->column_indices->data().get());CHKERRCUSPARSE(stat);
/* Next, convert CSR to CSC (i.e. the matrix transpose) */
tempT->num_rows = A->rmap->n;
tempT->num_cols = A->cmap->n;
tempT->num_entries = a->nz;
tempT->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1);
tempT->column_indices = new THRUSTINTARRAY32(a->nz);
tempT->values = new THRUSTARRAY(a->nz);
stat = cusparse_csr2csc(cusparsestruct->handle, temp->num_rows,
temp->num_cols, temp->num_entries,
temp->values->data().get(),
temp->row_offsets->data().get(),
temp->column_indices->data().get(),
tempT->values->data().get(),
tempT->column_indices->data().get(),
tempT->row_offsets->data().get(),
HIPSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUSPARSE(stat);
/* Last, convert CSC to HYB */
cusparseHybMat_t hybMat;
stat = cusparseCreateHybMat(&hybMat);CHKERRCUSPARSE(stat);
cusparseHybPartition_t partition = cusparsestruct->format==MAT_CUSPARSE_ELL ?
CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO;
stat = cusparse_csr2hyb(cusparsestruct->handle, A->rmap->n, A->cmap->n,
matstructT->descr, tempT->values->data().get(),
tempT->row_offsets->data().get(),
tempT->column_indices->data().get(),
hybMat, 0, partition);CHKERRCUSPARSE(stat);
/* assign the pointer */
matstructT->mat = hybMat;
A->transupdated = PETSC_TRUE;
/* delete temporaries */
if (tempT) {
if (tempT->values) delete (THRUSTARRAY*) tempT->values;
if (tempT->column_indices) delete (THRUSTINTARRAY32*) tempT->column_indices;
if (tempT->row_offsets) delete (THRUSTINTARRAY32*) tempT->row_offsets;
delete (CsrMatrix*) tempT;
}
if (temp) {
if (temp->values) delete (THRUSTARRAY*) temp->values;
if (temp->column_indices) delete (THRUSTINTARRAY32*) temp->column_indices;
if (temp->row_offsets) delete (THRUSTINTARRAY32*) temp->row_offsets;
delete (CsrMatrix*) temp;
}
#endif
}
}
if (cusparsestruct->format == MAT_CUSPARSE_CSR) { /* transpose mat struct may be already present, update data */
CsrMatrix *matrix = (CsrMatrix*)matstruct->mat;
CsrMatrix *matrixT = (CsrMatrix*)matstructT->mat;
if (!matrix) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CsrMatrix");
if (!matrix->row_offsets) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CsrMatrix rows");
if (!matrix->column_indices) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CsrMatrix cols");
if (!matrix->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CsrMatrix values");
if (!matrixT) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CsrMatrixT");
if (!matrixT->row_offsets) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CsrMatrixT rows");
if (!matrixT->column_indices) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CsrMatrixT cols");
if (!matrixT->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CsrMatrixT values");
if (!cusparsestruct->rowoffsets_gpu) { /* this may be absent when we did not construct the transpose with csr2csc */
cusparsestruct->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1);
cusparsestruct->rowoffsets_gpu->assign(a->i,a->i + A->rmap->n + 1);
ierr = PetscLogCpuToGpu((A->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr);
}
if (!cusparsestruct->csr2csc_i) {
THRUSTARRAY csr2csc_a(matrix->num_entries);
PetscStackCallThrust(thrust::sequence(thrust::device, csr2csc_a.begin(), csr2csc_a.end(), 0.0));
indexBase = cusparseGetMatIndexBase(matstruct->descr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
void *csr2cscBuffer;
size_t csr2cscBufferSize;
stat = hipsparseCsr2cscEx2_bufferSize(cusparsestruct->handle, A->rmap->n,
A->cmap->n, matrix->num_entries,
matrix->values->data().get(),
cusparsestruct->rowoffsets_gpu->data().get(),
matrix->column_indices->data().get(),
matrixT->values->data().get(),
matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), cusparse_scalartype,
HIPSPARSE_ACTION_NUMERIC,indexBase,
cusparsestruct->csr2cscAlg, &csr2cscBufferSize);CHKERRCUSPARSE(stat);
err = hipMalloc(&csr2cscBuffer,csr2cscBufferSize);CHKERRCUDA(err);
#endif
if (matrix->num_entries) {
/* When there are no nonzeros, this routine mistakenly returns HIPSPARSE_STATUS_INVALID_VALUE in
mat_tests-ex62_15_mpiaijcusparse on ranks 0 and 2 with CUDA-11. But CUDA-10 is OK.
I checked every parameter and they were just fine. I have no clue why cusparse complains.
Per https://docs.nvidia.com/cuda/cusparse/index.html#csr2cscEx2, when nnz = 0, matrixT->row_offsets[]
should be filled with indexBase. So I just take a shortcut here.
*/
stat = cusparse_csr2csc(cusparsestruct->handle, A->rmap->n,
A->cmap->n,matrix->num_entries,
csr2csc_a.data().get(),
cusparsestruct->rowoffsets_gpu->data().get(),
matrix->column_indices->data().get(),
matrixT->values->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), cusparse_scalartype,
HIPSPARSE_ACTION_NUMERIC,indexBase,
cusparsestruct->csr2cscAlg, csr2cscBuffer);CHKERRCUSPARSE(stat);
#else
matrixT->column_indices->data().get(), matrixT->row_offsets->data().get(),
HIPSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUSPARSE(stat);
#endif
} else {
matrixT->row_offsets->assign(matrixT->row_offsets->size(),indexBase);
}
cusparsestruct->csr2csc_i = new THRUSTINTARRAY(matrix->num_entries);
PetscStackCallThrust(thrust::transform(thrust::device,matrixT->values->begin(),matrixT->values->end(),cusparsestruct->csr2csc_i->begin(),PetscScalarToPetscInt()));
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
err = hipFree(csr2cscBuffer);CHKERRCUDA(err);
#endif
}
PetscStackCallThrust(thrust::copy(thrust::device,thrust::make_permutation_iterator(matrix->values->begin(), cusparsestruct->csr2csc_i->begin()),
thrust::make_permutation_iterator(matrix->values->begin(), cusparsestruct->csr2csc_i->end()),
matrixT->values->begin()));
}
ierr = PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
/* the compressed row indices are not used for matTranspose */
matstructT->cprowIndices = NULL;
/* assign the pointer */
((Mat_SeqAIJCUSPARSE*)A->spptr)->matTranspose = matstructT;
A->transupdated = PETSC_TRUE;
PetscFunctionReturn(0);
}
/* Why do we need to analyze the transposed matrix again? Can't we just use op(A) = HIPSPARSE_OPERATION_TRANSPOSE in MatSolve_SeqAIJCUSPARSE? */
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat A,Vec bb,Vec xx)
{
PetscInt n = xx->map->n;
const PetscScalar *barray;
PetscScalar *xarray;
thrust::device_ptr<const PetscScalar> bGPU;
thrust::device_ptr<PetscScalar> xGPU;
hipsparseStatus_t stat;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose;
THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector;
PetscErrorCode ierr;
hipError_t cerr;
PetscFunctionBegin;
/* Analyze the matrix and create the transpose ... on the fly */
if (!loTriFactorT && !upTriFactorT) {
ierr = MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A);CHKERRQ(ierr);
loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose;
upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose;
}
/* Get the GPU pointers */
ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr);
xGPU = thrust::device_pointer_cast(xarray);
bGPU = thrust::device_pointer_cast(barray);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
/* First, reorder with the row permutation */
thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()),
thrust::make_permutation_iterator(bGPU+n, cusparseTriFactors->rpermIndices->end()),
xGPU);
/* First, solve U */
stat = cusparse_solve(cusparseTriFactors->handle, upTriFactorT->solveOp,
upTriFactorT->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
upTriFactorT->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, upTriFactorT->descr,
upTriFactorT->csrMat->values->data().get(),
upTriFactorT->csrMat->row_offsets->data().get(),
upTriFactorT->csrMat->column_indices->data().get(),
upTriFactorT->solveInfo,
xarray, tempGPU->data().get()
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,upTriFactorT->solvePolicy, upTriFactorT->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
/* Then, solve L */
stat = cusparse_solve(cusparseTriFactors->handle, loTriFactorT->solveOp,
loTriFactorT->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
loTriFactorT->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, loTriFactorT->descr,
loTriFactorT->csrMat->values->data().get(),
loTriFactorT->csrMat->row_offsets->data().get(),
loTriFactorT->csrMat->column_indices->data().get(),
loTriFactorT->solveInfo,
tempGPU->data().get(), xarray
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,loTriFactorT->solvePolicy, loTriFactorT->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
/* Last, copy the solution, xGPU, into a temporary with the column permutation ... can't be done in place. */
thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(xGPU, cusparseTriFactors->cpermIndices->begin()),
thrust::make_permutation_iterator(xGPU+n, cusparseTriFactors->cpermIndices->end()),
tempGPU->begin());
/* Copy the temporary to the full solution. */
thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream),tempGPU->begin(), tempGPU->end(), xGPU);
/* restore */
ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
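/* Same as MatSolveTranspose_SeqAIJCUSPARSE but without the row/column permutations (natural ordering) */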
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat A,Vec bb,Vec xx)
{
const PetscScalar *barray;
PetscScalar *xarray;
hipsparseStatus_t stat;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose;
THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector;
PetscErrorCode ierr;
hipError_t cerr;
PetscFunctionBegin;
/* Analyze the matrix and create the transpose ... on the fly */
if (!loTriFactorT && !upTriFactorT) {
ierr = MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A);CHKERRQ(ierr);
loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose;
upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose;
}
/* Get the GPU pointers */
ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
/* First, solve U */
stat = cusparse_solve(cusparseTriFactors->handle, upTriFactorT->solveOp,
upTriFactorT->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
upTriFactorT->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, upTriFactorT->descr,
upTriFactorT->csrMat->values->data().get(),
upTriFactorT->csrMat->row_offsets->data().get(),
upTriFactorT->csrMat->column_indices->data().get(),
upTriFactorT->solveInfo,
barray, tempGPU->data().get()
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,upTriFactorT->solvePolicy, upTriFactorT->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
/* Then, solve L */
stat = cusparse_solve(cusparseTriFactors->handle, loTriFactorT->solveOp,
loTriFactorT->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
loTriFactorT->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, loTriFactorT->descr,
loTriFactorT->csrMat->values->data().get(),
loTriFactorT->csrMat->row_offsets->data().get(),
loTriFactorT->csrMat->column_indices->data().get(),
loTriFactorT->solveInfo,
tempGPU->data().get(), xarray
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,loTriFactorT->solvePolicy, loTriFactorT->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
/* restore */
ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
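/* Triangular solve on the GPU: apply the row permutation, solve with L then U, and apply the column permutation */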
static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat A,Vec bb,Vec xx)
{
const PetscScalar *barray;
PetscScalar *xarray;
thrust::device_ptr<const PetscScalar> bGPU;
thrust::device_ptr<PetscScalar> xGPU;
hipsparseStatus_t stat;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector;
PetscErrorCode ierr;
hipError_t cerr;
PetscFunctionBegin;
/* Get the GPU pointers */
ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr);
xGPU = thrust::device_pointer_cast(xarray);
bGPU = thrust::device_pointer_cast(barray);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
/* First, reorder with the row permutation */
thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()),
thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->end()),
tempGPU->begin());
/* Next, solve L */
stat = cusparse_solve(cusparseTriFactors->handle, loTriFactor->solveOp,
loTriFactor->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
loTriFactor->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, loTriFactor->descr,
loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(),
loTriFactor->solveInfo,
tempGPU->data().get(), xarray
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,loTriFactor->solvePolicy, loTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
/* Then, solve U */
stat = cusparse_solve(cusparseTriFactors->handle, upTriFactor->solveOp,
upTriFactor->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
upTriFactor->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, upTriFactor->descr,
upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(),
upTriFactor->solveInfo,
xarray, tempGPU->data().get()
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,upTriFactor->solvePolicy, upTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
/* Last, reorder with the column permutation */
thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->begin()),
thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->end()),
xGPU);
ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
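/* Same as MatSolve_SeqAIJCUSPARSE but without the row/column permutations (natural ordering) */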
static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat A,Vec bb,Vec xx)
{
const PetscScalar *barray;
PetscScalar *xarray;
hipsparseStatus_t stat;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector;
PetscErrorCode ierr;
hipError_t cerr;
PetscFunctionBegin;
/* Get the GPU pointers */
ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
/* First, solve L */
stat = cusparse_solve(cusparseTriFactors->handle, loTriFactor->solveOp,
loTriFactor->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
loTriFactor->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, loTriFactor->descr,
loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(),
loTriFactor->solveInfo,
barray, tempGPU->data().get()
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,loTriFactor->solvePolicy, loTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
/* Next, solve U */
stat = cusparse_solve(cusparseTriFactors->handle, upTriFactor->solveOp,
upTriFactor->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
upTriFactor->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, upTriFactor->descr,
upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(),
upTriFactor->solveInfo,
tempGPU->data().get(), xarray
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,upTriFactor->solvePolicy, upTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
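/* Copy the matrix values from the GPU CSR storage back to the host when the GPU copy is the up-to-date one */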
static PetscErrorCode MatSeqAIJCUSPARSECopyFromGPU(Mat A)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
hipError_t cerr;
PetscErrorCode ierr;
PetscFunctionBegin;
if (A->offloadmask == PETSC_OFFLOAD_GPU) {
CsrMatrix *matrix = (CsrMatrix*)cusp->mat->mat;
ierr = PetscLogEventBegin(MAT_CUSPARSECopyFromGPU,A,0,0,0);CHKERRQ(ierr);
cerr = hipMemcpy(a->a, matrix->values->data().get(), a->nz*sizeof(PetscScalar), hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuToCpu(a->nz*sizeof(PetscScalar));CHKERRQ(ierr);
ierr = PetscLogEventEnd(MAT_CUSPARSECopyFromGPU,A,0,0,0);CHKERRQ(ierr);
A->offloadmask = PETSC_OFFLOAD_BOTH;
}
PetscFunctionReturn(0);
}
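/* Return the host values array after syncing it from the GPU; the offload mask is set to CPU since the caller may modify the values */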
static PetscErrorCode MatSeqAIJGetArray_SeqAIJCUSPARSE(Mat A,PetscScalar *array[])
{
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr);
*array = a->a;
A->offloadmask = PETSC_OFFLOAD_CPU;
PetscFunctionReturn(0);
}
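/* Copy (or rebuild) the cusparse matrix on the GPU: if the nonzero pattern is unchanged only the values are updated, otherwise the CSR (or ELL/HYB) structure is recreated from the host data */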
static PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat A)
{
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
Mat_SeqAIJCUSPARSEMultStruct *matstruct = cusparsestruct->mat;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
PetscInt m = A->rmap->n,*ii,*ridx,tmp;
PetscErrorCode ierr;
hipsparseStatus_t stat;
PetscBool both = PETSC_TRUE;
hipError_t err;
PetscFunctionBegin;
if (A->boundtocpu) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Cannot copy to GPU");
if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
if (A->nonzerostate == cusparsestruct->nonzerostate && cusparsestruct->format == MAT_CUSPARSE_CSR) { /* Copy values only */
CsrMatrix *matrix;
matrix = (CsrMatrix*)cusparsestruct->mat->mat;
if (a->nz && !a->a) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CSR values");
ierr = PetscLogEventBegin(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr);
matrix->values->assign(a->a, a->a+a->nz);
err = WaitForCUDA();CHKERRCUDA(err);
ierr = PetscLogCpuToGpu((a->nz)*sizeof(PetscScalar));CHKERRQ(ierr);
ierr = PetscLogEventEnd(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_FALSE);CHKERRQ(ierr);
} else {
PetscInt nnz;
ierr = PetscLogEventBegin(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&cusparsestruct->mat,cusparsestruct->format);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_TRUE);CHKERRQ(ierr);
delete cusparsestruct->workVector;
delete cusparsestruct->rowoffsets_gpu;
cusparsestruct->workVector = NULL;
cusparsestruct->rowoffsets_gpu = NULL;
try {
if (a->compressedrow.use) {
m = a->compressedrow.nrows;
ii = a->compressedrow.i;
ridx = a->compressedrow.rindex;
} else {
m = A->rmap->n;
ii = a->i;
ridx = NULL;
}
if (!ii) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CSR row data");
if (m && !a->j) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CSR column data");
if (!a->a) { nnz = ii[m]; both = PETSC_FALSE; }
else nnz = a->nz;
/* create cusparse matrix */
cusparsestruct->nrows = m;
matstruct = new Mat_SeqAIJCUSPARSEMultStruct;
stat = hipsparseCreateMatDescr(&matstruct->descr);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatIndexBase(matstruct->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatType(matstruct->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
err = hipMalloc((void **)&(matstruct->alpha_one),sizeof(PetscScalar));CHKERRCUDA(err);
err = hipMalloc((void **)&(matstruct->beta_zero),sizeof(PetscScalar));CHKERRCUDA(err);
err = hipMalloc((void **)&(matstruct->beta_one), sizeof(PetscScalar));CHKERRCUDA(err);
err = hipMemcpy(matstruct->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err);
err = hipMemcpy(matstruct->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err);
err = hipMemcpy(matstruct->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err);
stat = hipsparseSetPointerMode(cusparsestruct->handle, HIPSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
/* Build a hybrid/ellpack matrix if this option is chosen for the storage */
if (cusparsestruct->format==MAT_CUSPARSE_CSR) {
/* set the matrix */
CsrMatrix *mat= new CsrMatrix;
mat->num_rows = m;
mat->num_cols = A->cmap->n;
mat->num_entries = nnz;
mat->row_offsets = new THRUSTINTARRAY32(m+1);
mat->row_offsets->assign(ii, ii + m+1);
mat->column_indices = new THRUSTINTARRAY32(nnz);
mat->column_indices->assign(a->j, a->j+nnz);
mat->values = new THRUSTARRAY(nnz);
if (a->a) mat->values->assign(a->a, a->a+nnz);
/* assign the pointer */
matstruct->mat = mat;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
if (mat->num_rows) { /* cusparse errors on empty matrices! */
stat = hipsparseCreateCsr(&matstruct->matDescr,
mat->num_rows, mat->num_cols, mat->num_entries,
mat->row_offsets->data().get(), mat->column_indices->data().get(),
mat->values->data().get(),
HIPSPARSE_INDEX_32I,HIPSPARSE_INDEX_32I, /* row offset, col idx types due to THRUSTINTARRAY32 */
HIPSPARSE_INDEX_BASE_ZERO,cusparse_scalartype);CHKERRCUSPARSE(stat);
}
#endif
} else if (cusparsestruct->format==MAT_CUSPARSE_ELL || cusparsestruct->format==MAT_CUSPARSE_HYB) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
CsrMatrix *mat= new CsrMatrix;
mat->num_rows = m;
mat->num_cols = A->cmap->n;
mat->num_entries = nnz;
mat->row_offsets = new THRUSTINTARRAY32(m+1);
mat->row_offsets->assign(ii, ii + m+1);
mat->column_indices = new THRUSTINTARRAY32(nnz);
mat->column_indices->assign(a->j, a->j+nnz);
mat->values = new THRUSTARRAY(nnz);
if (a->a) mat->values->assign(a->a, a->a+nnz);
cusparseHybMat_t hybMat;
stat = cusparseCreateHybMat(&hybMat);CHKERRCUSPARSE(stat);
cusparseHybPartition_t partition = cusparsestruct->format==MAT_CUSPARSE_ELL ?
CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO;
stat = cusparse_csr2hyb(cusparsestruct->handle, mat->num_rows, mat->num_cols,
matstruct->descr, mat->values->data().get(),
mat->row_offsets->data().get(),
mat->column_indices->data().get(),
hybMat, 0, partition);CHKERRCUSPARSE(stat);
/* assign the pointer */
matstruct->mat = hybMat;
if (mat) {
if (mat->values) delete (THRUSTARRAY*)mat->values;
if (mat->column_indices) delete (THRUSTINTARRAY32*)mat->column_indices;
if (mat->row_offsets) delete (THRUSTINTARRAY32*)mat->row_offsets;
delete (CsrMatrix*)mat;
}
#endif
}
/* assign the compressed row indices */
if (a->compressedrow.use) {
cusparsestruct->workVector = new THRUSTARRAY(m);
matstruct->cprowIndices = new THRUSTINTARRAY(m);
matstruct->cprowIndices->assign(ridx,ridx+m);
tmp = m;
} else {
cusparsestruct->workVector = NULL;
matstruct->cprowIndices = NULL;
tmp = 0;
}
ierr = PetscLogCpuToGpu(((m+1)+(a->nz))*sizeof(int)+tmp*sizeof(PetscInt)+(3+(a->nz))*sizeof(PetscScalar));CHKERRQ(ierr);
/* assign the pointer */
cusparsestruct->mat = matstruct;
} catch(char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
}
err = WaitForCUDA();CHKERRCUDA(err);
ierr = PetscLogEventEnd(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr);
cusparsestruct->nonzerostate = A->nonzerostate;
}
if (both) A->offloadmask = PETSC_OFFLOAD_BOTH;
}
PetscFunctionReturn(0);
}
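/* Thrust functors used to add or copy entries between zipped GPU arrays */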
struct VecCUDAPlusEquals
{
template <typename Tuple>
__host__ __device__
void operator()(Tuple t)
{
thrust::get<1>(t) = thrust::get<1>(t) + thrust::get<0>(t);
}
};
struct VecCUDAEquals
{
template <typename Tuple>
__host__ __device__
void operator()(Tuple t)
{
thrust::get<1>(t) = thrust::get<0>(t);
}
};
struct VecCUDAEqualsReverse
{
template <typename Tuple>
__host__ __device__
void operator()(Tuple t)
{
thrust::get<0>(t) = thrust::get<1>(t);
}
};
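/* Product data attached to C for the sparse-dense and sparse-sparse matrix products below */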
struct MatMatCusparse {
PetscBool cisdense;
PetscScalar *Bt;
Mat X;
PetscBool reusesym; /* Cusparse does not have split symbolic and numeric phases for sparse matmat operations */
PetscLogDouble flops;
CsrMatrix *Bcsr;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
hipsparseSpMatDescr_t matSpBDescr;
PetscBool initialized; /* C = alpha op(A) op(B) + beta C */
hipsparseDnMatDescr_t matBDescr;
hipsparseDnMatDescr_t matCDescr;
PetscInt Blda,Clda; /* Record leading dimensions of B and C here to detect changes */
size_t mmBufferSize;
void *mmBuffer;
void *mmBuffer2; /* SpGEMM WorkEstimation buffer */
hipsparseSpGEMMDescr_t spgemmDesc;
#endif
};
static PetscErrorCode MatDestroy_MatMatCusparse(void *data)
{
PetscErrorCode ierr;
MatMatCusparse *mmdata = (MatMatCusparse *)data;
hipError_t cerr;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
hipsparseStatus_t stat;
#endif
PetscFunctionBegin;
cerr = hipFree(mmdata->Bt);CHKERRCUDA(cerr);
delete mmdata->Bcsr;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
if (mmdata->matSpBDescr) { stat = hipsparseDestroySpMat(mmdata->matSpBDescr);CHKERRCUSPARSE(stat); }
if (mmdata->mmBuffer) { cerr = hipFree(mmdata->mmBuffer);CHKERRCUDA(cerr); }
if (mmdata->mmBuffer2) { cerr = hipFree(mmdata->mmBuffer2);CHKERRCUDA(cerr); }
if (mmdata->matBDescr) { stat = hipsparseDestroyDnMat(mmdata->matBDescr);CHKERRCUSPARSE(stat); }
if (mmdata->matCDescr) { stat = hipsparseDestroyDnMat(mmdata->matCDescr);CHKERRCUSPARSE(stat); }
if (mmdata->spgemmDesc) { stat = hipsparseSpGEMM_destroyDescr(mmdata->spgemmDesc);CHKERRCUSPARSE(stat); }
#endif
ierr = MatDestroy(&mmdata->X);CHKERRQ(ierr);
ierr = PetscFree(data);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PETSC_INTERN PetscErrorCode MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(Mat,Mat,Mat,PetscBool,PetscBool);
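/* Numeric phase of C = op(A)*op(B) with A sparse (CUSPARSE) and B dense (CUDA): uses hipsparseSpMM with CUDA 11 and csrmm otherwise; RARt and PtAP are completed with a dense-dense product on the intermediate X */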
static PetscErrorCode MatProductNumeric_SeqAIJCUSPARSE_SeqDENSECUDA(Mat C)
{
Mat_Product *product = C->product;
Mat A,B;
PetscInt m,n,blda,clda;
PetscBool flg,biscuda;
Mat_SeqAIJCUSPARSE *cusp;
hipsparseStatus_t stat;
hipsparseOperation_t opA;
const PetscScalar *barray;
PetscScalar *carray;
PetscErrorCode ierr;
MatMatCusparse *mmdata;
Mat_SeqAIJCUSPARSEMultStruct *mat;
CsrMatrix *csrmat;
hipError_t cerr;
PetscFunctionBegin;
MatCheckProduct(C,1);
if (!C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data empty");
mmdata = (MatMatCusparse*)product->data;
A = product->A;
B = product->B;
ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Not for type %s",((PetscObject)A)->type_name);
/* currently CopyToGpu does not copy if the matrix is bound to the CPU;
instead of silently accepting the wrong answer, I prefer to raise an error */
if (A->boundtocpu) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
switch (product->type) {
case MATPRODUCT_AB:
case MATPRODUCT_PtAP:
mat = cusp->mat;
opA = HIPSPARSE_OPERATION_NON_TRANSPOSE;
m = A->rmap->n;
n = B->cmap->n;
break;
case MATPRODUCT_AtB:
if (!A->form_explicit_transpose) {
mat = cusp->mat;
opA = HIPSPARSE_OPERATION_TRANSPOSE;
} else {
ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(A);CHKERRQ(ierr);
mat = cusp->matTranspose;
opA = HIPSPARSE_OPERATION_NON_TRANSPOSE;
}
m = A->cmap->n;
n = B->cmap->n;
break;
case MATPRODUCT_ABt:
case MATPRODUCT_RARt:
mat = cusp->mat;
opA = HIPSPARSE_OPERATION_NON_TRANSPOSE;
m = A->rmap->n;
n = B->rmap->n;
break;
default:
SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Unsupported product type %s",MatProductTypes[product->type]);
}
if (!mat) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing Mat_SeqAIJCUSPARSEMultStruct");
csrmat = (CsrMatrix*)mat->mat;
/* if the user passed a CPU matrix, copy the data to the GPU */
ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQDENSECUDA,&biscuda);CHKERRQ(ierr);
if (!biscuda) {ierr = MatConvert(B,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);}
ierr = MatDenseCUDAGetArrayRead(B,&barray);CHKERRQ(ierr);
ierr = MatDenseGetLDA(B,&blda);CHKERRQ(ierr);
if (product->type == MATPRODUCT_RARt || product->type == MATPRODUCT_PtAP) {
ierr = MatDenseCUDAGetArrayWrite(mmdata->X,&carray);CHKERRQ(ierr);
ierr = MatDenseGetLDA(mmdata->X,&clda);CHKERRQ(ierr);
} else {
ierr = MatDenseCUDAGetArrayWrite(C,&carray);CHKERRQ(ierr);
ierr = MatDenseGetLDA(C,&clda);CHKERRQ(ierr);
}
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
hipsparseOperation_t opB = (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) ? HIPSPARSE_OPERATION_TRANSPOSE : HIPSPARSE_OPERATION_NON_TRANSPOSE;
/* (re)allocate mmBuffer if not initialized or LDAs are different */
if (!mmdata->initialized || mmdata->Blda != blda || mmdata->Clda != clda) {
size_t mmBufferSize;
if (mmdata->initialized && mmdata->Blda != blda) {stat = hipsparseDestroyDnMat(mmdata->matBDescr);CHKERRCUSPARSE(stat); mmdata->matBDescr = NULL;}
if (!mmdata->matBDescr) {
stat = hipsparseCreateDnMat(&mmdata->matBDescr,B->rmap->n,B->cmap->n,blda,(void*)barray,cusparse_scalartype,HIPSPARSE_ORDER_COL);CHKERRCUSPARSE(stat);
mmdata->Blda = blda;
}
if (mmdata->initialized && mmdata->Clda != clda) {stat = hipsparseDestroyDnMat(mmdata->matCDescr);CHKERRCUSPARSE(stat); mmdata->matCDescr = NULL;}
if (!mmdata->matCDescr) { /* matCDescr is for C or mmdata->X */
stat = hipsparseCreateDnMat(&mmdata->matCDescr,m,n,clda,(void*)carray,cusparse_scalartype,HIPSPARSE_ORDER_COL);CHKERRCUSPARSE(stat);
mmdata->Clda = clda;
}
if (!mat->matDescr) {
stat = hipsparseCreateCsr(&mat->matDescr,
csrmat->num_rows, csrmat->num_cols, csrmat->num_entries,
csrmat->row_offsets->data().get(), csrmat->column_indices->data().get(),
csrmat->values->data().get(),
HIPSPARSE_INDEX_32I,HIPSPARSE_INDEX_32I, /* row offset, col idx types due to THRUSTINTARRAY32 */
HIPSPARSE_INDEX_BASE_ZERO,cusparse_scalartype);CHKERRCUSPARSE(stat);
}
stat = hipsparseSpMM_bufferSize(cusp->handle,opA,opB,mat->alpha_one,
mat->matDescr,mmdata->matBDescr,mat->beta_zero,
mmdata->matCDescr,cusparse_scalartype,
cusp->spmmAlg,&mmBufferSize);CHKERRCUSPARSE(stat);
if ((mmdata->mmBuffer && mmdata->mmBufferSize < mmBufferSize) || !mmdata->mmBuffer) {
cerr = hipFree(mmdata->mmBuffer);CHKERRCUDA(cerr);
cerr = hipMalloc(&mmdata->mmBuffer,mmBufferSize);CHKERRCUDA(cerr);
mmdata->mmBufferSize = mmBufferSize;
}
mmdata->initialized = PETSC_TRUE;
} else {
/* to be safe, always update pointers of the mats */
stat = hipsparseSpMatSetValues(mat->matDescr,csrmat->values->data().get());CHKERRCUSPARSE(stat);
stat = hipsparseDnMatSetValues(mmdata->matBDescr,(void*)barray);CHKERRCUSPARSE(stat);
stat = hipsparseDnMatSetValues(mmdata->matCDescr,(void*)carray);CHKERRCUSPARSE(stat);
}
/* do hipsparseSpMM, which supports transpose on B */
stat = hipsparseSpMM(cusp->handle,opA,opB,mat->alpha_one,
mat->matDescr,mmdata->matBDescr,mat->beta_zero,
mmdata->matCDescr,cusparse_scalartype,
cusp->spmmAlg,mmdata->mmBuffer);CHKERRCUSPARSE(stat);
#else
PetscInt k;
/* cusparseXcsrmm does not support transpose on B */
if (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) {
hipblasHandle_t cublasv2handle;
hipblasStatus_t cerr;
ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr);
cerr = cublasXgeam(cublasv2handle,HIPBLAS_OP_T,HIPBLAS_OP_T,
B->cmap->n,B->rmap->n,
&PETSC_CUSPARSE_ONE ,barray,blda,
&PETSC_CUSPARSE_ZERO,barray,blda,
mmdata->Bt,B->cmap->n);CHKERRCUBLAS(cerr);
blda = B->cmap->n;
k = B->cmap->n;
} else {
k = B->rmap->n;
}
/* perform the MatMat operation, op(A) is m x k, op(B) is k x n */
stat = cusparse_csr_spmm(cusp->handle,opA,m,n,k,
csrmat->num_entries,mat->alpha_one,mat->descr,
csrmat->values->data().get(),
csrmat->row_offsets->data().get(),
csrmat->column_indices->data().get(),
mmdata->Bt ? mmdata->Bt : barray,blda,mat->beta_zero,
carray,clda);CHKERRCUSPARSE(stat);
#endif
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(n*2.0*csrmat->num_entries);CHKERRQ(ierr);
ierr = MatDenseCUDARestoreArrayRead(B,&barray);CHKERRQ(ierr);
if (product->type == MATPRODUCT_RARt) {
ierr = MatDenseCUDARestoreArrayWrite(mmdata->X,&carray);CHKERRQ(ierr);
ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(B,mmdata->X,C,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr);
} else if (product->type == MATPRODUCT_PtAP) {
ierr = MatDenseCUDARestoreArrayWrite(mmdata->X,&carray);CHKERRQ(ierr);
ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(B,mmdata->X,C,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr);
} else {
ierr = MatDenseCUDARestoreArrayWrite(C,&carray);CHKERRQ(ierr);
}
if (mmdata->cisdense) {
ierr = MatConvert(C,MATSEQDENSE,MAT_INPLACE_MATRIX,&C);CHKERRQ(ierr);
}
if (!biscuda) {
ierr = MatConvert(B,MATSEQDENSE,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
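/* Symbolic phase of the sparse-dense product: set the sizes and type of C and create the MatMatCusparse product data, including the intermediate dense matrix X needed by RARt and PtAP */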
static PetscErrorCode MatProductSymbolic_SeqAIJCUSPARSE_SeqDENSECUDA(Mat C)
{
Mat_Product *product = C->product;
Mat A,B;
PetscInt m,n;
PetscBool cisdense,flg;
PetscErrorCode ierr;
MatMatCusparse *mmdata;
Mat_SeqAIJCUSPARSE *cusp;
PetscFunctionBegin;
MatCheckProduct(C,1);
if (C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data not empty");
A = product->A;
B = product->B;
ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for type %s",((PetscObject)A)->type_name);
cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
if (cusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
switch (product->type) {
case MATPRODUCT_AB:
m = A->rmap->n;
n = B->cmap->n;
break;
case MATPRODUCT_AtB:
m = A->cmap->n;
n = B->cmap->n;
break;
case MATPRODUCT_ABt:
m = A->rmap->n;
n = B->rmap->n;
break;
case MATPRODUCT_PtAP:
m = B->cmap->n;
n = B->cmap->n;
break;
case MATPRODUCT_RARt:
m = B->rmap->n;
n = B->rmap->n;
break;
default:
SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Unsupported product type %s",MatProductTypes[product->type]);
}
ierr = MatSetSizes(C,m,n,m,n);CHKERRQ(ierr);
/* if C is of type MATSEQDENSE (CPU), perform the operation on the GPU and then copy the result back to the CPU */
ierr = PetscObjectTypeCompare((PetscObject)C,MATSEQDENSE,&cisdense);CHKERRQ(ierr);
ierr = MatSetType(C,MATSEQDENSECUDA);CHKERRQ(ierr);
/* product data */
ierr = PetscNew(&mmdata);CHKERRQ(ierr);
mmdata->cisdense = cisdense;
#if PETSC_PKG_CUDA_VERSION_LT(11,0,0)
/* cusparseXcsrmm does not support transpose on B, so we allocate a buffer to store B^T */
if (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) {
hipError_t cerr = hipMalloc((void**)&mmdata->Bt,(size_t)B->rmap->n*(size_t)B->cmap->n*sizeof(PetscScalar));CHKERRCUDA(cerr);
}
#endif
/* for these products we need intermediate storage */
if (product->type == MATPRODUCT_RARt || product->type == MATPRODUCT_PtAP) {
ierr = MatCreate(PetscObjectComm((PetscObject)C),&mmdata->X);CHKERRQ(ierr);
ierr = MatSetType(mmdata->X,MATSEQDENSECUDA);CHKERRQ(ierr);
if (product->type == MATPRODUCT_RARt) { /* do not preallocate, since the first call to MatDenseCUDAGetArray will preallocate on the GPU for us */
ierr = MatSetSizes(mmdata->X,A->rmap->n,B->rmap->n,A->rmap->n,B->rmap->n);CHKERRQ(ierr);
} else {
ierr = MatSetSizes(mmdata->X,A->rmap->n,B->cmap->n,A->rmap->n,B->cmap->n);CHKERRQ(ierr);
}
}
C->product->data = mmdata;
C->product->destroy = MatDestroy_MatMatCusparse;
C->ops->productnumeric = MatProductNumeric_SeqAIJCUSPARSE_SeqDENSECUDA;
PetscFunctionReturn(0);
}
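/* Numeric phase of the sparse-sparse product C = op(A)*op(B): uses hipsparseSpGEMM with CUDA 11 and csrgemm otherwise, with AtB/ABt handled through the cached explicit transposes */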
static PetscErrorCode MatProductNumeric_SeqAIJCUSPARSE_SeqAIJCUSPARSE(Mat C)
{
Mat_Product *product = C->product;
Mat A,B;
Mat_SeqAIJCUSPARSE *Acusp,*Bcusp,*Ccusp;
Mat_SeqAIJ *c = (Mat_SeqAIJ*)C->data;
Mat_SeqAIJCUSPARSEMultStruct *Amat,*Bmat,*Cmat;
CsrMatrix *Acsr,*Bcsr,*Ccsr;
PetscBool flg;
PetscErrorCode ierr;
hipsparseStatus_t stat;
hipError_t cerr;
MatProductType ptype;
MatMatCusparse *mmdata;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
hipsparseSpMatDescr_t BmatSpDescr;
#endif
PetscFunctionBegin;
MatCheckProduct(C,1);
if (!C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data empty");
ierr = PetscObjectTypeCompare((PetscObject)C,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for C of type %s",((PetscObject)C)->type_name);
mmdata = (MatMatCusparse*)C->product->data;
A = product->A;
B = product->B;
if (mmdata->reusesym) { /* this happens when api_user is true, meaning that the matrix values have already been computed in the MatProductSymbolic phase */
mmdata->reusesym = PETSC_FALSE;
Ccusp = (Mat_SeqAIJCUSPARSE*)C->spptr;
if (Ccusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
Cmat = Ccusp->mat;
if (!Cmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing C mult struct for product type %s",MatProductTypes[C->product->type]);
Ccsr = (CsrMatrix*)Cmat->mat;
if (!Ccsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing C CSR struct");
goto finalize;
}
if (!c->nz) goto finalize;
ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for type %s",((PetscObject)A)->type_name);
ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for B of type %s",((PetscObject)B)->type_name);
if (A->boundtocpu) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONG,"Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
if (B->boundtocpu) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONG,"Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
Acusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
Bcusp = (Mat_SeqAIJCUSPARSE*)B->spptr;
Ccusp = (Mat_SeqAIJCUSPARSE*)C->spptr;
if (Acusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
if (Bcusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
if (Ccusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr);
ptype = product->type;
if (A->symmetric && ptype == MATPRODUCT_AtB) ptype = MATPRODUCT_AB;
if (B->symmetric && ptype == MATPRODUCT_ABt) ptype = MATPRODUCT_AB;
switch (ptype) {
case MATPRODUCT_AB:
Amat = Acusp->mat;
Bmat = Bcusp->mat;
break;
case MATPRODUCT_AtB:
Amat = Acusp->matTranspose;
Bmat = Bcusp->mat;
break;
case MATPRODUCT_ABt:
Amat = Acusp->mat;
Bmat = Bcusp->matTranspose;
break;
default:
SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Unsupported product type %s",MatProductTypes[product->type]);
}
Cmat = Ccusp->mat;
if (!Amat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing A mult struct for product type %s",MatProductTypes[ptype]);
if (!Bmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing B mult struct for product type %s",MatProductTypes[ptype]);
if (!Cmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing C mult struct for product type %s",MatProductTypes[ptype]);
Acsr = (CsrMatrix*)Amat->mat;
Bcsr = mmdata->Bcsr ? mmdata->Bcsr : (CsrMatrix*)Bmat->mat; /* B may be in compressed row storage */
Ccsr = (CsrMatrix*)Cmat->mat;
if (!Acsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing A CSR struct");
if (!Bcsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing B CSR struct");
if (!Ccsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing C CSR struct");
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
BmatSpDescr = mmdata->Bcsr ? mmdata->matSpBDescr : Bmat->matDescr; /* B may be in compressed row storage */
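/* numeric phase with the CUDA >= 11 generic API: SpGEMM_compute recomputes the product reusing the descriptor and the
   buffers set up in the symbolic phase, and SpGEMM_copy then writes the values into the CSR arrays of C that were
   attached to Cmat->matDescr during the symbolic phase */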
stat = hipsparseSpGEMM_compute(Ccusp->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT,
mmdata->spgemmDesc, &mmdata->mmBufferSize, mmdata->mmBuffer);CHKERRCUSPARSE(stat);
stat = hipsparseSpGEMM_copy(Ccusp->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);CHKERRCUSPARSE(stat);
#else
stat = cusparse_csr_spgemm(Ccusp->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE,
Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols,
Amat->descr, Acsr->num_entries, Acsr->values->data().get(), Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(),
Bmat->descr, Bcsr->num_entries, Bcsr->values->data().get(), Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(),
Cmat->descr, Ccsr->values->data().get(), Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get());CHKERRCUSPARSE(stat);
#endif
ierr = PetscLogGpuFlops(mmdata->flops);CHKERRQ(ierr);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
C->offloadmask = PETSC_OFFLOAD_GPU;
finalize:
/* shorter version of MatAssemblyEnd_SeqAIJ */
ierr = PetscInfo3(C,"Matrix size: %D X %D; storage space: 0 unneeded,%D used\n",C->rmap->n,C->cmap->n,c->nz);CHKERRQ(ierr);
ierr = PetscInfo(C,"Number of mallocs during MatSetValues() is 0\n");CHKERRQ(ierr);
ierr = PetscInfo1(C,"Maximum nonzeros in any row is %D\n",c->rmax);CHKERRQ(ierr);
c->reallocs = 0;
C->info.mallocs += 0;
C->info.nz_unneeded = 0;
C->assembled = C->was_assembled = PETSC_TRUE;
C->num_ass++;
PetscFunctionReturn(0);
}
static PetscErrorCode MatProductSymbolic_SeqAIJCUSPARSE_SeqAIJCUSPARSE(Mat C)
{
Mat_Product *product = C->product;
Mat A,B;
Mat_SeqAIJCUSPARSE *Acusp,*Bcusp,*Ccusp;
Mat_SeqAIJ *a,*b,*c;
Mat_SeqAIJCUSPARSEMultStruct *Amat,*Bmat,*Cmat;
CsrMatrix *Acsr,*Bcsr,*Ccsr;
PetscInt i,j,m,n,k;
PetscBool flg;
PetscErrorCode ierr;
hipsparseStatus_t stat;
hipError_t cerr;
MatProductType ptype;
MatMatCusparse *mmdata;
PetscLogDouble flops;
PetscBool biscompressed,ciscompressed;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
int64_t C_num_rows1, C_num_cols1, C_nnz1;
size_t bufSize2;
hipsparseSpMatDescr_t BmatSpDescr;
#else
int cnz;
#endif
PetscFunctionBegin;
MatCheckProduct(C,1);
if (C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data not empty");
A = product->A;
B = product->B;
ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for type %s",((PetscObject)A)->type_name);
ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for B of type %s",((PetscObject)B)->type_name);
a = (Mat_SeqAIJ*)A->data;
b = (Mat_SeqAIJ*)B->data;
Acusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
Bcusp = (Mat_SeqAIJCUSPARSE*)B->spptr;
if (Acusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
if (Bcusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
/* product data */
ierr = PetscNew(&mmdata);CHKERRQ(ierr);
C->product->data = mmdata;
C->product->destroy = MatDestroy_MatMatCusparse;
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr);
ptype = product->type;
if (A->symmetric && ptype == MATPRODUCT_AtB) ptype = MATPRODUCT_AB;
if (B->symmetric && ptype == MATPRODUCT_ABt) ptype = MATPRODUCT_AB;
biscompressed = PETSC_FALSE;
ciscompressed = PETSC_FALSE;
switch (ptype) {
case MATPRODUCT_AB:
m = A->rmap->n;
n = B->cmap->n;
k = A->cmap->n;
Amat = Acusp->mat;
Bmat = Bcusp->mat;
if (a->compressedrow.use) ciscompressed = PETSC_TRUE;
if (b->compressedrow.use) biscompressed = PETSC_TRUE;
break;
case MATPRODUCT_AtB:
m = A->cmap->n;
n = B->cmap->n;
k = A->rmap->n;
ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(A);CHKERRQ(ierr);
Amat = Acusp->matTranspose;
Bmat = Bcusp->mat;
if (b->compressedrow.use) biscompressed = PETSC_TRUE;
break;
case MATPRODUCT_ABt:
m = A->rmap->n;
n = B->rmap->n;
k = A->cmap->n;
ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(B);CHKERRQ(ierr);
Amat = Acusp->mat;
Bmat = Bcusp->matTranspose;
if (a->compressedrow.use) ciscompressed = PETSC_TRUE;
break;
default:
SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Unsupported product type %s",MatProductTypes[product->type]);
}
/* create cusparse matrix */
ierr = MatSetSizes(C,m,n,m,n);CHKERRQ(ierr);
ierr = MatSetType(C,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
c = (Mat_SeqAIJ*)C->data;
Ccusp = (Mat_SeqAIJCUSPARSE*)C->spptr;
Cmat = new Mat_SeqAIJCUSPARSEMultStruct;
Ccsr = new CsrMatrix;
c->compressedrow.use = ciscompressed;
if (c->compressedrow.use) { /* if a is stored in compressed row format, then c will be stored in compressed row format as well */
c->compressedrow.nrows = a->compressedrow.nrows;
ierr = PetscMalloc2(c->compressedrow.nrows+1,&c->compressedrow.i,c->compressedrow.nrows,&c->compressedrow.rindex);CHKERRQ(ierr);
ierr = PetscArraycpy(c->compressedrow.rindex,a->compressedrow.rindex,c->compressedrow.nrows);CHKERRQ(ierr);
Ccusp->workVector = new THRUSTARRAY(c->compressedrow.nrows);
Cmat->cprowIndices = new THRUSTINTARRAY(c->compressedrow.nrows);
Cmat->cprowIndices->assign(c->compressedrow.rindex,c->compressedrow.rindex + c->compressedrow.nrows);
} else {
c->compressedrow.nrows = 0;
c->compressedrow.i = NULL;
c->compressedrow.rindex = NULL;
Ccusp->workVector = NULL;
Cmat->cprowIndices = NULL;
}
Ccusp->nrows = ciscompressed ? c->compressedrow.nrows : m;
Ccusp->mat = Cmat;
Ccusp->mat->mat = Ccsr;
Ccsr->num_rows = Ccusp->nrows;
Ccsr->num_cols = n;
Ccsr->row_offsets = new THRUSTINTARRAY32(Ccusp->nrows+1);
stat = hipsparseCreateMatDescr(&Cmat->descr);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatIndexBase(Cmat->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatType(Cmat->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
cerr = hipMalloc((void **)&(Cmat->alpha_one),sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = hipMalloc((void **)&(Cmat->beta_zero),sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = hipMalloc((void **)&(Cmat->beta_one), sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = hipMemcpy(Cmat->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = hipMemcpy(Cmat->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = hipMemcpy(Cmat->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
if (!Ccsr->num_rows || !Ccsr->num_cols || !a->nz || !b->nz) { /* cuSPARSE raises errors in different calls when matrices have zero rows/columns! */
thrust::fill(thrust::device,Ccsr->row_offsets->begin(),Ccsr->row_offsets->end(),0);
c->nz = 0;
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
Ccsr->values = new THRUSTARRAY(c->nz);
goto finalizesym;
}
if (!Amat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing A mult struct for product type %s",MatProductTypes[ptype]);
if (!Bmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing B mult struct for product type %s",MatProductTypes[ptype]);
Acsr = (CsrMatrix*)Amat->mat;
if (!biscompressed) {
Bcsr = (CsrMatrix*)Bmat->mat;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
BmatSpDescr = Bmat->matDescr;
#endif
} else { /* we need to use row offsets for the full matrix */
CsrMatrix *cBcsr = (CsrMatrix*)Bmat->mat;
Bcsr = new CsrMatrix;
Bcsr->num_rows = B->rmap->n;
Bcsr->num_cols = cBcsr->num_cols;
Bcsr->num_entries = cBcsr->num_entries;
Bcsr->column_indices = cBcsr->column_indices;
Bcsr->values = cBcsr->values;
if (!Bcusp->rowoffsets_gpu) {
Bcusp->rowoffsets_gpu = new THRUSTINTARRAY32(B->rmap->n + 1);
Bcusp->rowoffsets_gpu->assign(b->i,b->i + B->rmap->n + 1);
ierr = PetscLogCpuToGpu((B->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr);
}
Bcsr->row_offsets = Bcusp->rowoffsets_gpu;
mmdata->Bcsr = Bcsr;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
if (Bcsr->num_rows && Bcsr->num_cols) {
stat = hipsparseCreateCsr(&mmdata->matSpBDescr, Bcsr->num_rows, Bcsr->num_cols, Bcsr->num_entries,
Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(),
Bcsr->values->data().get(),
HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I,
HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat);
}
BmatSpDescr = mmdata->matSpBDescr;
#endif
}
if (!Acsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing A CSR struct");
if (!Bcsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing B CSR struct");
/* precompute flops count */
if (ptype == MATPRODUCT_AB) {
for (i=0, flops = 0; i<A->rmap->n; i++) {
const PetscInt st = a->i[i];
const PetscInt en = a->i[i+1];
for (j=st; j<en; j++) {
const PetscInt brow = a->j[j];
flops += 2.*(b->i[brow+1] - b->i[brow]);
}
}
} else if (ptype == MATPRODUCT_AtB) {
for (i=0, flops = 0; i<A->rmap->n; i++) {
const PetscInt anzi = a->i[i+1] - a->i[i];
const PetscInt bnzi = b->i[i+1] - b->i[i];
flops += (2.*anzi)*bnzi;
}
} else { /* TODO */
flops = 0.;
}
mmdata->flops = flops;
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
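/* symbolic phase with the CUDA >= 11 generic SpGEMM API: two workEstimation calls (the first queries a buffer size, the
   second inspects A and B using that buffer), two compute calls (the first queries a second buffer size, the second
   performs the actual computation), SpMatGetSize to obtain nnz(C), allocation of the CSR arrays of C, CsrSetPointers to
   attach them to the descriptor, and finally SpGEMM_copy to fill in the values */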
stat = hipsparseSetPointerMode(Ccusp->handle, HIPSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
stat = hipsparseCreateCsr(&Cmat->matDescr, Ccsr->num_rows, Ccsr->num_cols, 0,
NULL, NULL, NULL,
HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I,
HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat);
stat = hipsparseSpGEMM_createDescr(&mmdata->spgemmDesc);CHKERRCUSPARSE(stat);
/* ask bufferSize bytes for external memory */
stat = hipsparseSpGEMM_workEstimation(Ccusp->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT,
mmdata->spgemmDesc, &bufSize2, NULL);CHKERRCUSPARSE(stat);
cerr = hipMalloc((void**) &mmdata->mmBuffer2, bufSize2);CHKERRCUDA(cerr);
/* inspect the matrices A and B to understand the memory requirement for the next step */
stat = hipsparseSpGEMM_workEstimation(Ccusp->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT,
mmdata->spgemmDesc, &bufSize2, mmdata->mmBuffer2);CHKERRCUSPARSE(stat);
/* ask bufferSize again bytes for external memory */
stat = hipsparseSpGEMM_compute(Ccusp->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT,
mmdata->spgemmDesc, &mmdata->mmBufferSize, NULL);CHKERRCUSPARSE(stat);
/* Neither the CUSPARSE documentation nor the API is clear here: both buffers are needed for the operations to work properly.
   mmdata->mmBuffer2 does not appear anywhere in the compute/copy API; it only shows up in the workEstimation calls, yet it
   seems to be required during compute, so presumably its address is stored inside the descriptor. What a messy API... */
cerr = hipMalloc((void**) &mmdata->mmBuffer, mmdata->mmBufferSize);CHKERRCUDA(cerr);
/* compute the intermediate product of A * B */
stat = hipsparseSpGEMM_compute(Ccusp->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT,
mmdata->spgemmDesc, &mmdata->mmBufferSize, mmdata->mmBuffer);CHKERRCUSPARSE(stat);
/* get matrix C non-zero entries C_nnz1 */
stat = hipsparseSpMatGetSize(Cmat->matDescr, &C_num_rows1, &C_num_cols1, &C_nnz1);CHKERRCUSPARSE(stat);
c->nz = (PetscInt) C_nnz1;
ierr = PetscInfo9(C,"Buffer sizes for type %s, result %D x %D (k %D, nzA %D, nzB %D, nzC %D) are: %ldKB %ldKB\n",MatProductTypes[ptype],m,n,k,a->nz,b->nz,c->nz,bufSize2/1024,mmdata->mmBufferSize/1024);CHKERRQ(ierr);
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
CHKERRCUDA(hipPeekAtLastError()); /* catch out of memory errors */
Ccsr->values = new THRUSTARRAY(c->nz);
CHKERRCUDA(hipPeekAtLastError()); /* catch out of memory errors */
stat = hipsparseCsrSetPointers(Cmat->matDescr, Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(),
Ccsr->values->data().get());CHKERRCUSPARSE(stat);
stat = hipsparseSpGEMM_copy(Ccusp->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);CHKERRCUSPARSE(stat);
#else
stat = hipsparseSetPointerMode(Ccusp->handle, HIPSPARSE_POINTER_MODE_HOST);CHKERRCUSPARSE(stat);
stat = hipsparseXcsrgemmNnz(Ccusp->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE,
Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols,
Amat->descr, Acsr->num_entries, Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(),
Bmat->descr, Bcsr->num_entries, Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(),
Cmat->descr, Ccsr->row_offsets->data().get(), &cnz);CHKERRCUSPARSE(stat);
c->nz = cnz;
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
CHKERRCUDA(hipPeekAtLastError()); /* catch out of memory errors */
Ccsr->values = new THRUSTARRAY(c->nz);
CHKERRCUDA(hipPeekAtLastError()); /* catch out of memory errors */
stat = hipsparseSetPointerMode(Ccusp->handle, HIPSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
/* with the old gemm interface (removed from 11.0 on) we cannot compute the symbolic factorization only.
   I have tried the gemm2 interface (alpha * A * B + beta * D), which allows doing the symbolic phase by passing NULL for the values,
   but it seems quite buggy when D is NULL, despite the fact that the CUSPARSE documentation claims this is supported! */
stat = cusparse_csr_spgemm(Ccusp->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE,
Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols,
Amat->descr, Acsr->num_entries, Acsr->values->data().get(), Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(),
Bmat->descr, Bcsr->num_entries, Bcsr->values->data().get(), Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(),
Cmat->descr, Ccsr->values->data().get(), Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get());CHKERRCUSPARSE(stat);
#endif
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuFlops(mmdata->flops);CHKERRQ(ierr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
finalizesym:
c->singlemalloc = PETSC_FALSE;
c->free_a = PETSC_TRUE;
c->free_ij = PETSC_TRUE;
ierr = PetscMalloc1(m+1,&c->i);CHKERRQ(ierr);
ierr = PetscMalloc1(c->nz,&c->j);CHKERRQ(ierr);
if (PetscDefined(USE_64BIT_INDICES)) { /* 32 to 64 bit conversion on the GPU and then copy to host (lazy) */
PetscInt *d_i = c->i;
THRUSTINTARRAY ii(Ccsr->row_offsets->size());
THRUSTINTARRAY jj(Ccsr->column_indices->size());
ii = *Ccsr->row_offsets;
jj = *Ccsr->column_indices;
if (ciscompressed) d_i = c->compressedrow.i;
cerr = hipMemcpy(d_i,ii.data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
cerr = hipMemcpy(c->j,jj.data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
} else {
PetscInt *d_i = c->i;
if (ciscompressed) d_i = c->compressedrow.i;
cerr = hipMemcpy(d_i,Ccsr->row_offsets->data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
cerr = hipMemcpy(c->j,Ccsr->column_indices->data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
}
if (ciscompressed) { /* need to expand host row offsets */
PetscInt r = 0;
c->i[0] = 0;
for (k = 0; k < c->compressedrow.nrows; k++) {
const PetscInt next = c->compressedrow.rindex[k];
const PetscInt old = c->compressedrow.i[k];
for (; r < next; r++) c->i[r+1] = old;
}
for (; r < m; r++) c->i[r+1] = c->compressedrow.i[c->compressedrow.nrows];
}
ierr = PetscLogGpuToCpu((Ccsr->column_indices->size() + Ccsr->row_offsets->size())*sizeof(PetscInt));CHKERRQ(ierr);
ierr = PetscMalloc1(m,&c->ilen);CHKERRQ(ierr);
ierr = PetscMalloc1(m,&c->imax);CHKERRQ(ierr);
c->maxnz = c->nz;
c->nonzerorowcnt = 0;
c->rmax = 0;
for (k = 0; k < m; k++) {
const PetscInt nn = c->i[k+1] - c->i[k];
c->ilen[k] = c->imax[k] = nn;
c->nonzerorowcnt += (PetscInt)!!nn;
c->rmax = PetscMax(c->rmax,nn);
}
ierr = MatMarkDiagonal_SeqAIJ(C);CHKERRQ(ierr);
ierr = PetscMalloc1(c->nz,&c->a);CHKERRQ(ierr);
Ccsr->num_entries = c->nz;
C->nonzerostate++;
ierr = PetscLayoutSetUp(C->rmap);CHKERRQ(ierr);
ierr = PetscLayoutSetUp(C->cmap);CHKERRQ(ierr);
Ccusp->nonzerostate = C->nonzerostate;
C->offloadmask = PETSC_OFFLOAD_UNALLOCATED;
C->preallocated = PETSC_TRUE;
C->assembled = PETSC_FALSE;
C->was_assembled = PETSC_FALSE;
if (product->api_user && A->offloadmask == PETSC_OFFLOAD_BOTH && B->offloadmask == PETSC_OFFLOAD_BOTH) { /* flag the matrix C values as computed, so that the numeric phase will only call MatAssembly */
mmdata->reusesym = PETSC_TRUE;
C->offloadmask = PETSC_OFFLOAD_GPU;
}
C->ops->productnumeric = MatProductNumeric_SeqAIJCUSPARSE_SeqAIJCUSPARSE;
PetscFunctionReturn(0);
}
PETSC_INTERN PetscErrorCode MatProductSetFromOptions_SeqAIJ_SeqDense(Mat);
/* handles sparse or dense B */
static PetscErrorCode MatProductSetFromOptions_SeqAIJCUSPARSE(Mat mat)
{
Mat_Product *product = mat->product;
PetscErrorCode ierr;
PetscBool isdense = PETSC_FALSE,Biscusp = PETSC_FALSE,Ciscusp = PETSC_TRUE;
PetscFunctionBegin;
MatCheckProduct(mat,1);
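/* dispatch: a dense B selects the SeqDENSECUDA kernels (or the SeqAIJ_SeqDense path when A is bound to the CPU), a CUSPARSE
   B (and C, for ABC) selects the CUSPARSE SpGEMM routines above for AB/AtB/ABt and the basic fallback for composed products,
   and everything else falls back to the plain SeqAIJ implementation */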
ierr = PetscObjectBaseTypeCompare((PetscObject)product->B,MATSEQDENSE,&isdense);CHKERRQ(ierr);
if (!product->A->boundtocpu && !product->B->boundtocpu) {
ierr = PetscObjectTypeCompare((PetscObject)product->B,MATSEQAIJCUSPARSE,&Biscusp);CHKERRQ(ierr);
}
if (product->type == MATPRODUCT_ABC) {
Ciscusp = PETSC_FALSE;
if (!product->C->boundtocpu) {
ierr = PetscObjectTypeCompare((PetscObject)product->C,MATSEQAIJCUSPARSE,&Ciscusp);CHKERRQ(ierr);
}
}
if (isdense) {
switch (product->type) {
case MATPRODUCT_AB:
case MATPRODUCT_AtB:
case MATPRODUCT_ABt:
case MATPRODUCT_PtAP:
case MATPRODUCT_RARt:
if (product->A->boundtocpu) {
ierr = MatProductSetFromOptions_SeqAIJ_SeqDense(mat);CHKERRQ(ierr);
} else {
mat->ops->productsymbolic = MatProductSymbolic_SeqAIJCUSPARSE_SeqDENSECUDA;
}
break;
case MATPRODUCT_ABC:
mat->ops->productsymbolic = MatProductSymbolic_ABC_Basic;
break;
default:
break;
}
} else if (Biscusp && Ciscusp) {
switch (product->type) {
case MATPRODUCT_AB:
case MATPRODUCT_AtB:
case MATPRODUCT_ABt:
mat->ops->productsymbolic = MatProductSymbolic_SeqAIJCUSPARSE_SeqAIJCUSPARSE;
break;
case MATPRODUCT_PtAP:
case MATPRODUCT_RARt:
case MATPRODUCT_ABC:
mat->ops->productsymbolic = MatProductSymbolic_ABC_Basic;
break;
default:
break;
}
} else { /* fallback for AIJ */
ierr = MatProductSetFromOptions_SeqAIJ(mat);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,NULL,yy,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy, Vec zz)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,yy,zz,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatMultHermitianTranspose_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,NULL,yy,PETSC_TRUE,PETSC_TRUE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatMultHermitianTransposeAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,yy,zz,PETSC_TRUE,PETSC_TRUE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,NULL,yy,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
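/* simple kernel used when the matrix is stored in compressed row format: it scatter-adds the short result kept in the work
   vector into the full-length output vector, i.e. y[idx[i]] += x[i] for each of the n nonzero rows listed in idx */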
__global__ static void ScatterAdd(PetscInt n, PetscInt *idx,const PetscScalar *x,PetscScalar *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[idx[i]] += x[i];
}
/* z = op(A) x + y. If trans & !herm, op = ^T; if trans & herm, op = ^H; if !trans, op = no-op */
static PetscErrorCode MatMultAddKernel_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz,PetscBool trans,PetscBool herm)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
Mat_SeqAIJCUSPARSEMultStruct *matstruct;
PetscScalar *xarray,*zarray,*dptr,*beta,*xptr;
PetscErrorCode ierr;
hipError_t cerr;
hipsparseStatus_t stat;
hipsparseOperation_t opA = HIPSPARSE_OPERATION_NON_TRANSPOSE;
PetscBool compressed;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
PetscInt nx,ny;
#endif
PetscFunctionBegin;
if (herm && !trans) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Hermitian and not transpose not supported");
if (!a->nonzerorowcnt) {
if (!yy) {ierr = VecSet_SeqCUDA(zz,0);CHKERRQ(ierr);}
else {ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr);}
PetscFunctionReturn(0);
}
/* The line below is necessary due to the operations that modify the matrix on the CPU (axpy, scale, etc) */
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
if (!trans) {
matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat;
if (!matstruct) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"SeqAIJCUSPARSE does not have a 'mat' (need to fix)");
} else {
if (herm || !A->form_explicit_transpose) {
opA = herm ? HIPSPARSE_OPERATION_CONJUGATE_TRANSPOSE : HIPSPARSE_OPERATION_TRANSPOSE;
matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat;
} else {
if (!cusparsestruct->matTranspose) {ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(A);CHKERRQ(ierr);}
matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose;
}
}
/* Does the matrix use compressed rows (i.e., drop zero rows)? */
compressed = matstruct->cprowIndices ? PETSC_TRUE : PETSC_FALSE;
try {
ierr = VecCUDAGetArrayRead(xx,(const PetscScalar**)&xarray);CHKERRQ(ierr);
if (yy == zz) {ierr = VecCUDAGetArray(zz,&zarray);CHKERRQ(ierr);} /* read & write zz, so need to get uptodate zarray on GPU */
else {ierr = VecCUDAGetArrayWrite(zz,&zarray);CHKERRQ(ierr);} /* write zz, so no need to init zarray on GPU */
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
if (opA == HIPSPARSE_OPERATION_NON_TRANSPOSE) {
/* z = A x + beta y.
If A is compressed (with fewer rows), then Ax is shorter than the full z, so we need a work vector to store Ax.
When A is non-compressed, and z = y, we can set beta=1 to compute y = Ax + y in one call.
*/
xptr = xarray;
dptr = compressed ? cusparsestruct->workVector->data().get() : zarray;
beta = (yy == zz && !compressed) ? matstruct->beta_one : matstruct->beta_zero;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
/* Get length of x, y for y=Ax. ny might be shorter than the work vector's allocated length, since the work vector is
allocated to accommodate different uses. So we get the length info directly from mat.
*/
if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
CsrMatrix *mat = (CsrMatrix*)matstruct->mat;
nx = mat->num_cols;
ny = mat->num_rows;
}
#endif
} else {
/* z = A^T x + beta y
If A is compressed, then we need a work vector as the shorter version of x to compute A^T x.
Note A^Tx is of full length, so we set beta to 1.0 if y exists.
*/
xptr = compressed ? cusparsestruct->workVector->data().get() : xarray;
dptr = zarray;
beta = yy ? matstruct->beta_one : matstruct->beta_zero;
if (compressed) { /* Scatter x to work vector */
thrust::device_ptr<PetscScalar> xarr = thrust::device_pointer_cast(xarray);
thrust::for_each(thrust::hip::par.on(PetscDefaultCudaStream),thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(xarr, matstruct->cprowIndices->begin()))),
thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(xarr, matstruct->cprowIndices->begin()))) + matstruct->cprowIndices->size(),
VecCUDAEqualsReverse());
}
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
CsrMatrix *mat = (CsrMatrix*)matstruct->mat;
nx = mat->num_rows;
ny = mat->num_cols;
}
#endif
}
/* csr_spmv does y = alpha op(A) x + beta y */
if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
if (opA < 0 || opA > 2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE ABI on hipsparseOperation_t has changed and PETSc has not been updated accordingly");
if (!matstruct->cuSpMV[opA].initialized) { /* built on demand */
stat = hipsparseCreateDnVec(&matstruct->cuSpMV[opA].vecXDescr,nx,xptr,cusparse_scalartype);CHKERRCUSPARSE(stat);
stat = hipsparseCreateDnVec(&matstruct->cuSpMV[opA].vecYDescr,ny,dptr,cusparse_scalartype);CHKERRCUSPARSE(stat);
stat = hipsparseSpMV_bufferSize(cusparsestruct->handle, opA, matstruct->alpha_one,
matstruct->matDescr,
matstruct->cuSpMV[opA].vecXDescr, beta,
matstruct->cuSpMV[opA].vecYDescr,
cusparse_scalartype,
cusparsestruct->spmvAlg,
&matstruct->cuSpMV[opA].spmvBufferSize);CHKERRCUSPARSE(stat);
cerr = hipMalloc(&matstruct->cuSpMV[opA].spmvBuffer,matstruct->cuSpMV[opA].spmvBufferSize);CHKERRCUDA(cerr);
matstruct->cuSpMV[opA].initialized = PETSC_TRUE;
} else {
/* x, y's value pointers might change between calls, but their shape is kept, so we just update pointers */
stat = hipsparseDnVecSetValues(matstruct->cuSpMV[opA].vecXDescr,xptr);CHKERRCUSPARSE(stat);
stat = hipsparseDnVecSetValues(matstruct->cuSpMV[opA].vecYDescr,dptr);CHKERRCUSPARSE(stat);
}
stat = hipsparseSpMV(cusparsestruct->handle, opA,
matstruct->alpha_one,
matstruct->matDescr, /* built in MatSeqAIJCUSPARSECopyToGPU() or MatSeqAIJCUSPARSEFormExplicitTransposeForMult() */
matstruct->cuSpMV[opA].vecXDescr,
beta,
matstruct->cuSpMV[opA].vecYDescr,
cusparse_scalartype,
cusparsestruct->spmvAlg,
matstruct->cuSpMV[opA].spmvBuffer);CHKERRCUSPARSE(stat);
#else
CsrMatrix *mat = (CsrMatrix*)matstruct->mat;
stat = cusparse_csr_spmv(cusparsestruct->handle, opA,
mat->num_rows, mat->num_cols,
mat->num_entries, matstruct->alpha_one, matstruct->descr,
mat->values->data().get(), mat->row_offsets->data().get(),
mat->column_indices->data().get(), xptr, beta,
dptr);CHKERRCUSPARSE(stat);
#endif
} else {
if (cusparsestruct->nrows) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
cusparseHybMat_t hybMat = (cusparseHybMat_t)matstruct->mat;
stat = cusparse_hyb_spmv(cusparsestruct->handle, opA,
matstruct->alpha_one, matstruct->descr, hybMat,
xptr, beta,
dptr);CHKERRCUSPARSE(stat);
#endif
}
}
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
if (opA == HIPSPARSE_OPERATION_NON_TRANSPOSE) {
if (yy) { /* MatMultAdd: zz = A*xx + yy */
if (compressed) { /* A is compressed. We first copy yy to zz, then ScatterAdd the work vector to zz */
ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr); /* zz = yy */
} else if (zz != yy) { /* A is not compressed. zz already contains A*xx, and we just need to add yy */
ierr = VecAXPY_SeqCUDA(zz,1.0,yy);CHKERRQ(ierr); /* zz += yy */
}
} else if (compressed) { /* MatMult: zz = A*xx. A is compressed, so we zero zz first, then ScatterAdd the work vector to zz */
ierr = VecSet_SeqCUDA(zz,0);CHKERRQ(ierr);
}
/* ScatterAdd the result from work vector into the full vector when A is compressed */
if (compressed) {
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
/* I wanted to make this for_each asynchronous but failed. thrust::async::for_each() returns an event (internally registered)
   and in the destructor of the scope it will call hipStreamSynchronize() on this stream. One would have to store all such events
   to prevent that, so I just use a ScatterAdd kernel instead.
*/
#if 0
thrust::device_ptr<PetscScalar> zptr = thrust::device_pointer_cast(zarray);
thrust::async::for_each(thrust::hip::par.on(cusparsestruct->stream),
thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))),
thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))) + matstruct->cprowIndices->size(),
VecCUDAPlusEquals());
#else
PetscInt n = matstruct->cprowIndices->size();
hipLaunchKernelGGL(( ScatterAdd), dim3((n+255)/256),dim3(256),0,PetscDefaultCudaStream, n,matstruct->cprowIndices->data().get(),cusparsestruct->workVector->data().get(),zarray);
#endif
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
}
} else {
if (yy && yy != zz) {
ierr = VecAXPY_SeqCUDA(zz,1.0,yy);CHKERRQ(ierr); /* zz += yy */
}
}
ierr = VecCUDARestoreArrayRead(xx,(const PetscScalar**)&xarray);CHKERRQ(ierr);
if (yy == zz) {ierr = VecCUDARestoreArray(zz,&zarray);CHKERRQ(ierr);}
else {ierr = VecCUDARestoreArrayWrite(zz,&zarray);CHKERRQ(ierr);}
} catch(char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
}
if (yy) {
ierr = PetscLogGpuFlops(2.0*a->nz);CHKERRQ(ierr);
} else {
ierr = PetscLogGpuFlops(2.0*a->nz-a->nonzerorowcnt);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,yy,zz,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatAssemblyEnd_SeqAIJCUSPARSE(Mat A,MatAssemblyType mode)
{
PetscErrorCode ierr;
PetscSplitCSRDataStructure *d_mat = NULL;
PetscFunctionBegin;
if (A->factortype == MAT_FACTOR_NONE) {
d_mat = ((Mat_SeqAIJCUSPARSE*)A->spptr)->deviceMat;
}
ierr = MatAssemblyEnd_SeqAIJ(A,mode);CHKERRQ(ierr); // this does very little if assembled on GPU - call it?
if (mode == MAT_FLUSH_ASSEMBLY || A->boundtocpu) PetscFunctionReturn(0);
if (d_mat) {
A->offloadmask = PETSC_OFFLOAD_GPU;
}
PetscFunctionReturn(0);
}
/* --------------------------------------------------------------------------------*/
/*@
MatCreateSeqAIJCUSPARSE - Creates a sparse matrix in AIJ (compressed row) format
(the default sequential PETSc format). This matrix will ultimately be pushed down
to NVIDIA GPUs and use the CUSPARSE library for calculations. For good matrix
assembly performance the user should preallocate the matrix storage by setting
the parameter nz (or the array nnz). By setting these parameters accurately,
performance during matrix assembly can be increased by more than a factor of 50.
Collective
Input Parameters:
+ comm - MPI communicator, set to PETSC_COMM_SELF
. m - number of rows
. n - number of columns
. nz - number of nonzeros per row (same for all rows)
- nnz - array containing the number of nonzeros in the various rows
(possibly different for each row) or NULL
Output Parameter:
. A - the matrix
It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
MatXXXXSetPreallocation() paradigm instead of this routine directly.
[MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation]
Notes:
If nnz is given then nz is ignored
The AIJ format (also called the Yale sparse matrix format or
compressed row storage), is fully compatible with standard Fortran 77
storage. That is, the stored row and column indices can begin at
either one (as in Fortran) or zero. See the users' manual for details.
Specify the preallocated storage with either nz or nnz (not both).
Set nz=PETSC_DEFAULT and nnz=NULL for PETSc to control dynamic memory
allocation. For large problems you MUST preallocate memory or you
will get TERRIBLE performance, see the users' manual chapter on matrices.
By default, this format uses inodes (identical nodes) when possible, to
improve numerical efficiency of matrix-vector products and solves. We
search for consecutive rows with the same nonzero structure, thereby
reusing matrix information to achieve increased efficiency.
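A minimal usage sketch (error checking abbreviated; it assumes PetscInitialize() has been called and that PETSc was
configured with CUDA support):
    Mat            A;
    Vec            x,y;
    PetscInt       i,n = 10;
    PetscErrorCode ierr;
    ierr = MatCreateSeqAIJCUSPARSE(PETSC_COMM_SELF,n,n,3,NULL,&A);CHKERRQ(ierr);
    for (i=0; i<n; i++) {ierr = MatSetValue(A,i,i,2.0,INSERT_VALUES);CHKERRQ(ierr);}
    ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatCreateVecs(A,&x,&y);CHKERRQ(ierr);
    ierr = VecSet(x,1.0);CHKERRQ(ierr);
    ierr = MatMult(A,x,y);CHKERRQ(ierr);
    ierr = VecDestroy(&x);CHKERRQ(ierr);
    ierr = VecDestroy(&y);CHKERRQ(ierr);
    ierr = MatDestroy(&A);CHKERRQ(ierr);
The MatMult() above executes on the GPU through the CUSPARSE library.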
Level: intermediate
.seealso: MatCreate(), MatCreateAIJ(), MatSetValues(), MatSeqAIJSetColumnIndices(), MatCreateSeqAIJWithArrays(), MatCreateAIJ(), MATSEQAIJCUSPARSE, MATAIJCUSPARSE
@*/
PetscErrorCode MatCreateSeqAIJCUSPARSE(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt nz,const PetscInt nnz[],Mat *A)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatCreate(comm,A);CHKERRQ(ierr);
ierr = MatSetSizes(*A,m,n,m,n);CHKERRQ(ierr);
ierr = MatSetType(*A,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
ierr = MatSeqAIJSetPreallocation_SeqAIJ(*A,nz,(PetscInt*)nnz);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatDestroy_SeqAIJCUSPARSE(Mat A)
{
PetscErrorCode ierr;
PetscSplitCSRDataStructure *d_mat = NULL;
PetscFunctionBegin;
if (A->factortype == MAT_FACTOR_NONE) {
d_mat = ((Mat_SeqAIJCUSPARSE*)A->spptr)->deviceMat;
((Mat_SeqAIJCUSPARSE*)A->spptr)->deviceMat = NULL;
ierr = MatSeqAIJCUSPARSE_Destroy((Mat_SeqAIJCUSPARSE**)&A->spptr);CHKERRQ(ierr);
} else {
ierr = MatSeqAIJCUSPARSETriFactors_Destroy((Mat_SeqAIJCUSPARSETriFactors**)&A->spptr);CHKERRQ(ierr);
}
if (d_mat) {
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
hipError_t err;
PetscSplitCSRDataStructure h_mat;
ierr = PetscInfo(A,"Have device matrix\n");CHKERRQ(ierr);
err = hipMemcpy( &h_mat, d_mat, sizeof(PetscSplitCSRDataStructure), hipMemcpyDeviceToHost);CHKERRCUDA(err);
if (a->compressedrow.use) {
err = hipFree(h_mat.diag.i);CHKERRCUDA(err);
}
err = hipFree(d_mat);CHKERRCUDA(err);
}
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJCopySubArray_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatCUSPARSESetFormat_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdense_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatFactorGetSolverType_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetPreallocationCOO_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetValuesCOO_C",NULL);CHKERRQ(ierr);
ierr = MatDestroy_SeqAIJ(A);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJCUSPARSE(Mat,MatType,MatReuse,Mat*);
static PetscErrorCode MatBindToCPU_SeqAIJCUSPARSE(Mat,PetscBool);
static PetscErrorCode MatDuplicate_SeqAIJCUSPARSE(Mat A,MatDuplicateOption cpvalues,Mat *B)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatDuplicate_SeqAIJ(A,cpvalues,B);CHKERRQ(ierr);
ierr = MatConvert_SeqAIJ_SeqAIJCUSPARSE(*B,MATSEQAIJCUSPARSE,MAT_INPLACE_MATRIX,B);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatAXPY_SeqAIJCUSPARSE(Mat Y,PetscScalar a,Mat X,MatStructure str)
{
PetscErrorCode ierr;
Mat_SeqAIJ *x = (Mat_SeqAIJ*)X->data,*y = (Mat_SeqAIJ*)Y->data;
Mat_SeqAIJCUSPARSE *cy;
Mat_SeqAIJCUSPARSE *cx;
PetscScalar *ay;
const PetscScalar *ax;
CsrMatrix *csry,*csrx;
hipError_t cerr;
PetscFunctionBegin;
cy = (Mat_SeqAIJCUSPARSE*)Y->spptr;
cx = (Mat_SeqAIJCUSPARSE*)X->spptr;
if (X->ops->axpy != Y->ops->axpy) {
ierr = MatSeqAIJCUSPARSEInvalidateTranspose(Y,PETSC_FALSE);CHKERRQ(ierr);
ierr = MatAXPY_SeqAIJ(Y,a,X,str);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* if we are here, it means both matrices are bound to GPU */
ierr = MatSeqAIJCUSPARSECopyToGPU(Y);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSECopyToGPU(X);CHKERRQ(ierr);
if (cy->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)Y),PETSC_ERR_PLIB,"only MAT_CUSPARSE_CSR supported");
if (cx->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)X),PETSC_ERR_PLIB,"only MAT_CUSPARSE_CSR supported");
csry = (CsrMatrix*)cy->mat->mat;
csrx = (CsrMatrix*)cx->mat->mat;
/* see if we can turn this into a cublas axpy */
if (str != SAME_NONZERO_PATTERN && x->nz == y->nz && !x->compressedrow.use && !y->compressedrow.use) {
bool eq = thrust::equal(thrust::device,csry->row_offsets->begin(),csry->row_offsets->end(),csrx->row_offsets->begin());
if (eq) {
eq = thrust::equal(thrust::device,csry->column_indices->begin(),csry->column_indices->end(),csrx->column_indices->begin());
}
if (eq) str = SAME_NONZERO_PATTERN;
}
/* spgeam is buggy with one column */
if (Y->cmap->n == 1 && str != SAME_NONZERO_PATTERN) str = DIFFERENT_NONZERO_PATTERN;
if (str == SUBSET_NONZERO_PATTERN) {
hipsparseStatus_t stat;
PetscScalar b = 1.0;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
size_t bufferSize;
void *buffer;
#endif
ierr = MatSeqAIJCUSPARSEGetArrayRead(X,&ax);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEGetArray(Y,&ay);CHKERRQ(ierr);
stat = hipsparseSetPointerMode(cy->handle, HIPSPARSE_POINTER_MODE_HOST);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = cusparse_csr_spgeam_bufferSize(cy->handle,Y->rmap->n,Y->cmap->n,
&a,cx->mat->descr,x->nz,ax,csrx->row_offsets->data().get(),csrx->column_indices->data().get(),
&b,cy->mat->descr,y->nz,ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),
cy->mat->descr, ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),&bufferSize);CHKERRCUSPARSE(stat);
cerr = hipMalloc(&buffer,bufferSize);CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
stat = cusparse_csr_spgeam(cy->handle,Y->rmap->n,Y->cmap->n,
&a,cx->mat->descr,x->nz,ax,csrx->row_offsets->data().get(),csrx->column_indices->data().get(),
&b,cy->mat->descr,y->nz,ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),
cy->mat->descr, ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),buffer);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuFlops(x->nz + y->nz);CHKERRQ(ierr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
cerr = hipFree(buffer);CHKERRCUDA(cerr);
#else
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
stat = cusparse_csr_spgeam(cy->handle,Y->rmap->n,Y->cmap->n,
&a,cx->mat->descr,x->nz,ax,csrx->row_offsets->data().get(),csrx->column_indices->data().get(),
&b,cy->mat->descr,y->nz,ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),
cy->mat->descr, ay,csry->row_offsets->data().get(),csry->column_indices->data().get());CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuFlops(x->nz + y->nz);CHKERRQ(ierr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
#endif
stat = hipsparseSetPointerMode(cy->handle, HIPSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
ierr = MatSeqAIJCUSPARSERestoreArrayRead(X,&ax);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSERestoreArray(Y,&ay);CHKERRQ(ierr);
ierr = MatSeqAIJInvalidateDiagonal(Y);CHKERRQ(ierr);
} else if (str == SAME_NONZERO_PATTERN) {
hipblasHandle_t cublasv2handle;
hipblasStatus_t berr;
PetscBLASInt one = 1, bnz = 1;
ierr = MatSeqAIJCUSPARSEGetArrayRead(X,&ax);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEGetArray(Y,&ay);CHKERRQ(ierr);
ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr);
ierr = PetscBLASIntCast(x->nz,&bnz);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
berr = cublasXaxpy(cublasv2handle,bnz,&a,ax,one,ay,one);CHKERRCUBLAS(berr);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuFlops(2.0*bnz);CHKERRQ(ierr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSERestoreArrayRead(X,&ax);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSERestoreArray(Y,&ay);CHKERRQ(ierr);
ierr = MatSeqAIJInvalidateDiagonal(Y);CHKERRQ(ierr);
} else {
ierr = MatSeqAIJCUSPARSEInvalidateTranspose(Y,PETSC_FALSE);CHKERRQ(ierr);
ierr = MatAXPY_SeqAIJ(Y,a,X,str);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatScale_SeqAIJCUSPARSE(Mat Y,PetscScalar a)
{
PetscErrorCode ierr;
Mat_SeqAIJ *y = (Mat_SeqAIJ*)Y->data;
PetscScalar *ay;
hipError_t cerr;
hipblasHandle_t cublasv2handle;
hipblasStatus_t berr;
PetscBLASInt one = 1, bnz = 1;
PetscFunctionBegin;
ierr = MatSeqAIJCUSPARSEGetArray(Y,&ay);CHKERRQ(ierr);
ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr);
ierr = PetscBLASIntCast(y->nz,&bnz);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
berr = cublasXscal(cublasv2handle,bnz,&a,ay,one);CHKERRCUBLAS(berr);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuFlops(bnz);CHKERRQ(ierr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSERestoreArray(Y,&ay);CHKERRQ(ierr);
ierr = MatSeqAIJInvalidateDiagonal(Y);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatZeroEntries_SeqAIJCUSPARSE(Mat A)
{
PetscErrorCode ierr;
PetscBool both = PETSC_FALSE;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
PetscFunctionBegin;
if (A->factortype == MAT_FACTOR_NONE) {
Mat_SeqAIJCUSPARSE *spptr = (Mat_SeqAIJCUSPARSE*)A->spptr;
if (spptr->mat) {
CsrMatrix* matrix = (CsrMatrix*)spptr->mat->mat;
if (matrix->values) {
both = PETSC_TRUE;
thrust::fill(thrust::device,matrix->values->begin(),matrix->values->end(),0.);
}
}
if (spptr->matTranspose) {
CsrMatrix* matrix = (CsrMatrix*)spptr->matTranspose->mat;
if (matrix->values) {
thrust::fill(thrust::device,matrix->values->begin(),matrix->values->end(),0.);
}
}
}
//ierr = MatZeroEntries_SeqAIJ(A);CHKERRQ(ierr);
ierr = PetscArrayzero(a->a,a->i[A->rmap->n]);CHKERRQ(ierr);
ierr = MatSeqAIJInvalidateDiagonal(A);CHKERRQ(ierr);
if (both) A->offloadmask = PETSC_OFFLOAD_BOTH;
else A->offloadmask = PETSC_OFFLOAD_CPU;
PetscFunctionReturn(0);
}
static PetscErrorCode MatBindToCPU_SeqAIJCUSPARSE(Mat A,PetscBool flg)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
if (A->factortype != MAT_FACTOR_NONE) PetscFunctionReturn(0);
if (flg) {
ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr);
A->ops->scale = MatScale_SeqAIJ;
A->ops->axpy = MatAXPY_SeqAIJ;
A->ops->zeroentries = MatZeroEntries_SeqAIJ;
A->ops->mult = MatMult_SeqAIJ;
A->ops->multadd = MatMultAdd_SeqAIJ;
A->ops->multtranspose = MatMultTranspose_SeqAIJ;
A->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJ;
A->ops->multhermitiantranspose = NULL;
A->ops->multhermitiantransposeadd = NULL;
A->ops->productsetfromoptions = MatProductSetFromOptions_SeqAIJ;
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJCopySubArray_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdense_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetPreallocationCOO_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetValuesCOO_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJGetArray_C",MatSeqAIJGetArray_SeqAIJ);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C",NULL);CHKERRQ(ierr);
} else {
A->ops->scale = MatScale_SeqAIJCUSPARSE;
A->ops->axpy = MatAXPY_SeqAIJCUSPARSE;
A->ops->zeroentries = MatZeroEntries_SeqAIJCUSPARSE;
A->ops->mult = MatMult_SeqAIJCUSPARSE;
A->ops->multadd = MatMultAdd_SeqAIJCUSPARSE;
A->ops->multtranspose = MatMultTranspose_SeqAIJCUSPARSE;
A->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJCUSPARSE;
A->ops->multhermitiantranspose = MatMultHermitianTranspose_SeqAIJCUSPARSE;
A->ops->multhermitiantransposeadd = MatMultHermitianTransposeAdd_SeqAIJCUSPARSE;
A->ops->productsetfromoptions = MatProductSetFromOptions_SeqAIJCUSPARSE;
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJCopySubArray_C",MatSeqAIJCopySubArray_SeqAIJCUSPARSE);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C",MatProductSetFromOptions_SeqAIJCUSPARSE);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdense_C",MatProductSetFromOptions_SeqAIJCUSPARSE);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetPreallocationCOO_C",MatSetPreallocationCOO_SeqAIJCUSPARSE);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetValuesCOO_C",MatSetValuesCOO_SeqAIJCUSPARSE);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJGetArray_C",MatSeqAIJGetArray_SeqAIJCUSPARSE);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C",MatProductSetFromOptions_SeqAIJCUSPARSE);CHKERRQ(ierr);
}
A->boundtocpu = flg;
a->inode.use = flg;
PetscFunctionReturn(0);
}
PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJCUSPARSE(Mat A, MatType mtype, MatReuse reuse, Mat* newmat)
{
PetscErrorCode ierr;
hipsparseStatus_t stat;
Mat B;
PetscFunctionBegin;
ierr = PetscCUDAInitializeCheck();CHKERRQ(ierr); /* first use of CUSPARSE may be via MatConvert */
if (reuse == MAT_INITIAL_MATRIX) {
ierr = MatDuplicate(A,MAT_COPY_VALUES,newmat);CHKERRQ(ierr);
} else if (reuse == MAT_REUSE_MATRIX) {
ierr = MatCopy(A,*newmat,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
}
B = *newmat;
ierr = PetscFree(B->defaultvectype);CHKERRQ(ierr);
ierr = PetscStrallocpy(VECCUDA,&B->defaultvectype);CHKERRQ(ierr);
if (reuse != MAT_REUSE_MATRIX && !B->spptr) {
if (B->factortype == MAT_FACTOR_NONE) {
Mat_SeqAIJCUSPARSE *spptr;
ierr = PetscNew(&spptr);CHKERRQ(ierr);
stat = hipsparseCreate(&spptr->handle);CHKERRCUSPARSE(stat);
stat = hipsparseSetStream(spptr->handle,PetscDefaultCudaStream);CHKERRCUSPARSE(stat);
spptr->format = MAT_CUSPARSE_CSR;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
spptr->spmvAlg = HIPSPARSE_CSRMV_ALG1; /* default, since we only support csr */
spptr->spmmAlg = HIPSPARSE_CSRMM_ALG1; /* default, only support column-major dense matrix B */
spptr->csr2cscAlg = HIPSPARSE_CSR2CSC_ALG1;
#endif
B->spptr = spptr;
} else {
Mat_SeqAIJCUSPARSETriFactors *spptr;
ierr = PetscNew(&spptr);CHKERRQ(ierr);
stat = hipsparseCreate(&spptr->handle);CHKERRCUSPARSE(stat);
stat = hipsparseSetStream(spptr->handle,PetscDefaultCudaStream);CHKERRCUSPARSE(stat);
B->spptr = spptr;
}
B->offloadmask = PETSC_OFFLOAD_UNALLOCATED;
}
B->ops->assemblyend = MatAssemblyEnd_SeqAIJCUSPARSE;
B->ops->destroy = MatDestroy_SeqAIJCUSPARSE;
B->ops->setoption = MatSetOption_SeqAIJCUSPARSE;
B->ops->setfromoptions = MatSetFromOptions_SeqAIJCUSPARSE;
B->ops->bindtocpu = MatBindToCPU_SeqAIJCUSPARSE;
B->ops->duplicate = MatDuplicate_SeqAIJCUSPARSE;
ierr = MatBindToCPU_SeqAIJCUSPARSE(B,PETSC_FALSE);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)B,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatCUSPARSESetFormat_C",MatCUSPARSESetFormat_SeqAIJCUSPARSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PETSC_EXTERN PetscErrorCode MatCreate_SeqAIJCUSPARSE(Mat B)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatCreate_SeqAIJ(B);CHKERRQ(ierr);
ierr = MatConvert_SeqAIJ_SeqAIJCUSPARSE(B,MATSEQAIJCUSPARSE,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/*MC
MATSEQAIJCUSPARSE - MATAIJCUSPARSE = "(seq)aijcusparse" - A matrix type to be used for sparse matrices.
A matrix type whose data resides on NVIDIA GPUs. These matrices can be in either
CSR, ELL, or Hybrid format. The ELL and HYB formats require CUDA 4.2 or later.
All matrix calculations are performed on NVIDIA GPUs using the CUSPARSE library.
Options Database Keys:
+ -mat_type aijcusparse - sets the matrix type to "seqaijcusparse" during a call to MatSetFromOptions()
. -mat_cusparse_storage_format csr - sets the storage format of matrices (for MatMult and factors in MatSolve) during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid).
- -mat_cusparse_mult_storage_format csr - sets the storage format of matrices (for MatMult) during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid).
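For example, an application that calls MatSetFromOptions() on its matrices can typically be moved to the GPU with options
such as -mat_type aijcusparse -vec_type cuda (the exact option set depends on the application).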
Level: beginner
.seealso: MatCreateSeqAIJCUSPARSE(), MATAIJCUSPARSE, MatCreateAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation
M*/
PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse_band(Mat,MatFactorType,Mat*);
PETSC_EXTERN PetscErrorCode MatSolverTypeRegister_CUSPARSE(void)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSolverTypeRegister(MATSOLVERCUSPARSEBAND, MATSEQAIJ, MAT_FACTOR_LU,MatGetFactor_seqaijcusparse_cusparse_band);CHKERRQ(ierr);
ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_LU,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_CHOLESKY,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_ILU,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_ICC,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE **cusparsestruct)
{
PetscErrorCode ierr;
hipsparseStatus_t stat;
PetscFunctionBegin;
if (*cusparsestruct) {
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->mat,(*cusparsestruct)->format);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->matTranspose,(*cusparsestruct)->format);CHKERRQ(ierr);
delete (*cusparsestruct)->workVector;
delete (*cusparsestruct)->rowoffsets_gpu;
delete (*cusparsestruct)->cooPerm;
delete (*cusparsestruct)->cooPerm_a;
delete (*cusparsestruct)->csr2csc_i;
if ((*cusparsestruct)->handle) {stat = hipsparseDestroy((*cusparsestruct)->handle);CHKERRCUSPARSE(stat);}
ierr = PetscFree(*cusparsestruct);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
static PetscErrorCode CsrMatrix_Destroy(CsrMatrix **mat)
{
PetscFunctionBegin;
if (*mat) {
delete (*mat)->values;
delete (*mat)->column_indices;
delete (*mat)->row_offsets;
delete *mat;
*mat = 0;
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct **trifactor)
{
hipsparseStatus_t stat;
PetscErrorCode ierr;
PetscFunctionBegin;
if (*trifactor) {
if ((*trifactor)->descr) { stat = hipsparseDestroyMatDescr((*trifactor)->descr);CHKERRCUSPARSE(stat); }
if ((*trifactor)->solveInfo) { stat = cusparse_destroy_analysis_info((*trifactor)->solveInfo);CHKERRCUSPARSE(stat); }
ierr = CsrMatrix_Destroy(&(*trifactor)->csrMat);CHKERRQ(ierr);
if ((*trifactor)->solveBuffer) {hipError_t cerr = hipFree((*trifactor)->solveBuffer);CHKERRCUDA(cerr);}
if ((*trifactor)->AA_h) {hipError_t cerr = hipHostFree((*trifactor)->AA_h);CHKERRCUDA(cerr);}
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
if ((*trifactor)->csr2cscBuffer) {hipError_t cerr = hipFree((*trifactor)->csr2cscBuffer);CHKERRCUDA(cerr);}
#endif
ierr = PetscFree(*trifactor);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct **matstruct,MatCUSPARSEStorageFormat format)
{
CsrMatrix *mat;
hipsparseStatus_t stat;
hipError_t err;
PetscFunctionBegin;
if (*matstruct) {
if ((*matstruct)->mat) {
if (format==MAT_CUSPARSE_ELL || format==MAT_CUSPARSE_HYB) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
cusparseHybMat_t hybMat = (cusparseHybMat_t)(*matstruct)->mat;
stat = cusparseDestroyHybMat(hybMat);CHKERRCUSPARSE(stat);
#endif
} else {
mat = (CsrMatrix*)(*matstruct)->mat;
CsrMatrix_Destroy(&mat);
}
}
if ((*matstruct)->descr) { stat = hipsparseDestroyMatDescr((*matstruct)->descr);CHKERRCUSPARSE(stat); }
delete (*matstruct)->cprowIndices;
if ((*matstruct)->alpha_one) { err=hipFree((*matstruct)->alpha_one);CHKERRCUDA(err); }
if ((*matstruct)->beta_zero) { err=hipFree((*matstruct)->beta_zero);CHKERRCUDA(err); }
if ((*matstruct)->beta_one) { err=hipFree((*matstruct)->beta_one);CHKERRCUDA(err); }
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
Mat_SeqAIJCUSPARSEMultStruct *mdata = *matstruct;
if (mdata->matDescr) {stat = hipsparseDestroySpMat(mdata->matDescr);CHKERRCUSPARSE(stat);}
for (int i=0; i<3; i++) {
if (mdata->cuSpMV[i].initialized) {
err = hipFree(mdata->cuSpMV[i].spmvBuffer);CHKERRCUDA(err);
stat = hipsparseDestroyDnVec(mdata->cuSpMV[i].vecXDescr);CHKERRCUSPARSE(stat);
stat = hipsparseDestroyDnVec(mdata->cuSpMV[i].vecYDescr);CHKERRCUSPARSE(stat);
}
}
#endif
delete *matstruct;
*matstruct = NULL;
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Reset(Mat_SeqAIJCUSPARSETriFactors** trifactors)
{
PetscErrorCode ierr;
PetscFunctionBegin;
if (*trifactors) {
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->loTriFactorPtr);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->upTriFactorPtr);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->loTriFactorPtrTranspose);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->upTriFactorPtrTranspose);CHKERRQ(ierr);
delete (*trifactors)->rpermIndices;
delete (*trifactors)->cpermIndices;
delete (*trifactors)->workVector;
(*trifactors)->rpermIndices = NULL;
(*trifactors)->cpermIndices = NULL;
(*trifactors)->workVector = NULL;
if ((*trifactors)->a_band_d) {hipError_t cerr = hipFree((*trifactors)->a_band_d);CHKERRCUDA(cerr);}
if ((*trifactors)->i_band_d) {hipError_t cerr = hipFree((*trifactors)->i_band_d);CHKERRCUDA(cerr);}
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors** trifactors)
{
PetscErrorCode ierr;
hipsparseHandle_t handle;
hipsparseStatus_t stat;
PetscFunctionBegin;
if (*trifactors) {
ierr = MatSeqAIJCUSPARSETriFactors_Reset(trifactors);CHKERRQ(ierr);
if (handle = (*trifactors)->handle) {
stat = hipsparseDestroy(handle);CHKERRCUSPARSE(stat);
}
ierr = PetscFree(*trifactors);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
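/* Thrust functors used by MatSetPreallocationCOO_SeqAIJCUSPARSE below: IJCompare orders (row,col) pairs lexicographically,
   IJEqual tests two pairs for equality, IJDiff maps a pair of equal/unequal indices to 0/1, and IJSum is a logical or; they
   are used to sort the COO input and to detect repeated entries */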
struct IJCompare
{
__host__ __device__
inline bool operator() (const thrust::tuple<PetscInt, PetscInt> &t1, const thrust::tuple<PetscInt, PetscInt> &t2)
{
if (t1.get<0>() < t2.get<0>()) return true;
if (t1.get<0>() == t2.get<0>()) return t1.get<1>() < t2.get<1>();
return false;
}
};
struct IJEqual
{
__host__ __device__
inline bool operator() (const thrust::tuple<PetscInt, PetscInt> &t1, const thrust::tuple<PetscInt, PetscInt> &t2)
{
if (t1.get<0>() != t2.get<0>() || t1.get<1>() != t2.get<1>()) return false;
return true;
}
};
struct IJDiff
{
__host__ __device__
inline PetscInt operator() (const PetscInt &t1, const PetscInt &t2)
{
return t1 == t2 ? 0 : 1;
}
};
struct IJSum
{
__host__ __device__
inline PetscInt operator() (const PetscInt &t1, const PetscInt &t2)
{
return t1||t2;
}
};
#include <thrust/iterator/discard_iterator.h>
PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE(Mat A, const PetscScalar v[], InsertMode imode)
{
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
THRUSTARRAY *cooPerm_v = NULL;
thrust::device_ptr<const PetscScalar> d_v;
CsrMatrix *matrix;
PetscErrorCode ierr;
hipError_t cerr;
PetscInt n;
PetscFunctionBegin;
if (!cusp) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUSPARSE struct");
if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUSPARSE CsrMatrix");
if (!cusp->cooPerm) {
ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
matrix = (CsrMatrix*)cusp->mat->mat;
if (!matrix->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory");
if (!v) {
if (imode == INSERT_VALUES) thrust::fill(thrust::device,matrix->values->begin(),matrix->values->end(),0.);
goto finalize;
}
n = cusp->cooPerm->size();
if (isCudaMem(v)) {
d_v = thrust::device_pointer_cast(v);
} else {
cooPerm_v = new THRUSTARRAY(n);
cooPerm_v->assign(v,v+n);
d_v = cooPerm_v->data();
ierr = PetscLogCpuToGpu(n*sizeof(PetscScalar));CHKERRQ(ierr);
}
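/* cooPerm records, for each (sorted) nonzero slot, which entry of the user array v provides its value; when the COO input
   contained repeated (i,j) entries, cooPerm_a provides the key array that reduce_by_key uses to sum the duplicates into a
   single CSR slot */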
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
if (imode == ADD_VALUES) { /* ADD VALUES means add to existing ones */
if (cusp->cooPerm_a) {
THRUSTARRAY *cooPerm_w = new THRUSTARRAY(matrix->values->size());
auto vbit = thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin());
thrust::reduce_by_key(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),vbit,thrust::make_discard_iterator(),cooPerm_w->begin(),thrust::equal_to<PetscInt>(),thrust::plus<PetscScalar>());
thrust::transform(cooPerm_w->begin(),cooPerm_w->end(),matrix->values->begin(),matrix->values->begin(),thrust::plus<PetscScalar>());
delete cooPerm_w;
} else {
auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin()),
matrix->values->begin()));
auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->end()),
matrix->values->end()));
thrust::for_each(zibit,zieit,VecCUDAPlusEquals());
}
} else {
if (cusp->cooPerm_a) { /* repeated entries in COO, with INSERT_VALUES -> reduce */
auto vbit = thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin());
thrust::reduce_by_key(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),vbit,thrust::make_discard_iterator(),matrix->values->begin(),thrust::equal_to<PetscInt>(),thrust::plus<PetscScalar>());
} else {
auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin()),
matrix->values->begin()));
auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->end()),
matrix->values->end()));
thrust::for_each(zibit,zieit,VecCUDAEquals());
}
}
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
finalize:
delete cooPerm_v;
A->offloadmask = PETSC_OFFLOAD_GPU;
ierr = PetscObjectStateIncrease((PetscObject)A);CHKERRQ(ierr);
/* shorter version of MatAssemblyEnd_SeqAIJ */
ierr = PetscInfo3(A,"Matrix size: %D X %D; storage space: 0 unneeded,%D used\n",A->rmap->n,A->cmap->n,a->nz);CHKERRQ(ierr);
ierr = PetscInfo(A,"Number of mallocs during MatSetValues() is 0\n");CHKERRQ(ierr);
ierr = PetscInfo1(A,"Maximum nonzeros in any row is %D\n",a->rmax);CHKERRQ(ierr);
a->reallocs = 0;
A->info.mallocs += 0;
A->info.nz_unneeded = 0;
A->assembled = A->was_assembled = PETSC_TRUE;
A->num_ass++;
PetscFunctionReturn(0);
}
PetscErrorCode MatSeqAIJCUSPARSEInvalidateTranspose(Mat A, PetscBool destroy)
{
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
PetscErrorCode ierr;
PetscFunctionBegin;
PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
if (!cusp) PetscFunctionReturn(0);
if (destroy) {
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&cusp->matTranspose,cusp->format);CHKERRQ(ierr);
delete cusp->csr2csc_i;
cusp->csr2csc_i = NULL;
}
A->transupdated = PETSC_FALSE;
PetscFunctionReturn(0);
}
#include <thrust/binary_search.h>
PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE(Mat A, PetscInt n, const PetscInt coo_i[], const PetscInt coo_j[])
{
PetscErrorCode ierr;
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
PetscInt cooPerm_n, nzr = 0;
hipError_t cerr;
PetscFunctionBegin;
ierr = PetscLayoutSetUp(A->rmap);CHKERRQ(ierr);
ierr = PetscLayoutSetUp(A->cmap);CHKERRQ(ierr);
cooPerm_n = cusp->cooPerm ? cusp->cooPerm->size() : 0;
if (n != cooPerm_n) {
delete cusp->cooPerm;
delete cusp->cooPerm_a;
cusp->cooPerm = NULL;
cusp->cooPerm_a = NULL;
}
if (n) {
THRUSTINTARRAY d_i(n);
THRUSTINTARRAY d_j(n);
THRUSTINTARRAY ii(A->rmap->n);
if (!cusp->cooPerm) { cusp->cooPerm = new THRUSTINTARRAY(n); }
if (!cusp->cooPerm_a) { cusp->cooPerm_a = new THRUSTINTARRAY(n); }
ierr = PetscLogCpuToGpu(2.*n*sizeof(PetscInt));CHKERRQ(ierr);
d_i.assign(coo_i,coo_i+n);
d_j.assign(coo_j,coo_j+n);
auto fkey = thrust::make_zip_iterator(thrust::make_tuple(d_i.begin(),d_j.begin()));
auto ekey = thrust::make_zip_iterator(thrust::make_tuple(d_i.end(),d_j.end()));
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
thrust::sequence(thrust::device, cusp->cooPerm->begin(), cusp->cooPerm->end(), 0);
thrust::sort_by_key(fkey, ekey, cusp->cooPerm->begin(), IJCompare());
*cusp->cooPerm_a = d_i;
THRUSTINTARRAY w = d_j;
auto nekey = thrust::unique(fkey, ekey, IJEqual());
if (nekey == ekey) { /* all entries are unique */
delete cusp->cooPerm_a;
cusp->cooPerm_a = NULL;
} else { /* I couldn't come up with a more elegant algorithm */
adjacent_difference(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),cusp->cooPerm_a->begin(),IJDiff());
adjacent_difference(w.begin(),w.end(),w.begin(),IJDiff());
(*cusp->cooPerm_a)[0] = 0;
w[0] = 0;
thrust::transform(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),w.begin(),cusp->cooPerm_a->begin(),IJSum());
thrust::inclusive_scan(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),cusp->cooPerm_a->begin(),thrust::plus<PetscInt>());
}
thrust::counting_iterator<PetscInt> search_begin(0);
thrust::upper_bound(d_i.begin(), nekey.get_iterator_tuple().get<0>(),
search_begin, search_begin + A->rmap->n,
ii.begin());
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = MatSeqXAIJFreeAIJ(A,&a->a,&a->j,&a->i);CHKERRQ(ierr);
a->singlemalloc = PETSC_FALSE;
a->free_a = PETSC_TRUE;
a->free_ij = PETSC_TRUE;
ierr = PetscMalloc1(A->rmap->n+1,&a->i);CHKERRQ(ierr);
a->i[0] = 0;
cerr = hipMemcpy(a->i+1,ii.data().get(),A->rmap->n*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
a->nz = a->maxnz = a->i[A->rmap->n];
a->rmax = 0;
ierr = PetscMalloc1(a->nz,&a->a);CHKERRQ(ierr);
ierr = PetscMalloc1(a->nz,&a->j);CHKERRQ(ierr);
cerr = hipMemcpy(a->j,d_j.data().get(),a->nz*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
if (!a->ilen) { ierr = PetscMalloc1(A->rmap->n,&a->ilen);CHKERRQ(ierr); }
if (!a->imax) { ierr = PetscMalloc1(A->rmap->n,&a->imax);CHKERRQ(ierr); }
for (PetscInt i = 0; i < A->rmap->n; i++) {
const PetscInt nnzr = a->i[i+1] - a->i[i];
nzr += (PetscInt)!!(nnzr);
a->ilen[i] = a->imax[i] = nnzr;
a->rmax = PetscMax(a->rmax,nnzr);
}
a->nonzerorowcnt = nzr;
A->preallocated = PETSC_TRUE;
ierr = PetscLogGpuToCpu((A->rmap->n+a->nz)*sizeof(PetscInt));CHKERRQ(ierr);
ierr = MatMarkDiagonal_SeqAIJ(A);CHKERRQ(ierr);
} else {
ierr = MatSeqAIJSetPreallocation(A,0,NULL);CHKERRQ(ierr);
}
ierr = MatSetOption(A,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_TRUE);CHKERRQ(ierr);
/* We want to allocate the CUSPARSE struct for matvec now.
The code is so convoluted now that I prefer to copy zeros */
ierr = PetscArrayzero(a->a,a->nz);CHKERRQ(ierr);
ierr = MatCheckCompressedRow(A,nzr,&a->compressedrow,a->i,A->rmap->n,0.6);CHKERRQ(ierr);
A->offloadmask = PETSC_OFFLOAD_CPU;
A->nonzerostate++;
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_TRUE);CHKERRQ(ierr);
A->assembled = PETSC_FALSE;
A->was_assembled = PETSC_FALSE;
PetscFunctionReturn(0);
}
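/*
  A minimal usage sketch of the COO assembly path implemented above (host-side driver code, error handling
  abbreviated; the generic MatSetPreallocationCOO()/MatSetValuesCOO() interface dispatches to these routines
  for a MATSEQAIJCUSPARSE matrix A that has already been created and sized):

    PetscInt    coo_i[] = {0,0,1};        // row indices, duplicates allowed
    PetscInt    coo_j[] = {1,1,0};        // column indices
    PetscScalar v[]     = {2.0,-1.0,3.0}; // values, may live in host or device memory
    ierr = MatSetPreallocationCOO(A,3,coo_i,coo_j);CHKERRQ(ierr);
    ierr = MatSetValuesCOO(A,v,INSERT_VALUES);CHKERRQ(ierr); // repeated (i,j) entries are summed on the GPU
*/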
PetscErrorCode MatSeqAIJCUSPARSEGetArrayRead(Mat A, const PetscScalar** a)
{
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
CsrMatrix *csr;
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidPointer(a,2);
PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
if (cusp->format == MAT_CUSPARSE_ELL || cusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
csr = (CsrMatrix*)cusp->mat->mat;
if (!csr->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory");
*a = csr->values->data().get();
PetscFunctionReturn(0);
}
PetscErrorCode MatSeqAIJCUSPARSERestoreArrayRead(Mat A, const PetscScalar** a)
{
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidPointer(a,2);
PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
*a = NULL;
PetscFunctionReturn(0);
}
PetscErrorCode MatSeqAIJCUSPARSEGetArray(Mat A, PetscScalar** a)
{
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
CsrMatrix *csr;
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidPointer(a,2);
PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
if (cusp->format == MAT_CUSPARSE_ELL || cusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
csr = (CsrMatrix*)cusp->mat->mat;
if (!csr->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory");
*a = csr->values->data().get();
A->offloadmask = PETSC_OFFLOAD_GPU;
ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatSeqAIJCUSPARSERestoreArray(Mat A, PetscScalar** a)
{
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidPointer(a,2);
PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
ierr = PetscObjectStateIncrease((PetscObject)A);CHKERRQ(ierr);
*a = NULL;
PetscFunctionReturn(0);
}
PetscErrorCode MatSeqAIJCUSPARSEGetArrayWrite(Mat A, PetscScalar** a)
{
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
CsrMatrix *csr;
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidPointer(a,2);
PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
if (cusp->format == MAT_CUSPARSE_ELL || cusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
csr = (CsrMatrix*)cusp->mat->mat;
if (!csr->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory");
*a = csr->values->data().get();
A->offloadmask = PETSC_OFFLOAD_GPU;
ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatSeqAIJCUSPARSERestoreArrayWrite(Mat A, PetscScalar** a)
{
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidPointer(a,2);
PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
ierr = PetscObjectStateIncrease((PetscObject)A);CHKERRQ(ierr);
*a = NULL;
PetscFunctionReturn(0);
}
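/*
  The Get/Restore pairs above follow the usual PETSc access pattern; a sketch of read-write access to the
  CSR values stored on the device (error handling abbreviated):

    PetscScalar *va;
    ierr = MatSeqAIJCUSPARSEGetArray(A,&va);CHKERRQ(ierr);     // values current on the GPU, cached transpose invalidated
    // ... launch a kernel that modifies va in place ...
    ierr = MatSeqAIJCUSPARSERestoreArray(A,&va);CHKERRQ(ierr); // bumps the object state so other copies are refreshed
*/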
struct IJCompare4
{
__host__ __device__
inline bool operator() (const thrust::tuple<int, int, PetscScalar, int> &t1, const thrust::tuple<int, int, PetscScalar, int> &t2)
{
if (t1.get<0>() < t2.get<0>()) return true;
if (t1.get<0>() == t2.get<0>()) return t1.get<1>() < t2.get<1>();
return false;
}
};
struct Shift
{
int _shift;
Shift(int shift) : _shift(shift) {}
__host__ __device__
inline int operator() (const int &c)
{
return c + _shift;
}
};
/* merges two SeqAIJCUSPARSE matrices into C = [A,B], i.e. the [A';B']' operation in MATLAB notation */
PetscErrorCode MatSeqAIJCUSPARSEMergeMats(Mat A,Mat B,MatReuse reuse,Mat* C)
{
PetscErrorCode ierr;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data, *b = (Mat_SeqAIJ*)B->data, *c;
Mat_SeqAIJCUSPARSE *Acusp = (Mat_SeqAIJCUSPARSE*)A->spptr, *Bcusp = (Mat_SeqAIJCUSPARSE*)B->spptr, *Ccusp;
Mat_SeqAIJCUSPARSEMultStruct *Cmat;
CsrMatrix *Acsr,*Bcsr,*Ccsr;
PetscInt Annz,Bnnz;
hipsparseStatus_t stat;
PetscInt i,m,n,zero = 0;
hipError_t cerr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidHeaderSpecific(B,MAT_CLASSID,2);
PetscValidPointer(C,4);
PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
PetscCheckTypeName(B,MATSEQAIJCUSPARSE);
if (A->rmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Invalid number of rows %D != %D",A->rmap->n,B->rmap->n);
if (reuse == MAT_INPLACE_MATRIX) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_INPLACE_MATRIX not supported");
if (Acusp->format == MAT_CUSPARSE_ELL || Acusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
if (Bcusp->format == MAT_CUSPARSE_ELL || Bcusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
if (reuse == MAT_INITIAL_MATRIX) {
m = A->rmap->n;
n = A->cmap->n + B->cmap->n;
ierr = MatCreate(PETSC_COMM_SELF,C);CHKERRQ(ierr);
ierr = MatSetSizes(*C,m,n,m,n);CHKERRQ(ierr);
ierr = MatSetType(*C,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
c = (Mat_SeqAIJ*)(*C)->data;
Ccusp = (Mat_SeqAIJCUSPARSE*)(*C)->spptr;
Cmat = new Mat_SeqAIJCUSPARSEMultStruct;
Ccsr = new CsrMatrix;
Cmat->cprowIndices = NULL;
c->compressedrow.use = PETSC_FALSE;
c->compressedrow.nrows = 0;
c->compressedrow.i = NULL;
c->compressedrow.rindex = NULL;
Ccusp->workVector = NULL;
Ccusp->nrows = m;
Ccusp->mat = Cmat;
Ccusp->mat->mat = Ccsr;
Ccsr->num_rows = m;
Ccsr->num_cols = n;
stat = hipsparseCreateMatDescr(&Cmat->descr);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatIndexBase(Cmat->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatType(Cmat->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
cerr = hipMalloc((void **)&(Cmat->alpha_one),sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = hipMalloc((void **)&(Cmat->beta_zero),sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = hipMalloc((void **)&(Cmat->beta_one), sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = hipMemcpy(Cmat->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = hipMemcpy(Cmat->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = hipMemcpy(Cmat->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(A);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(B);CHKERRQ(ierr);
if (!Acusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
if (!Bcusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
Acsr = (CsrMatrix*)Acusp->mat->mat;
Bcsr = (CsrMatrix*)Bcusp->mat->mat;
Annz = (PetscInt)Acsr->column_indices->size();
Bnnz = (PetscInt)Bcsr->column_indices->size();
c->nz = Annz + Bnnz;
Ccsr->row_offsets = new THRUSTINTARRAY32(m+1);
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
Ccsr->values = new THRUSTARRAY(c->nz);
Ccsr->num_entries = c->nz;
Ccusp->cooPerm = new THRUSTINTARRAY(c->nz);
if (c->nz) {
auto Acoo = new THRUSTINTARRAY32(Annz);
auto Bcoo = new THRUSTINTARRAY32(Bnnz);
auto Ccoo = new THRUSTINTARRAY32(c->nz);
THRUSTINTARRAY32 *Aroff,*Broff;
if (a->compressedrow.use) { /* need full row offset */
if (!Acusp->rowoffsets_gpu) {
Acusp->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1);
Acusp->rowoffsets_gpu->assign(a->i,a->i + A->rmap->n + 1);
ierr = PetscLogCpuToGpu((A->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr);
}
Aroff = Acusp->rowoffsets_gpu;
} else Aroff = Acsr->row_offsets;
if (b->compressedrow.use) { /* need full row offset */
if (!Bcusp->rowoffsets_gpu) {
Bcusp->rowoffsets_gpu = new THRUSTINTARRAY32(B->rmap->n + 1);
Bcusp->rowoffsets_gpu->assign(b->i,b->i + B->rmap->n + 1);
ierr = PetscLogCpuToGpu((B->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr);
}
Broff = Bcusp->rowoffsets_gpu;
} else Broff = Bcsr->row_offsets;
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
stat = hipsparseXcsr2coo(Acusp->handle,
Aroff->data().get(),
Annz,
m,
Acoo->data().get(),
HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
stat = hipsparseXcsr2coo(Bcusp->handle,
Broff->data().get(),
Bnnz,
m,
Bcoo->data().get(),
HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
/* Issues when using bool with large matrices on SUMMIT 10.2.89 */
auto Aperm = thrust::make_constant_iterator(1);
auto Bperm = thrust::make_constant_iterator(0);
#if PETSC_PKG_CUDA_VERSION_GE(10,0,0)
auto Bcib = thrust::make_transform_iterator(Bcsr->column_indices->begin(),Shift(A->cmap->n));
auto Bcie = thrust::make_transform_iterator(Bcsr->column_indices->end(),Shift(A->cmap->n));
#else
/* there are issues instantiating the merge operation using a transform iterator for the columns of B */
auto Bcib = Bcsr->column_indices->begin();
auto Bcie = Bcsr->column_indices->end();
thrust::transform(Bcib,Bcie,Bcib,Shift(A->cmap->n));
#endif
auto wPerm = new THRUSTINTARRAY32(Annz+Bnnz);
auto Azb = thrust::make_zip_iterator(thrust::make_tuple(Acoo->begin(),Acsr->column_indices->begin(),Acsr->values->begin(),Aperm));
auto Aze = thrust::make_zip_iterator(thrust::make_tuple(Acoo->end(),Acsr->column_indices->end(),Acsr->values->end(),Aperm));
auto Bzb = thrust::make_zip_iterator(thrust::make_tuple(Bcoo->begin(),Bcib,Bcsr->values->begin(),Bperm));
auto Bze = thrust::make_zip_iterator(thrust::make_tuple(Bcoo->end(),Bcie,Bcsr->values->end(),Bperm));
auto Czb = thrust::make_zip_iterator(thrust::make_tuple(Ccoo->begin(),Ccsr->column_indices->begin(),Ccsr->values->begin(),wPerm->begin()));
auto p1 = Ccusp->cooPerm->begin();
auto p2 = Ccusp->cooPerm->begin();
thrust::advance(p2,Annz);
PetscStackCallThrust(thrust::merge(thrust::device,Azb,Aze,Bzb,Bze,Czb,IJCompare4()));
#if PETSC_PKG_CUDA_VERSION_LT(10,0,0)
thrust::transform(Bcib,Bcie,Bcib,Shift(-A->cmap->n));
#endif
auto cci = thrust::make_counting_iterator(zero);
auto cce = thrust::make_counting_iterator(c->nz);
#if 0 //Errors on SUMMIT cuda 11.1.0
PetscStackCallThrust(thrust::partition_copy(thrust::device,cci,cce,wPerm->begin(),p1,p2,thrust::identity<int>()));
#else
auto pred = thrust::identity<int>();
PetscStackCallThrust(thrust::copy_if(thrust::device,cci,cce,wPerm->begin(),p1,pred));
PetscStackCallThrust(thrust::remove_copy_if(thrust::device,cci,cce,wPerm->begin(),p2,pred));
#endif
stat = hipsparseXcoo2csr(Ccusp->handle,
Ccoo->data().get(),
c->nz,
m,
Ccsr->row_offsets->data().get(),
HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
delete wPerm;
delete Acoo;
delete Bcoo;
delete Ccoo;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = hipsparseCreateCsr(&Cmat->matDescr, Ccsr->num_rows, Ccsr->num_cols, Ccsr->num_entries,
Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(), Ccsr->values->data().get(),
HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I,
HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat);
#endif
if (A->form_explicit_transpose && B->form_explicit_transpose) { /* if A and B have the transpose, generate C transpose too */
PetscBool AT = Acusp->matTranspose ? PETSC_TRUE : PETSC_FALSE, BT = Bcusp->matTranspose ? PETSC_TRUE : PETSC_FALSE;
Mat_SeqAIJCUSPARSEMultStruct *CmatT = new Mat_SeqAIJCUSPARSEMultStruct;
CsrMatrix *CcsrT = new CsrMatrix;
CsrMatrix *AcsrT = AT ? (CsrMatrix*)Acusp->matTranspose->mat : NULL;
CsrMatrix *BcsrT = BT ? (CsrMatrix*)Bcusp->matTranspose->mat : NULL;
(*C)->form_explicit_transpose = PETSC_TRUE;
(*C)->transupdated = PETSC_TRUE;
Ccusp->rowoffsets_gpu = NULL;
CmatT->cprowIndices = NULL;
CmatT->mat = CcsrT;
CcsrT->num_rows = n;
CcsrT->num_cols = m;
CcsrT->num_entries = c->nz;
CcsrT->row_offsets = new THRUSTINTARRAY32(n+1);
CcsrT->column_indices = new THRUSTINTARRAY32(c->nz);
CcsrT->values = new THRUSTARRAY(c->nz);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
auto rT = CcsrT->row_offsets->begin();
if (AT) {
rT = thrust::copy(AcsrT->row_offsets->begin(),AcsrT->row_offsets->end(),rT);
thrust::advance(rT,-1);
}
if (BT) {
auto titb = thrust::make_transform_iterator(BcsrT->row_offsets->begin(),Shift(a->nz));
auto tite = thrust::make_transform_iterator(BcsrT->row_offsets->end(),Shift(a->nz));
thrust::copy(titb,tite,rT);
}
auto cT = CcsrT->column_indices->begin();
if (AT) cT = thrust::copy(AcsrT->column_indices->begin(),AcsrT->column_indices->end(),cT);
if (BT) thrust::copy(BcsrT->column_indices->begin(),BcsrT->column_indices->end(),cT);
auto vT = CcsrT->values->begin();
if (AT) vT = thrust::copy(AcsrT->values->begin(),AcsrT->values->end(),vT);
if (BT) thrust::copy(BcsrT->values->begin(),BcsrT->values->end(),vT);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
stat = hipsparseCreateMatDescr(&CmatT->descr);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatIndexBase(CmatT->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatType(CmatT->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
cerr = hipMalloc((void **)&(CmatT->alpha_one),sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = hipMalloc((void **)&(CmatT->beta_zero),sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = hipMalloc((void **)&(CmatT->beta_one), sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = hipMemcpy(CmatT->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = hipMemcpy(CmatT->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = hipMemcpy(CmatT->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = hipsparseCreateCsr(&CmatT->matDescr, CcsrT->num_rows, CcsrT->num_cols, CcsrT->num_entries,
CcsrT->row_offsets->data().get(), CcsrT->column_indices->data().get(), CcsrT->values->data().get(),
HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I,
HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat);
#endif
Ccusp->matTranspose = CmatT;
}
}
c->singlemalloc = PETSC_FALSE;
c->free_a = PETSC_TRUE;
c->free_ij = PETSC_TRUE;
ierr = PetscMalloc1(m+1,&c->i);CHKERRQ(ierr);
ierr = PetscMalloc1(c->nz,&c->j);CHKERRQ(ierr);
if (PetscDefined(USE_64BIT_INDICES)) { /* 32 to 64 bit conversion on the GPU and then copy to host (lazy) */
THRUSTINTARRAY ii(Ccsr->row_offsets->size());
THRUSTINTARRAY jj(Ccsr->column_indices->size());
ii = *Ccsr->row_offsets;
jj = *Ccsr->column_indices;
cerr = hipMemcpy(c->i,ii.data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
cerr = hipMemcpy(c->j,jj.data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
} else {
cerr = hipMemcpy(c->i,Ccsr->row_offsets->data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
cerr = hipMemcpy(c->j,Ccsr->column_indices->data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
}
ierr = PetscLogGpuToCpu((Ccsr->column_indices->size() + Ccsr->row_offsets->size())*sizeof(PetscInt));CHKERRQ(ierr);
ierr = PetscMalloc1(m,&c->ilen);CHKERRQ(ierr);
ierr = PetscMalloc1(m,&c->imax);CHKERRQ(ierr);
c->maxnz = c->nz;
c->nonzerorowcnt = 0;
c->rmax = 0;
for (i = 0; i < m; i++) {
const PetscInt nn = c->i[i+1] - c->i[i];
c->ilen[i] = c->imax[i] = nn;
c->nonzerorowcnt += (PetscInt)!!nn;
c->rmax = PetscMax(c->rmax,nn);
}
ierr = MatMarkDiagonal_SeqAIJ(*C);CHKERRQ(ierr);
ierr = PetscMalloc1(c->nz,&c->a);CHKERRQ(ierr);
(*C)->nonzerostate++;
ierr = PetscLayoutSetUp((*C)->rmap);CHKERRQ(ierr);
ierr = PetscLayoutSetUp((*C)->cmap);CHKERRQ(ierr);
Ccusp->nonzerostate = (*C)->nonzerostate;
(*C)->preallocated = PETSC_TRUE;
} else {
if ((*C)->rmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Invalid number of rows %D != %D",(*C)->rmap->n,B->rmap->n);
c = (Mat_SeqAIJ*)(*C)->data;
if (c->nz) {
Ccusp = (Mat_SeqAIJCUSPARSE*)(*C)->spptr;
if (!Ccusp->cooPerm) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cooPerm");
if (Ccusp->format == MAT_CUSPARSE_ELL || Ccusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
if (Ccusp->nonzerostate != (*C)->nonzerostate) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Wrong nonzerostate");
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr);
if (!Acusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
if (!Bcusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
Acsr = (CsrMatrix*)Acusp->mat->mat;
Bcsr = (CsrMatrix*)Bcusp->mat->mat;
Ccsr = (CsrMatrix*)Ccusp->mat->mat;
if (Acsr->num_entries != (PetscInt)Acsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"A nnz %D != %D",Acsr->num_entries,(PetscInt)Acsr->values->size());
if (Bcsr->num_entries != (PetscInt)Bcsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"B nnz %D != %D",Bcsr->num_entries,(PetscInt)Bcsr->values->size());
if (Ccsr->num_entries != (PetscInt)Ccsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"C nnz %D != %D",Ccsr->num_entries,(PetscInt)Ccsr->values->size());
if (Ccsr->num_entries != Acsr->num_entries + Bcsr->num_entries) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_COR,"C nnz %D != %D + %D",Ccsr->num_entries,Acsr->num_entries,Bcsr->num_entries);
if (Ccusp->cooPerm->size() != Ccsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"permSize %D != %D",(PetscInt)Ccusp->cooPerm->size(),(PetscInt)Ccsr->values->size());
auto pmid = Ccusp->cooPerm->begin();
thrust::advance(pmid,Acsr->num_entries);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
auto zibait = thrust::make_zip_iterator(thrust::make_tuple(Acsr->values->begin(),
thrust::make_permutation_iterator(Ccsr->values->begin(),Ccusp->cooPerm->begin())));
auto zieait = thrust::make_zip_iterator(thrust::make_tuple(Acsr->values->end(),
thrust::make_permutation_iterator(Ccsr->values->begin(),pmid)));
thrust::for_each(zibait,zieait,VecCUDAEquals());
auto zibbit = thrust::make_zip_iterator(thrust::make_tuple(Bcsr->values->begin(),
thrust::make_permutation_iterator(Ccsr->values->begin(),pmid)));
auto ziebit = thrust::make_zip_iterator(thrust::make_tuple(Bcsr->values->end(),
thrust::make_permutation_iterator(Ccsr->values->begin(),Ccusp->cooPerm->end())));
thrust::for_each(zibbit,ziebit,VecCUDAEquals());
ierr = MatSeqAIJCUSPARSEInvalidateTranspose(*C,PETSC_FALSE);CHKERRQ(ierr);
if (A->form_explicit_transpose && B->form_explicit_transpose && (*C)->form_explicit_transpose) {
if (!Ccusp->matTranspose) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing transpose Mat_SeqAIJCUSPARSEMultStruct");
PetscBool AT = Acusp->matTranspose ? PETSC_TRUE : PETSC_FALSE, BT = Bcusp->matTranspose ? PETSC_TRUE : PETSC_FALSE;
CsrMatrix *AcsrT = AT ? (CsrMatrix*)Acusp->matTranspose->mat : NULL;
CsrMatrix *BcsrT = BT ? (CsrMatrix*)Bcusp->matTranspose->mat : NULL;
CsrMatrix *CcsrT = (CsrMatrix*)Ccusp->matTranspose->mat;
auto vT = CcsrT->values->begin();
if (AT) vT = thrust::copy(AcsrT->values->begin(),AcsrT->values->end(),vT);
if (BT) thrust::copy(BcsrT->values->begin(),BcsrT->values->end(),vT);
(*C)->transupdated = PETSC_TRUE;
}
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
}
}
ierr = PetscObjectStateIncrease((PetscObject)*C);CHKERRQ(ierr);
(*C)->assembled = PETSC_TRUE;
(*C)->was_assembled = PETSC_FALSE;
(*C)->offloadmask = PETSC_OFFLOAD_GPU;
PetscFunctionReturn(0);
}
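/*
  Example call (internal use; A and B must be MATSEQAIJCUSPARSE with the same number of rows):

    Mat C;
    ierr = MatSeqAIJCUSPARSEMergeMats(A,B,MAT_INITIAL_MATRIX,&C);CHKERRQ(ierr); // C = [A,B], values kept on the GPU
    // ... change numerical values of A and/or B ...
    ierr = MatSeqAIJCUSPARSEMergeMats(A,B,MAT_REUSE_MATRIX,&C);CHKERRQ(ierr);   // refresh values only, same nonzero pattern
*/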
static PetscErrorCode MatSeqAIJCopySubArray_SeqAIJCUSPARSE(Mat A, PetscInt n, const PetscInt idx[], PetscScalar v[])
{
PetscErrorCode ierr;
bool dmem;
const PetscScalar *av;
hipError_t cerr;
PetscFunctionBegin;
dmem = isCudaMem(v);
ierr = MatSeqAIJCUSPARSEGetArrayRead(A,&av);CHKERRQ(ierr);
if (n && idx) {
THRUSTINTARRAY widx(n);
widx.assign(idx,idx+n);
ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr);
THRUSTARRAY *w = NULL;
thrust::device_ptr<PetscScalar> dv;
if (dmem) {
dv = thrust::device_pointer_cast(v);
} else {
w = new THRUSTARRAY(n);
dv = w->data();
}
thrust::device_ptr<const PetscScalar> dav = thrust::device_pointer_cast(av);
auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(dav,widx.begin()),dv));
auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(dav,widx.end()),dv+n));
thrust::for_each(zibit,zieit,VecCUDAEquals());
if (w) {
cerr = hipMemcpy(v,w->data().get(),n*sizeof(PetscScalar),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
}
delete w;
} else {
cerr = hipMemcpy(v,av,n*sizeof(PetscScalar),dmem ? hipMemcpyDeviceToDevice : hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
}
if (!dmem) { ierr = PetscLogCpuToGpu(n*sizeof(PetscScalar));CHKERRQ(ierr); }
ierr = MatSeqAIJCUSPARSERestoreArrayRead(A,&av);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/*
LU BAND factorization with optimization for block diagonal (Nf blocks) in natural order (-mat_no_inode -pc_factor_mat_ordering_type rcm with Nf>1 fields)
requires:
structurally symmetric: fix with transpose/column meta data
*/
/*
The GPU LU factor kernel
*/
__global__
void __launch_bounds__(1024,1)
mat_lu_factor_band_init_set_i(const PetscInt n, const int bw, int bi_csr[])
{
const PetscInt Nf = gridDim.x, Nblk = gridDim.y, nloc = n/Nf;
const PetscInt field = blockIdx.x, blkIdx = blockIdx.y;
const PetscInt nloc_i = (nloc/Nblk + !!(nloc%Nblk)), start_i = field*nloc + blkIdx*nloc_i, end_i = (start_i + nloc_i) > (field+1)*nloc ? (field+1)*nloc : (start_i + nloc_i);
// set i (row+1)
if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0) bi_csr[0] = 0; // dummy at zero
// for (int rowb = start_i + blkIdx*blockDim.y + threadIdx.y; rowb < end_i; rowb += Nblk*blockDim.y) { // rows in block
for (int rowb = start_i + threadIdx.y; rowb < end_i; rowb += blockDim.y) { // rows in block by thread y
if (rowb < end_i && threadIdx.x==0) {
PetscInt i=rowb+1, ni = (rowb>bw) ? bw+1 : i, n1L = ni*(ni-1)/2, nug= i*bw, n2L = bw*((rowb>bw) ? (rowb-bw) : 0), mi = bw + rowb + 1 - n, clip = (mi>0) ? mi*(mi-1)/2 + mi: 0;
bi_csr[rowb+1] = n1L + nug - clip + n2L + i;
}
}
}
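/*
  Sanity check of the closed-form row offsets computed above: for n = 4 and bw = 1 (a tridiagonal band) the
  kernel produces bi_csr = {0, 2, 5, 8, 10}, i.e. 2, 3, 3 and 2 stored entries per row, and bi_csr[n] = 10
  matches the total nzBcsr = n + (2*n-1)*bw - bw*bw allocated in the symbolic factorization below.
*/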
// copy AIJ to AIJ_BAND
__global__
void __launch_bounds__(1024,1)
mat_lu_factor_band_copy_aij_aij(const PetscInt n, const int bw, const PetscInt r[], const PetscInt ic[],
const int ai_d[], const int aj_d[], const PetscScalar aa_d[],
const int bi_csr[], PetscScalar ba_csr[])
{
const PetscInt Nf = gridDim.x, Nblk = gridDim.y, nloc = n/Nf;
const PetscInt field = blockIdx.x, blkIdx = blockIdx.y;
const PetscInt nloc_i = (nloc/Nblk + !!(nloc%Nblk)), start_i = field*nloc + blkIdx*nloc_i, end_i = (start_i + nloc_i) > (field+1)*nloc ? (field+1)*nloc : (start_i + nloc_i);
// zero B
if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0) ba_csr[bi_csr[n]] = 0; // flop count at end
for (int rowb = start_i + threadIdx.y; rowb < end_i; rowb += blockDim.y) { // rows in block by thread y
if (rowb < end_i) {
PetscScalar *batmp = ba_csr + bi_csr[rowb];
const PetscInt nzb = bi_csr[rowb+1] - bi_csr[rowb];
for (int j=threadIdx.x ; j<nzb ; j += blockDim.x) {
if (j<nzb) {
batmp[j] = 0;
}
}
}
}
// copy A into B with CSR format -- these two loops can be fused
for (int rowb = start_i + threadIdx.y; rowb < end_i; rowb += blockDim.y) { // rows in block by thread y
if (rowb < end_i) {
const PetscInt rowa = r[rowb], nza = ai_d[rowa+1] - ai_d[rowa];
const int *ajtmp = aj_d + ai_d[rowa], bjStart = (rowb>bw) ? rowb-bw : 0;
const PetscScalar *av = aa_d + ai_d[rowa];
PetscScalar *batmp = ba_csr + bi_csr[rowb];
/* load in initial (unfactored row) */
for (int j=threadIdx.x ; j<nza ; j += blockDim.x) {
if (j<nza) {
PetscInt colb = ic[ajtmp[j]], idx = colb - bjStart;
PetscScalar vala = av[j];
batmp[idx] = vala;
}
}
}
}
}
// print AIJ_BAND
__global__
void print_mat_aij_band(const PetscInt n, const int bi_csr[], const PetscScalar ba_csr[])
{
// debug
if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0){
printf("B (AIJ) n=%d:\n",(int)n);
for (int rowb=0;rowb<n;rowb++) {
const PetscInt nz = bi_csr[rowb+1] - bi_csr[rowb];
const PetscScalar *batmp = ba_csr + bi_csr[rowb];
for (int j=0; j<nz; j++) printf("(%13.6e) ",PetscRealPart(batmp[j]));
printf(" bi=%d\n",bi_csr[rowb+1]);
}
}
}
// Band LU kernel --- ba_csr bi_csr
__global__
void __launch_bounds__(1024,1)
mat_lu_factor_band(const PetscInt n, const PetscInt bw, const int bi_csr[], PetscScalar ba_csr[])
{
extern __shared__ PetscInt smemInt[];
PetscInt *sm_pkIdx = &smemInt[0];
const PetscInt Nf = gridDim.x, Nblk = gridDim.y, nloc = n/Nf;
const PetscInt field = blockIdx.x, blkIdx = blockIdx.y;
const PetscInt start = field*nloc, end = start + nloc;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
auto g = cooperative_groups::this_grid();
#endif
// A22 panel update for each row A(1,:) and col A(:,1)
for (int glbDD=start, locDD = 0; glbDD<end; glbDD++, locDD++) {
PetscInt tnzUd = bw, maxU = end-1 - glbDD; // we are chopping off the inter ears
const PetscInt nzUd = (tnzUd>maxU) ? maxU : tnzUd, dOffset = (glbDD > bw) ? bw : glbDD; // global to go past ears after first
const PetscInt nzUd_pad = blockDim.y*(nzUd/blockDim.y + !!(nzUd%blockDim.y));
PetscScalar *pBdd = ba_csr + bi_csr[glbDD] + dOffset;
const PetscScalar *baUd = pBdd + 1; // vector of data U(i,i+1:end)
const PetscScalar Bdd = *pBdd;
const PetscInt offset = blkIdx*blockDim.y + threadIdx.y, inc = Nblk*blockDim.y;
for (int idx = offset, myi = glbDD + offset + 1; idx < nzUd_pad ; idx += inc, myi += inc) { /* assuming symmetric structure */
if (idx < nzUd && threadIdx.x==0) { /* assuming symmetric structure */
const PetscInt bwi = myi > bw ? bw : myi, kIdx = bwi - (myi-glbDD); // cuts off just the first (global) block
PetscScalar *Aid = ba_csr + bi_csr[myi] + kIdx;
*Aid = *Aid/Bdd;
sm_pkIdx[threadIdx.y] = kIdx;
}
__syncthreads(); // synch on threadIdx.x only
if (idx < nzUd) { /* assuming symmetric structure */
PetscInt kIdx = sm_pkIdx[threadIdx.y];
PetscScalar *Aid = ba_csr + bi_csr[myi] + kIdx;
PetscScalar *Aij = Aid + 1;
PetscScalar Lid = *Aid;
for (int jIdx=threadIdx.x ; jIdx<nzUd ; jIdx += blockDim.x) {
if (jIdx<nzUd) {
Aij[jIdx] -= Lid*baUd[jIdx];
}
}
}
}
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
g.sync();
#else
__syncthreads();
#endif
} /* endof for (i=0; i<n; i++) { */
}
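/*
  The kernel above is a right-looking band LU without pivoting: for each diagonal entry Bdd it scales the
  column below the diagonal (L(i,d) = A(i,d)/Bdd) and applies the rank-1 update A(i,j) -= L(i,d)*U(d,j) to
  the trailing block, restricted to the band of half-width bw. The grid-wide sync (cooperative groups on
  CUDA >= 11) ensures each outer iteration completes before the next one starts.
*/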
static PetscErrorCode MatSolve_SeqAIJCUSPARSEBAND(Mat,Vec,Vec);
static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSEBAND(Mat B,Mat A,const MatFactorInfo *info)
{
Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;
if (!cusparseTriFactors) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors");
Mat_SeqAIJCUSPARSE *cusparsestructA = (Mat_SeqAIJCUSPARSE*)A->spptr;
Mat_SeqAIJCUSPARSEMultStruct *matstructA;
CsrMatrix *matrixA;
PetscErrorCode ierr;
hipError_t cerr;
const PetscInt n=A->rmap->n, *ic, *r;
const int *ai_d, *aj_d;
const PetscScalar *aa_d;
PetscScalar *ba_t = cusparseTriFactors->a_band_d;
int *bi_t = cusparseTriFactors->i_band_d;
PetscContainer container;
int Ni = 10, team_size=9, Nf, nVec=56, nconcurrent = 1, nsm = -1;
PetscFunctionBegin;
if (A->rmap->n == 0) {
PetscFunctionReturn(0);
}
// cusparse setup
if (!cusparsestructA) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparsestructA");
matstructA = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestructA->mat; // matstruct->cprowIndices
if (!matstructA) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing mat struct");
matrixA = (CsrMatrix*)matstructA->mat;
if (!matrixA) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing matrix cusparsestructA->mat->mat");
// factor: get Nf if available
ierr = PetscObjectQuery((PetscObject) A, "Nf", (PetscObject *) &container);CHKERRQ(ierr);
if (container) {
PetscInt *pNf=NULL;
ierr = PetscContainerGetPointer(container, (void **) &pNf);CHKERRQ(ierr);
Nf = (*pNf)%1000;
if ((*pNf)/1000>0) nconcurrent = (*pNf)/1000; // number of SMs to use
} else Nf = 1;
if (n%Nf) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"n % Nf != 0 %D %D",n,Nf);
// get data
ic = thrust::raw_pointer_cast(cusparseTriFactors->cpermIndices->data());
ai_d = thrust::raw_pointer_cast(matrixA->row_offsets->data());
aj_d = thrust::raw_pointer_cast(matrixA->column_indices->data());
aa_d = thrust::raw_pointer_cast(matrixA->values->data().get());
r = thrust::raw_pointer_cast(cusparseTriFactors->rpermIndices->data());
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
{
int bw = (2*n-1 - (int)(PetscSqrtReal(1+4*(n*n-b->nz))+PETSC_MACHINE_EPSILON))/2, bm1=bw-1,nl=n/Nf;
int gpuid;
hipDeviceProp_t prop;
cerr = hipGetDevice(&gpuid);CHKERRCUDA(cerr);
cerr = hipGetDeviceProperties(&prop, gpuid);CHKERRCUDA(cerr);
#if PETSC_PKG_CUDA_VERSION_LT(11,0,0)
Ni = 1/nconcurrent;
Ni = 1;
#else
nsm = prop.multiProcessorCount;
Ni = nsm/Nf/nconcurrent;
#endif
team_size = bw/Ni + !!(bw%Ni);
nVec = PetscMin(bw, 1024/team_size);
ierr = PetscInfo5(A,"Matrix Bandwidth = %d, number SMs/block = %d, num concurency = %d, num fields = %d, numSMs/GPU = %d\n",bw,Ni,nconcurrent,Nf,nsm);CHKERRQ(ierr);
{
dim3 dimBlockTeam(nVec,team_size);
dim3 dimBlockLeague(Nf,Ni);
hipLaunchKernelGGL(( mat_lu_factor_band_copy_aij_aij), dim3(dimBlockLeague),dim3(dimBlockTeam), 0, 0, n, bw, r, ic, ai_d, aj_d, aa_d, bi_t, ba_t);
CHECK_LAUNCH_ERROR(); // does a sync
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
void *kernelArgs[] = { (void*)&n, (void*)&bw, (void*)&bi_t, (void*)&ba_t};
hipLaunchCooperativeKernel((void*)mat_lu_factor_band, dimBlockLeague, dimBlockTeam, kernelArgs, team_size*sizeof(PetscInt), NULL);
#else
hipLaunchKernelGGL(( mat_lu_factor_band), dim3(dimBlockLeague),dim3(dimBlockTeam),team_size*sizeof(PetscInt), 0, n, bw, bi_t, ba_t);
#endif
CHECK_LAUNCH_ERROR(); // does a sync
#if defined(PETSC_USE_LOG)
ierr = PetscLogGpuFlops((PetscLogDouble)Nf*(bm1*(bm1 + 1)*(2*bm1 + 1)/3 + 2*(nl-bw)*bw*bw + nl*(nl+1)/2));CHKERRQ(ierr);
#endif
}
}
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
/* determine which version of MatSolve needs to be used; adapted from MatLUFactorNumeric_AIJ_SeqAIJCUSPARSE */
B->ops->solve = MatSolve_SeqAIJCUSPARSEBAND;
B->ops->solvetranspose = NULL; // need transpose
B->ops->matsolve = NULL;
B->ops->matsolvetranspose = NULL;
PetscFunctionReturn(0);
}
static PetscErrorCode MatrixNfDestroy(void *ptr)
{
PetscInt *nf = (PetscInt *)ptr;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscFree(nf);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSEBAND(Mat B,Mat A,IS isrow,IS iscol,const MatFactorInfo *info)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data,*b;
IS isicol;
PetscErrorCode ierr;
hipError_t cerr;
const PetscInt *ic,*ai=a->i,*aj=a->j;
PetscScalar *ba_t;
int *bi_t;
PetscInt i,n=A->rmap->n,Nf;
PetscInt nzBcsr,bwL,bwU;
PetscBool missing;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;
PetscContainer container;
PetscFunctionBegin;
if (A->rmap->N != A->cmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"matrix must be square");
ierr = MatMissingDiagonal(A,&missing,&i);CHKERRQ(ierr);
if (missing) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix is missing diagonal entry %D",i);
if (!cusparseTriFactors) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"!cusparseTriFactors");
ierr = MatGetOption(A,MAT_STRUCTURALLY_SYMMETRIC,&missing);CHKERRQ(ierr);
if (!missing) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"only structurally symmetric matrices are supported");
// factor: get Nf if available
ierr = PetscObjectQuery((PetscObject) A, "Nf", (PetscObject *) &container);CHKERRQ(ierr);
if (container) {
PetscInt *pNf=NULL;
ierr = PetscContainerGetPointer(container, (void **) &pNf);CHKERRQ(ierr);
Nf = (*pNf)%1000;
ierr = PetscContainerCreate(PETSC_COMM_SELF, &container);CHKERRQ(ierr);
ierr = PetscMalloc(sizeof(PetscInt), &pNf);CHKERRQ(ierr);
*pNf = Nf;
ierr = PetscContainerSetPointer(container, (void *)pNf);CHKERRQ(ierr);
ierr = PetscContainerSetUserDestroy(container, MatrixNfDestroy);CHKERRQ(ierr);
ierr = PetscObjectCompose((PetscObject)B, "Nf", (PetscObject) container);CHKERRQ(ierr);
ierr = PetscContainerDestroy(&container);CHKERRQ(ierr);
} else Nf = 1;
if (n%Nf) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"n % Nf != 0 %D %D",n,Nf);
ierr = ISInvertPermutation(iscol,PETSC_DECIDE,&isicol);CHKERRQ(ierr);
ierr = ISGetIndices(isicol,&ic);CHKERRQ(ierr);
ierr = MatSeqAIJSetPreallocation_SeqAIJ(B,MAT_SKIP_ALLOCATION,NULL);CHKERRQ(ierr);
ierr = PetscLogObjectParent((PetscObject)B,(PetscObject)isicol);CHKERRQ(ierr);
b = (Mat_SeqAIJ*)(B)->data;
/* get band widths, MatComputeBandwidth should take a reordering ic and do this */
bwL = bwU = 0;
for (int rwb=0; rwb<n; rwb++) {
const PetscInt rwa = ic[rwb], anz = ai[rwb+1] - ai[rwb], *ajtmp = aj + ai[rwb];
for (int j=0;j<anz;j++) {
PetscInt colb = ic[ajtmp[j]];
if (colb<rwa) { // L
if (rwa-colb > bwL) bwL = rwa-colb;
} else {
if (colb-rwa > bwU) bwU = colb-rwa;
}
}
}
ierr = ISRestoreIndices(isicol,&ic);CHKERRQ(ierr);
/* only structurally symmetric matrices are supported for now, though unequal bandwidths might still work */
if (bwL!=bwU) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Only symmetric structure supported (now) W_L=%D W_U=%D",bwL,bwU);
ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr);
nzBcsr = n + (2*n-1)*bwU - bwU*bwU;
b->maxnz = b->nz = nzBcsr;
cusparseTriFactors->nnz = b->nz; // only meta data needed: n & nz
if (!cusparseTriFactors->workVector) { cusparseTriFactors->workVector = new THRUSTARRAY(n); }
cerr = hipMalloc(&ba_t,(b->nz+1)*sizeof(PetscScalar));CHKERRCUDA(cerr); // include a place for the flop count
cerr = hipMalloc(&bi_t,(n+1)*sizeof(int));CHKERRCUDA(cerr);
cusparseTriFactors->a_band_d = ba_t;
cusparseTriFactors->i_band_d = bi_t;
/* In b structure: Free imax, ilen, old a, old j. Allocate solve_work, new a, new j */
ierr = PetscLogObjectMemory((PetscObject)B,(nzBcsr+1)*(sizeof(PetscInt)+sizeof(PetscScalar)));CHKERRQ(ierr);
{
dim3 dimBlockTeam(1,128);
dim3 dimBlockLeague(Nf,1);
hipLaunchKernelGGL(( mat_lu_factor_band_init_set_i), dim3(dimBlockLeague),dim3(dimBlockTeam), 0, 0, n, bwU, bi_t);
}
CHECK_LAUNCH_ERROR(); // does a sync
// setup data
if (!cusparseTriFactors->rpermIndices) {
const PetscInt *r;
ierr = ISGetIndices(isrow,&r);CHKERRQ(ierr);
cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n);
cusparseTriFactors->rpermIndices->assign(r, r+n);
ierr = ISRestoreIndices(isrow,&r);CHKERRQ(ierr);
ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr);
}
/* upper triangular indices */
if (!cusparseTriFactors->cpermIndices) {
const PetscInt *c;
ierr = ISGetIndices(isicol,&c);CHKERRQ(ierr);
cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n);
cusparseTriFactors->cpermIndices->assign(c, c+n);
ierr = ISRestoreIndices(isicol,&c);CHKERRQ(ierr);
ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr);
}
/* put together the new matrix */
b->free_a = PETSC_FALSE;
b->free_ij = PETSC_FALSE;
b->singlemalloc = PETSC_FALSE;
b->ilen = NULL;
b->imax = NULL;
b->row = isrow;
b->col = iscol;
ierr = PetscObjectReference((PetscObject)isrow);CHKERRQ(ierr);
ierr = PetscObjectReference((PetscObject)iscol);CHKERRQ(ierr);
b->icol = isicol;
ierr = PetscMalloc1(n+1,&b->solve_work);CHKERRQ(ierr);
B->factortype = MAT_FACTOR_LU;
B->info.factor_mallocs = 0;
B->info.fill_ratio_given = 0;
if (ai[n]) {
B->info.fill_ratio_needed = ((PetscReal)(nzBcsr))/((PetscReal)ai[n]);
} else {
B->info.fill_ratio_needed = 0.0;
}
#if defined(PETSC_USE_INFO)
if (ai[n] != 0) {
PetscReal af = B->info.fill_ratio_needed;
ierr = PetscInfo1(A,"Band fill ratio %g\n",(double)af);CHKERRQ(ierr);
} else {
ierr = PetscInfo(A,"Empty matrix\n");CHKERRQ(ierr);
}
#endif
if (a->inode.size) {
ierr = PetscInfo(A,"Warning: using inodes in band solver.\n");CHKERRQ(ierr);
}
ierr = MatSeqAIJCheckInode_FactorLU(B);CHKERRQ(ierr);
B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSEBAND;
B->offloadmask = PETSC_OFFLOAD_GPU;
PetscFunctionReturn(0);
}
/* Use -pc_factor_mat_solver_type cusparseband */
PetscErrorCode MatFactorGetSolverType_seqaij_cusparse_band(Mat A,MatSolverType *type)
{
PetscFunctionBegin;
*type = MATSOLVERCUSPARSEBAND;
PetscFunctionReturn(0);
}
PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse_band(Mat A,MatFactorType ftype,Mat *B)
{
PetscErrorCode ierr;
PetscInt n = A->rmap->n;
PetscFunctionBegin;
ierr = MatCreate(PetscObjectComm((PetscObject)A),B);CHKERRQ(ierr);
ierr = MatSetSizes(*B,n,n,n,n);CHKERRQ(ierr);
(*B)->factortype = ftype;
(*B)->useordering = PETSC_TRUE;
ierr = MatSetType(*B,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
if (ftype == MAT_FACTOR_LU) {
ierr = MatSetBlockSizesFromMats(*B,A,A);CHKERRQ(ierr);
(*B)->ops->ilufactorsymbolic = MatILUFactorSymbolic_SeqAIJCUSPARSE;
(*B)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqAIJCUSPARSEBAND;
} else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Factor type not supported for CUSPARSEBAND Matrix Types");
ierr = MatSeqAIJSetPreallocation(*B,MAT_SKIP_ALLOCATION,NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)(*B),"MatFactorGetSolverType_C",MatFactorGetSolverType_seqaij_cusparse_band);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
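/*
  Options sketch for selecting the band factorization registered above from the command line (standard PETSc
  options; exact names may be prefixed by the application):

    -mat_type aijcusparse -pc_type lu -pc_factor_mat_solver_type cusparseband -mat_no_inode -pc_factor_mat_ordering_type rcm
*/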
#define WARP_SIZE 32
template <typename T>
__forceinline__ __device__
T wreduce(T a)
{
T b;
#pragma unroll
for (int i = WARP_SIZE/2; i >= 1; i = i >> 1) {
b = __shfl_down_sync(0xffffffff, a, i);
a += b;
}
return a;
}
// reduce in a block, returns result in thread 0
template <typename T, int BLOCK_SIZE>
__device__
T breduce(T a)
{
constexpr int NWARP = BLOCK_SIZE/WARP_SIZE;
__shared__ double buf[NWARP];
int wid = threadIdx.x / WARP_SIZE;
int laneid = threadIdx.x % WARP_SIZE;
T b = wreduce<T>(a);
if (laneid == 0)
buf[wid] = b;
__syncthreads();
if (wid == 0) {
if (threadIdx.x < NWARP)
a = buf[threadIdx.x];
else
a = 0;
for (int i = (NWARP+1)/2; i >= 1; i = i >> 1) {
a += __shfl_down_sync(0xffffffff, a, i);
}
}
return a;
}
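/*
  wreduce() above is a standard warp-level tree reduction: each lane adds the value held by the lane
  WARP_SIZE/2, WARP_SIZE/4, ..., 1 positions away via __shfl_down_sync(), so after log2(WARP_SIZE) steps
  lane 0 holds the warp sum. breduce() stores one partial sum per warp in shared memory and lets warp 0
  combine the NWARP partials, so only thread 0 of the block returns the full block-wide sum.
*/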
// Band LU forward/backward solve kernel --- ba_csr bi_csr
template <int BLOCK_SIZE>
__global__
void __launch_bounds__(256,1)
mat_solve_band(const PetscInt n, const PetscInt bw, const PetscScalar ba_csr[], PetscScalar x[])
{
const PetscInt Nf = gridDim.x, nloc = n/Nf, field = blockIdx.x, start = field*nloc, end = start + nloc, chopnz = bw*(bw+1)/2, blocknz=(2*bw+1)*nloc, blocknz_0 = blocknz-chopnz;
const PetscScalar *pLi;
const int tid = threadIdx.x;
/* Next, solve L */
pLi = ba_csr + (field==0 ? 0 : blocknz_0 + (field-1)*blocknz + bw); // diagonal (0,0) in field
for (int glbDD=start, locDD = 0; glbDD<end; glbDD++, locDD++) {
const PetscInt col = locDD<bw ? start : (glbDD-bw);
PetscScalar t = 0;
for (int j=col+tid,idx=tid;j<glbDD;j+=blockDim.x,idx+=blockDim.x) {
t += pLi[idx]*x[j];
}
#if defined(PETSC_USE_COMPLEX)
PetscReal tr = PetscRealPartComplex(t), ti = PetscImaginaryPartComplex(t);
PetscScalar tt(breduce<PetscReal,BLOCK_SIZE>(tr), breduce<PetscReal,BLOCK_SIZE>(ti));
t = tt;
#else
t = breduce<PetscReal,BLOCK_SIZE>(t);
#endif
if (threadIdx.x == 0)
x[glbDD] -= t; // /1.0
__syncthreads();
// inc
pLi += glbDD-col; // get to diagonal
if (glbDD > n-1-bw) pLi += n-1-glbDD; // skip over U, only last block has funny offset
else pLi += bw;
pLi += 1; // skip to next row
if (field>0 && (locDD+1)<bw) pLi += bw-(locDD+1); // skip padding at beginning (ear)
}
/* Then, solve U */
pLi = ba_csr + Nf*blocknz - 2*chopnz - 1; // end of real data on block (diagonal)
if (field != Nf-1) pLi -= blocknz_0 + (Nf-2-field)*blocknz + bw; // diagonal of last local row
for (int glbDD=end-1, locDD = 0; glbDD >= start; glbDD--, locDD++) {
const PetscInt col = (locDD<bw) ? end-1 : glbDD+bw; // end of row in U
PetscScalar t = 0;
for (int j=col-tid,idx=tid;j>glbDD;j-=blockDim.x,idx+=blockDim.x) {
t += pLi[-idx]*x[j];
}
#if defined(PETSC_USE_COMPLEX)
PetscReal tr = PetscRealPartComplex(t), ti = PetscImaginaryPartComplex(t);
PetscScalar tt(breduce<PetscReal,BLOCK_SIZE>(tr), breduce<PetscReal,BLOCK_SIZE>(ti));
t = tt;
#else
t = breduce<PetscReal,BLOCK_SIZE>(PetscRealPart(t));
#endif
pLi -= col-glbDD; // diagonal
if (threadIdx.x == 0) {
x[glbDD] -= t;
x[glbDD] /= pLi[0];
}
__syncthreads();
// inc past L to start of previous U
pLi -= bw+1;
if (glbDD<bw) pLi += bw-glbDD; // overshot in top left corner
if (((locDD+1) < bw) && field != Nf-1) pLi -= (bw - (locDD+1)); // skip past right corner
}
}
static PetscErrorCode MatSolve_SeqAIJCUSPARSEBAND(Mat A,Vec bb,Vec xx)
{
const PetscScalar *barray;
PetscScalar *xarray;
thrust::device_ptr<const PetscScalar> bGPU;
thrust::device_ptr<PetscScalar> xGPU;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector;
PetscInt n=A->rmap->n, nz=cusparseTriFactors->nnz, bw=(2*n-1 - (int)(PetscSqrtReal(1+4*(n*n-nz))+PETSC_MACHINE_EPSILON))/2, Nf;
PetscErrorCode ierr;
hipError_t cerr;
PetscContainer container;
PetscFunctionBegin;
if (A->rmap->n == 0) {
PetscFunctionReturn(0);
}
// factor: get Nf if available
ierr = PetscObjectQuery((PetscObject) A, "Nf", (PetscObject *) &container);CHKERRQ(ierr);
if (container) {
PetscInt *pNf=NULL;
ierr = PetscContainerGetPointer(container, (void **) &pNf);CHKERRQ(ierr);
Nf = (*pNf)%1000;
} else Nf = 1;
if (n%Nf) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"n % Nf != 0 %D %D",n,Nf);
/* Get the GPU pointers */
ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr);
xGPU = thrust::device_pointer_cast(xarray);
bGPU = thrust::device_pointer_cast(barray);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
/* First, reorder with the row permutation */
thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()),
thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->end()),
tempGPU->begin());
constexpr int block = 128;
hipLaunchKernelGGL(( mat_solve_band<block>), dim3(Nf),dim3(block), 0, 0, n,bw,cusparseTriFactors->a_band_d,tempGPU->data().get());
CHECK_LAUNCH_ERROR(); // does a sync
/* Last, reorder with the column permutation */
thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->begin()),
thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->end()),
xGPU);
ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
| 5dc6a368707f395a886f2f7cae25fdf032114841.cu | /*
Defines the basic matrix operations for the AIJ (compressed row)
matrix storage format using the CUSPARSE library,
*/
#define PETSC_SKIP_SPINLOCK
#define PETSC_SKIP_CXX_COMPLEX_FIX
#define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1
#include <petscconf.h>
#include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/
#include <../src/mat/impls/sbaij/seq/sbaij.h>
#include <../src/vec/vec/impls/dvecimpl.h>
#include <petsc/private/vecimpl.h>
#undef VecType
#include <../src/mat/impls/aij/seq/seqcusparse/cusparsematimpl.h>
#include <thrust/async/for_each.h>
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
#include <cooperative_groups.h>
#endif
const char *const MatCUSPARSEStorageFormats[] = {"CSR","ELL","HYB","MatCUSPARSEStorageFormat","MAT_CUSPARSE_",0};
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
/* The following are copied from cusparse.h in CUDA-11.0. In MatCUSPARSESpMVAlgorithms[] etc, we copy them in
0-based integer value order, since we want to use PetscOptionsEnum() to parse user command line options for them.
typedef enum {
CUSPARSE_MV_ALG_DEFAULT = 0,
CUSPARSE_COOMV_ALG = 1,
CUSPARSE_CSRMV_ALG1 = 2,
CUSPARSE_CSRMV_ALG2 = 3
} cusparseSpMVAlg_t;
typedef enum {
CUSPARSE_MM_ALG_DEFAULT CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_ALG_DEFAULT) = 0,
CUSPARSE_COOMM_ALG1 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_COO_ALG1) = 1,
CUSPARSE_COOMM_ALG2 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_COO_ALG2) = 2,
CUSPARSE_COOMM_ALG3 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_COO_ALG3) = 3,
CUSPARSE_CSRMM_ALG1 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_CSR_ALG1) = 4,
CUSPARSE_SPMM_ALG_DEFAULT = 0,
CUSPARSE_SPMM_COO_ALG1 = 1,
CUSPARSE_SPMM_COO_ALG2 = 2,
CUSPARSE_SPMM_COO_ALG3 = 3,
CUSPARSE_SPMM_COO_ALG4 = 5,
CUSPARSE_SPMM_CSR_ALG1 = 4,
CUSPARSE_SPMM_CSR_ALG2 = 6,
} cusparseSpMMAlg_t;
typedef enum {
CUSPARSE_CSR2CSC_ALG1 = 1, // faster than V2 (in general), deterministic
CUSPARSE_CSR2CSC_ALG2 = 2 // low memory requirement, non-deterministic
} cusparseCsr2CscAlg_t;
*/
const char *const MatCUSPARSESpMVAlgorithms[] = {"MV_ALG_DEFAULT","COOMV_ALG", "CSRMV_ALG1","CSRMV_ALG2", "cusparseSpMVAlg_t","CUSPARSE_",0};
const char *const MatCUSPARSESpMMAlgorithms[] = {"ALG_DEFAULT","COO_ALG1","COO_ALG2","COO_ALG3","CSR_ALG1","COO_ALG4","CSR_ALG2","cusparseSpMMAlg_t","CUSPARSE_SPMM_",0};
const char *const MatCUSPARSECsr2CscAlgorithms[] = {"INVALID"/*cusparse does not have enum 0! We created one*/,"ALG1","ALG2","cusparseCsr2CscAlg_t","CUSPARSE_CSR2CSC_",0};
#endif
static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,const MatFactorInfo*);
static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,const MatFactorInfo*);
static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat,Mat,const MatFactorInfo*);
static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSEBAND(Mat,Mat,IS,IS,const MatFactorInfo*);
static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSEBAND(Mat,Mat,const MatFactorInfo*);
static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,IS,const MatFactorInfo*);
static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,IS,const MatFactorInfo*);
static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat,Mat,const MatFactorInfo*);
static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat,Vec,Vec);
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat,Vec,Vec);
static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat);
static PetscErrorCode MatAXPY_SeqAIJCUSPARSE(Mat,PetscScalar,Mat,MatStructure);
static PetscErrorCode MatScale_SeqAIJCUSPARSE(Mat,PetscScalar);
static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec);
static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec);
static PetscErrorCode MatMultHermitianTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatMultHermitianTransposeAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec);
static PetscErrorCode MatMultAddKernel_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec,PetscBool,PetscBool);
static PetscErrorCode CsrMatrix_Destroy(CsrMatrix**);
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct**);
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct**,MatCUSPARSEStorageFormat);
static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Reset(Mat_SeqAIJCUSPARSETriFactors**);
static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors**);
static PetscErrorCode MatSeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE**);
static PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat);
static PetscErrorCode MatSeqAIJCUSPARSECopyFromGPU(Mat);
static PetscErrorCode MatSeqAIJCUSPARSEInvalidateTranspose(Mat,PetscBool);
PETSC_INTERN PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE(Mat,PetscInt,const PetscInt[],const PetscInt[]);
PETSC_INTERN PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE(Mat,const PetscScalar[],InsertMode);
static PetscErrorCode MatSeqAIJCopySubArray_SeqAIJCUSPARSE(Mat,PetscInt,const PetscInt[],PetscScalar[]);
PetscErrorCode MatCUSPARSESetStream(Mat A,const cudaStream_t stream)
{
cusparseStatus_t stat;
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
PetscFunctionBegin;
if (!cusparsestruct) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing spptr");
cusparsestruct->stream = stream;
stat = cusparseSetStream(cusparsestruct->handle,cusparsestruct->stream);CHKERRCUSPARSE(stat);
PetscFunctionReturn(0);
}
PetscErrorCode MatCUSPARSESetHandle(Mat A,const cusparseHandle_t handle)
{
cusparseStatus_t stat;
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
PetscFunctionBegin;
if (!cusparsestruct) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing spptr");
if (cusparsestruct->handle != handle) {
if (cusparsestruct->handle) {
stat = cusparseDestroy(cusparsestruct->handle);CHKERRCUSPARSE(stat);
}
cusparsestruct->handle = handle;
}
stat = cusparseSetPointerMode(cusparsestruct->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
PetscFunctionReturn(0);
}
PetscErrorCode MatCUSPARSEClearHandle(Mat A)
{
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
PetscBool flg;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg || !cusparsestruct) PetscFunctionReturn(0);
if (cusparsestruct->handle) cusparsestruct->handle = 0;
PetscFunctionReturn(0);
}
PetscErrorCode MatFactorGetSolverType_seqaij_cusparse(Mat A,MatSolverType *type)
{
PetscFunctionBegin;
*type = MATSOLVERCUSPARSE;
PetscFunctionReturn(0);
}
/*MC
MATSOLVERCUSPARSE = "cusparse" - A matrix type providing triangular solvers for seq matrices
on a single GPU of type seqaijcusparse, aijcusparse, or seqaijcusp, aijcusp. Currently supported
algorithms are ILU(k) and ICC(k). Typically, deeper factorizations (larger k) result in poorer
performance in the triangular solves. Full LU and Cholesky decompositions can be solved through the
CUSPARSE triangular solve algorithm. However, the performance can be quite poor and thus these
algorithms are not recommended. This class does NOT support direct solver operations.
Level: beginner
.seealso: PCFactorSetMatSolverType(), MatSolverType, MatCreateSeqAIJCUSPARSE(), MATAIJCUSPARSE, MatCreateAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation
M*/
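/*
  Options sketch for selecting this solver from the command line, e.g. with an ILU preconditioner
  (standard PETSc options; exact names may be prefixed by the application):

    -mat_type aijcusparse -pc_type ilu -pc_factor_mat_solver_type cusparse
*/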
PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse(Mat A,MatFactorType ftype,Mat *B)
{
PetscErrorCode ierr;
PetscInt n = A->rmap->n;
PetscFunctionBegin;
ierr = MatCreate(PetscObjectComm((PetscObject)A),B);CHKERRQ(ierr);
ierr = MatSetSizes(*B,n,n,n,n);CHKERRQ(ierr);
(*B)->factortype = ftype;
(*B)->useordering = PETSC_TRUE;
ierr = MatSetType(*B,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
if (ftype == MAT_FACTOR_LU || ftype == MAT_FACTOR_ILU || ftype == MAT_FACTOR_ILUDT) {
ierr = MatSetBlockSizesFromMats(*B,A,A);CHKERRQ(ierr);
(*B)->ops->ilufactorsymbolic = MatILUFactorSymbolic_SeqAIJCUSPARSE;
(*B)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqAIJCUSPARSE;
} else if (ftype == MAT_FACTOR_CHOLESKY || ftype == MAT_FACTOR_ICC) {
(*B)->ops->iccfactorsymbolic = MatICCFactorSymbolic_SeqAIJCUSPARSE;
(*B)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqAIJCUSPARSE;
} else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Factor type not supported for CUSPARSE Matrix Types");
ierr = MatSeqAIJSetPreallocation(*B,MAT_SKIP_ALLOCATION,NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)(*B),"MatFactorGetSolverType_C",MatFactorGetSolverType_seqaij_cusparse);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PETSC_INTERN PetscErrorCode MatCUSPARSESetFormat_SeqAIJCUSPARSE(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format)
{
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
PetscFunctionBegin;
switch (op) {
case MAT_CUSPARSE_MULT:
cusparsestruct->format = format;
break;
case MAT_CUSPARSE_ALL:
cusparsestruct->format = format;
break;
default:
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unsupported operation %d for MatCUSPARSEFormatOperation. MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL are currently supported.",op);
}
PetscFunctionReturn(0);
}
/*@
MatCUSPARSESetFormat - Sets the storage format of CUSPARSE matrices for a particular
operation. Only the MatMult operation can use different GPU storage formats
for MPIAIJCUSPARSE matrices.
Not Collective
Input Parameters:
+ A - Matrix of type SEQAIJCUSPARSE
. op - MatCUSPARSEFormatOperation. SEQAIJCUSPARSE matrices support MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL. MPIAIJCUSPARSE matrices support MAT_CUSPARSE_MULT_DIAG, MAT_CUSPARSE_MULT_OFFDIAG, and MAT_CUSPARSE_ALL.
- format - MatCUSPARSEStorageFormat (one of MAT_CUSPARSE_CSR, MAT_CUSPARSE_ELL, MAT_CUSPARSE_HYB. The latter two require CUDA 4.2)
Level: intermediate
.seealso: MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation
@*/
PetscErrorCode MatCUSPARSESetFormat(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format)
{
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A, MAT_CLASSID,1);
ierr = PetscTryMethod(A,"MatCUSPARSESetFormat_C",(Mat,MatCUSPARSEFormatOperation,MatCUSPARSEStorageFormat),(A,op,format));CHKERRQ(ierr);
PetscFunctionReturn(0);
}
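/*
   A minimal usage sketch, assuming an assembled SeqAIJCUSPARSE matrix A: the SpMV storage format
   can be switched programmatically with

     ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_MULT,MAT_CUSPARSE_ELL);CHKERRQ(ierr);

   which mirrors the -mat_cusparse_mult_storage_format ell option processed in
   MatSetFromOptions_SeqAIJCUSPARSE() below; note that the ELL and HYB formats are not supported
   since CUDA 11.0.
*/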
PetscErrorCode MatSetOption_SeqAIJCUSPARSE(Mat A,MatOption op,PetscBool flg)
{
PetscErrorCode ierr;
PetscFunctionBegin;
switch (op) {
case MAT_FORM_EXPLICIT_TRANSPOSE:
/* need to destroy the transpose matrix if present to prevent logic errors if flg is set to true later */
if (A->form_explicit_transpose && !flg) {ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_TRUE);CHKERRQ(ierr);}
A->form_explicit_transpose = flg;
break;
default:
ierr = MatSetOption_SeqAIJ(A,op,flg);CHKERRQ(ierr);
break;
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(Mat A);
static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat B,Mat A,const MatFactorInfo *info)
{
Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data;
IS isrow = b->row,iscol = b->col;
PetscBool row_identity,col_identity;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr);
ierr = MatLUFactorNumeric_SeqAIJ(B,A,info);CHKERRQ(ierr);
B->offloadmask = PETSC_OFFLOAD_CPU;
/* determine which version of MatSolve needs to be used. */
ierr = ISIdentity(isrow,&row_identity);CHKERRQ(ierr);
ierr = ISIdentity(iscol,&col_identity);CHKERRQ(ierr);
if (row_identity && col_identity) {
B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering;
B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering;
B->ops->matsolve = NULL;
B->ops->matsolvetranspose = NULL;
} else {
B->ops->solve = MatSolve_SeqAIJCUSPARSE;
B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE;
B->ops->matsolve = NULL;
B->ops->matsolvetranspose = NULL;
}
/* get the triangular factors */
ierr = MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(B);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat A)
{
PetscErrorCode ierr;
MatCUSPARSEStorageFormat format;
PetscBool flg;
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
PetscFunctionBegin;
ierr = PetscOptionsHead(PetscOptionsObject,"SeqAIJCUSPARSE options");CHKERRQ(ierr);
if (A->factortype == MAT_FACTOR_NONE) {
ierr = PetscOptionsEnum("-mat_cusparse_mult_storage_format","sets storage format of (seq)aijcusparse gpu matrices for SpMV",
"MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparsestruct->format,(PetscEnum*)&format,&flg);CHKERRQ(ierr);
if (flg) {ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_MULT,format);CHKERRQ(ierr);}
ierr = PetscOptionsEnum("-mat_cusparse_storage_format","sets storage format of (seq)aijcusparse gpu matrices for SpMV and TriSolve",
"MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparsestruct->format,(PetscEnum*)&format,&flg);CHKERRQ(ierr);
if (flg) {ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_ALL,format);CHKERRQ(ierr);}
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
ierr = PetscOptionsEnum("-mat_cusparse_spmv_alg","sets cuSPARSE algorithm used in sparse-mat dense-vector multiplication (SpMV)",
"cusparseSpMVAlg_t",MatCUSPARSESpMVAlgorithms,(PetscEnum)cusparsestruct->spmvAlg,(PetscEnum*)&cusparsestruct->spmvAlg,&flg);CHKERRQ(ierr);
/* If user did use this option, check its consistency with cuSPARSE, since PetscOptionsEnum() sets enum values based on their position in MatCUSPARSESpMVAlgorithms[] */
if (flg && CUSPARSE_CSRMV_ALG1 != 2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE enum cusparseSpMVAlg_t has been changed but PETSc has not been updated accordingly");
ierr = PetscOptionsEnum("-mat_cusparse_spmm_alg","sets cuSPARSE algorithm used in sparse-mat dense-mat multiplication (SpMM)",
"cusparseSpMMAlg_t",MatCUSPARSESpMMAlgorithms,(PetscEnum)cusparsestruct->spmmAlg,(PetscEnum*)&cusparsestruct->spmmAlg,&flg);CHKERRQ(ierr);
if (flg && CUSPARSE_SPMM_CSR_ALG1 != 4) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE enum cusparseSpMMAlg_t has been changed but PETSc has not been updated accordingly");
ierr = PetscOptionsEnum("-mat_cusparse_csr2csc_alg","sets cuSPARSE algorithm used in converting CSR matrices to CSC matrices",
"cusparseCsr2CscAlg_t",MatCUSPARSECsr2CscAlgorithms,(PetscEnum)cusparsestruct->csr2cscAlg,(PetscEnum*)&cusparsestruct->csr2cscAlg,&flg);CHKERRQ(ierr);
if (flg && CUSPARSE_CSR2CSC_ALG1 != 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE enum cusparseCsr2CscAlg_t has been changed but PETSc has not been updated accordingly");
#endif
}
ierr = PetscOptionsTail();CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS isrow,IS iscol,const MatFactorInfo *info)
{
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr);
ierr = MatILUFactorSymbolic_SeqAIJ(B,A,isrow,iscol,info);CHKERRQ(ierr);
B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE;
PetscFunctionReturn(0);
}
static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS isrow,IS iscol,const MatFactorInfo *info)
{
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr);
ierr = MatLUFactorSymbolic_SeqAIJ(B,A,isrow,iscol,info);CHKERRQ(ierr);
B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE;
PetscFunctionReturn(0);
}
static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS perm,const MatFactorInfo *info)
{
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr);
ierr = MatICCFactorSymbolic_SeqAIJ(B,A,perm,info);CHKERRQ(ierr);
B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE;
PetscFunctionReturn(0);
}
static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS perm,const MatFactorInfo *info)
{
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr);
ierr = MatCholeskyFactorSymbolic_SeqAIJ(B,A,perm,info);CHKERRQ(ierr);
B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE;
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEBuildILULowerTriMatrix(Mat A)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
PetscInt n = A->rmap->n;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
cusparseStatus_t stat;
const PetscInt *ai = a->i,*aj = a->j,*vi;
const MatScalar *aa = a->a,*v;
PetscInt *AiLo, *AjLo;
PetscInt i,nz, nzLower, offset, rowOffset;
PetscErrorCode ierr;
cudaError_t cerr;
PetscFunctionBegin;
if (!n) PetscFunctionReturn(0);
if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
try {
/* first figure out the number of nonzeros in the lower triangular matrix including 1's on the diagonal. */
nzLower=n+ai[n]-ai[1];
if (!loTriFactor) {
PetscScalar *AALo;
cerr = cudaMallocHost((void**) &AALo, nzLower*sizeof(PetscScalar));CHKERRCUDA(cerr);
/* Allocate Space for the lower triangular matrix */
cerr = cudaMallocHost((void**) &AiLo, (n+1)*sizeof(PetscInt));CHKERRCUDA(cerr);
cerr = cudaMallocHost((void**) &AjLo, nzLower*sizeof(PetscInt));CHKERRCUDA(cerr);
/* Fill the lower triangular matrix */
AiLo[0] = (PetscInt) 0;
AiLo[n] = nzLower;
AjLo[0] = (PetscInt) 0;
AALo[0] = (MatScalar) 1.0;
v = aa;
vi = aj;
offset = 1;
rowOffset= 1;
for (i=1; i<n; i++) {
nz = ai[i+1] - ai[i];
/* additional 1 for the term on the diagonal */
AiLo[i] = rowOffset;
rowOffset += nz+1;
ierr = PetscArraycpy(&(AjLo[offset]), vi, nz);CHKERRQ(ierr);
ierr = PetscArraycpy(&(AALo[offset]), v, nz);CHKERRQ(ierr);
offset += nz;
AjLo[offset] = (PetscInt) i;
AALo[offset] = (MatScalar) 1.0;
offset += 1;
v += nz;
vi += nz;
}
/* allocate space for the triangular factor information */
ierr = PetscNew(&loTriFactor);CHKERRQ(ierr);
loTriFactor->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL;
/* Create the matrix description */
stat = cusparseCreateMatDescr(&loTriFactor->descr);CHKERRCUSPARSE(stat);
stat = cusparseSetMatIndexBase(loTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
#else
stat = cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat);
#endif
stat = cusparseSetMatFillMode(loTriFactor->descr, CUSPARSE_FILL_MODE_LOWER);CHKERRCUSPARSE(stat);
stat = cusparseSetMatDiagType(loTriFactor->descr, CUSPARSE_DIAG_TYPE_UNIT);CHKERRCUSPARSE(stat);
/* set the operation */
loTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;
/* set the matrix */
loTriFactor->csrMat = new CsrMatrix;
loTriFactor->csrMat->num_rows = n;
loTriFactor->csrMat->num_cols = n;
loTriFactor->csrMat->num_entries = nzLower;
loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n+1);
loTriFactor->csrMat->row_offsets->assign(AiLo, AiLo+n+1);
loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzLower);
loTriFactor->csrMat->column_indices->assign(AjLo, AjLo+nzLower);
loTriFactor->csrMat->values = new THRUSTARRAY(nzLower);
loTriFactor->csrMat->values->assign(AALo, AALo+nzLower);
/* Create the solve analysis information */
ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_create_analysis_info(&loTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, loTriFactor->solveOp,
loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr,
loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo,
&loTriFactor->solveBufferSize);CHKERRCUSPARSE(stat);
cerr = cudaMalloc(&loTriFactor->solveBuffer,loTriFactor->solveBufferSize);CHKERRCUDA(cerr);
#endif
/* perform the solve analysis */
stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactor->solveOp,
loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr,
loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,loTriFactor->solvePolicy, loTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtr = loTriFactor;
loTriFactor->AA_h = AALo;
cerr = cudaFreeHost(AiLo);CHKERRCUDA(cerr);
cerr = cudaFreeHost(AjLo);CHKERRCUDA(cerr);
ierr = PetscLogCpuToGpu((n+1+nzLower)*sizeof(int)+nzLower*sizeof(PetscScalar));CHKERRQ(ierr);
} else { /* update values only */
if (!loTriFactor->AA_h) {
cerr = cudaMallocHost((void**) &loTriFactor->AA_h, nzLower*sizeof(PetscScalar));CHKERRCUDA(cerr);
}
/* Fill the lower triangular matrix */
loTriFactor->AA_h[0] = 1.0;
v = aa;
vi = aj;
offset = 1;
for (i=1; i<n; i++) {
nz = ai[i+1] - ai[i];
ierr = PetscArraycpy(&(loTriFactor->AA_h[offset]), v, nz);CHKERRQ(ierr);
offset += nz;
loTriFactor->AA_h[offset] = 1.0;
offset += 1;
v += nz;
}
loTriFactor->csrMat->values->assign(loTriFactor->AA_h, loTriFactor->AA_h+nzLower);
ierr = PetscLogCpuToGpu(nzLower*sizeof(PetscScalar));CHKERRQ(ierr);
}
} catch(char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
}
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(Mat A)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
PetscInt n = A->rmap->n;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
cusparseStatus_t stat;
const PetscInt *aj = a->j,*adiag = a->diag,*vi;
const MatScalar *aa = a->a,*v;
PetscInt *AiUp, *AjUp;
PetscInt i,nz, nzUpper, offset;
PetscErrorCode ierr;
cudaError_t cerr;
PetscFunctionBegin;
if (!n) PetscFunctionReturn(0);
if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
try {
/* next, figure out the number of nonzeros in the upper triangular matrix. */
nzUpper = adiag[0]-adiag[n];
if (!upTriFactor) {
PetscScalar *AAUp;
cerr = cudaMallocHost((void**) &AAUp, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr);
/* Allocate Space for the upper triangular matrix */
cerr = cudaMallocHost((void**) &AiUp, (n+1)*sizeof(PetscInt));CHKERRCUDA(cerr);
cerr = cudaMallocHost((void**) &AjUp, nzUpper*sizeof(PetscInt));CHKERRCUDA(cerr);
/* Fill the upper triangular matrix */
AiUp[0]=(PetscInt) 0;
AiUp[n]=nzUpper;
offset = nzUpper;
for (i=n-1; i>=0; i--) {
v = aa + adiag[i+1] + 1;
vi = aj + adiag[i+1] + 1;
/* number of elements NOT on the diagonal */
nz = adiag[i] - adiag[i+1]-1;
/* decrement the offset */
offset -= (nz+1);
/* first, set the diagonal elements */
AjUp[offset] = (PetscInt) i;
AAUp[offset] = (MatScalar)1./v[nz];
AiUp[i] = AiUp[i+1] - (nz+1);
ierr = PetscArraycpy(&(AjUp[offset+1]), vi, nz);CHKERRQ(ierr);
ierr = PetscArraycpy(&(AAUp[offset+1]), v, nz);CHKERRQ(ierr);
}
/* allocate space for the triangular factor information */
ierr = PetscNew(&upTriFactor);CHKERRQ(ierr);
upTriFactor->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL;
/* Create the matrix description */
stat = cusparseCreateMatDescr(&upTriFactor->descr);CHKERRCUSPARSE(stat);
stat = cusparseSetMatIndexBase(upTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
#else
stat = cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat);
#endif
stat = cusparseSetMatFillMode(upTriFactor->descr, CUSPARSE_FILL_MODE_UPPER);CHKERRCUSPARSE(stat);
stat = cusparseSetMatDiagType(upTriFactor->descr, CUSPARSE_DIAG_TYPE_NON_UNIT);CHKERRCUSPARSE(stat);
/* set the operation */
upTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;
/* set the matrix */
upTriFactor->csrMat = new CsrMatrix;
upTriFactor->csrMat->num_rows = n;
upTriFactor->csrMat->num_cols = n;
upTriFactor->csrMat->num_entries = nzUpper;
upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n+1);
upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+n+1);
upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzUpper);
upTriFactor->csrMat->column_indices->assign(AjUp, AjUp+nzUpper);
upTriFactor->csrMat->values = new THRUSTARRAY(nzUpper);
upTriFactor->csrMat->values->assign(AAUp, AAUp+nzUpper);
/* Create the solve analysis information */
ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_create_analysis_info(&upTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, upTriFactor->solveOp,
upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr,
upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo,
&upTriFactor->solveBufferSize);CHKERRCUSPARSE(stat);
cerr = cudaMalloc(&upTriFactor->solveBuffer,upTriFactor->solveBufferSize);CHKERRCUDA(cerr);
#endif
/* perform the solve analysis */
stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactor->solveOp,
upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr,
upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,upTriFactor->solvePolicy, upTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtr = upTriFactor;
upTriFactor->AA_h = AAUp;
cerr = cudaFreeHost(AiUp);CHKERRCUDA(cerr);
cerr = cudaFreeHost(AjUp);CHKERRCUDA(cerr);
ierr = PetscLogCpuToGpu((n+1+nzUpper)*sizeof(int)+nzUpper*sizeof(PetscScalar));CHKERRQ(ierr);
} else {
if (!upTriFactor->AA_h) {
cerr = cudaMallocHost((void**) &upTriFactor->AA_h, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr);
}
/* Fill the upper triangular matrix */
offset = nzUpper;
for (i=n-1; i>=0; i--) {
v = aa + adiag[i+1] + 1;
/* number of elements NOT on the diagonal */
nz = adiag[i] - adiag[i+1]-1;
/* decrement the offset */
offset -= (nz+1);
/* first, set the diagonal elements */
upTriFactor->AA_h[offset] = 1./v[nz];
ierr = PetscArraycpy(&(upTriFactor->AA_h[offset+1]), v, nz);CHKERRQ(ierr);
}
upTriFactor->csrMat->values->assign(upTriFactor->AA_h, upTriFactor->AA_h+nzUpper);
ierr = PetscLogCpuToGpu(nzUpper*sizeof(PetscScalar));CHKERRQ(ierr);
}
} catch(char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
}
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(Mat A)
{
PetscErrorCode ierr;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
IS isrow = a->row,iscol = a->icol;
PetscBool row_identity,col_identity;
PetscInt n = A->rmap->n;
PetscFunctionBegin;
if (!cusparseTriFactors) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors");
ierr = MatSeqAIJCUSPARSEBuildILULowerTriMatrix(A);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(A);CHKERRQ(ierr);
if (!cusparseTriFactors->workVector) { cusparseTriFactors->workVector = new THRUSTARRAY(n); }
cusparseTriFactors->nnz=a->nz;
A->offloadmask = PETSC_OFFLOAD_BOTH;
/* lower triangular indices */
ierr = ISIdentity(isrow,&row_identity);CHKERRQ(ierr);
if (!row_identity && !cusparseTriFactors->rpermIndices) {
const PetscInt *r;
ierr = ISGetIndices(isrow,&r);CHKERRQ(ierr);
cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n);
cusparseTriFactors->rpermIndices->assign(r, r+n);
ierr = ISRestoreIndices(isrow,&r);CHKERRQ(ierr);
ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr);
}
/* upper triangular indices */
ierr = ISIdentity(iscol,&col_identity);CHKERRQ(ierr);
if (!col_identity && !cusparseTriFactors->cpermIndices) {
const PetscInt *c;
ierr = ISGetIndices(iscol,&c);CHKERRQ(ierr);
cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n);
cusparseTriFactors->cpermIndices->assign(c, c+n);
ierr = ISRestoreIndices(iscol,&c);CHKERRQ(ierr);
ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEBuildICCTriMatrices(Mat A)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
cusparseStatus_t stat;
PetscErrorCode ierr;
cudaError_t cerr;
PetscInt *AiUp, *AjUp;
PetscScalar *AAUp;
PetscScalar *AALo;
PetscInt nzUpper = a->nz,n = A->rmap->n,i,offset,nz,j;
Mat_SeqSBAIJ *b = (Mat_SeqSBAIJ*)A->data;
const PetscInt *ai = b->i,*aj = b->j,*vj;
const MatScalar *aa = b->a,*v;
PetscFunctionBegin;
if (!n) PetscFunctionReturn(0);
if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
try {
cerr = cudaMallocHost((void**) &AAUp, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = cudaMallocHost((void**) &AALo, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr);
if (!upTriFactor && !loTriFactor) {
/* Allocate Space for the upper triangular matrix */
cerr = cudaMallocHost((void**) &AiUp, (n+1)*sizeof(PetscInt));CHKERRCUDA(cerr);
cerr = cudaMallocHost((void**) &AjUp, nzUpper*sizeof(PetscInt));CHKERRCUDA(cerr);
/* Fill the upper triangular matrix */
AiUp[0]=(PetscInt) 0;
AiUp[n]=nzUpper;
offset = 0;
for (i=0; i<n; i++) {
/* set the pointers */
v = aa + ai[i];
vj = aj + ai[i];
nz = ai[i+1] - ai[i] - 1; /* exclude diag[i] */
/* first, set the diagonal elements */
AjUp[offset] = (PetscInt) i;
AAUp[offset] = (MatScalar)1.0/v[nz];
AiUp[i] = offset;
AALo[offset] = (MatScalar)1.0/v[nz];
offset+=1;
if (nz>0) {
ierr = PetscArraycpy(&(AjUp[offset]), vj, nz);CHKERRQ(ierr);
ierr = PetscArraycpy(&(AAUp[offset]), v, nz);CHKERRQ(ierr);
for (j=offset; j<offset+nz; j++) {
AAUp[j] = -AAUp[j];
AALo[j] = AAUp[j]/v[nz];
}
offset+=nz;
}
}
/* allocate space for the triangular factor information */
ierr = PetscNew(&upTriFactor);CHKERRQ(ierr);
upTriFactor->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL;
/* Create the matrix description */
stat = cusparseCreateMatDescr(&upTriFactor->descr);CHKERRCUSPARSE(stat);
stat = cusparseSetMatIndexBase(upTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
#else
stat = cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat);
#endif
stat = cusparseSetMatFillMode(upTriFactor->descr, CUSPARSE_FILL_MODE_UPPER);CHKERRCUSPARSE(stat);
stat = cusparseSetMatDiagType(upTriFactor->descr, CUSPARSE_DIAG_TYPE_UNIT);CHKERRCUSPARSE(stat);
/* set the matrix */
upTriFactor->csrMat = new CsrMatrix;
upTriFactor->csrMat->num_rows = A->rmap->n;
upTriFactor->csrMat->num_cols = A->cmap->n;
upTriFactor->csrMat->num_entries = a->nz;
upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1);
upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+A->rmap->n+1);
upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz);
upTriFactor->csrMat->column_indices->assign(AjUp, AjUp+a->nz);
upTriFactor->csrMat->values = new THRUSTARRAY(a->nz);
upTriFactor->csrMat->values->assign(AAUp, AAUp+a->nz);
/* set the operation */
upTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;
/* Create the solve analysis information */
ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_create_analysis_info(&upTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, upTriFactor->solveOp,
upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr,
upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo,
&upTriFactor->solveBufferSize);CHKERRCUSPARSE(stat);
cerr = cudaMalloc(&upTriFactor->solveBuffer,upTriFactor->solveBufferSize);CHKERRCUDA(cerr);
#endif
/* perform the solve analysis */
stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactor->solveOp,
upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr,
upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,upTriFactor->solvePolicy, upTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtr = upTriFactor;
/* allocate space for the triangular factor information */
ierr = PetscNew(&loTriFactor);CHKERRQ(ierr);
loTriFactor->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL;
/* Create the matrix description */
stat = cusparseCreateMatDescr(&loTriFactor->descr);CHKERRCUSPARSE(stat);
stat = cusparseSetMatIndexBase(loTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
#else
stat = cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat);
#endif
stat = cusparseSetMatFillMode(loTriFactor->descr, CUSPARSE_FILL_MODE_UPPER);CHKERRCUSPARSE(stat);
stat = cusparseSetMatDiagType(loTriFactor->descr, CUSPARSE_DIAG_TYPE_NON_UNIT);CHKERRCUSPARSE(stat);
/* set the operation */
loTriFactor->solveOp = CUSPARSE_OPERATION_TRANSPOSE;
/* set the matrix */
loTriFactor->csrMat = new CsrMatrix;
loTriFactor->csrMat->num_rows = A->rmap->n;
loTriFactor->csrMat->num_cols = A->cmap->n;
loTriFactor->csrMat->num_entries = a->nz;
loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1);
loTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+A->rmap->n+1);
loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz);
loTriFactor->csrMat->column_indices->assign(AjUp, AjUp+a->nz);
loTriFactor->csrMat->values = new THRUSTARRAY(a->nz);
loTriFactor->csrMat->values->assign(AALo, AALo+a->nz);
/* Create the solve analysis information */
ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_create_analysis_info(&loTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, loTriFactor->solveOp,
loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr,
loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo,
&loTriFactor->solveBufferSize);CHKERRCUSPARSE(stat);
cerr = cudaMalloc(&loTriFactor->solveBuffer,loTriFactor->solveBufferSize);CHKERRCUDA(cerr);
#endif
/* perform the solve analysis */
stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactor->solveOp,
loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr,
loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,loTriFactor->solvePolicy, loTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtr = loTriFactor;
ierr = PetscLogCpuToGpu(2*(((A->rmap->n+1)+(a->nz))*sizeof(int)+(a->nz)*sizeof(PetscScalar)));CHKERRQ(ierr);
cerr = cudaFreeHost(AiUp);CHKERRCUDA(cerr);
cerr = cudaFreeHost(AjUp);CHKERRCUDA(cerr);
} else {
/* Fill the upper triangular matrix */
offset = 0;
for (i=0; i<n; i++) {
/* set the pointers */
v = aa + ai[i];
nz = ai[i+1] - ai[i] - 1; /* exclude diag[i] */
/* first, set the diagonal elements */
AAUp[offset] = 1.0/v[nz];
AALo[offset] = 1.0/v[nz];
offset+=1;
if (nz>0) {
ierr = PetscArraycpy(&(AAUp[offset]), v, nz);CHKERRQ(ierr);
for (j=offset; j<offset+nz; j++) {
AAUp[j] = -AAUp[j];
AALo[j] = AAUp[j]/v[nz];
}
offset+=nz;
}
}
if (!upTriFactor) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors");
if (!loTriFactor) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors");
upTriFactor->csrMat->values->assign(AAUp, AAUp+a->nz);
loTriFactor->csrMat->values->assign(AALo, AALo+a->nz);
ierr = PetscLogCpuToGpu(2*(a->nz)*sizeof(PetscScalar));CHKERRQ(ierr);
}
cerr = cudaFreeHost(AAUp);CHKERRCUDA(cerr);
cerr = cudaFreeHost(AALo);CHKERRCUDA(cerr);
} catch(char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
}
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(Mat A)
{
PetscErrorCode ierr;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
IS ip = a->row;
PetscBool perm_identity;
PetscInt n = A->rmap->n;
PetscFunctionBegin;
if (!cusparseTriFactors) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors");
ierr = MatSeqAIJCUSPARSEBuildICCTriMatrices(A);CHKERRQ(ierr);
if (!cusparseTriFactors->workVector) { cusparseTriFactors->workVector = new THRUSTARRAY(n); }
cusparseTriFactors->nnz=(a->nz-n)*2 + n;
A->offloadmask = PETSC_OFFLOAD_BOTH;
/* lower triangular indices */
ierr = ISIdentity(ip,&perm_identity);CHKERRQ(ierr);
if (!perm_identity) {
IS iip;
const PetscInt *irip,*rip;
ierr = ISInvertPermutation(ip,PETSC_DECIDE,&iip);CHKERRQ(ierr);
ierr = ISGetIndices(iip,&irip);CHKERRQ(ierr);
ierr = ISGetIndices(ip,&rip);CHKERRQ(ierr);
cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n);
cusparseTriFactors->rpermIndices->assign(rip, rip+n);
cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n);
cusparseTriFactors->cpermIndices->assign(irip, irip+n);
ierr = ISRestoreIndices(iip,&irip);CHKERRQ(ierr);
ierr = ISDestroy(&iip);CHKERRQ(ierr);
ierr = ISRestoreIndices(ip,&rip);CHKERRQ(ierr);
ierr = PetscLogCpuToGpu(2.*n*sizeof(PetscInt));CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
#define CHECK_LAUNCH_ERROR() \
do { \
/* Check synchronous errors, i.e. pre-launch */ \
cudaError_t err = cudaGetLastError(); \
if (cudaSuccess != err) { \
SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Cuda error: %s",cudaGetErrorString(err)); \
} \
/* Check asynchronous errors, i.e. kernel failed (ULF) */ \
err = cudaDeviceSynchronize(); \
if (cudaSuccess != err) { \
SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Cuda error: %s",cudaGetErrorString(err)); \
} \
} while (0)
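/*
   A minimal usage sketch of CHECK_LAUNCH_ERROR(): place it immediately after a kernel launch so that
   both the synchronous launch error and any asynchronous execution error surface as a PETSc error.
   MyKernel and its launch configuration below are hypothetical.

     MyKernel<<<grid,block>>>(args);
     CHECK_LAUNCH_ERROR();
*/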
static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat B,Mat A,const MatFactorInfo *info)
{
Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data;
IS ip = b->row;
PetscBool perm_identity;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr);
ierr = MatCholeskyFactorNumeric_SeqAIJ(B,A,info);CHKERRQ(ierr);
B->offloadmask = PETSC_OFFLOAD_CPU;
/* determine which version of MatSolve needs to be used. */
ierr = ISIdentity(ip,&perm_identity);CHKERRQ(ierr);
if (perm_identity) {
B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering;
B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering;
B->ops->matsolve = NULL;
B->ops->matsolvetranspose = NULL;
} else {
B->ops->solve = MatSolve_SeqAIJCUSPARSE;
B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE;
B->ops->matsolve = NULL;
B->ops->matsolvetranspose = NULL;
}
/* get the triangular factors */
ierr = MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(B);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(Mat A)
{
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT;
cusparseStatus_t stat;
cusparseIndexBase_t indexBase;
cusparseMatrixType_t matrixType;
cusparseFillMode_t fillMode;
cusparseDiagType_t diagType;
cudaError_t cerr;
PetscErrorCode ierr;
PetscFunctionBegin;
/* allocate space for the transpose of the lower triangular factor */
ierr = PetscNew(&loTriFactorT);CHKERRQ(ierr);
loTriFactorT->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL;
/* set the matrix descriptors of the lower triangular factor */
matrixType = cusparseGetMatType(loTriFactor->descr);
indexBase = cusparseGetMatIndexBase(loTriFactor->descr);
fillMode = cusparseGetMatFillMode(loTriFactor->descr)==CUSPARSE_FILL_MODE_UPPER ?
CUSPARSE_FILL_MODE_LOWER : CUSPARSE_FILL_MODE_UPPER;
diagType = cusparseGetMatDiagType(loTriFactor->descr);
/* Create the matrix description */
stat = cusparseCreateMatDescr(&loTriFactorT->descr);CHKERRCUSPARSE(stat);
stat = cusparseSetMatIndexBase(loTriFactorT->descr, indexBase);CHKERRCUSPARSE(stat);
stat = cusparseSetMatType(loTriFactorT->descr, matrixType);CHKERRCUSPARSE(stat);
stat = cusparseSetMatFillMode(loTriFactorT->descr, fillMode);CHKERRCUSPARSE(stat);
stat = cusparseSetMatDiagType(loTriFactorT->descr, diagType);CHKERRCUSPARSE(stat);
/* set the operation */
loTriFactorT->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;
/* allocate GPU space for the CSC of the lower triangular factor*/
loTriFactorT->csrMat = new CsrMatrix;
loTriFactorT->csrMat->num_rows = loTriFactor->csrMat->num_cols;
loTriFactorT->csrMat->num_cols = loTriFactor->csrMat->num_rows;
loTriFactorT->csrMat->num_entries = loTriFactor->csrMat->num_entries;
loTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(loTriFactorT->csrMat->num_rows+1);
loTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(loTriFactorT->csrMat->num_entries);
loTriFactorT->csrMat->values = new THRUSTARRAY(loTriFactorT->csrMat->num_entries);
/* compute the transpose of the lower triangular factor, i.e. the CSC */
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = cusparseCsr2cscEx2_bufferSize(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows,
loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries,
loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(),
loTriFactorT->csrMat->values->data().get(),
loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype,
CUSPARSE_ACTION_NUMERIC,indexBase,
CUSPARSE_CSR2CSC_ALG1, &loTriFactor->csr2cscBufferSize);CHKERRCUSPARSE(stat);
cerr = cudaMalloc(&loTriFactor->csr2cscBuffer,loTriFactor->csr2cscBufferSize);CHKERRCUDA(cerr);
#endif
ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_csr2csc(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows,
loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries,
loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(),
loTriFactorT->csrMat->values->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype,
CUSPARSE_ACTION_NUMERIC, indexBase,
CUSPARSE_CSR2CSC_ALG1, loTriFactor->csr2cscBuffer
#else
loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->csrMat->row_offsets->data().get(),
CUSPARSE_ACTION_NUMERIC, indexBase
#endif
);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
/* Create the solve analysis information */
ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_create_analysis_info(&loTriFactorT->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, loTriFactorT->solveOp,
loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, loTriFactorT->descr,
loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(),
loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo,
&loTriFactorT->solveBufferSize);CHKERRCUSPARSE(stat);
cerr = cudaMalloc(&loTriFactorT->solveBuffer,loTriFactorT->solveBufferSize);CHKERRCUDA(cerr);
#endif
/* perform the solve analysis */
stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactorT->solveOp,
loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, loTriFactorT->descr,
loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(),
loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,loTriFactorT->solvePolicy, loTriFactorT->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtrTranspose = loTriFactorT;
/*********************************************/
/* Now the Transpose of the Upper Tri Factor */
/*********************************************/
/* allocate space for the transpose of the upper triangular factor */
ierr = PetscNew(&upTriFactorT);CHKERRQ(ierr);
upTriFactorT->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL;
/* set the matrix descriptors of the upper triangular factor */
matrixType = cusparseGetMatType(upTriFactor->descr);
indexBase = cusparseGetMatIndexBase(upTriFactor->descr);
fillMode = cusparseGetMatFillMode(upTriFactor->descr)==CUSPARSE_FILL_MODE_UPPER ?
CUSPARSE_FILL_MODE_LOWER : CUSPARSE_FILL_MODE_UPPER;
diagType = cusparseGetMatDiagType(upTriFactor->descr);
/* Create the matrix description */
stat = cusparseCreateMatDescr(&upTriFactorT->descr);CHKERRCUSPARSE(stat);
stat = cusparseSetMatIndexBase(upTriFactorT->descr, indexBase);CHKERRCUSPARSE(stat);
stat = cusparseSetMatType(upTriFactorT->descr, matrixType);CHKERRCUSPARSE(stat);
stat = cusparseSetMatFillMode(upTriFactorT->descr, fillMode);CHKERRCUSPARSE(stat);
stat = cusparseSetMatDiagType(upTriFactorT->descr, diagType);CHKERRCUSPARSE(stat);
/* set the operation */
upTriFactorT->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;
/* allocate GPU space for the CSC of the upper triangular factor*/
upTriFactorT->csrMat = new CsrMatrix;
upTriFactorT->csrMat->num_rows = upTriFactor->csrMat->num_cols;
upTriFactorT->csrMat->num_cols = upTriFactor->csrMat->num_rows;
upTriFactorT->csrMat->num_entries = upTriFactor->csrMat->num_entries;
upTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(upTriFactorT->csrMat->num_rows+1);
upTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(upTriFactorT->csrMat->num_entries);
upTriFactorT->csrMat->values = new THRUSTARRAY(upTriFactorT->csrMat->num_entries);
/* compute the transpose of the upper triangular factor, i.e. the CSC */
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = cusparseCsr2cscEx2_bufferSize(cusparseTriFactors->handle,upTriFactor->csrMat->num_rows,
upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries,
upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(),
upTriFactorT->csrMat->values->data().get(),
upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype,
CUSPARSE_ACTION_NUMERIC,indexBase,
CUSPARSE_CSR2CSC_ALG1, &upTriFactor->csr2cscBufferSize);CHKERRCUSPARSE(stat);
cerr = cudaMalloc(&upTriFactor->csr2cscBuffer,upTriFactor->csr2cscBufferSize);CHKERRCUDA(cerr);
#endif
ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_csr2csc(cusparseTriFactors->handle, upTriFactor->csrMat->num_rows,
upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries,
upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(),
upTriFactorT->csrMat->values->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype,
CUSPARSE_ACTION_NUMERIC, indexBase,
CUSPARSE_CSR2CSC_ALG1, upTriFactor->csr2cscBuffer
#else
upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->csrMat->row_offsets->data().get(),
CUSPARSE_ACTION_NUMERIC, indexBase
#endif
);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
/* Create the solve analysis information */
ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_create_analysis_info(&upTriFactorT->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, upTriFactorT->solveOp,
upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, upTriFactorT->descr,
upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(),
upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo,
&upTriFactorT->solveBufferSize);CHKERRCUSPARSE(stat);
cerr = cudaMalloc(&upTriFactorT->solveBuffer,upTriFactorT->solveBufferSize);CHKERRCUDA(cerr);
#endif
/* perform the solve analysis */
stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactorT->solveOp,
upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, upTriFactorT->descr,
upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(),
upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,upTriFactorT->solvePolicy, upTriFactorT->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtrTranspose = upTriFactorT;
PetscFunctionReturn(0);
}
struct PetscScalarToPetscInt
{
__host__ __device__
PetscInt operator()(PetscScalar s)
{
return (PetscInt)PetscRealPart(s);
}
};
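/*
   PetscScalarToPetscInt is a Thrust unary functor: MatSeqAIJCUSPARSEFormExplicitTransposeForMult()
   below applies it with thrust::transform() to convert the permutation produced by csr2csc (carried
   as PetscScalar values) back into integer indices, roughly

     thrust::transform(thrust::device,matrixT->values->begin(),matrixT->values->end(),
                       cusparsestruct->csr2csc_i->begin(),PetscScalarToPetscInt());

   so that subsequent transpose updates can reuse csr2csc_i as a gather permutation.
*/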
static PetscErrorCode MatSeqAIJCUSPARSEFormExplicitTransposeForMult(Mat A)
{
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
Mat_SeqAIJCUSPARSEMultStruct *matstruct, *matstructT;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
cusparseStatus_t stat;
cusparseIndexBase_t indexBase;
cudaError_t err;
PetscErrorCode ierr;
PetscFunctionBegin;
if (!A->form_explicit_transpose || !A->rmap->n || !A->cmap->n) PetscFunctionReturn(0);
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat;
if (!matstruct) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing mat struct");
matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose;
if (A->transupdated && !matstructT) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing matTranspose struct");
if (A->transupdated) PetscFunctionReturn(0);
ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
if (cusparsestruct->format != MAT_CUSPARSE_CSR) {
ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_TRUE);CHKERRQ(ierr);
}
if (!cusparsestruct->matTranspose) { /* create cusparse matrix */
matstructT = new Mat_SeqAIJCUSPARSEMultStruct;
stat = cusparseCreateMatDescr(&matstructT->descr);CHKERRCUSPARSE(stat);
indexBase = cusparseGetMatIndexBase(matstruct->descr);
stat = cusparseSetMatIndexBase(matstructT->descr, indexBase);CHKERRCUSPARSE(stat);
stat = cusparseSetMatType(matstructT->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
/* set alpha and beta */
err = cudaMalloc((void **)&(matstructT->alpha_one),sizeof(PetscScalar));CHKERRCUDA(err);
err = cudaMalloc((void **)&(matstructT->beta_zero),sizeof(PetscScalar));CHKERRCUDA(err);
err = cudaMalloc((void **)&(matstructT->beta_one), sizeof(PetscScalar));CHKERRCUDA(err);
err = cudaMemcpy(matstructT->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err);
err = cudaMemcpy(matstructT->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err);
err = cudaMemcpy(matstructT->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err);
if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
CsrMatrix *matrixT = new CsrMatrix;
matstructT->mat = matrixT;
matrixT->num_rows = A->cmap->n;
matrixT->num_cols = A->rmap->n;
matrixT->num_entries = a->nz;
matrixT->row_offsets = new THRUSTINTARRAY32(matrixT->num_rows+1);
matrixT->column_indices = new THRUSTINTARRAY32(a->nz);
matrixT->values = new THRUSTARRAY(a->nz);
if (!cusparsestruct->rowoffsets_gpu) { cusparsestruct->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n+1); }
cusparsestruct->rowoffsets_gpu->assign(a->i,a->i+A->rmap->n+1);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = cusparseCreateCsr(&matstructT->matDescr,
matrixT->num_rows, matrixT->num_cols, matrixT->num_entries,
matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(),
matrixT->values->data().get(),
CUSPARSE_INDEX_32I,CUSPARSE_INDEX_32I, /* row offset, col idx type due to THRUSTINTARRAY32 */
indexBase,cusparse_scalartype);CHKERRCUSPARSE(stat);
#endif
} else if (cusparsestruct->format == MAT_CUSPARSE_ELL || cusparsestruct->format == MAT_CUSPARSE_HYB) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
CsrMatrix *temp = new CsrMatrix;
CsrMatrix *tempT = new CsrMatrix;
/* First convert HYB to CSR */
temp->num_rows = A->rmap->n;
temp->num_cols = A->cmap->n;
temp->num_entries = a->nz;
temp->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1);
temp->column_indices = new THRUSTINTARRAY32(a->nz);
temp->values = new THRUSTARRAY(a->nz);
stat = cusparse_hyb2csr(cusparsestruct->handle,
matstruct->descr, (cusparseHybMat_t)matstruct->mat,
temp->values->data().get(),
temp->row_offsets->data().get(),
temp->column_indices->data().get());CHKERRCUSPARSE(stat);
/* Next, convert CSR to CSC (i.e. the matrix transpose) */
tempT->num_rows = A->rmap->n;
tempT->num_cols = A->cmap->n;
tempT->num_entries = a->nz;
tempT->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1);
tempT->column_indices = new THRUSTINTARRAY32(a->nz);
tempT->values = new THRUSTARRAY(a->nz);
stat = cusparse_csr2csc(cusparsestruct->handle, temp->num_rows,
temp->num_cols, temp->num_entries,
temp->values->data().get(),
temp->row_offsets->data().get(),
temp->column_indices->data().get(),
tempT->values->data().get(),
tempT->column_indices->data().get(),
tempT->row_offsets->data().get(),
CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUSPARSE(stat);
/* Last, convert CSC to HYB */
cusparseHybMat_t hybMat;
stat = cusparseCreateHybMat(&hybMat);CHKERRCUSPARSE(stat);
cusparseHybPartition_t partition = cusparsestruct->format==MAT_CUSPARSE_ELL ?
CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO;
stat = cusparse_csr2hyb(cusparsestruct->handle, A->rmap->n, A->cmap->n,
matstructT->descr, tempT->values->data().get(),
tempT->row_offsets->data().get(),
tempT->column_indices->data().get(),
hybMat, 0, partition);CHKERRCUSPARSE(stat);
/* assign the pointer */
matstructT->mat = hybMat;
A->transupdated = PETSC_TRUE;
/* delete temporaries */
if (tempT) {
if (tempT->values) delete (THRUSTARRAY*) tempT->values;
if (tempT->column_indices) delete (THRUSTINTARRAY32*) tempT->column_indices;
if (tempT->row_offsets) delete (THRUSTINTARRAY32*) tempT->row_offsets;
delete (CsrMatrix*) tempT;
}
if (temp) {
if (temp->values) delete (THRUSTARRAY*) temp->values;
if (temp->column_indices) delete (THRUSTINTARRAY32*) temp->column_indices;
if (temp->row_offsets) delete (THRUSTINTARRAY32*) temp->row_offsets;
delete (CsrMatrix*) temp;
}
#endif
}
}
if (cusparsestruct->format == MAT_CUSPARSE_CSR) { /* transpose mat struct may be already present, update data */
CsrMatrix *matrix = (CsrMatrix*)matstruct->mat;
CsrMatrix *matrixT = (CsrMatrix*)matstructT->mat;
if (!matrix) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CsrMatrix");
if (!matrix->row_offsets) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CsrMatrix rows");
if (!matrix->column_indices) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CsrMatrix cols");
if (!matrix->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CsrMatrix values");
if (!matrixT) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CsrMatrixT");
if (!matrixT->row_offsets) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CsrMatrixT rows");
if (!matrixT->column_indices) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CsrMatrixT cols");
if (!matrixT->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CsrMatrixT values");
if (!cusparsestruct->rowoffsets_gpu) { /* this may be absent when we did not construct the transpose with csr2csc */
cusparsestruct->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1);
cusparsestruct->rowoffsets_gpu->assign(a->i,a->i + A->rmap->n + 1);
ierr = PetscLogCpuToGpu((A->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr);
}
if (!cusparsestruct->csr2csc_i) {
THRUSTARRAY csr2csc_a(matrix->num_entries);
PetscStackCallThrust(thrust::sequence(thrust::device, csr2csc_a.begin(), csr2csc_a.end(), 0.0));
indexBase = cusparseGetMatIndexBase(matstruct->descr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
void *csr2cscBuffer;
size_t csr2cscBufferSize;
stat = cusparseCsr2cscEx2_bufferSize(cusparsestruct->handle, A->rmap->n,
A->cmap->n, matrix->num_entries,
matrix->values->data().get(),
cusparsestruct->rowoffsets_gpu->data().get(),
matrix->column_indices->data().get(),
matrixT->values->data().get(),
matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), cusparse_scalartype,
CUSPARSE_ACTION_NUMERIC,indexBase,
cusparsestruct->csr2cscAlg, &csr2cscBufferSize);CHKERRCUSPARSE(stat);
err = cudaMalloc(&csr2cscBuffer,csr2cscBufferSize);CHKERRCUDA(err);
#endif
if (matrix->num_entries) {
/* When there are no nonzeros, this routine mistakenly returns CUSPARSE_STATUS_INVALID_VALUE in
mat_tests-ex62_15_mpiaijcusparse on ranks 0 and 2 with CUDA-11. But CUDA-10 is OK.
I checked every parameter and they were just fine. I have no clue why cusparse complains.
Per https://docs.nvidia.com/cuda/cusparse/index.html#csr2cscEx2, when nnz = 0, matrixT->row_offsets[]
should be filled with indexBase. So I just take a shortcut here.
*/
stat = cusparse_csr2csc(cusparsestruct->handle, A->rmap->n,
A->cmap->n,matrix->num_entries,
csr2csc_a.data().get(),
cusparsestruct->rowoffsets_gpu->data().get(),
matrix->column_indices->data().get(),
matrixT->values->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), cusparse_scalartype,
CUSPARSE_ACTION_NUMERIC,indexBase,
cusparsestruct->csr2cscAlg, csr2cscBuffer);CHKERRCUSPARSE(stat);
#else
matrixT->column_indices->data().get(), matrixT->row_offsets->data().get(),
CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUSPARSE(stat);
#endif
} else {
matrixT->row_offsets->assign(matrixT->row_offsets->size(),indexBase);
}
cusparsestruct->csr2csc_i = new THRUSTINTARRAY(matrix->num_entries);
PetscStackCallThrust(thrust::transform(thrust::device,matrixT->values->begin(),matrixT->values->end(),cusparsestruct->csr2csc_i->begin(),PetscScalarToPetscInt()));
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
err = cudaFree(csr2cscBuffer);CHKERRCUDA(err);
#endif
}
PetscStackCallThrust(thrust::copy(thrust::device,thrust::make_permutation_iterator(matrix->values->begin(), cusparsestruct->csr2csc_i->begin()),
thrust::make_permutation_iterator(matrix->values->begin(), cusparsestruct->csr2csc_i->end()),
matrixT->values->begin()));
}
ierr = PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
/* the compressed row indices are not used for matTranspose */
matstructT->cprowIndices = NULL;
/* assign the pointer */
((Mat_SeqAIJCUSPARSE*)A->spptr)->matTranspose = matstructT;
A->transupdated = PETSC_TRUE;
PetscFunctionReturn(0);
}
/* Why do we need to analyze the transposed matrix again? Can't we just use op(A) = CUSPARSE_OPERATION_TRANSPOSE in MatSolve_SeqAIJCUSPARSE? */
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat A,Vec bb,Vec xx)
{
PetscInt n = xx->map->n;
const PetscScalar *barray;
PetscScalar *xarray;
thrust::device_ptr<const PetscScalar> bGPU;
thrust::device_ptr<PetscScalar> xGPU;
cusparseStatus_t stat;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose;
THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector;
PetscErrorCode ierr;
cudaError_t cerr;
PetscFunctionBegin;
/* Analyze the matrix and create the transpose ... on the fly */
if (!loTriFactorT && !upTriFactorT) {
ierr = MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A);CHKERRQ(ierr);
loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose;
upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose;
}
/* Get the GPU pointers */
ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr);
xGPU = thrust::device_pointer_cast(xarray);
bGPU = thrust::device_pointer_cast(barray);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
/* First, reorder with the row permutation */
thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()),
thrust::make_permutation_iterator(bGPU+n, cusparseTriFactors->rpermIndices->end()),
xGPU);
/* First, solve U */
stat = cusparse_solve(cusparseTriFactors->handle, upTriFactorT->solveOp,
upTriFactorT->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
upTriFactorT->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, upTriFactorT->descr,
upTriFactorT->csrMat->values->data().get(),
upTriFactorT->csrMat->row_offsets->data().get(),
upTriFactorT->csrMat->column_indices->data().get(),
upTriFactorT->solveInfo,
xarray, tempGPU->data().get()
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,upTriFactorT->solvePolicy, upTriFactorT->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
/* Then, solve L */
stat = cusparse_solve(cusparseTriFactors->handle, loTriFactorT->solveOp,
loTriFactorT->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
loTriFactorT->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, loTriFactorT->descr,
loTriFactorT->csrMat->values->data().get(),
loTriFactorT->csrMat->row_offsets->data().get(),
loTriFactorT->csrMat->column_indices->data().get(),
loTriFactorT->solveInfo,
tempGPU->data().get(), xarray
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,loTriFactorT->solvePolicy, loTriFactorT->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
/* Last, copy the solution, xGPU, into a temporary with the column permutation ... can't be done in place. */
thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(xGPU, cusparseTriFactors->cpermIndices->begin()),
thrust::make_permutation_iterator(xGPU+n, cusparseTriFactors->cpermIndices->end()),
tempGPU->begin());
/* Copy the temporary to the full solution. */
thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream),tempGPU->begin(), tempGPU->end(), xGPU);
/* restore */
ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
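/* Same as MatSolveTranspose_SeqAIJCUSPARSE, but for factors computed with the natural
   ordering: no row/column permutations are applied, so the two triangular solves read
   directly from bb and write directly to xx (via the work vector in between). */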
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat A,Vec bb,Vec xx)
{
const PetscScalar *barray;
PetscScalar *xarray;
cusparseStatus_t stat;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose;
THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector;
PetscErrorCode ierr;
cudaError_t cerr;
PetscFunctionBegin;
/* Analyze the matrix and create the transpose ... on the fly */
if (!loTriFactorT && !upTriFactorT) {
ierr = MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A);CHKERRQ(ierr);
loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose;
upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose;
}
/* Get the GPU pointers */
ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
/* First, solve U */
stat = cusparse_solve(cusparseTriFactors->handle, upTriFactorT->solveOp,
upTriFactorT->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
upTriFactorT->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, upTriFactorT->descr,
upTriFactorT->csrMat->values->data().get(),
upTriFactorT->csrMat->row_offsets->data().get(),
upTriFactorT->csrMat->column_indices->data().get(),
upTriFactorT->solveInfo,
barray, tempGPU->data().get()
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,upTriFactorT->solvePolicy, upTriFactorT->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
/* Then, solve L */
stat = cusparse_solve(cusparseTriFactors->handle, loTriFactorT->solveOp,
loTriFactorT->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
loTriFactorT->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, loTriFactorT->descr,
loTriFactorT->csrMat->values->data().get(),
loTriFactorT->csrMat->row_offsets->data().get(),
loTriFactorT->csrMat->column_indices->data().get(),
loTriFactorT->solveInfo,
tempGPU->data().get(), xarray
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,loTriFactorT->solvePolicy, loTriFactorT->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
/* restore */
ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
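/* Solve A x = b with the triangular factors stored on the GPU: gather b through the row
   permutation into the work vector, solve L then U with cusparse, and scatter the result
   through the column permutation into x. */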
static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat A,Vec bb,Vec xx)
{
const PetscScalar *barray;
PetscScalar *xarray;
thrust::device_ptr<const PetscScalar> bGPU;
thrust::device_ptr<PetscScalar> xGPU;
cusparseStatus_t stat;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector;
PetscErrorCode ierr;
cudaError_t cerr;
PetscFunctionBegin;
/* Get the GPU pointers */
ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr);
xGPU = thrust::device_pointer_cast(xarray);
bGPU = thrust::device_pointer_cast(barray);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
/* First, reorder with the row permutation */
thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()),
thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->end()),
tempGPU->begin());
/* Next, solve L */
stat = cusparse_solve(cusparseTriFactors->handle, loTriFactor->solveOp,
loTriFactor->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
loTriFactor->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, loTriFactor->descr,
loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(),
loTriFactor->solveInfo,
tempGPU->data().get(), xarray
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,loTriFactor->solvePolicy, loTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
/* Then, solve U */
stat = cusparse_solve(cusparseTriFactors->handle, upTriFactor->solveOp,
upTriFactor->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
upTriFactor->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, upTriFactor->descr,
upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(),
upTriFactor->solveInfo,
xarray, tempGPU->data().get()
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,upTriFactor->solvePolicy, upTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
/* Last, reorder with the column permutation */
thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->begin()),
thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->end()),
xGPU);
ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
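/* Natural-ordering variant of MatSolve_SeqAIJCUSPARSE: no permutations are needed, so the
   L and U solves operate directly on the vector arrays, with one intermediate work vector
   between the two solves. */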
static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat A,Vec bb,Vec xx)
{
const PetscScalar *barray;
PetscScalar *xarray;
cusparseStatus_t stat;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector;
PetscErrorCode ierr;
cudaError_t cerr;
PetscFunctionBegin;
/* Get the GPU pointers */
ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
/* First, solve L */
stat = cusparse_solve(cusparseTriFactors->handle, loTriFactor->solveOp,
loTriFactor->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
loTriFactor->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, loTriFactor->descr,
loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(),
loTriFactor->solveInfo,
barray, tempGPU->data().get()
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,loTriFactor->solvePolicy, loTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
/* Next, solve U */
stat = cusparse_solve(cusparseTriFactors->handle, upTriFactor->solveOp,
upTriFactor->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
upTriFactor->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, upTriFactor->descr,
upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(),
upTriFactor->solveInfo,
tempGPU->data().get(), xarray
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,upTriFactor->solvePolicy, upTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
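/* Copy the CSR values from the GPU back to the host AIJ arrays when the GPU holds the
   up-to-date copy; only a->a is updated (the nonzero pattern is unchanged) and the
   offload mask is set to PETSC_OFFLOAD_BOTH. */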
static PetscErrorCode MatSeqAIJCUSPARSECopyFromGPU(Mat A)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
cudaError_t cerr;
PetscErrorCode ierr;
PetscFunctionBegin;
if (A->offloadmask == PETSC_OFFLOAD_GPU) {
CsrMatrix *matrix = (CsrMatrix*)cusp->mat->mat;
ierr = PetscLogEventBegin(MAT_CUSPARSECopyFromGPU,A,0,0,0);CHKERRQ(ierr);
cerr = cudaMemcpy(a->a, matrix->values->data().get(), a->nz*sizeof(PetscScalar), cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuToCpu(a->nz*sizeof(PetscScalar));CHKERRQ(ierr);
ierr = PetscLogEventEnd(MAT_CUSPARSECopyFromGPU,A,0,0,0);CHKERRQ(ierr);
A->offloadmask = PETSC_OFFLOAD_BOTH;
}
PetscFunctionReturn(0);
}
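/* Return the host values array; the values are first synchronized from the GPU and the
   offload mask is downgraded to PETSC_OFFLOAD_CPU since the caller may modify them. */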
static PetscErrorCode MatSeqAIJGetArray_SeqAIJCUSPARSE(Mat A,PetscScalar *array[])
{
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr);
*array = a->a;
A->offloadmask = PETSC_OFFLOAD_CPU;
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat A)
{
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
Mat_SeqAIJCUSPARSEMultStruct *matstruct = cusparsestruct->mat;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
PetscInt m = A->rmap->n,*ii,*ridx,tmp;
PetscErrorCode ierr;
cusparseStatus_t stat;
PetscBool both = PETSC_TRUE;
cudaError_t err;
PetscFunctionBegin;
if (A->boundtocpu) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Cannot copy to GPU");
if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
if (A->nonzerostate == cusparsestruct->nonzerostate && cusparsestruct->format == MAT_CUSPARSE_CSR) { /* Copy values only */
CsrMatrix *matrix;
matrix = (CsrMatrix*)cusparsestruct->mat->mat;
if (a->nz && !a->a) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CSR values");
ierr = PetscLogEventBegin(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr);
matrix->values->assign(a->a, a->a+a->nz);
err = WaitForCUDA();CHKERRCUDA(err);
ierr = PetscLogCpuToGpu((a->nz)*sizeof(PetscScalar));CHKERRQ(ierr);
ierr = PetscLogEventEnd(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_FALSE);CHKERRQ(ierr);
} else {
PetscInt nnz;
ierr = PetscLogEventBegin(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&cusparsestruct->mat,cusparsestruct->format);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_TRUE);CHKERRQ(ierr);
delete cusparsestruct->workVector;
delete cusparsestruct->rowoffsets_gpu;
cusparsestruct->workVector = NULL;
cusparsestruct->rowoffsets_gpu = NULL;
try {
if (a->compressedrow.use) {
m = a->compressedrow.nrows;
ii = a->compressedrow.i;
ridx = a->compressedrow.rindex;
} else {
m = A->rmap->n;
ii = a->i;
ridx = NULL;
}
if (!ii) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CSR row data");
if (m && !a->j) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CSR column data");
if (!a->a) { nnz = ii[m]; both = PETSC_FALSE; }
else nnz = a->nz;
/* create cusparse matrix */
cusparsestruct->nrows = m;
matstruct = new Mat_SeqAIJCUSPARSEMultStruct;
stat = cusparseCreateMatDescr(&matstruct->descr);CHKERRCUSPARSE(stat);
stat = cusparseSetMatIndexBase(matstruct->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
stat = cusparseSetMatType(matstruct->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
err = cudaMalloc((void **)&(matstruct->alpha_one),sizeof(PetscScalar));CHKERRCUDA(err);
err = cudaMalloc((void **)&(matstruct->beta_zero),sizeof(PetscScalar));CHKERRCUDA(err);
err = cudaMalloc((void **)&(matstruct->beta_one), sizeof(PetscScalar));CHKERRCUDA(err);
err = cudaMemcpy(matstruct->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err);
err = cudaMemcpy(matstruct->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err);
err = cudaMemcpy(matstruct->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err);
stat = cusparseSetPointerMode(cusparsestruct->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
/* Build a hybrid/ellpack matrix if this option is chosen for the storage */
if (cusparsestruct->format==MAT_CUSPARSE_CSR) {
/* set the matrix */
CsrMatrix *mat= new CsrMatrix;
mat->num_rows = m;
mat->num_cols = A->cmap->n;
mat->num_entries = nnz;
mat->row_offsets = new THRUSTINTARRAY32(m+1);
mat->row_offsets->assign(ii, ii + m+1);
mat->column_indices = new THRUSTINTARRAY32(nnz);
mat->column_indices->assign(a->j, a->j+nnz);
mat->values = new THRUSTARRAY(nnz);
if (a->a) mat->values->assign(a->a, a->a+nnz);
/* assign the pointer */
matstruct->mat = mat;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
if (mat->num_rows) { /* cusparse errors on empty matrices! */
stat = cusparseCreateCsr(&matstruct->matDescr,
mat->num_rows, mat->num_cols, mat->num_entries,
mat->row_offsets->data().get(), mat->column_indices->data().get(),
mat->values->data().get(),
CUSPARSE_INDEX_32I,CUSPARSE_INDEX_32I, /* row offset, col idx types due to THRUSTINTARRAY32 */
CUSPARSE_INDEX_BASE_ZERO,cusparse_scalartype);CHKERRCUSPARSE(stat);
}
#endif
} else if (cusparsestruct->format==MAT_CUSPARSE_ELL || cusparsestruct->format==MAT_CUSPARSE_HYB) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
CsrMatrix *mat= new CsrMatrix;
mat->num_rows = m;
mat->num_cols = A->cmap->n;
mat->num_entries = nnz;
mat->row_offsets = new THRUSTINTARRAY32(m+1);
mat->row_offsets->assign(ii, ii + m+1);
mat->column_indices = new THRUSTINTARRAY32(nnz);
mat->column_indices->assign(a->j, a->j+nnz);
mat->values = new THRUSTARRAY(nnz);
if (a->a) mat->values->assign(a->a, a->a+nnz);
cusparseHybMat_t hybMat;
stat = cusparseCreateHybMat(&hybMat);CHKERRCUSPARSE(stat);
cusparseHybPartition_t partition = cusparsestruct->format==MAT_CUSPARSE_ELL ?
CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO;
stat = cusparse_csr2hyb(cusparsestruct->handle, mat->num_rows, mat->num_cols,
matstruct->descr, mat->values->data().get(),
mat->row_offsets->data().get(),
mat->column_indices->data().get(),
hybMat, 0, partition);CHKERRCUSPARSE(stat);
/* assign the pointer */
matstruct->mat = hybMat;
if (mat) {
if (mat->values) delete (THRUSTARRAY*)mat->values;
if (mat->column_indices) delete (THRUSTINTARRAY32*)mat->column_indices;
if (mat->row_offsets) delete (THRUSTINTARRAY32*)mat->row_offsets;
delete (CsrMatrix*)mat;
}
#endif
}
/* assign the compressed row indices */
if (a->compressedrow.use) {
cusparsestruct->workVector = new THRUSTARRAY(m);
matstruct->cprowIndices = new THRUSTINTARRAY(m);
matstruct->cprowIndices->assign(ridx,ridx+m);
tmp = m;
} else {
cusparsestruct->workVector = NULL;
matstruct->cprowIndices = NULL;
tmp = 0;
}
ierr = PetscLogCpuToGpu(((m+1)+(a->nz))*sizeof(int)+tmp*sizeof(PetscInt)+(3+(a->nz))*sizeof(PetscScalar));CHKERRQ(ierr);
/* assign the pointer */
cusparsestruct->mat = matstruct;
} catch(char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
}
err = WaitForCUDA();CHKERRCUDA(err);
ierr = PetscLogEventEnd(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr);
cusparsestruct->nonzerostate = A->nonzerostate;
}
if (both) A->offloadmask = PETSC_OFFLOAD_BOTH;
}
PetscFunctionReturn(0);
}
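/* Thrust functors used with zip iterators to gather/scatter between full-length vectors
   and the compressed work vector in the mult/mult-add kernels below. */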
struct VecCUDAPlusEquals
{
template <typename Tuple>
__host__ __device__
void operator()(Tuple t)
{
thrust::get<1>(t) = thrust::get<1>(t) + thrust::get<0>(t);
}
};
struct VecCUDAEquals
{
template <typename Tuple>
__host__ __device__
void operator()(Tuple t)
{
thrust::get<1>(t) = thrust::get<0>(t);
}
};
struct VecCUDAEqualsReverse
{
template <typename Tuple>
__host__ __device__
void operator()(Tuple t)
{
thrust::get<0>(t) = thrust::get<1>(t);
}
};
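/* Per-product data attached to C->product->data; it caches buffers and cusparse
   descriptors between the symbolic and numeric phases of the mat-mat products below. */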
struct MatMatCusparse {
PetscBool cisdense;
PetscScalar *Bt;
Mat X;
PetscBool reusesym; /* Cusparse does not have split symbolic and numeric phases for sparse matmat operations */
PetscLogDouble flops;
CsrMatrix *Bcsr;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
cusparseSpMatDescr_t matSpBDescr;
PetscBool initialized; /* C = alpha op(A) op(B) + beta C */
cusparseDnMatDescr_t matBDescr;
cusparseDnMatDescr_t matCDescr;
PetscInt Blda,Clda; /* Record leading dimensions of B and C here to detect changes */
size_t mmBufferSize;
void *mmBuffer;
void *mmBuffer2; /* SpGEMM WorkEstimation buffer */
cusparseSpGEMMDescr_t spgemmDesc;
#endif
};
static PetscErrorCode MatDestroy_MatMatCusparse(void *data)
{
PetscErrorCode ierr;
MatMatCusparse *mmdata = (MatMatCusparse *)data;
cudaError_t cerr;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
cusparseStatus_t stat;
#endif
PetscFunctionBegin;
cerr = cudaFree(mmdata->Bt);CHKERRCUDA(cerr);
delete mmdata->Bcsr;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
if (mmdata->matSpBDescr) { stat = cusparseDestroySpMat(mmdata->matSpBDescr);CHKERRCUSPARSE(stat); }
if (mmdata->mmBuffer) { cerr = cudaFree(mmdata->mmBuffer);CHKERRCUDA(cerr); }
if (mmdata->mmBuffer2) { cerr = cudaFree(mmdata->mmBuffer2);CHKERRCUDA(cerr); }
if (mmdata->matBDescr) { stat = cusparseDestroyDnMat(mmdata->matBDescr);CHKERRCUSPARSE(stat); }
if (mmdata->matCDescr) { stat = cusparseDestroyDnMat(mmdata->matCDescr);CHKERRCUSPARSE(stat); }
if (mmdata->spgemmDesc) { stat = cusparseSpGEMM_destroyDescr(mmdata->spgemmDesc);CHKERRCUSPARSE(stat); }
#endif
ierr = MatDestroy(&mmdata->X);CHKERRQ(ierr);
ierr = PetscFree(data);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PETSC_INTERN PetscErrorCode MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(Mat,Mat,Mat,PetscBool,PetscBool);
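/* Numeric phase of C = op(A)*op(B) with A sparse (CUSPARSE) and B dense (CUDA).
   With CUDA >= 11 the product uses cusparseSpMM with descriptors and a buffer cached in
   MatMatCusparse; with older CUDA the csrmm path is used and B^T is formed explicitly
   with cublasXgeam when a transposed B is required. For PtAP/RARt the sparse product is
   written to the intermediate mmdata->X and finished with a dense-dense multiply. */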
static PetscErrorCode MatProductNumeric_SeqAIJCUSPARSE_SeqDENSECUDA(Mat C)
{
Mat_Product *product = C->product;
Mat A,B;
PetscInt m,n,blda,clda;
PetscBool flg,biscuda;
Mat_SeqAIJCUSPARSE *cusp;
cusparseStatus_t stat;
cusparseOperation_t opA;
const PetscScalar *barray;
PetscScalar *carray;
PetscErrorCode ierr;
MatMatCusparse *mmdata;
Mat_SeqAIJCUSPARSEMultStruct *mat;
CsrMatrix *csrmat;
cudaError_t cerr;
PetscFunctionBegin;
MatCheckProduct(C,1);
if (!C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data empty");
mmdata = (MatMatCusparse*)product->data;
A = product->A;
B = product->B;
ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Not for type %s",((PetscObject)A)->type_name);
/* currently CopyToGpu does not copy if the matrix is bound to the CPU.
     Instead of silently accepting the wrong answer, I prefer to raise the error */
if (A->boundtocpu) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
switch (product->type) {
case MATPRODUCT_AB:
case MATPRODUCT_PtAP:
mat = cusp->mat;
opA = CUSPARSE_OPERATION_NON_TRANSPOSE;
m = A->rmap->n;
n = B->cmap->n;
break;
case MATPRODUCT_AtB:
if (!A->form_explicit_transpose) {
mat = cusp->mat;
opA = CUSPARSE_OPERATION_TRANSPOSE;
} else {
ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(A);CHKERRQ(ierr);
mat = cusp->matTranspose;
opA = CUSPARSE_OPERATION_NON_TRANSPOSE;
}
m = A->cmap->n;
n = B->cmap->n;
break;
case MATPRODUCT_ABt:
case MATPRODUCT_RARt:
mat = cusp->mat;
opA = CUSPARSE_OPERATION_NON_TRANSPOSE;
m = A->rmap->n;
n = B->rmap->n;
break;
default:
SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Unsupported product type %s",MatProductTypes[product->type]);
}
if (!mat) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing Mat_SeqAIJCUSPARSEMultStruct");
csrmat = (CsrMatrix*)mat->mat;
/* if the user passed a CPU matrix, copy the data to the GPU */
ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQDENSECUDA,&biscuda);CHKERRQ(ierr);
if (!biscuda) {ierr = MatConvert(B,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);}
ierr = MatDenseCUDAGetArrayRead(B,&barray);CHKERRQ(ierr);
ierr = MatDenseGetLDA(B,&blda);CHKERRQ(ierr);
if (product->type == MATPRODUCT_RARt || product->type == MATPRODUCT_PtAP) {
ierr = MatDenseCUDAGetArrayWrite(mmdata->X,&carray);CHKERRQ(ierr);
ierr = MatDenseGetLDA(mmdata->X,&clda);CHKERRQ(ierr);
} else {
ierr = MatDenseCUDAGetArrayWrite(C,&carray);CHKERRQ(ierr);
ierr = MatDenseGetLDA(C,&clda);CHKERRQ(ierr);
}
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
cusparseOperation_t opB = (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) ? CUSPARSE_OPERATION_TRANSPOSE : CUSPARSE_OPERATION_NON_TRANSPOSE;
/* (re)allocate mmBuffer if not initialized or if the LDAs have changed */
if (!mmdata->initialized || mmdata->Blda != blda || mmdata->Clda != clda) {
size_t mmBufferSize;
if (mmdata->initialized && mmdata->Blda != blda) {stat = cusparseDestroyDnMat(mmdata->matBDescr);CHKERRCUSPARSE(stat); mmdata->matBDescr = NULL;}
if (!mmdata->matBDescr) {
stat = cusparseCreateDnMat(&mmdata->matBDescr,B->rmap->n,B->cmap->n,blda,(void*)barray,cusparse_scalartype,CUSPARSE_ORDER_COL);CHKERRCUSPARSE(stat);
mmdata->Blda = blda;
}
if (mmdata->initialized && mmdata->Clda != clda) {stat = cusparseDestroyDnMat(mmdata->matCDescr);CHKERRCUSPARSE(stat); mmdata->matCDescr = NULL;}
if (!mmdata->matCDescr) { /* matCDescr is for C or mmdata->X */
stat = cusparseCreateDnMat(&mmdata->matCDescr,m,n,clda,(void*)carray,cusparse_scalartype,CUSPARSE_ORDER_COL);CHKERRCUSPARSE(stat);
mmdata->Clda = clda;
}
if (!mat->matDescr) {
stat = cusparseCreateCsr(&mat->matDescr,
csrmat->num_rows, csrmat->num_cols, csrmat->num_entries,
csrmat->row_offsets->data().get(), csrmat->column_indices->data().get(),
csrmat->values->data().get(),
CUSPARSE_INDEX_32I,CUSPARSE_INDEX_32I, /* row offset, col idx types due to THRUSTINTARRAY32 */
CUSPARSE_INDEX_BASE_ZERO,cusparse_scalartype);CHKERRCUSPARSE(stat);
}
stat = cusparseSpMM_bufferSize(cusp->handle,opA,opB,mat->alpha_one,
mat->matDescr,mmdata->matBDescr,mat->beta_zero,
mmdata->matCDescr,cusparse_scalartype,
cusp->spmmAlg,&mmBufferSize);CHKERRCUSPARSE(stat);
if ((mmdata->mmBuffer && mmdata->mmBufferSize < mmBufferSize) || !mmdata->mmBuffer) {
cerr = cudaFree(mmdata->mmBuffer);CHKERRCUDA(cerr);
cerr = cudaMalloc(&mmdata->mmBuffer,mmBufferSize);CHKERRCUDA(cerr);
mmdata->mmBufferSize = mmBufferSize;
}
mmdata->initialized = PETSC_TRUE;
} else {
/* to be safe, always update pointers of the mats */
stat = cusparseSpMatSetValues(mat->matDescr,csrmat->values->data().get());CHKERRCUSPARSE(stat);
stat = cusparseDnMatSetValues(mmdata->matBDescr,(void*)barray);CHKERRCUSPARSE(stat);
stat = cusparseDnMatSetValues(mmdata->matCDescr,(void*)carray);CHKERRCUSPARSE(stat);
}
/* do cusparseSpMM, which supports transpose on B */
stat = cusparseSpMM(cusp->handle,opA,opB,mat->alpha_one,
mat->matDescr,mmdata->matBDescr,mat->beta_zero,
mmdata->matCDescr,cusparse_scalartype,
cusp->spmmAlg,mmdata->mmBuffer);CHKERRCUSPARSE(stat);
#else
PetscInt k;
/* cusparseXcsrmm does not support transpose on B */
if (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) {
cublasHandle_t cublasv2handle;
cublasStatus_t cerr;
ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr);
cerr = cublasXgeam(cublasv2handle,CUBLAS_OP_T,CUBLAS_OP_T,
B->cmap->n,B->rmap->n,
&PETSC_CUSPARSE_ONE ,barray,blda,
&PETSC_CUSPARSE_ZERO,barray,blda,
mmdata->Bt,B->cmap->n);CHKERRCUBLAS(cerr);
blda = B->cmap->n;
k = B->cmap->n;
} else {
k = B->rmap->n;
}
/* perform the MatMat operation, op(A) is m x k, op(B) is k x n */
stat = cusparse_csr_spmm(cusp->handle,opA,m,n,k,
csrmat->num_entries,mat->alpha_one,mat->descr,
csrmat->values->data().get(),
csrmat->row_offsets->data().get(),
csrmat->column_indices->data().get(),
mmdata->Bt ? mmdata->Bt : barray,blda,mat->beta_zero,
carray,clda);CHKERRCUSPARSE(stat);
#endif
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(n*2.0*csrmat->num_entries);CHKERRQ(ierr);
ierr = MatDenseCUDARestoreArrayRead(B,&barray);CHKERRQ(ierr);
if (product->type == MATPRODUCT_RARt) {
ierr = MatDenseCUDARestoreArrayWrite(mmdata->X,&carray);CHKERRQ(ierr);
ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(B,mmdata->X,C,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr);
} else if (product->type == MATPRODUCT_PtAP) {
ierr = MatDenseCUDARestoreArrayWrite(mmdata->X,&carray);CHKERRQ(ierr);
ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(B,mmdata->X,C,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr);
} else {
ierr = MatDenseCUDARestoreArrayWrite(C,&carray);CHKERRQ(ierr);
}
if (mmdata->cisdense) {
ierr = MatConvert(C,MATSEQDENSE,MAT_INPLACE_MATRIX,&C);CHKERRQ(ierr);
}
if (!biscuda) {
ierr = MatConvert(B,MATSEQDENSE,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
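/* Symbolic phase of the sparse-dense product: set the sizes and (CUDA) dense type of C,
   allocate the MatMatCusparse product data, and create the intermediate dense matrix X
   needed by PtAP/RARt (plus, for CUDA < 11, the buffer holding B^T). */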
static PetscErrorCode MatProductSymbolic_SeqAIJCUSPARSE_SeqDENSECUDA(Mat C)
{
Mat_Product *product = C->product;
Mat A,B;
PetscInt m,n;
PetscBool cisdense,flg;
PetscErrorCode ierr;
MatMatCusparse *mmdata;
Mat_SeqAIJCUSPARSE *cusp;
PetscFunctionBegin;
MatCheckProduct(C,1);
if (C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data not empty");
A = product->A;
B = product->B;
ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for type %s",((PetscObject)A)->type_name);
cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
if (cusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
switch (product->type) {
case MATPRODUCT_AB:
m = A->rmap->n;
n = B->cmap->n;
break;
case MATPRODUCT_AtB:
m = A->cmap->n;
n = B->cmap->n;
break;
case MATPRODUCT_ABt:
m = A->rmap->n;
n = B->rmap->n;
break;
case MATPRODUCT_PtAP:
m = B->cmap->n;
n = B->cmap->n;
break;
case MATPRODUCT_RARt:
m = B->rmap->n;
n = B->rmap->n;
break;
default:
SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Unsupported product type %s",MatProductTypes[product->type]);
}
ierr = MatSetSizes(C,m,n,m,n);CHKERRQ(ierr);
/* if C is of type MATSEQDENSE (CPU), perform the operation on the GPU and then copy the result back to the CPU */
ierr = PetscObjectTypeCompare((PetscObject)C,MATSEQDENSE,&cisdense);CHKERRQ(ierr);
ierr = MatSetType(C,MATSEQDENSECUDA);CHKERRQ(ierr);
/* product data */
ierr = PetscNew(&mmdata);CHKERRQ(ierr);
mmdata->cisdense = cisdense;
#if PETSC_PKG_CUDA_VERSION_LT(11,0,0)
/* cusparseXcsrmm does not support transpose on B, so we allocate a buffer to store B^T */
if (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) {
cudaError_t cerr = cudaMalloc((void**)&mmdata->Bt,(size_t)B->rmap->n*(size_t)B->cmap->n*sizeof(PetscScalar));CHKERRCUDA(cerr);
}
#endif
/* for these products we need intermediate storage */
if (product->type == MATPRODUCT_RARt || product->type == MATPRODUCT_PtAP) {
ierr = MatCreate(PetscObjectComm((PetscObject)C),&mmdata->X);CHKERRQ(ierr);
ierr = MatSetType(mmdata->X,MATSEQDENSECUDA);CHKERRQ(ierr);
if (product->type == MATPRODUCT_RARt) { /* do not preallocate, since the first call to MatDenseCUDAGetArray will preallocate on the GPU for us */
ierr = MatSetSizes(mmdata->X,A->rmap->n,B->rmap->n,A->rmap->n,B->rmap->n);CHKERRQ(ierr);
} else {
ierr = MatSetSizes(mmdata->X,A->rmap->n,B->cmap->n,A->rmap->n,B->cmap->n);CHKERRQ(ierr);
}
}
C->product->data = mmdata;
C->product->destroy = MatDestroy_MatMatCusparse;
C->ops->productnumeric = MatProductNumeric_SeqAIJCUSPARSE_SeqDENSECUDA;
PetscFunctionReturn(0);
}
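/* Numeric phase of the sparse-sparse product C = op(A)*op(B). It reuses the cusparse
   descriptors, buffers and SpGEMM descriptor created in the symbolic phase (or the legacy
   csrgemm path for CUDA < 11) and finishes with a shortened version of
   MatAssemblyEnd_SeqAIJ. */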
static PetscErrorCode MatProductNumeric_SeqAIJCUSPARSE_SeqAIJCUSPARSE(Mat C)
{
Mat_Product *product = C->product;
Mat A,B;
Mat_SeqAIJCUSPARSE *Acusp,*Bcusp,*Ccusp;
Mat_SeqAIJ *c = (Mat_SeqAIJ*)C->data;
Mat_SeqAIJCUSPARSEMultStruct *Amat,*Bmat,*Cmat;
CsrMatrix *Acsr,*Bcsr,*Ccsr;
PetscBool flg;
PetscErrorCode ierr;
cusparseStatus_t stat;
cudaError_t cerr;
MatProductType ptype;
MatMatCusparse *mmdata;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
cusparseSpMatDescr_t BmatSpDescr;
#endif
PetscFunctionBegin;
MatCheckProduct(C,1);
if (!C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data empty");
ierr = PetscObjectTypeCompare((PetscObject)C,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for C of type %s",((PetscObject)C)->type_name);
mmdata = (MatMatCusparse*)C->product->data;
A = product->A;
B = product->B;
if (mmdata->reusesym) { /* this happens when api_user is true, meaning that the matrix values have already been computed in the MatProductSymbolic phase */
mmdata->reusesym = PETSC_FALSE;
Ccusp = (Mat_SeqAIJCUSPARSE*)C->spptr;
if (Ccusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
Cmat = Ccusp->mat;
if (!Cmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing C mult struct for product type %s",MatProductTypes[C->product->type]);
Ccsr = (CsrMatrix*)Cmat->mat;
if (!Ccsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing C CSR struct");
goto finalize;
}
if (!c->nz) goto finalize;
ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for type %s",((PetscObject)A)->type_name);
ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for B of type %s",((PetscObject)B)->type_name);
if (A->boundtocpu) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONG,"Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
if (B->boundtocpu) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONG,"Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
Acusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
Bcusp = (Mat_SeqAIJCUSPARSE*)B->spptr;
Ccusp = (Mat_SeqAIJCUSPARSE*)C->spptr;
if (Acusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
if (Bcusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
if (Ccusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr);
ptype = product->type;
if (A->symmetric && ptype == MATPRODUCT_AtB) ptype = MATPRODUCT_AB;
if (B->symmetric && ptype == MATPRODUCT_ABt) ptype = MATPRODUCT_AB;
switch (ptype) {
case MATPRODUCT_AB:
Amat = Acusp->mat;
Bmat = Bcusp->mat;
break;
case MATPRODUCT_AtB:
Amat = Acusp->matTranspose;
Bmat = Bcusp->mat;
break;
case MATPRODUCT_ABt:
Amat = Acusp->mat;
Bmat = Bcusp->matTranspose;
break;
default:
SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Unsupported product type %s",MatProductTypes[product->type]);
}
Cmat = Ccusp->mat;
if (!Amat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing A mult struct for product type %s",MatProductTypes[ptype]);
if (!Bmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing B mult struct for product type %s",MatProductTypes[ptype]);
if (!Cmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing C mult struct for product type %s",MatProductTypes[ptype]);
Acsr = (CsrMatrix*)Amat->mat;
Bcsr = mmdata->Bcsr ? mmdata->Bcsr : (CsrMatrix*)Bmat->mat; /* B may be in compressed row storage */
Ccsr = (CsrMatrix*)Cmat->mat;
if (!Acsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing A CSR struct");
if (!Bcsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing B CSR struct");
if (!Ccsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing C CSR struct");
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
BmatSpDescr = mmdata->Bcsr ? mmdata->matSpBDescr : Bmat->matDescr; /* B may be in compressed row storage */
stat = cusparseSpGEMM_compute(Ccusp->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT,
mmdata->spgemmDesc, &mmdata->mmBufferSize, mmdata->mmBuffer);CHKERRCUSPARSE(stat);
stat = cusparseSpGEMM_copy(Ccusp->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);CHKERRCUSPARSE(stat);
#else
stat = cusparse_csr_spgemm(Ccusp->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE,
Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols,
Amat->descr, Acsr->num_entries, Acsr->values->data().get(), Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(),
Bmat->descr, Bcsr->num_entries, Bcsr->values->data().get(), Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(),
Cmat->descr, Ccsr->values->data().get(), Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get());CHKERRCUSPARSE(stat);
#endif
ierr = PetscLogGpuFlops(mmdata->flops);CHKERRQ(ierr);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
C->offloadmask = PETSC_OFFLOAD_GPU;
finalize:
/* shorter version of MatAssemblyEnd_SeqAIJ */
ierr = PetscInfo3(C,"Matrix size: %D X %D; storage space: 0 unneeded,%D used\n",C->rmap->n,C->cmap->n,c->nz);CHKERRQ(ierr);
ierr = PetscInfo(C,"Number of mallocs during MatSetValues() is 0\n");CHKERRQ(ierr);
ierr = PetscInfo1(C,"Maximum nonzeros in any row is %D\n",c->rmax);CHKERRQ(ierr);
c->reallocs = 0;
C->info.mallocs += 0;
C->info.nz_unneeded = 0;
C->assembled = C->was_assembled = PETSC_TRUE;
C->num_ass++;
PetscFunctionReturn(0);
}
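/* Symbolic phase of the sparse-sparse product: build the CSR structure of C on the GPU
   (cusparseSpGEMM for CUDA >= 11, csrgemm for older versions; both paths also compute the
   values since a purely symbolic computation is not available), then copy the row offsets
   and column indices back to the host AIJ structure so that C behaves like a regular
   MATSEQAIJCUSPARSE matrix afterwards. */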
static PetscErrorCode MatProductSymbolic_SeqAIJCUSPARSE_SeqAIJCUSPARSE(Mat C)
{
Mat_Product *product = C->product;
Mat A,B;
Mat_SeqAIJCUSPARSE *Acusp,*Bcusp,*Ccusp;
Mat_SeqAIJ *a,*b,*c;
Mat_SeqAIJCUSPARSEMultStruct *Amat,*Bmat,*Cmat;
CsrMatrix *Acsr,*Bcsr,*Ccsr;
PetscInt i,j,m,n,k;
PetscBool flg;
PetscErrorCode ierr;
cusparseStatus_t stat;
cudaError_t cerr;
MatProductType ptype;
MatMatCusparse *mmdata;
PetscLogDouble flops;
PetscBool biscompressed,ciscompressed;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
int64_t C_num_rows1, C_num_cols1, C_nnz1;
size_t bufSize2;
cusparseSpMatDescr_t BmatSpDescr;
#else
int cnz;
#endif
PetscFunctionBegin;
MatCheckProduct(C,1);
if (C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data not empty");
A = product->A;
B = product->B;
ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for type %s",((PetscObject)A)->type_name);
ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for B of type %s",((PetscObject)B)->type_name);
a = (Mat_SeqAIJ*)A->data;
b = (Mat_SeqAIJ*)B->data;
Acusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
Bcusp = (Mat_SeqAIJCUSPARSE*)B->spptr;
if (Acusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
if (Bcusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
/* product data */
ierr = PetscNew(&mmdata);CHKERRQ(ierr);
C->product->data = mmdata;
C->product->destroy = MatDestroy_MatMatCusparse;
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr);
ptype = product->type;
if (A->symmetric && ptype == MATPRODUCT_AtB) ptype = MATPRODUCT_AB;
if (B->symmetric && ptype == MATPRODUCT_ABt) ptype = MATPRODUCT_AB;
biscompressed = PETSC_FALSE;
ciscompressed = PETSC_FALSE;
switch (ptype) {
case MATPRODUCT_AB:
m = A->rmap->n;
n = B->cmap->n;
k = A->cmap->n;
Amat = Acusp->mat;
Bmat = Bcusp->mat;
if (a->compressedrow.use) ciscompressed = PETSC_TRUE;
if (b->compressedrow.use) biscompressed = PETSC_TRUE;
break;
case MATPRODUCT_AtB:
m = A->cmap->n;
n = B->cmap->n;
k = A->rmap->n;
ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(A);CHKERRQ(ierr);
Amat = Acusp->matTranspose;
Bmat = Bcusp->mat;
if (b->compressedrow.use) biscompressed = PETSC_TRUE;
break;
case MATPRODUCT_ABt:
m = A->rmap->n;
n = B->rmap->n;
k = A->cmap->n;
ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(B);CHKERRQ(ierr);
Amat = Acusp->mat;
Bmat = Bcusp->matTranspose;
if (a->compressedrow.use) ciscompressed = PETSC_TRUE;
break;
default:
SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Unsupported product type %s",MatProductTypes[product->type]);
}
/* create cusparse matrix */
ierr = MatSetSizes(C,m,n,m,n);CHKERRQ(ierr);
ierr = MatSetType(C,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
c = (Mat_SeqAIJ*)C->data;
Ccusp = (Mat_SeqAIJCUSPARSE*)C->spptr;
Cmat = new Mat_SeqAIJCUSPARSEMultStruct;
Ccsr = new CsrMatrix;
c->compressedrow.use = ciscompressed;
if (c->compressedrow.use) { /* if a is in compressed row format, then c will be in compressed row format as well */
c->compressedrow.nrows = a->compressedrow.nrows;
ierr = PetscMalloc2(c->compressedrow.nrows+1,&c->compressedrow.i,c->compressedrow.nrows,&c->compressedrow.rindex);CHKERRQ(ierr);
ierr = PetscArraycpy(c->compressedrow.rindex,a->compressedrow.rindex,c->compressedrow.nrows);CHKERRQ(ierr);
Ccusp->workVector = new THRUSTARRAY(c->compressedrow.nrows);
Cmat->cprowIndices = new THRUSTINTARRAY(c->compressedrow.nrows);
Cmat->cprowIndices->assign(c->compressedrow.rindex,c->compressedrow.rindex + c->compressedrow.nrows);
} else {
c->compressedrow.nrows = 0;
c->compressedrow.i = NULL;
c->compressedrow.rindex = NULL;
Ccusp->workVector = NULL;
Cmat->cprowIndices = NULL;
}
Ccusp->nrows = ciscompressed ? c->compressedrow.nrows : m;
Ccusp->mat = Cmat;
Ccusp->mat->mat = Ccsr;
Ccsr->num_rows = Ccusp->nrows;
Ccsr->num_cols = n;
Ccsr->row_offsets = new THRUSTINTARRAY32(Ccusp->nrows+1);
stat = cusparseCreateMatDescr(&Cmat->descr);CHKERRCUSPARSE(stat);
stat = cusparseSetMatIndexBase(Cmat->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
stat = cusparseSetMatType(Cmat->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
cerr = cudaMalloc((void **)&(Cmat->alpha_one),sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = cudaMalloc((void **)&(Cmat->beta_zero),sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = cudaMalloc((void **)&(Cmat->beta_one), sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = cudaMemcpy(Cmat->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = cudaMemcpy(Cmat->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = cudaMemcpy(Cmat->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
if (!Ccsr->num_rows || !Ccsr->num_cols || !a->nz || !b->nz) { /* cusparse raises errors in various calls when matrices have zero rows/columns! */
thrust::fill(thrust::device,Ccsr->row_offsets->begin(),Ccsr->row_offsets->end(),0);
c->nz = 0;
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
Ccsr->values = new THRUSTARRAY(c->nz);
goto finalizesym;
}
if (!Amat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing A mult struct for product type %s",MatProductTypes[ptype]);
if (!Bmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing B mult struct for product type %s",MatProductTypes[ptype]);
Acsr = (CsrMatrix*)Amat->mat;
if (!biscompressed) {
Bcsr = (CsrMatrix*)Bmat->mat;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
BmatSpDescr = Bmat->matDescr;
#endif
} else { /* we need to use row offsets for the full matrix */
CsrMatrix *cBcsr = (CsrMatrix*)Bmat->mat;
Bcsr = new CsrMatrix;
Bcsr->num_rows = B->rmap->n;
Bcsr->num_cols = cBcsr->num_cols;
Bcsr->num_entries = cBcsr->num_entries;
Bcsr->column_indices = cBcsr->column_indices;
Bcsr->values = cBcsr->values;
if (!Bcusp->rowoffsets_gpu) {
Bcusp->rowoffsets_gpu = new THRUSTINTARRAY32(B->rmap->n + 1);
Bcusp->rowoffsets_gpu->assign(b->i,b->i + B->rmap->n + 1);
ierr = PetscLogCpuToGpu((B->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr);
}
Bcsr->row_offsets = Bcusp->rowoffsets_gpu;
mmdata->Bcsr = Bcsr;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
if (Bcsr->num_rows && Bcsr->num_cols) {
stat = cusparseCreateCsr(&mmdata->matSpBDescr, Bcsr->num_rows, Bcsr->num_cols, Bcsr->num_entries,
Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(),
Bcsr->values->data().get(),
CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat);
}
BmatSpDescr = mmdata->matSpBDescr;
#endif
}
if (!Acsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing A CSR struct");
if (!Bcsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing B CSR struct");
/* precompute flops count */
if (ptype == MATPRODUCT_AB) {
for (i=0, flops = 0; i<A->rmap->n; i++) {
const PetscInt st = a->i[i];
const PetscInt en = a->i[i+1];
for (j=st; j<en; j++) {
const PetscInt brow = a->j[j];
flops += 2.*(b->i[brow+1] - b->i[brow]);
}
}
} else if (ptype == MATPRODUCT_AtB) {
for (i=0, flops = 0; i<A->rmap->n; i++) {
const PetscInt anzi = a->i[i+1] - a->i[i];
const PetscInt bnzi = b->i[i+1] - b->i[i];
flops += (2.*anzi)*bnzi;
}
} else { /* TODO */
flops = 0.;
}
mmdata->flops = flops;
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = cusparseSetPointerMode(Ccusp->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
stat = cusparseCreateCsr(&Cmat->matDescr, Ccsr->num_rows, Ccsr->num_cols, 0,
NULL, NULL, NULL,
CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat);
stat = cusparseSpGEMM_createDescr(&mmdata->spgemmDesc);CHKERRCUSPARSE(stat);
/* ask how many bytes of external memory are needed (bufSize2) */
stat = cusparseSpGEMM_workEstimation(Ccusp->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT,
mmdata->spgemmDesc, &bufSize2, NULL);CHKERRCUSPARSE(stat);
cerr = cudaMalloc((void**) &mmdata->mmBuffer2, bufSize2);CHKERRCUDA(cerr);
/* inspect the matrices A and B to understand the memory requirement for the next step */
stat = cusparseSpGEMM_workEstimation(Ccusp->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT,
mmdata->spgemmDesc, &bufSize2, mmdata->mmBuffer2);CHKERRCUSPARSE(stat);
/* ask again how many bytes of external memory are needed, this time for the compute step */
stat = cusparseSpGEMM_compute(Ccusp->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT,
mmdata->spgemmDesc, &mmdata->mmBufferSize, NULL);CHKERRCUSPARSE(stat);
/* The CUSPARSE documentation is not clear, nor is the API.
     We need both buffers to perform the operations properly!
     mmdata->mmBuffer2 does not appear anywhere in the compute/copy API;
     it only appears in the workEstimation calls, but it seems to be needed in compute as well,
     so presumably its address is stored in the descriptor! What a messy API... */
cerr = cudaMalloc((void**) &mmdata->mmBuffer, mmdata->mmBufferSize);CHKERRCUDA(cerr);
/* compute the intermediate product of A * B */
stat = cusparseSpGEMM_compute(Ccusp->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT,
mmdata->spgemmDesc, &mmdata->mmBufferSize, mmdata->mmBuffer);CHKERRCUSPARSE(stat);
/* get matrix C non-zero entries C_nnz1 */
stat = cusparseSpMatGetSize(Cmat->matDescr, &C_num_rows1, &C_num_cols1, &C_nnz1);CHKERRCUSPARSE(stat);
c->nz = (PetscInt) C_nnz1;
ierr = PetscInfo9(C,"Buffer sizes for type %s, result %D x %D (k %D, nzA %D, nzB %D, nzC %D) are: %ldKB %ldKB\n",MatProductTypes[ptype],m,n,k,a->nz,b->nz,c->nz,bufSize2/1024,mmdata->mmBufferSize/1024);CHKERRQ(ierr);
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
CHKERRCUDA(cudaPeekAtLastError()); /* catch out of memory errors */
Ccsr->values = new THRUSTARRAY(c->nz);
CHKERRCUDA(cudaPeekAtLastError()); /* catch out of memory errors */
stat = cusparseCsrSetPointers(Cmat->matDescr, Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(),
Ccsr->values->data().get());CHKERRCUSPARSE(stat);
stat = cusparseSpGEMM_copy(Ccusp->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);CHKERRCUSPARSE(stat);
#else
stat = cusparseSetPointerMode(Ccusp->handle, CUSPARSE_POINTER_MODE_HOST);CHKERRCUSPARSE(stat);
stat = cusparseXcsrgemmNnz(Ccusp->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE,
Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols,
Amat->descr, Acsr->num_entries, Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(),
Bmat->descr, Bcsr->num_entries, Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(),
Cmat->descr, Ccsr->row_offsets->data().get(), &cnz);CHKERRCUSPARSE(stat);
c->nz = cnz;
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
CHKERRCUDA(cudaPeekAtLastError()); /* catch out of memory errors */
Ccsr->values = new THRUSTARRAY(c->nz);
CHKERRCUDA(cudaPeekAtLastError()); /* catch out of memory errors */
stat = cusparseSetPointerMode(Ccusp->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
/* with the old gemm interface (removed from 11.0 on) we cannot compute the symbolic factorization only.
     I have tried using the gemm2 interface (alpha * A * B + beta * D), which allows doing the symbolic phase by passing NULL for the values,
     but it seems quite buggy when D is NULL, despite the fact that the CUSPARSE documentation claims it is supported! */
stat = cusparse_csr_spgemm(Ccusp->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE,
Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols,
Amat->descr, Acsr->num_entries, Acsr->values->data().get(), Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(),
Bmat->descr, Bcsr->num_entries, Bcsr->values->data().get(), Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(),
Cmat->descr, Ccsr->values->data().get(), Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get());CHKERRCUSPARSE(stat);
#endif
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuFlops(mmdata->flops);CHKERRQ(ierr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
finalizesym:
c->singlemalloc = PETSC_FALSE;
c->free_a = PETSC_TRUE;
c->free_ij = PETSC_TRUE;
ierr = PetscMalloc1(m+1,&c->i);CHKERRQ(ierr);
ierr = PetscMalloc1(c->nz,&c->j);CHKERRQ(ierr);
if (PetscDefined(USE_64BIT_INDICES)) { /* 32 to 64 bit conversion on the GPU and then copy to host (lazy) */
PetscInt *d_i = c->i;
THRUSTINTARRAY ii(Ccsr->row_offsets->size());
THRUSTINTARRAY jj(Ccsr->column_indices->size());
ii = *Ccsr->row_offsets;
jj = *Ccsr->column_indices;
if (ciscompressed) d_i = c->compressedrow.i;
cerr = cudaMemcpy(d_i,ii.data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
cerr = cudaMemcpy(c->j,jj.data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
} else {
PetscInt *d_i = c->i;
if (ciscompressed) d_i = c->compressedrow.i;
cerr = cudaMemcpy(d_i,Ccsr->row_offsets->data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
cerr = cudaMemcpy(c->j,Ccsr->column_indices->data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
}
if (ciscompressed) { /* need to expand host row offsets */
PetscInt r = 0;
c->i[0] = 0;
for (k = 0; k < c->compressedrow.nrows; k++) {
const PetscInt next = c->compressedrow.rindex[k];
const PetscInt old = c->compressedrow.i[k];
for (; r < next; r++) c->i[r+1] = old;
}
for (; r < m; r++) c->i[r+1] = c->compressedrow.i[c->compressedrow.nrows];
}
ierr = PetscLogGpuToCpu((Ccsr->column_indices->size() + Ccsr->row_offsets->size())*sizeof(PetscInt));CHKERRQ(ierr);
ierr = PetscMalloc1(m,&c->ilen);CHKERRQ(ierr);
ierr = PetscMalloc1(m,&c->imax);CHKERRQ(ierr);
c->maxnz = c->nz;
c->nonzerorowcnt = 0;
c->rmax = 0;
for (k = 0; k < m; k++) {
const PetscInt nn = c->i[k+1] - c->i[k];
c->ilen[k] = c->imax[k] = nn;
c->nonzerorowcnt += (PetscInt)!!nn;
c->rmax = PetscMax(c->rmax,nn);
}
ierr = MatMarkDiagonal_SeqAIJ(C);CHKERRQ(ierr);
ierr = PetscMalloc1(c->nz,&c->a);CHKERRQ(ierr);
Ccsr->num_entries = c->nz;
C->nonzerostate++;
ierr = PetscLayoutSetUp(C->rmap);CHKERRQ(ierr);
ierr = PetscLayoutSetUp(C->cmap);CHKERRQ(ierr);
Ccusp->nonzerostate = C->nonzerostate;
C->offloadmask = PETSC_OFFLOAD_UNALLOCATED;
C->preallocated = PETSC_TRUE;
C->assembled = PETSC_FALSE;
C->was_assembled = PETSC_FALSE;
if (product->api_user && A->offloadmask == PETSC_OFFLOAD_BOTH && B->offloadmask == PETSC_OFFLOAD_BOTH) { /* flag the matrix C values as computed, so that the numeric phase will only call MatAssembly */
mmdata->reusesym = PETSC_TRUE;
C->offloadmask = PETSC_OFFLOAD_GPU;
}
C->ops->productnumeric = MatProductNumeric_SeqAIJCUSPARSE_SeqAIJCUSPARSE;
PetscFunctionReturn(0);
}
PETSC_INTERN PetscErrorCode MatProductSetFromOptions_SeqAIJ_SeqDense(Mat);
/* handles sparse or dense B */
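/* Dispatch the product implementation based on the types of A, B (and C for ABC):
   dense B goes through the SeqDENSECUDA kernels above, cusparse B/C through the
   SeqAIJCUSPARSE kernels, anything else falls back to the SeqAIJ CPU code.

   A minimal usage sketch (illustrative only; assumes A and B have already been
   assembled as MATSEQAIJCUSPARSE matrices):

     Mat C;
     ierr = MatProductCreate(A,B,NULL,&C);CHKERRQ(ierr);
     ierr = MatProductSetType(C,MATPRODUCT_AB);CHKERRQ(ierr);
     ierr = MatProductSetFromOptions(C);CHKERRQ(ierr);  (ends up in this routine)
     ierr = MatProductSymbolic(C);CHKERRQ(ierr);
     ierr = MatProductNumeric(C);CHKERRQ(ierr);
*/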
static PetscErrorCode MatProductSetFromOptions_SeqAIJCUSPARSE(Mat mat)
{
Mat_Product *product = mat->product;
PetscErrorCode ierr;
PetscBool isdense = PETSC_FALSE,Biscusp = PETSC_FALSE,Ciscusp = PETSC_TRUE;
PetscFunctionBegin;
MatCheckProduct(mat,1);
ierr = PetscObjectBaseTypeCompare((PetscObject)product->B,MATSEQDENSE,&isdense);CHKERRQ(ierr);
if (!product->A->boundtocpu && !product->B->boundtocpu) {
ierr = PetscObjectTypeCompare((PetscObject)product->B,MATSEQAIJCUSPARSE,&Biscusp);CHKERRQ(ierr);
}
if (product->type == MATPRODUCT_ABC) {
Ciscusp = PETSC_FALSE;
if (!product->C->boundtocpu) {
ierr = PetscObjectTypeCompare((PetscObject)product->C,MATSEQAIJCUSPARSE,&Ciscusp);CHKERRQ(ierr);
}
}
if (isdense) {
switch (product->type) {
case MATPRODUCT_AB:
case MATPRODUCT_AtB:
case MATPRODUCT_ABt:
case MATPRODUCT_PtAP:
case MATPRODUCT_RARt:
if (product->A->boundtocpu) {
ierr = MatProductSetFromOptions_SeqAIJ_SeqDense(mat);CHKERRQ(ierr);
} else {
mat->ops->productsymbolic = MatProductSymbolic_SeqAIJCUSPARSE_SeqDENSECUDA;
}
break;
case MATPRODUCT_ABC:
mat->ops->productsymbolic = MatProductSymbolic_ABC_Basic;
break;
default:
break;
}
} else if (Biscusp && Ciscusp) {
switch (product->type) {
case MATPRODUCT_AB:
case MATPRODUCT_AtB:
case MATPRODUCT_ABt:
mat->ops->productsymbolic = MatProductSymbolic_SeqAIJCUSPARSE_SeqAIJCUSPARSE;
break;
case MATPRODUCT_PtAP:
case MATPRODUCT_RARt:
case MATPRODUCT_ABC:
mat->ops->productsymbolic = MatProductSymbolic_ABC_Basic;
break;
default:
break;
}
} else { /* fallback for AIJ */
ierr = MatProductSetFromOptions_SeqAIJ(mat);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,NULL,yy,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy, Vec zz)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,yy,zz,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatMultHermitianTranspose_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,NULL,yy,PETSC_TRUE,PETSC_TRUE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatMultHermitianTransposeAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,yy,zz,PETSC_TRUE,PETSC_TRUE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,NULL,yy,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
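/* Simple CUDA kernel performing y[idx[i]] += x[i] for i < n; used to accumulate the
   compressed work vector into the full-length result vector after an SpMV on a
   compressed-row matrix. */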
__global__ static void ScatterAdd(PetscInt n, PetscInt *idx,const PetscScalar *x,PetscScalar *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[idx[i]] += x[i];
}
/* z = op(A) x + y. If trans & !herm, op = ^T; if trans & herm, op = ^H; if !trans, op = no-op */
static PetscErrorCode MatMultAddKernel_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz,PetscBool trans,PetscBool herm)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
Mat_SeqAIJCUSPARSEMultStruct *matstruct;
PetscScalar *xarray,*zarray,*dptr,*beta,*xptr;
PetscErrorCode ierr;
cudaError_t cerr;
cusparseStatus_t stat;
cusparseOperation_t opA = CUSPARSE_OPERATION_NON_TRANSPOSE;
PetscBool compressed;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
PetscInt nx,ny;
#endif
PetscFunctionBegin;
if (herm && !trans) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Hermitian and not transpose not supported");
if (!a->nonzerorowcnt) {
if (!yy) {ierr = VecSet_SeqCUDA(zz,0);CHKERRQ(ierr);}
else {ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr);}
PetscFunctionReturn(0);
}
/* The line below is necessary due to the operations that modify the matrix on the CPU (axpy, scale, etc) */
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
if (!trans) {
matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat;
if (!matstruct) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"SeqAIJCUSPARSE does not have a 'mat' (need to fix)");
} else {
if (herm || !A->form_explicit_transpose) {
opA = herm ? CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE : CUSPARSE_OPERATION_TRANSPOSE;
matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat;
} else {
if (!cusparsestruct->matTranspose) {ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(A);CHKERRQ(ierr);}
matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose;
}
}
/* Does the matrix use compressed rows (i.e., drop zero rows)? */
compressed = matstruct->cprowIndices ? PETSC_TRUE : PETSC_FALSE;
try {
ierr = VecCUDAGetArrayRead(xx,(const PetscScalar**)&xarray);CHKERRQ(ierr);
if (yy == zz) {ierr = VecCUDAGetArray(zz,&zarray);CHKERRQ(ierr);} /* read & write zz, so we need an up-to-date zarray on the GPU */
else {ierr = VecCUDAGetArrayWrite(zz,&zarray);CHKERRQ(ierr);} /* write zz, so no need to init zarray on GPU */
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
if (opA == CUSPARSE_OPERATION_NON_TRANSPOSE) {
/* z = A x + beta y.
If A is compressed (with less rows), then Ax is shorter than the full z, so we need a work vector to store Ax.
When A is non-compressed, and z = y, we can set beta=1 to compute y = Ax + y in one call.
*/
xptr = xarray;
dptr = compressed ? cusparsestruct->workVector->data().get() : zarray;
beta = (yy == zz && !compressed) ? matstruct->beta_one : matstruct->beta_zero;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
/* Get length of x, y for y=Ax. ny might be shorter than the work vector's allocated length, since the work vector is
allocated to accommodate different uses. So we get the length info directly from mat.
*/
if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
CsrMatrix *mat = (CsrMatrix*)matstruct->mat;
nx = mat->num_cols;
ny = mat->num_rows;
}
#endif
} else {
/* z = A^T x + beta y
If A is compressed, then we need a work vector as the shorter version of x to compute A^T x.
Note A^Tx is of full length, so we set beta to 1.0 if y exists.
*/
xptr = compressed ? cusparsestruct->workVector->data().get() : xarray;
dptr = zarray;
beta = yy ? matstruct->beta_one : matstruct->beta_zero;
if (compressed) { /* Scatter x to work vector */
thrust::device_ptr<PetscScalar> xarr = thrust::device_pointer_cast(xarray);
thrust::for_each(thrust::cuda::par.on(PetscDefaultCudaStream),thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(xarr, matstruct->cprowIndices->begin()))),
thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(xarr, matstruct->cprowIndices->begin()))) + matstruct->cprowIndices->size(),
VecCUDAEqualsReverse());
}
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
CsrMatrix *mat = (CsrMatrix*)matstruct->mat;
nx = mat->num_rows;
ny = mat->num_cols;
}
#endif
}
/* csr_spmv does y = alpha op(A) x + beta y */
if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
if (opA < 0 || opA > 2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE ABI on cusparseOperation_t has changed and PETSc has not been updated accordingly");
if (!matstruct->cuSpMV[opA].initialized) { /* built on demand */
stat = cusparseCreateDnVec(&matstruct->cuSpMV[opA].vecXDescr,nx,xptr,cusparse_scalartype);CHKERRCUSPARSE(stat);
stat = cusparseCreateDnVec(&matstruct->cuSpMV[opA].vecYDescr,ny,dptr,cusparse_scalartype);CHKERRCUSPARSE(stat);
stat = cusparseSpMV_bufferSize(cusparsestruct->handle, opA, matstruct->alpha_one,
matstruct->matDescr,
matstruct->cuSpMV[opA].vecXDescr, beta,
matstruct->cuSpMV[opA].vecYDescr,
cusparse_scalartype,
cusparsestruct->spmvAlg,
&matstruct->cuSpMV[opA].spmvBufferSize);CHKERRCUSPARSE(stat);
cerr = cudaMalloc(&matstruct->cuSpMV[opA].spmvBuffer,matstruct->cuSpMV[opA].spmvBufferSize);CHKERRCUDA(cerr);
matstruct->cuSpMV[opA].initialized = PETSC_TRUE;
} else {
/* x, y's value pointers might change between calls, but their shape is kept, so we just update pointers */
stat = cusparseDnVecSetValues(matstruct->cuSpMV[opA].vecXDescr,xptr);CHKERRCUSPARSE(stat);
stat = cusparseDnVecSetValues(matstruct->cuSpMV[opA].vecYDescr,dptr);CHKERRCUSPARSE(stat);
}
stat = cusparseSpMV(cusparsestruct->handle, opA,
matstruct->alpha_one,
matstruct->matDescr, /* built in MatSeqAIJCUSPARSECopyToGPU() or MatSeqAIJCUSPARSEFormExplicitTransposeForMult() */
matstruct->cuSpMV[opA].vecXDescr,
beta,
matstruct->cuSpMV[opA].vecYDescr,
cusparse_scalartype,
cusparsestruct->spmvAlg,
matstruct->cuSpMV[opA].spmvBuffer);CHKERRCUSPARSE(stat);
#else
CsrMatrix *mat = (CsrMatrix*)matstruct->mat;
stat = cusparse_csr_spmv(cusparsestruct->handle, opA,
mat->num_rows, mat->num_cols,
mat->num_entries, matstruct->alpha_one, matstruct->descr,
mat->values->data().get(), mat->row_offsets->data().get(),
mat->column_indices->data().get(), xptr, beta,
dptr);CHKERRCUSPARSE(stat);
#endif
} else {
if (cusparsestruct->nrows) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
cusparseHybMat_t hybMat = (cusparseHybMat_t)matstruct->mat;
stat = cusparse_hyb_spmv(cusparsestruct->handle, opA,
matstruct->alpha_one, matstruct->descr, hybMat,
xptr, beta,
dptr);CHKERRCUSPARSE(stat);
#endif
}
}
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
if (opA == CUSPARSE_OPERATION_NON_TRANSPOSE) {
if (yy) { /* MatMultAdd: zz = A*xx + yy */
if (compressed) { /* A is compressed. We first copy yy to zz, then ScatterAdd the work vector to zz */
ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr); /* zz = yy */
} else if (zz != yy) { /* A is not compressed. zz already contains A*xx, and we just need to add yy */
ierr = VecAXPY_SeqCUDA(zz,1.0,yy);CHKERRQ(ierr); /* zz += yy */
}
} else if (compressed) { /* MatMult: zz = A*xx. A is compressed, so we zero zz first, then ScatterAdd the work vector to zz */
ierr = VecSet_SeqCUDA(zz,0);CHKERRQ(ierr);
}
/* ScatterAdd the result from work vector into the full vector when A is compressed */
if (compressed) {
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
      /* I wanted to make this for_each asynchronous, but failed. thrust::async::for_each() returns an event (internally registered),
         and in the destructor of the scope it calls cudaStreamSynchronize() on this stream. One would have to store all the events to
         prevent that, so I just use a ScatterAdd kernel instead.
      */
#if 0
thrust::device_ptr<PetscScalar> zptr = thrust::device_pointer_cast(zarray);
thrust::async::for_each(thrust::cuda::par.on(cusparsestruct->stream),
thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))),
thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))) + matstruct->cprowIndices->size(),
VecCUDAPlusEquals());
#else
PetscInt n = matstruct->cprowIndices->size();
ScatterAdd<<<(n+255)/256,256,0,PetscDefaultCudaStream>>>(n,matstruct->cprowIndices->data().get(),cusparsestruct->workVector->data().get(),zarray);
#endif
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
}
} else {
if (yy && yy != zz) {
ierr = VecAXPY_SeqCUDA(zz,1.0,yy);CHKERRQ(ierr); /* zz += yy */
}
}
ierr = VecCUDARestoreArrayRead(xx,(const PetscScalar**)&xarray);CHKERRQ(ierr);
if (yy == zz) {ierr = VecCUDARestoreArray(zz,&zarray);CHKERRQ(ierr);}
else {ierr = VecCUDARestoreArrayWrite(zz,&zarray);CHKERRQ(ierr);}
} catch(char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
}
if (yy) {
ierr = PetscLogGpuFlops(2.0*a->nz);CHKERRQ(ierr);
} else {
ierr = PetscLogGpuFlops(2.0*a->nz-a->nonzerorowcnt);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,yy,zz,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatAssemblyEnd_SeqAIJCUSPARSE(Mat A,MatAssemblyType mode)
{
PetscErrorCode ierr;
PetscSplitCSRDataStructure *d_mat = NULL;
PetscFunctionBegin;
if (A->factortype == MAT_FACTOR_NONE) {
d_mat = ((Mat_SeqAIJCUSPARSE*)A->spptr)->deviceMat;
}
ierr = MatAssemblyEnd_SeqAIJ(A,mode);CHKERRQ(ierr); // this does very little if assembled on GPU - call it?
if (mode == MAT_FLUSH_ASSEMBLY || A->boundtocpu) PetscFunctionReturn(0);
if (d_mat) {
A->offloadmask = PETSC_OFFLOAD_GPU;
}
PetscFunctionReturn(0);
}
/* --------------------------------------------------------------------------------*/
/*@
   MatCreateSeqAIJCUSPARSE - Creates a sparse matrix in AIJ (compressed row) format
   (the default parallel PETSc format). This matrix will ultimately be pushed down
   to NVIDIA GPUs and will use the CUSPARSE library for calculations. For good matrix
   assembly performance the user should preallocate the matrix storage by setting
   the parameter nz (or the array nnz). By setting these parameters accurately,
   performance during matrix assembly can be increased by more than a factor of 50.
Collective
Input Parameters:
+ comm - MPI communicator, set to PETSC_COMM_SELF
. m - number of rows
. n - number of columns
. nz - number of nonzeros per row (same for all rows)
- nnz - array containing the number of nonzeros in the various rows
(possibly different for each row) or NULL
Output Parameter:
. A - the matrix
   It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
   MatXXXXSetPreallocation() paradigm instead of calling this routine directly.
   [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation()]
Notes:
If nnz is given then nz is ignored
The AIJ format (also called the Yale sparse matrix format or
compressed row storage), is fully compatible with standard Fortran 77
storage. That is, the stored row and column indices can begin at
either one (as in Fortran) or zero. See the users' manual for details.
Specify the preallocated storage with either nz or nnz (not both).
Set nz=PETSC_DEFAULT and nnz=NULL for PETSc to control dynamic memory
allocation. For large problems you MUST preallocate memory or you
will get TERRIBLE performance, see the users' manual chapter on matrices.
By default, this format uses inodes (identical nodes) when possible, to
improve numerical efficiency of matrix-vector products and solves. We
search for consecutive rows with the same nonzero structure, thereby
reusing matrix information to achieve increased efficiency.
Level: intermediate
.seealso: MatCreate(), MatCreateAIJ(), MatSetValues(), MatSeqAIJSetColumnIndices(), MatCreateSeqAIJWithArrays(), MATSEQAIJCUSPARSE, MATAIJCUSPARSE
@*/
PetscErrorCode MatCreateSeqAIJCUSPARSE(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt nz,const PetscInt nnz[],Mat *A)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatCreate(comm,A);CHKERRQ(ierr);
ierr = MatSetSizes(*A,m,n,m,n);CHKERRQ(ierr);
ierr = MatSetType(*A,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
ierr = MatSeqAIJSetPreallocation_SeqAIJ(*A,nz,(PetscInt*)nnz);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
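/* Usage sketch (illustrative only; sizes and the nnz array are placeholders, error handling is the usual CHKERRQ):

     Mat            A;
     PetscInt       nnz[] = {3,2,3};   // hypothetical per-row nonzero counts
     PetscErrorCode ierr;

     ierr = MatCreateSeqAIJCUSPARSE(PETSC_COMM_SELF,3,3,0,nnz,&A);CHKERRQ(ierr);
     // ... fill with MatSetValues(), then MatAssemblyBegin()/MatAssemblyEnd() as for any AIJ matrix ...
     ierr = MatDestroy(&A);CHKERRQ(ierr);

   As the manual page above notes, the MatCreate()/MatSetType()/MatSeqAIJSetPreallocation() path is generally
   preferred over calling this convenience routine directly. */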
static PetscErrorCode MatDestroy_SeqAIJCUSPARSE(Mat A)
{
PetscErrorCode ierr;
PetscSplitCSRDataStructure *d_mat = NULL;
PetscFunctionBegin;
if (A->factortype == MAT_FACTOR_NONE) {
d_mat = ((Mat_SeqAIJCUSPARSE*)A->spptr)->deviceMat;
((Mat_SeqAIJCUSPARSE*)A->spptr)->deviceMat = NULL;
ierr = MatSeqAIJCUSPARSE_Destroy((Mat_SeqAIJCUSPARSE**)&A->spptr);CHKERRQ(ierr);
} else {
ierr = MatSeqAIJCUSPARSETriFactors_Destroy((Mat_SeqAIJCUSPARSETriFactors**)&A->spptr);CHKERRQ(ierr);
}
if (d_mat) {
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
cudaError_t err;
PetscSplitCSRDataStructure h_mat;
ierr = PetscInfo(A,"Have device matrix\n");CHKERRQ(ierr);
err = cudaMemcpy( &h_mat, d_mat, sizeof(PetscSplitCSRDataStructure), cudaMemcpyDeviceToHost);CHKERRCUDA(err);
if (a->compressedrow.use) {
err = cudaFree(h_mat.diag.i);CHKERRCUDA(err);
}
err = cudaFree(d_mat);CHKERRCUDA(err);
}
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJCopySubArray_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatCUSPARSESetFormat_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdense_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatFactorGetSolverType_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetPreallocationCOO_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetValuesCOO_C",NULL);CHKERRQ(ierr);
ierr = MatDestroy_SeqAIJ(A);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJCUSPARSE(Mat,MatType,MatReuse,Mat*);
static PetscErrorCode MatBindToCPU_SeqAIJCUSPARSE(Mat,PetscBool);
static PetscErrorCode MatDuplicate_SeqAIJCUSPARSE(Mat A,MatDuplicateOption cpvalues,Mat *B)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatDuplicate_SeqAIJ(A,cpvalues,B);CHKERRQ(ierr);
ierr = MatConvert_SeqAIJ_SeqAIJCUSPARSE(*B,MATSEQAIJCUSPARSE,MAT_INPLACE_MATRIX,B);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatAXPY_SeqAIJCUSPARSE(Mat Y,PetscScalar a,Mat X,MatStructure str)
{
PetscErrorCode ierr;
Mat_SeqAIJ *x = (Mat_SeqAIJ*)X->data,*y = (Mat_SeqAIJ*)Y->data;
Mat_SeqAIJCUSPARSE *cy;
Mat_SeqAIJCUSPARSE *cx;
PetscScalar *ay;
const PetscScalar *ax;
CsrMatrix *csry,*csrx;
cudaError_t cerr;
PetscFunctionBegin;
cy = (Mat_SeqAIJCUSPARSE*)Y->spptr;
cx = (Mat_SeqAIJCUSPARSE*)X->spptr;
if (X->ops->axpy != Y->ops->axpy) {
ierr = MatSeqAIJCUSPARSEInvalidateTranspose(Y,PETSC_FALSE);CHKERRQ(ierr);
ierr = MatAXPY_SeqAIJ(Y,a,X,str);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* if we are here, it means both matrices are bound to GPU */
ierr = MatSeqAIJCUSPARSECopyToGPU(Y);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSECopyToGPU(X);CHKERRQ(ierr);
if (cy->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)Y),PETSC_ERR_PLIB,"only MAT_CUSPARSE_CSR supported");
if (cx->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)X),PETSC_ERR_PLIB,"only MAT_CUSPARSE_CSR supported");
csry = (CsrMatrix*)cy->mat->mat;
csrx = (CsrMatrix*)cx->mat->mat;
/* see if we can turn this into a cublas axpy */
if (str != SAME_NONZERO_PATTERN && x->nz == y->nz && !x->compressedrow.use && !y->compressedrow.use) {
bool eq = thrust::equal(thrust::device,csry->row_offsets->begin(),csry->row_offsets->end(),csrx->row_offsets->begin());
if (eq) {
eq = thrust::equal(thrust::device,csry->column_indices->begin(),csry->column_indices->end(),csrx->column_indices->begin());
}
if (eq) str = SAME_NONZERO_PATTERN;
}
/* spgeam is buggy with one column */
if (Y->cmap->n == 1 && str != SAME_NONZERO_PATTERN) str = DIFFERENT_NONZERO_PATTERN;
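  /* Branch summary (descriptive comment): SUBSET_NONZERO_PATTERN below uses cusparse csrgeam to compute
     Y = a*X + 1.0*Y reusing Y's sparsity pattern; SAME_NONZERO_PATTERN reduces to a single cublas axpy on
     the two aligned value arrays; any other pattern falls back to the CPU MatAXPY_SeqAIJ after invalidating
     the cached transpose. */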
if (str == SUBSET_NONZERO_PATTERN) {
cusparseStatus_t stat;
PetscScalar b = 1.0;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
size_t bufferSize;
void *buffer;
#endif
ierr = MatSeqAIJCUSPARSEGetArrayRead(X,&ax);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEGetArray(Y,&ay);CHKERRQ(ierr);
stat = cusparseSetPointerMode(cy->handle, CUSPARSE_POINTER_MODE_HOST);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = cusparse_csr_spgeam_bufferSize(cy->handle,Y->rmap->n,Y->cmap->n,
&a,cx->mat->descr,x->nz,ax,csrx->row_offsets->data().get(),csrx->column_indices->data().get(),
&b,cy->mat->descr,y->nz,ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),
cy->mat->descr, ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),&bufferSize);CHKERRCUSPARSE(stat);
cerr = cudaMalloc(&buffer,bufferSize);CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
stat = cusparse_csr_spgeam(cy->handle,Y->rmap->n,Y->cmap->n,
&a,cx->mat->descr,x->nz,ax,csrx->row_offsets->data().get(),csrx->column_indices->data().get(),
&b,cy->mat->descr,y->nz,ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),
cy->mat->descr, ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),buffer);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuFlops(x->nz + y->nz);CHKERRQ(ierr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
cerr = cudaFree(buffer);CHKERRCUDA(cerr);
#else
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
stat = cusparse_csr_spgeam(cy->handle,Y->rmap->n,Y->cmap->n,
&a,cx->mat->descr,x->nz,ax,csrx->row_offsets->data().get(),csrx->column_indices->data().get(),
&b,cy->mat->descr,y->nz,ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),
cy->mat->descr, ay,csry->row_offsets->data().get(),csry->column_indices->data().get());CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuFlops(x->nz + y->nz);CHKERRQ(ierr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
#endif
stat = cusparseSetPointerMode(cy->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
ierr = MatSeqAIJCUSPARSERestoreArrayRead(X,&ax);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSERestoreArray(Y,&ay);CHKERRQ(ierr);
ierr = MatSeqAIJInvalidateDiagonal(Y);CHKERRQ(ierr);
} else if (str == SAME_NONZERO_PATTERN) {
cublasHandle_t cublasv2handle;
cublasStatus_t berr;
PetscBLASInt one = 1, bnz = 1;
ierr = MatSeqAIJCUSPARSEGetArrayRead(X,&ax);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEGetArray(Y,&ay);CHKERRQ(ierr);
ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr);
ierr = PetscBLASIntCast(x->nz,&bnz);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
berr = cublasXaxpy(cublasv2handle,bnz,&a,ax,one,ay,one);CHKERRCUBLAS(berr);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuFlops(2.0*bnz);CHKERRQ(ierr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSERestoreArrayRead(X,&ax);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSERestoreArray(Y,&ay);CHKERRQ(ierr);
ierr = MatSeqAIJInvalidateDiagonal(Y);CHKERRQ(ierr);
} else {
ierr = MatSeqAIJCUSPARSEInvalidateTranspose(Y,PETSC_FALSE);CHKERRQ(ierr);
ierr = MatAXPY_SeqAIJ(Y,a,X,str);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatScale_SeqAIJCUSPARSE(Mat Y,PetscScalar a)
{
PetscErrorCode ierr;
Mat_SeqAIJ *y = (Mat_SeqAIJ*)Y->data;
PetscScalar *ay;
cudaError_t cerr;
cublasHandle_t cublasv2handle;
cublasStatus_t berr;
PetscBLASInt one = 1, bnz = 1;
PetscFunctionBegin;
ierr = MatSeqAIJCUSPARSEGetArray(Y,&ay);CHKERRQ(ierr);
ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr);
ierr = PetscBLASIntCast(y->nz,&bnz);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
berr = cublasXscal(cublasv2handle,bnz,&a,ay,one);CHKERRCUBLAS(berr);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuFlops(bnz);CHKERRQ(ierr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSERestoreArray(Y,&ay);CHKERRQ(ierr);
ierr = MatSeqAIJInvalidateDiagonal(Y);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatZeroEntries_SeqAIJCUSPARSE(Mat A)
{
PetscErrorCode ierr;
PetscBool both = PETSC_FALSE;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
PetscFunctionBegin;
if (A->factortype == MAT_FACTOR_NONE) {
Mat_SeqAIJCUSPARSE *spptr = (Mat_SeqAIJCUSPARSE*)A->spptr;
if (spptr->mat) {
CsrMatrix* matrix = (CsrMatrix*)spptr->mat->mat;
if (matrix->values) {
both = PETSC_TRUE;
thrust::fill(thrust::device,matrix->values->begin(),matrix->values->end(),0.);
}
}
if (spptr->matTranspose) {
CsrMatrix* matrix = (CsrMatrix*)spptr->matTranspose->mat;
if (matrix->values) {
thrust::fill(thrust::device,matrix->values->begin(),matrix->values->end(),0.);
}
}
}
//ierr = MatZeroEntries_SeqAIJ(A);CHKERRQ(ierr);
ierr = PetscArrayzero(a->a,a->i[A->rmap->n]);CHKERRQ(ierr);
ierr = MatSeqAIJInvalidateDiagonal(A);CHKERRQ(ierr);
if (both) A->offloadmask = PETSC_OFFLOAD_BOTH;
else A->offloadmask = PETSC_OFFLOAD_CPU;
PetscFunctionReturn(0);
}
static PetscErrorCode MatBindToCPU_SeqAIJCUSPARSE(Mat A,PetscBool flg)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
if (A->factortype != MAT_FACTOR_NONE) PetscFunctionReturn(0);
if (flg) {
ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr);
A->ops->scale = MatScale_SeqAIJ;
A->ops->axpy = MatAXPY_SeqAIJ;
A->ops->zeroentries = MatZeroEntries_SeqAIJ;
A->ops->mult = MatMult_SeqAIJ;
A->ops->multadd = MatMultAdd_SeqAIJ;
A->ops->multtranspose = MatMultTranspose_SeqAIJ;
A->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJ;
A->ops->multhermitiantranspose = NULL;
A->ops->multhermitiantransposeadd = NULL;
A->ops->productsetfromoptions = MatProductSetFromOptions_SeqAIJ;
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJCopySubArray_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdense_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetPreallocationCOO_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetValuesCOO_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJGetArray_C",MatSeqAIJGetArray_SeqAIJ);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C",NULL);CHKERRQ(ierr);
} else {
A->ops->scale = MatScale_SeqAIJCUSPARSE;
A->ops->axpy = MatAXPY_SeqAIJCUSPARSE;
A->ops->zeroentries = MatZeroEntries_SeqAIJCUSPARSE;
A->ops->mult = MatMult_SeqAIJCUSPARSE;
A->ops->multadd = MatMultAdd_SeqAIJCUSPARSE;
A->ops->multtranspose = MatMultTranspose_SeqAIJCUSPARSE;
A->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJCUSPARSE;
A->ops->multhermitiantranspose = MatMultHermitianTranspose_SeqAIJCUSPARSE;
A->ops->multhermitiantransposeadd = MatMultHermitianTransposeAdd_SeqAIJCUSPARSE;
A->ops->productsetfromoptions = MatProductSetFromOptions_SeqAIJCUSPARSE;
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJCopySubArray_C",MatSeqAIJCopySubArray_SeqAIJCUSPARSE);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C",MatProductSetFromOptions_SeqAIJCUSPARSE);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdense_C",MatProductSetFromOptions_SeqAIJCUSPARSE);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetPreallocationCOO_C",MatSetPreallocationCOO_SeqAIJCUSPARSE);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetValuesCOO_C",MatSetValuesCOO_SeqAIJCUSPARSE);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJGetArray_C",MatSeqAIJGetArray_SeqAIJCUSPARSE);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C",MatProductSetFromOptions_SeqAIJCUSPARSE);CHKERRQ(ierr);
}
A->boundtocpu = flg;
a->inode.use = flg;
PetscFunctionReturn(0);
}
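/* Note: applications normally toggle this through the public MatBindToCPU() interface, e.g. (illustrative only)

     ierr = MatBindToCPU(A,PETSC_TRUE);CHKERRQ(ierr);   // run MatMult etc. on the host
     ierr = MatBindToCPU(A,PETSC_FALSE);CHKERRQ(ierr);  // hand the operations back to the GPU

   which dispatches to the routine above and swaps the function tables and composed methods accordingly. */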
PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJCUSPARSE(Mat A, MatType mtype, MatReuse reuse, Mat* newmat)
{
PetscErrorCode ierr;
cusparseStatus_t stat;
Mat B;
PetscFunctionBegin;
ierr = PetscCUDAInitializeCheck();CHKERRQ(ierr); /* first use of CUSPARSE may be via MatConvert */
if (reuse == MAT_INITIAL_MATRIX) {
ierr = MatDuplicate(A,MAT_COPY_VALUES,newmat);CHKERRQ(ierr);
} else if (reuse == MAT_REUSE_MATRIX) {
ierr = MatCopy(A,*newmat,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
}
B = *newmat;
ierr = PetscFree(B->defaultvectype);CHKERRQ(ierr);
ierr = PetscStrallocpy(VECCUDA,&B->defaultvectype);CHKERRQ(ierr);
if (reuse != MAT_REUSE_MATRIX && !B->spptr) {
if (B->factortype == MAT_FACTOR_NONE) {
Mat_SeqAIJCUSPARSE *spptr;
ierr = PetscNew(&spptr);CHKERRQ(ierr);
stat = cusparseCreate(&spptr->handle);CHKERRCUSPARSE(stat);
stat = cusparseSetStream(spptr->handle,PetscDefaultCudaStream);CHKERRCUSPARSE(stat);
spptr->format = MAT_CUSPARSE_CSR;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
spptr->spmvAlg = CUSPARSE_CSRMV_ALG1; /* default, since we only support csr */
spptr->spmmAlg = CUSPARSE_SPMM_CSR_ALG1; /* default, only support column-major dense matrix B */
spptr->csr2cscAlg = CUSPARSE_CSR2CSC_ALG1;
#endif
B->spptr = spptr;
} else {
Mat_SeqAIJCUSPARSETriFactors *spptr;
ierr = PetscNew(&spptr);CHKERRQ(ierr);
stat = cusparseCreate(&spptr->handle);CHKERRCUSPARSE(stat);
stat = cusparseSetStream(spptr->handle,PetscDefaultCudaStream);CHKERRCUSPARSE(stat);
B->spptr = spptr;
}
B->offloadmask = PETSC_OFFLOAD_UNALLOCATED;
}
B->ops->assemblyend = MatAssemblyEnd_SeqAIJCUSPARSE;
B->ops->destroy = MatDestroy_SeqAIJCUSPARSE;
B->ops->setoption = MatSetOption_SeqAIJCUSPARSE;
B->ops->setfromoptions = MatSetFromOptions_SeqAIJCUSPARSE;
B->ops->bindtocpu = MatBindToCPU_SeqAIJCUSPARSE;
B->ops->duplicate = MatDuplicate_SeqAIJCUSPARSE;
ierr = MatBindToCPU_SeqAIJCUSPARSE(B,PETSC_FALSE);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)B,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatCUSPARSESetFormat_C",MatCUSPARSESetFormat_SeqAIJCUSPARSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
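/* Conversion sketch (illustrative only): an existing MATSEQAIJ matrix can be moved to this type with

     ierr = MatConvert(A,MATSEQAIJCUSPARSE,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr);

   or equivalently by running with -mat_type aijcusparse and calling MatSetFromOptions() on the matrix. */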
PETSC_EXTERN PetscErrorCode MatCreate_SeqAIJCUSPARSE(Mat B)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatCreate_SeqAIJ(B);CHKERRQ(ierr);
ierr = MatConvert_SeqAIJ_SeqAIJCUSPARSE(B,MATSEQAIJCUSPARSE,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/*MC
MATSEQAIJCUSPARSE - MATAIJCUSPARSE = "(seq)aijcusparse" - A matrix type to be used for sparse matrices.
   A matrix type whose data resides on NVIDIA GPUs. These matrices can be in either
   CSR, ELL, or Hybrid format. The ELL and HYB formats require CUDA 4.2 or later.
   All matrix calculations are performed on NVIDIA GPUs using the CUSPARSE library.
Options Database Keys:
+ -mat_type aijcusparse - sets the matrix type to "seqaijcusparse" during a call to MatSetFromOptions()
. -mat_cusparse_storage_format csr - sets the storage format of matrices (for MatMult and factors in MatSolve) during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid).
- -mat_cusparse_mult_storage_format csr - sets the storage format of matrices (for MatMult) during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid).
Level: beginner
.seealso: MatCreateSeqAIJCUSPARSE(), MATAIJCUSPARSE, MatCreateAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation
M*/
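/* Example command line (illustrative only): the options listed above are typically combined as

     ./myapp -mat_type aijcusparse -mat_cusparse_storage_format csr

   for any PETSc program myapp that calls MatSetFromOptions() on its matrices; CUDA vectors (-vec_type cuda)
   are usually selected alongside. */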
PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse_band(Mat,MatFactorType,Mat*);
PETSC_EXTERN PetscErrorCode MatSolverTypeRegister_CUSPARSE(void)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSolverTypeRegister(MATSOLVERCUSPARSEBAND, MATSEQAIJ, MAT_FACTOR_LU,MatGetFactor_seqaijcusparse_cusparse_band);CHKERRQ(ierr);
ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_LU,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_CHOLESKY,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_ILU,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_ICC,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
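/* With these registrations in place, the cusparse factorizations are usually selected at run time, e.g.
   (illustrative only) -pc_type ilu -pc_factor_mat_solver_type cusparse on a MATSEQAIJCUSPARSE matrix, or
   -pc_type lu with the banded solver registered above (MATSOLVERCUSPARSEBAND) for MATSEQAIJ. */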
static PetscErrorCode MatSeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE **cusparsestruct)
{
PetscErrorCode ierr;
cusparseStatus_t stat;
PetscFunctionBegin;
if (*cusparsestruct) {
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->mat,(*cusparsestruct)->format);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->matTranspose,(*cusparsestruct)->format);CHKERRQ(ierr);
delete (*cusparsestruct)->workVector;
delete (*cusparsestruct)->rowoffsets_gpu;
delete (*cusparsestruct)->cooPerm;
delete (*cusparsestruct)->cooPerm_a;
delete (*cusparsestruct)->csr2csc_i;
if ((*cusparsestruct)->handle) {stat = cusparseDestroy((*cusparsestruct)->handle);CHKERRCUSPARSE(stat);}
ierr = PetscFree(*cusparsestruct);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
static PetscErrorCode CsrMatrix_Destroy(CsrMatrix **mat)
{
PetscFunctionBegin;
if (*mat) {
delete (*mat)->values;
delete (*mat)->column_indices;
delete (*mat)->row_offsets;
delete *mat;
*mat = 0;
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct **trifactor)
{
cusparseStatus_t stat;
PetscErrorCode ierr;
PetscFunctionBegin;
if (*trifactor) {
if ((*trifactor)->descr) { stat = cusparseDestroyMatDescr((*trifactor)->descr);CHKERRCUSPARSE(stat); }
if ((*trifactor)->solveInfo) { stat = cusparse_destroy_analysis_info((*trifactor)->solveInfo);CHKERRCUSPARSE(stat); }
ierr = CsrMatrix_Destroy(&(*trifactor)->csrMat);CHKERRQ(ierr);
if ((*trifactor)->solveBuffer) {cudaError_t cerr = cudaFree((*trifactor)->solveBuffer);CHKERRCUDA(cerr);}
if ((*trifactor)->AA_h) {cudaError_t cerr = cudaFreeHost((*trifactor)->AA_h);CHKERRCUDA(cerr);}
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
if ((*trifactor)->csr2cscBuffer) {cudaError_t cerr = cudaFree((*trifactor)->csr2cscBuffer);CHKERRCUDA(cerr);}
#endif
ierr = PetscFree(*trifactor);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct **matstruct,MatCUSPARSEStorageFormat format)
{
CsrMatrix *mat;
cusparseStatus_t stat;
cudaError_t err;
PetscFunctionBegin;
if (*matstruct) {
if ((*matstruct)->mat) {
if (format==MAT_CUSPARSE_ELL || format==MAT_CUSPARSE_HYB) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
cusparseHybMat_t hybMat = (cusparseHybMat_t)(*matstruct)->mat;
stat = cusparseDestroyHybMat(hybMat);CHKERRCUSPARSE(stat);
#endif
} else {
mat = (CsrMatrix*)(*matstruct)->mat;
CsrMatrix_Destroy(&mat);
}
}
if ((*matstruct)->descr) { stat = cusparseDestroyMatDescr((*matstruct)->descr);CHKERRCUSPARSE(stat); }
delete (*matstruct)->cprowIndices;
if ((*matstruct)->alpha_one) { err=cudaFree((*matstruct)->alpha_one);CHKERRCUDA(err); }
if ((*matstruct)->beta_zero) { err=cudaFree((*matstruct)->beta_zero);CHKERRCUDA(err); }
if ((*matstruct)->beta_one) { err=cudaFree((*matstruct)->beta_one);CHKERRCUDA(err); }
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
Mat_SeqAIJCUSPARSEMultStruct *mdata = *matstruct;
if (mdata->matDescr) {stat = cusparseDestroySpMat(mdata->matDescr);CHKERRCUSPARSE(stat);}
for (int i=0; i<3; i++) {
if (mdata->cuSpMV[i].initialized) {
err = cudaFree(mdata->cuSpMV[i].spmvBuffer);CHKERRCUDA(err);
stat = cusparseDestroyDnVec(mdata->cuSpMV[i].vecXDescr);CHKERRCUSPARSE(stat);
stat = cusparseDestroyDnVec(mdata->cuSpMV[i].vecYDescr);CHKERRCUSPARSE(stat);
}
}
#endif
delete *matstruct;
*matstruct = NULL;
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Reset(Mat_SeqAIJCUSPARSETriFactors** trifactors)
{
PetscErrorCode ierr;
PetscFunctionBegin;
if (*trifactors) {
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->loTriFactorPtr);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->upTriFactorPtr);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->loTriFactorPtrTranspose);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->upTriFactorPtrTranspose);CHKERRQ(ierr);
delete (*trifactors)->rpermIndices;
delete (*trifactors)->cpermIndices;
delete (*trifactors)->workVector;
(*trifactors)->rpermIndices = NULL;
(*trifactors)->cpermIndices = NULL;
(*trifactors)->workVector = NULL;
if ((*trifactors)->a_band_d) {cudaError_t cerr = cudaFree((*trifactors)->a_band_d);CHKERRCUDA(cerr);}
if ((*trifactors)->i_band_d) {cudaError_t cerr = cudaFree((*trifactors)->i_band_d);CHKERRCUDA(cerr);}
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors** trifactors)
{
PetscErrorCode ierr;
cusparseHandle_t handle;
cusparseStatus_t stat;
PetscFunctionBegin;
if (*trifactors) {
ierr = MatSeqAIJCUSPARSETriFactors_Reset(trifactors);CHKERRQ(ierr);
    if ((handle = (*trifactors)->handle)) {
stat = cusparseDestroy(handle);CHKERRCUSPARSE(stat);
}
ierr = PetscFree(*trifactors);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
struct IJCompare
{
__host__ __device__
inline bool operator() (const thrust::tuple<PetscInt, PetscInt> &t1, const thrust::tuple<PetscInt, PetscInt> &t2)
{
if (t1.get<0>() < t2.get<0>()) return true;
if (t1.get<0>() == t2.get<0>()) return t1.get<1>() < t2.get<1>();
return false;
}
};
struct IJEqual
{
__host__ __device__
inline bool operator() (const thrust::tuple<PetscInt, PetscInt> &t1, const thrust::tuple<PetscInt, PetscInt> &t2)
{
if (t1.get<0>() != t2.get<0>() || t1.get<1>() != t2.get<1>()) return false;
return true;
}
};
struct IJDiff
{
__host__ __device__
inline PetscInt operator() (const PetscInt &t1, const PetscInt &t2)
{
return t1 == t2 ? 0 : 1;
}
};
struct IJSum
{
__host__ __device__
inline PetscInt operator() (const PetscInt &t1, const PetscInt &t2)
{
return t1||t2;
}
};
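/* These functors are the building blocks of the COO assembly below: IJCompare/IJEqual sort and deduplicate
   the (i,j) pairs, while IJDiff + IJSum + an inclusive scan (see MatSetPreallocationCOO_SeqAIJCUSPARSE) turn
   the sorted keys into the cooPerm_a map that reduce_by_key uses in MatSetValuesCOO_SeqAIJCUSPARSE to sum
   repeated entries into a single CSR slot. */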
#include <thrust/iterator/discard_iterator.h>
PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE(Mat A, const PetscScalar v[], InsertMode imode)
{
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
THRUSTARRAY *cooPerm_v = NULL;
thrust::device_ptr<const PetscScalar> d_v;
CsrMatrix *matrix;
PetscErrorCode ierr;
cudaError_t cerr;
PetscInt n;
PetscFunctionBegin;
if (!cusp) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUSPARSE struct");
if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUSPARSE CsrMatrix");
if (!cusp->cooPerm) {
ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
matrix = (CsrMatrix*)cusp->mat->mat;
if (!matrix->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory");
if (!v) {
if (imode == INSERT_VALUES) thrust::fill(thrust::device,matrix->values->begin(),matrix->values->end(),0.);
goto finalize;
}
n = cusp->cooPerm->size();
if (isCudaMem(v)) {
d_v = thrust::device_pointer_cast(v);
} else {
cooPerm_v = new THRUSTARRAY(n);
cooPerm_v->assign(v,v+n);
d_v = cooPerm_v->data();
ierr = PetscLogCpuToGpu(n*sizeof(PetscScalar));CHKERRQ(ierr);
}
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
if (imode == ADD_VALUES) { /* ADD VALUES means add to existing ones */
if (cusp->cooPerm_a) {
THRUSTARRAY *cooPerm_w = new THRUSTARRAY(matrix->values->size());
auto vbit = thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin());
thrust::reduce_by_key(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),vbit,thrust::make_discard_iterator(),cooPerm_w->begin(),thrust::equal_to<PetscInt>(),thrust::plus<PetscScalar>());
thrust::transform(cooPerm_w->begin(),cooPerm_w->end(),matrix->values->begin(),matrix->values->begin(),thrust::plus<PetscScalar>());
delete cooPerm_w;
} else {
auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin()),
matrix->values->begin()));
auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->end()),
matrix->values->end()));
thrust::for_each(zibit,zieit,VecCUDAPlusEquals());
}
} else {
if (cusp->cooPerm_a) { /* repeated entries in COO, with INSERT_VALUES -> reduce */
auto vbit = thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin());
thrust::reduce_by_key(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),vbit,thrust::make_discard_iterator(),matrix->values->begin(),thrust::equal_to<PetscInt>(),thrust::plus<PetscScalar>());
} else {
auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin()),
matrix->values->begin()));
auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->end()),
matrix->values->end()));
thrust::for_each(zibit,zieit,VecCUDAEquals());
}
}
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
finalize:
delete cooPerm_v;
A->offloadmask = PETSC_OFFLOAD_GPU;
ierr = PetscObjectStateIncrease((PetscObject)A);CHKERRQ(ierr);
/* shorter version of MatAssemblyEnd_SeqAIJ */
ierr = PetscInfo3(A,"Matrix size: %D X %D; storage space: 0 unneeded,%D used\n",A->rmap->n,A->cmap->n,a->nz);CHKERRQ(ierr);
ierr = PetscInfo(A,"Number of mallocs during MatSetValues() is 0\n");CHKERRQ(ierr);
ierr = PetscInfo1(A,"Maximum nonzeros in any row is %D\n",a->rmax);CHKERRQ(ierr);
a->reallocs = 0;
A->info.mallocs += 0;
A->info.nz_unneeded = 0;
A->assembled = A->was_assembled = PETSC_TRUE;
A->num_ass++;
PetscFunctionReturn(0);
}
PetscErrorCode MatSeqAIJCUSPARSEInvalidateTranspose(Mat A, PetscBool destroy)
{
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
PetscErrorCode ierr;
PetscFunctionBegin;
PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
if (!cusp) PetscFunctionReturn(0);
if (destroy) {
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&cusp->matTranspose,cusp->format);CHKERRQ(ierr);
delete cusp->csr2csc_i;
cusp->csr2csc_i = NULL;
}
A->transupdated = PETSC_FALSE;
PetscFunctionReturn(0);
}
#include <thrust/binary_search.h>
PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE(Mat A, PetscInt n, const PetscInt coo_i[], const PetscInt coo_j[])
{
PetscErrorCode ierr;
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
PetscInt cooPerm_n, nzr = 0;
cudaError_t cerr;
PetscFunctionBegin;
ierr = PetscLayoutSetUp(A->rmap);CHKERRQ(ierr);
ierr = PetscLayoutSetUp(A->cmap);CHKERRQ(ierr);
cooPerm_n = cusp->cooPerm ? cusp->cooPerm->size() : 0;
if (n != cooPerm_n) {
delete cusp->cooPerm;
delete cusp->cooPerm_a;
cusp->cooPerm = NULL;
cusp->cooPerm_a = NULL;
}
if (n) {
THRUSTINTARRAY d_i(n);
THRUSTINTARRAY d_j(n);
THRUSTINTARRAY ii(A->rmap->n);
if (!cusp->cooPerm) { cusp->cooPerm = new THRUSTINTARRAY(n); }
if (!cusp->cooPerm_a) { cusp->cooPerm_a = new THRUSTINTARRAY(n); }
ierr = PetscLogCpuToGpu(2.*n*sizeof(PetscInt));CHKERRQ(ierr);
d_i.assign(coo_i,coo_i+n);
d_j.assign(coo_j,coo_j+n);
auto fkey = thrust::make_zip_iterator(thrust::make_tuple(d_i.begin(),d_j.begin()));
auto ekey = thrust::make_zip_iterator(thrust::make_tuple(d_i.end(),d_j.end()));
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
thrust::sequence(thrust::device, cusp->cooPerm->begin(), cusp->cooPerm->end(), 0);
thrust::sort_by_key(fkey, ekey, cusp->cooPerm->begin(), IJCompare());
*cusp->cooPerm_a = d_i;
THRUSTINTARRAY w = d_j;
auto nekey = thrust::unique(fkey, ekey, IJEqual());
if (nekey == ekey) { /* all entries are unique */
delete cusp->cooPerm_a;
cusp->cooPerm_a = NULL;
} else { /* I couldn't come up with a more elegant algorithm */
adjacent_difference(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),cusp->cooPerm_a->begin(),IJDiff());
adjacent_difference(w.begin(),w.end(),w.begin(),IJDiff());
(*cusp->cooPerm_a)[0] = 0;
w[0] = 0;
thrust::transform(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),w.begin(),cusp->cooPerm_a->begin(),IJSum());
thrust::inclusive_scan(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),cusp->cooPerm_a->begin(),thrust::plus<PetscInt>());
}
thrust::counting_iterator<PetscInt> search_begin(0);
thrust::upper_bound(d_i.begin(), nekey.get_iterator_tuple().get<0>(),
search_begin, search_begin + A->rmap->n,
ii.begin());
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = MatSeqXAIJFreeAIJ(A,&a->a,&a->j,&a->i);CHKERRQ(ierr);
a->singlemalloc = PETSC_FALSE;
a->free_a = PETSC_TRUE;
a->free_ij = PETSC_TRUE;
ierr = PetscMalloc1(A->rmap->n+1,&a->i);CHKERRQ(ierr);
a->i[0] = 0;
cerr = cudaMemcpy(a->i+1,ii.data().get(),A->rmap->n*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
a->nz = a->maxnz = a->i[A->rmap->n];
a->rmax = 0;
ierr = PetscMalloc1(a->nz,&a->a);CHKERRQ(ierr);
ierr = PetscMalloc1(a->nz,&a->j);CHKERRQ(ierr);
cerr = cudaMemcpy(a->j,d_j.data().get(),a->nz*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
if (!a->ilen) { ierr = PetscMalloc1(A->rmap->n,&a->ilen);CHKERRQ(ierr); }
if (!a->imax) { ierr = PetscMalloc1(A->rmap->n,&a->imax);CHKERRQ(ierr); }
for (PetscInt i = 0; i < A->rmap->n; i++) {
const PetscInt nnzr = a->i[i+1] - a->i[i];
nzr += (PetscInt)!!(nnzr);
a->ilen[i] = a->imax[i] = nnzr;
a->rmax = PetscMax(a->rmax,nnzr);
}
a->nonzerorowcnt = nzr;
A->preallocated = PETSC_TRUE;
ierr = PetscLogGpuToCpu((A->rmap->n+a->nz)*sizeof(PetscInt));CHKERRQ(ierr);
ierr = MatMarkDiagonal_SeqAIJ(A);CHKERRQ(ierr);
} else {
ierr = MatSeqAIJSetPreallocation(A,0,NULL);CHKERRQ(ierr);
}
ierr = MatSetOption(A,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_TRUE);CHKERRQ(ierr);
/* We want to allocate the CUSPARSE struct for matvec now.
The code is so convoluted now that I prefer to copy zeros */
ierr = PetscArrayzero(a->a,a->nz);CHKERRQ(ierr);
ierr = MatCheckCompressedRow(A,nzr,&a->compressedrow,a->i,A->rmap->n,0.6);CHKERRQ(ierr);
A->offloadmask = PETSC_OFFLOAD_CPU;
A->nonzerostate++;
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_TRUE);CHKERRQ(ierr);
A->assembled = PETSC_FALSE;
A->was_assembled = PETSC_FALSE;
PetscFunctionReturn(0);
}
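/* COO assembly sketch (illustrative only; n, coo_i, coo_j and vals are placeholder host arrays):

     ierr = MatSetPreallocationCOO_SeqAIJCUSPARSE(A,n,coo_i,coo_j);CHKERRQ(ierr);
     ierr = MatSetValuesCOO_SeqAIJCUSPARSE(A,vals,INSERT_VALUES);CHKERRQ(ierr);

   Application code normally reaches these through the composed "MatSetPreallocationCOO_C" and
   "MatSetValuesCOO_C" methods installed in MatBindToCPU_SeqAIJCUSPARSE() rather than by calling the
   type-specific routines directly. */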
PetscErrorCode MatSeqAIJCUSPARSEGetArrayRead(Mat A, const PetscScalar** a)
{
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
CsrMatrix *csr;
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidPointer(a,2);
PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
if (cusp->format == MAT_CUSPARSE_ELL || cusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
csr = (CsrMatrix*)cusp->mat->mat;
if (!csr->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory");
*a = csr->values->data().get();
PetscFunctionReturn(0);
}
PetscErrorCode MatSeqAIJCUSPARSERestoreArrayRead(Mat A, const PetscScalar** a)
{
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidPointer(a,2);
PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
*a = NULL;
PetscFunctionReturn(0);
}
PetscErrorCode MatSeqAIJCUSPARSEGetArray(Mat A, PetscScalar** a)
{
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
CsrMatrix *csr;
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidPointer(a,2);
PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
if (cusp->format == MAT_CUSPARSE_ELL || cusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
csr = (CsrMatrix*)cusp->mat->mat;
if (!csr->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory");
*a = csr->values->data().get();
A->offloadmask = PETSC_OFFLOAD_GPU;
ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatSeqAIJCUSPARSERestoreArray(Mat A, PetscScalar** a)
{
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidPointer(a,2);
PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
ierr = PetscObjectStateIncrease((PetscObject)A);CHKERRQ(ierr);
*a = NULL;
PetscFunctionReturn(0);
}
PetscErrorCode MatSeqAIJCUSPARSEGetArrayWrite(Mat A, PetscScalar** a)
{
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
CsrMatrix *csr;
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidPointer(a,2);
PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
if (cusp->format == MAT_CUSPARSE_ELL || cusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
csr = (CsrMatrix*)cusp->mat->mat;
if (!csr->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory");
*a = csr->values->data().get();
A->offloadmask = PETSC_OFFLOAD_GPU;
ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatSeqAIJCUSPARSERestoreArrayWrite(Mat A, PetscScalar** a)
{
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidPointer(a,2);
PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
ierr = PetscObjectStateIncrease((PetscObject)A);CHKERRQ(ierr);
*a = NULL;
PetscFunctionReturn(0);
}
struct IJCompare4
{
__host__ __device__
inline bool operator() (const thrust::tuple<int, int, PetscScalar, int> &t1, const thrust::tuple<int, int, PetscScalar, int> &t2)
{
if (t1.get<0>() < t2.get<0>()) return true;
if (t1.get<0>() == t2.get<0>()) return t1.get<1>() < t2.get<1>();
return false;
}
};
struct Shift
{
int _shift;
Shift(int shift) : _shift(shift) {}
__host__ __device__
inline int operator() (const int &c)
{
return c + _shift;
}
};
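/* In MatSeqAIJCUSPARSEMergeMats below, Shift(A->cmap->n) offsets the column indices of B so that its columns
   land to the right of A's, and IJCompare4 orders the combined (row,col,value,flag) tuples so that a single
   thrust::merge produces the CSR data of [A B] together with the permutation stored in cooPerm. */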
/* merges two SeqAIJCUSPARSE matrices side by side, i.e. C = [A, B] ([A';B']' in MATLAB notation) */
PetscErrorCode MatSeqAIJCUSPARSEMergeMats(Mat A,Mat B,MatReuse reuse,Mat* C)
{
PetscErrorCode ierr;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data, *b = (Mat_SeqAIJ*)B->data, *c;
Mat_SeqAIJCUSPARSE *Acusp = (Mat_SeqAIJCUSPARSE*)A->spptr, *Bcusp = (Mat_SeqAIJCUSPARSE*)B->spptr, *Ccusp;
Mat_SeqAIJCUSPARSEMultStruct *Cmat;
CsrMatrix *Acsr,*Bcsr,*Ccsr;
PetscInt Annz,Bnnz;
cusparseStatus_t stat;
PetscInt i,m,n,zero = 0;
cudaError_t cerr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidHeaderSpecific(B,MAT_CLASSID,2);
PetscValidPointer(C,4);
PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
PetscCheckTypeName(B,MATSEQAIJCUSPARSE);
  if (A->rmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Invalid number of rows %D != %D",A->rmap->n,B->rmap->n);
if (reuse == MAT_INPLACE_MATRIX) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_INPLACE_MATRIX not supported");
if (Acusp->format == MAT_CUSPARSE_ELL || Acusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
if (Bcusp->format == MAT_CUSPARSE_ELL || Bcusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
if (reuse == MAT_INITIAL_MATRIX) {
m = A->rmap->n;
n = A->cmap->n + B->cmap->n;
ierr = MatCreate(PETSC_COMM_SELF,C);CHKERRQ(ierr);
ierr = MatSetSizes(*C,m,n,m,n);CHKERRQ(ierr);
ierr = MatSetType(*C,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
c = (Mat_SeqAIJ*)(*C)->data;
Ccusp = (Mat_SeqAIJCUSPARSE*)(*C)->spptr;
Cmat = new Mat_SeqAIJCUSPARSEMultStruct;
Ccsr = new CsrMatrix;
Cmat->cprowIndices = NULL;
c->compressedrow.use = PETSC_FALSE;
c->compressedrow.nrows = 0;
c->compressedrow.i = NULL;
c->compressedrow.rindex = NULL;
Ccusp->workVector = NULL;
Ccusp->nrows = m;
Ccusp->mat = Cmat;
Ccusp->mat->mat = Ccsr;
Ccsr->num_rows = m;
Ccsr->num_cols = n;
stat = cusparseCreateMatDescr(&Cmat->descr);CHKERRCUSPARSE(stat);
stat = cusparseSetMatIndexBase(Cmat->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
stat = cusparseSetMatType(Cmat->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
cerr = cudaMalloc((void **)&(Cmat->alpha_one),sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = cudaMalloc((void **)&(Cmat->beta_zero),sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = cudaMalloc((void **)&(Cmat->beta_one), sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = cudaMemcpy(Cmat->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = cudaMemcpy(Cmat->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = cudaMemcpy(Cmat->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(A);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(B);CHKERRQ(ierr);
if (!Acusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
if (!Bcusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
Acsr = (CsrMatrix*)Acusp->mat->mat;
Bcsr = (CsrMatrix*)Bcusp->mat->mat;
Annz = (PetscInt)Acsr->column_indices->size();
Bnnz = (PetscInt)Bcsr->column_indices->size();
c->nz = Annz + Bnnz;
Ccsr->row_offsets = new THRUSTINTARRAY32(m+1);
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
Ccsr->values = new THRUSTARRAY(c->nz);
Ccsr->num_entries = c->nz;
Ccusp->cooPerm = new THRUSTINTARRAY(c->nz);
if (c->nz) {
auto Acoo = new THRUSTINTARRAY32(Annz);
auto Bcoo = new THRUSTINTARRAY32(Bnnz);
auto Ccoo = new THRUSTINTARRAY32(c->nz);
THRUSTINTARRAY32 *Aroff,*Broff;
if (a->compressedrow.use) { /* need full row offset */
if (!Acusp->rowoffsets_gpu) {
Acusp->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1);
Acusp->rowoffsets_gpu->assign(a->i,a->i + A->rmap->n + 1);
ierr = PetscLogCpuToGpu((A->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr);
}
Aroff = Acusp->rowoffsets_gpu;
} else Aroff = Acsr->row_offsets;
if (b->compressedrow.use) { /* need full row offset */
if (!Bcusp->rowoffsets_gpu) {
Bcusp->rowoffsets_gpu = new THRUSTINTARRAY32(B->rmap->n + 1);
Bcusp->rowoffsets_gpu->assign(b->i,b->i + B->rmap->n + 1);
ierr = PetscLogCpuToGpu((B->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr);
}
Broff = Bcusp->rowoffsets_gpu;
} else Broff = Bcsr->row_offsets;
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
stat = cusparseXcsr2coo(Acusp->handle,
Aroff->data().get(),
Annz,
m,
Acoo->data().get(),
CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
stat = cusparseXcsr2coo(Bcusp->handle,
Broff->data().get(),
Bnnz,
m,
Bcoo->data().get(),
CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
/* Issues when using bool with large matrices on SUMMIT 10.2.89 */
auto Aperm = thrust::make_constant_iterator(1);
auto Bperm = thrust::make_constant_iterator(0);
#if PETSC_PKG_CUDA_VERSION_GE(10,0,0)
auto Bcib = thrust::make_transform_iterator(Bcsr->column_indices->begin(),Shift(A->cmap->n));
auto Bcie = thrust::make_transform_iterator(Bcsr->column_indices->end(),Shift(A->cmap->n));
#else
/* there are issues instantiating the merge operation using a transform iterator for the columns of B */
auto Bcib = Bcsr->column_indices->begin();
auto Bcie = Bcsr->column_indices->end();
thrust::transform(Bcib,Bcie,Bcib,Shift(A->cmap->n));
#endif
auto wPerm = new THRUSTINTARRAY32(Annz+Bnnz);
auto Azb = thrust::make_zip_iterator(thrust::make_tuple(Acoo->begin(),Acsr->column_indices->begin(),Acsr->values->begin(),Aperm));
auto Aze = thrust::make_zip_iterator(thrust::make_tuple(Acoo->end(),Acsr->column_indices->end(),Acsr->values->end(),Aperm));
auto Bzb = thrust::make_zip_iterator(thrust::make_tuple(Bcoo->begin(),Bcib,Bcsr->values->begin(),Bperm));
auto Bze = thrust::make_zip_iterator(thrust::make_tuple(Bcoo->end(),Bcie,Bcsr->values->end(),Bperm));
auto Czb = thrust::make_zip_iterator(thrust::make_tuple(Ccoo->begin(),Ccsr->column_indices->begin(),Ccsr->values->begin(),wPerm->begin()));
auto p1 = Ccusp->cooPerm->begin();
auto p2 = Ccusp->cooPerm->begin();
thrust::advance(p2,Annz);
PetscStackCallThrust(thrust::merge(thrust::device,Azb,Aze,Bzb,Bze,Czb,IJCompare4()));
#if PETSC_PKG_CUDA_VERSION_LT(10,0,0)
thrust::transform(Bcib,Bcie,Bcib,Shift(-A->cmap->n));
#endif
auto cci = thrust::make_counting_iterator(zero);
auto cce = thrust::make_counting_iterator(c->nz);
#if 0 //Errors on SUMMIT cuda 11.1.0
PetscStackCallThrust(thrust::partition_copy(thrust::device,cci,cce,wPerm->begin(),p1,p2,thrust::identity<int>()));
#else
auto pred = thrust::identity<int>();
PetscStackCallThrust(thrust::copy_if(thrust::device,cci,cce,wPerm->begin(),p1,pred));
PetscStackCallThrust(thrust::remove_copy_if(thrust::device,cci,cce,wPerm->begin(),p2,pred));
#endif
stat = cusparseXcoo2csr(Ccusp->handle,
Ccoo->data().get(),
c->nz,
m,
Ccsr->row_offsets->data().get(),
CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
delete wPerm;
delete Acoo;
delete Bcoo;
delete Ccoo;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = cusparseCreateCsr(&Cmat->matDescr, Ccsr->num_rows, Ccsr->num_cols, Ccsr->num_entries,
Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(), Ccsr->values->data().get(),
CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat);
#endif
if (A->form_explicit_transpose && B->form_explicit_transpose) { /* if A and B have the transpose, generate C transpose too */
PetscBool AT = Acusp->matTranspose ? PETSC_TRUE : PETSC_FALSE, BT = Bcusp->matTranspose ? PETSC_TRUE : PETSC_FALSE;
Mat_SeqAIJCUSPARSEMultStruct *CmatT = new Mat_SeqAIJCUSPARSEMultStruct;
CsrMatrix *CcsrT = new CsrMatrix;
CsrMatrix *AcsrT = AT ? (CsrMatrix*)Acusp->matTranspose->mat : NULL;
CsrMatrix *BcsrT = BT ? (CsrMatrix*)Bcusp->matTranspose->mat : NULL;
(*C)->form_explicit_transpose = PETSC_TRUE;
(*C)->transupdated = PETSC_TRUE;
Ccusp->rowoffsets_gpu = NULL;
CmatT->cprowIndices = NULL;
CmatT->mat = CcsrT;
CcsrT->num_rows = n;
CcsrT->num_cols = m;
CcsrT->num_entries = c->nz;
CcsrT->row_offsets = new THRUSTINTARRAY32(n+1);
CcsrT->column_indices = new THRUSTINTARRAY32(c->nz);
CcsrT->values = new THRUSTARRAY(c->nz);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
auto rT = CcsrT->row_offsets->begin();
if (AT) {
rT = thrust::copy(AcsrT->row_offsets->begin(),AcsrT->row_offsets->end(),rT);
thrust::advance(rT,-1);
}
if (BT) {
auto titb = thrust::make_transform_iterator(BcsrT->row_offsets->begin(),Shift(a->nz));
auto tite = thrust::make_transform_iterator(BcsrT->row_offsets->end(),Shift(a->nz));
thrust::copy(titb,tite,rT);
}
auto cT = CcsrT->column_indices->begin();
if (AT) cT = thrust::copy(AcsrT->column_indices->begin(),AcsrT->column_indices->end(),cT);
if (BT) thrust::copy(BcsrT->column_indices->begin(),BcsrT->column_indices->end(),cT);
auto vT = CcsrT->values->begin();
if (AT) vT = thrust::copy(AcsrT->values->begin(),AcsrT->values->end(),vT);
if (BT) thrust::copy(BcsrT->values->begin(),BcsrT->values->end(),vT);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
stat = cusparseCreateMatDescr(&CmatT->descr);CHKERRCUSPARSE(stat);
stat = cusparseSetMatIndexBase(CmatT->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
stat = cusparseSetMatType(CmatT->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
cerr = cudaMalloc((void **)&(CmatT->alpha_one),sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = cudaMalloc((void **)&(CmatT->beta_zero),sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = cudaMalloc((void **)&(CmatT->beta_one), sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = cudaMemcpy(CmatT->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = cudaMemcpy(CmatT->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = cudaMemcpy(CmatT->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = cusparseCreateCsr(&CmatT->matDescr, CcsrT->num_rows, CcsrT->num_cols, CcsrT->num_entries,
CcsrT->row_offsets->data().get(), CcsrT->column_indices->data().get(), CcsrT->values->data().get(),
CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat);
#endif
Ccusp->matTranspose = CmatT;
}
}
c->singlemalloc = PETSC_FALSE;
c->free_a = PETSC_TRUE;
c->free_ij = PETSC_TRUE;
ierr = PetscMalloc1(m+1,&c->i);CHKERRQ(ierr);
ierr = PetscMalloc1(c->nz,&c->j);CHKERRQ(ierr);
if (PetscDefined(USE_64BIT_INDICES)) { /* 32 to 64 bit conversion on the GPU and then copy to host (lazy) */
THRUSTINTARRAY ii(Ccsr->row_offsets->size());
THRUSTINTARRAY jj(Ccsr->column_indices->size());
ii = *Ccsr->row_offsets;
jj = *Ccsr->column_indices;
cerr = cudaMemcpy(c->i,ii.data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
cerr = cudaMemcpy(c->j,jj.data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
} else {
cerr = cudaMemcpy(c->i,Ccsr->row_offsets->data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
cerr = cudaMemcpy(c->j,Ccsr->column_indices->data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
}
ierr = PetscLogGpuToCpu((Ccsr->column_indices->size() + Ccsr->row_offsets->size())*sizeof(PetscInt));CHKERRQ(ierr);
ierr = PetscMalloc1(m,&c->ilen);CHKERRQ(ierr);
ierr = PetscMalloc1(m,&c->imax);CHKERRQ(ierr);
c->maxnz = c->nz;
c->nonzerorowcnt = 0;
c->rmax = 0;
for (i = 0; i < m; i++) {
const PetscInt nn = c->i[i+1] - c->i[i];
c->ilen[i] = c->imax[i] = nn;
c->nonzerorowcnt += (PetscInt)!!nn;
c->rmax = PetscMax(c->rmax,nn);
}
ierr = MatMarkDiagonal_SeqAIJ(*C);CHKERRQ(ierr);
ierr = PetscMalloc1(c->nz,&c->a);CHKERRQ(ierr);
(*C)->nonzerostate++;
ierr = PetscLayoutSetUp((*C)->rmap);CHKERRQ(ierr);
ierr = PetscLayoutSetUp((*C)->cmap);CHKERRQ(ierr);
Ccusp->nonzerostate = (*C)->nonzerostate;
(*C)->preallocated = PETSC_TRUE;
} else {
    if ((*C)->rmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Invalid number of rows %D != %D",(*C)->rmap->n,B->rmap->n);
c = (Mat_SeqAIJ*)(*C)->data;
if (c->nz) {
Ccusp = (Mat_SeqAIJCUSPARSE*)(*C)->spptr;
if (!Ccusp->cooPerm) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cooPerm");
if (Ccusp->format == MAT_CUSPARSE_ELL || Ccusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
if (Ccusp->nonzerostate != (*C)->nonzerostate) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Wrong nonzerostate");
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr);
if (!Acusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
if (!Bcusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
Acsr = (CsrMatrix*)Acusp->mat->mat;
Bcsr = (CsrMatrix*)Bcusp->mat->mat;
Ccsr = (CsrMatrix*)Ccusp->mat->mat;
if (Acsr->num_entries != (PetscInt)Acsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"A nnz %D != %D",Acsr->num_entries,(PetscInt)Acsr->values->size());
if (Bcsr->num_entries != (PetscInt)Bcsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"B nnz %D != %D",Bcsr->num_entries,(PetscInt)Bcsr->values->size());
if (Ccsr->num_entries != (PetscInt)Ccsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"C nnz %D != %D",Ccsr->num_entries,(PetscInt)Ccsr->values->size());
if (Ccsr->num_entries != Acsr->num_entries + Bcsr->num_entries) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_COR,"C nnz %D != %D + %D",Ccsr->num_entries,Acsr->num_entries,Bcsr->num_entries);
if (Ccusp->cooPerm->size() != Ccsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"permSize %D != %D",(PetscInt)Ccusp->cooPerm->size(),(PetscInt)Ccsr->values->size());
auto pmid = Ccusp->cooPerm->begin();
thrust::advance(pmid,Acsr->num_entries);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
auto zibait = thrust::make_zip_iterator(thrust::make_tuple(Acsr->values->begin(),
thrust::make_permutation_iterator(Ccsr->values->begin(),Ccusp->cooPerm->begin())));
auto zieait = thrust::make_zip_iterator(thrust::make_tuple(Acsr->values->end(),
thrust::make_permutation_iterator(Ccsr->values->begin(),pmid)));
thrust::for_each(zibait,zieait,VecCUDAEquals());
auto zibbit = thrust::make_zip_iterator(thrust::make_tuple(Bcsr->values->begin(),
thrust::make_permutation_iterator(Ccsr->values->begin(),pmid)));
auto ziebit = thrust::make_zip_iterator(thrust::make_tuple(Bcsr->values->end(),
thrust::make_permutation_iterator(Ccsr->values->begin(),Ccusp->cooPerm->end())));
thrust::for_each(zibbit,ziebit,VecCUDAEquals());
ierr = MatSeqAIJCUSPARSEInvalidateTranspose(*C,PETSC_FALSE);CHKERRQ(ierr);
if (A->form_explicit_transpose && B->form_explicit_transpose && (*C)->form_explicit_transpose) {
if (!Ccusp->matTranspose) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing transpose Mat_SeqAIJCUSPARSEMultStruct");
PetscBool AT = Acusp->matTranspose ? PETSC_TRUE : PETSC_FALSE, BT = Bcusp->matTranspose ? PETSC_TRUE : PETSC_FALSE;
CsrMatrix *AcsrT = AT ? (CsrMatrix*)Acusp->matTranspose->mat : NULL;
CsrMatrix *BcsrT = BT ? (CsrMatrix*)Bcusp->matTranspose->mat : NULL;
CsrMatrix *CcsrT = (CsrMatrix*)Ccusp->matTranspose->mat;
auto vT = CcsrT->values->begin();
if (AT) vT = thrust::copy(AcsrT->values->begin(),AcsrT->values->end(),vT);
if (BT) thrust::copy(BcsrT->values->begin(),BcsrT->values->end(),vT);
(*C)->transupdated = PETSC_TRUE;
}
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
}
}
ierr = PetscObjectStateIncrease((PetscObject)*C);CHKERRQ(ierr);
(*C)->assembled = PETSC_TRUE;
(*C)->was_assembled = PETSC_FALSE;
(*C)->offloadmask = PETSC_OFFLOAD_GPU;
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCopySubArray_SeqAIJCUSPARSE(Mat A, PetscInt n, const PetscInt idx[], PetscScalar v[])
{
PetscErrorCode ierr;
bool dmem;
const PetscScalar *av;
cudaError_t cerr;
PetscFunctionBegin;
dmem = isCudaMem(v);
ierr = MatSeqAIJCUSPARSEGetArrayRead(A,&av);CHKERRQ(ierr);
if (n && idx) {
THRUSTINTARRAY widx(n);
widx.assign(idx,idx+n);
ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr);
THRUSTARRAY *w = NULL;
thrust::device_ptr<PetscScalar> dv;
if (dmem) {
dv = thrust::device_pointer_cast(v);
} else {
w = new THRUSTARRAY(n);
dv = w->data();
}
thrust::device_ptr<const PetscScalar> dav = thrust::device_pointer_cast(av);
auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(dav,widx.begin()),dv));
auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(dav,widx.end()),dv+n));
thrust::for_each(zibit,zieit,VecCUDAEquals());
if (w) {
cerr = cudaMemcpy(v,w->data().get(),n*sizeof(PetscScalar),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
}
delete w;
} else {
cerr = cudaMemcpy(v,av,n*sizeof(PetscScalar),dmem ? cudaMemcpyDeviceToDevice : cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
}
if (!dmem) { ierr = PetscLogCpuToGpu(n*sizeof(PetscScalar));CHKERRQ(ierr); }
ierr = MatSeqAIJCUSPARSERestoreArrayRead(A,&av);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/*
  LU BAND factorization with optimization for block diagonal (Nf blocks) in natural order
  (-mat_no_inode -pc_factor_mat_ordering_type rcm with Nf>1 fields)
  requires:
    a structurally symmetric matrix (this restriction could be lifted with transpose/column metadata)
*/
/*
The GPU LU factor kernel
*/
__global__
void __launch_bounds__(1024,1)
mat_lu_factor_band_init_set_i(const PetscInt n, const int bw, int bi_csr[])
{
const PetscInt Nf = gridDim.x, Nblk = gridDim.y, nloc = n/Nf;
const PetscInt field = blockIdx.x, blkIdx = blockIdx.y;
const PetscInt nloc_i = (nloc/Nblk + !!(nloc%Nblk)), start_i = field*nloc + blkIdx*nloc_i, end_i = (start_i + nloc_i) > (field+1)*nloc ? (field+1)*nloc : (start_i + nloc_i);
// set i (row+1)
if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0) bi_csr[0] = 0; // dummy at zero
// for (int rowb = start_i + blkIdx*blockDim.y + threadIdx.y; rowb < end_i; rowb += Nblk*blockDim.y) { // rows in block
for (int rowb = start_i + threadIdx.y; rowb < end_i; rowb += blockDim.y) { // rows in block by thread y
if (rowb < end_i && threadIdx.x==0) {
PetscInt i=rowb+1, ni = (rowb>bw) ? bw+1 : i, n1L = ni*(ni-1)/2, nug= i*bw, n2L = bw*((rowb>bw) ? (rowb-bw) : 0), mi = bw + rowb + 1 - n, clip = (mi>0) ? mi*(mi-1)/2 + mi: 0;
bi_csr[rowb+1] = n1L + nug - clip + n2L + i;
}
}
}
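/*
  Illustrative sketch (not part of PETSc): the closed form above for bi_csr[rowb+1] is a per-row
  evaluation of the running total of band-row lengths.  Row r of an n x n matrix with lower/upper
  bandwidth bw holds min(r,bw) + 1 + min(n-1-r,bw) entries, so a host-side prefix sum gives the
  same offsets; the kernel folds that sum into the n1L/nug/clip/n2L terms so each row can be
  computed independently of the others.  The helper name below is hypothetical and unused.
*/
static void MatBandRowOffsetsHostSketch(const int n, const int bw, int bi[])
{
  bi[0] = 0;
  for (int r = 0; r < n; r++) {
    const int lower = (r < bw) ? r : bw;          /* entries strictly left of the diagonal  */
    const int upper = (n-1-r < bw) ? n-1-r : bw;  /* entries strictly right of the diagonal */
    bi[r+1] = bi[r] + lower + 1 + upper;          /* +1 for the diagonal itself             */
  }
}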
// copy AIJ to AIJ_BAND
__global__
void __launch_bounds__(1024,1)
mat_lu_factor_band_copy_aij_aij(const PetscInt n, const int bw, const PetscInt r[], const PetscInt ic[],
const int ai_d[], const int aj_d[], const PetscScalar aa_d[],
const int bi_csr[], PetscScalar ba_csr[])
{
const PetscInt Nf = gridDim.x, Nblk = gridDim.y, nloc = n/Nf;
const PetscInt field = blockIdx.x, blkIdx = blockIdx.y;
const PetscInt nloc_i = (nloc/Nblk + !!(nloc%Nblk)), start_i = field*nloc + blkIdx*nloc_i, end_i = (start_i + nloc_i) > (field+1)*nloc ? (field+1)*nloc : (start_i + nloc_i);
// zero B
if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0) ba_csr[bi_csr[n]] = 0; // flop count at end
for (int rowb = start_i + threadIdx.y; rowb < end_i; rowb += blockDim.y) { // rows in block by thread y
if (rowb < end_i) {
PetscScalar *batmp = ba_csr + bi_csr[rowb];
const PetscInt nzb = bi_csr[rowb+1] - bi_csr[rowb];
for (int j=threadIdx.x ; j<nzb ; j += blockDim.x) {
if (j<nzb) {
batmp[j] = 0;
}
}
}
}
// copy A into B with CSR format -- these two loops can be fused
for (int rowb = start_i + threadIdx.y; rowb < end_i; rowb += blockDim.y) { // rows in block by thread y
if (rowb < end_i) {
const PetscInt rowa = r[rowb], nza = ai_d[rowa+1] - ai_d[rowa];
const int *ajtmp = aj_d + ai_d[rowa], bjStart = (rowb>bw) ? rowb-bw : 0;
const PetscScalar *av = aa_d + ai_d[rowa];
PetscScalar *batmp = ba_csr + bi_csr[rowb];
/* load in initial (unfactored row) */
for (int j=threadIdx.x ; j<nza ; j += blockDim.x) {
if (j<nza) {
PetscInt colb = ic[ajtmp[j]], idx = colb - bjStart;
PetscScalar vala = av[j];
batmp[idx] = vala;
}
}
}
}
}
// print AIJ_BAND
__global__
void print_mat_aij_band(const PetscInt n, const int bi_csr[], const PetscScalar ba_csr[])
{
// debug
if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0){
printf("B (AIJ) n=%d:\n",(int)n);
for (int rowb=0;rowb<n;rowb++) {
const PetscInt nz = bi_csr[rowb+1] - bi_csr[rowb];
const PetscScalar *batmp = ba_csr + bi_csr[rowb];
for (int j=0; j<nz; j++) printf("(%13.6e) ",PetscRealPart(batmp[j]));
printf(" bi=%d\n",bi_csr[rowb+1]);
}
}
}
// Band LU kernel --- ba_csr bi_csr
__global__
void __launch_bounds__(1024,1)
mat_lu_factor_band(const PetscInt n, const PetscInt bw, const int bi_csr[], PetscScalar ba_csr[])
{
extern __shared__ PetscInt smemInt[];
PetscInt *sm_pkIdx = &smemInt[0];
const PetscInt Nf = gridDim.x, Nblk = gridDim.y, nloc = n/Nf;
const PetscInt field = blockIdx.x, blkIdx = blockIdx.y;
const PetscInt start = field*nloc, end = start + nloc;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
auto g = cooperative_groups::this_grid();
#endif
// A22 panel update for each row A(1,:) and col A(:,1)
for (int glbDD=start, locDD = 0; glbDD<end; glbDD++, locDD++) {
PetscInt tnzUd = bw, maxU = end-1 - glbDD; // we are chopping off the inter ears
const PetscInt nzUd = (tnzUd>maxU) ? maxU : tnzUd, dOffset = (glbDD > bw) ? bw : glbDD; // global to go past ears after first
const PetscInt nzUd_pad = blockDim.y*(nzUd/blockDim.y + !!(nzUd%blockDim.y));
PetscScalar *pBdd = ba_csr + bi_csr[glbDD] + dOffset;
const PetscScalar *baUd = pBdd + 1; // vector of data U(i,i+1:end)
const PetscScalar Bdd = *pBdd;
const PetscInt offset = blkIdx*blockDim.y + threadIdx.y, inc = Nblk*blockDim.y;
for (int idx = offset, myi = glbDD + offset + 1; idx < nzUd_pad ; idx += inc, myi += inc) { /* assuming symmetric structure */
if (idx < nzUd && threadIdx.x==0) { /* assuming symmetric structure */
const PetscInt bwi = myi > bw ? bw : myi, kIdx = bwi - (myi-glbDD); // cuts off just the first (global) block
PetscScalar *Aid = ba_csr + bi_csr[myi] + kIdx;
*Aid = *Aid/Bdd;
sm_pkIdx[threadIdx.y] = kIdx;
}
__syncthreads(); // synch on threadIdx.x only
if (idx < nzUd) { /* assuming symmetric structure */
PetscInt kIdx = sm_pkIdx[threadIdx.y];
PetscScalar *Aid = ba_csr + bi_csr[myi] + kIdx;
PetscScalar *Aij = Aid + 1;
PetscScalar Lid = *Aid;
for (int jIdx=threadIdx.x ; jIdx<nzUd ; jIdx += blockDim.x) {
if (jIdx<nzUd) {
Aij[jIdx] -= Lid*baUd[jIdx];
}
}
}
}
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
g.sync();
#else
__syncthreads();
#endif
  } /* end of for (i=0; i<n; i++) */
}
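/*
  Host reference for the factorization above (a sketch, not part of PETSc): right-looking band LU
  without pivoting on the same packed storage ba[]/bi[], restricted to the single-field (Nf = 1)
  case.  The GPU kernel performs exactly these updates, with the y-threads spread over the rows i
  of the A22 panel and the x-threads over the columns j; the grid/block synchronizations only
  enforce the ordering of the pivot rows d.  The helper name below is hypothetical and unused.
*/
static void MatBandLUHostSketch(const int n, const int bw, const int bi[], PetscScalar ba[])
{
  for (int d = 0; d < n; d++) {                           /* pivot (diagonal) row d                      */
    const int         dOff = (d > bw) ? bw : d;           /* offset of the diagonal inside row d         */
    const PetscScalar Bdd  = ba[bi[d] + dOff];
    const int         nU   = (n-1-d < bw) ? n-1-d : bw;   /* columns right of d (and rows below it)      */
    for (int i = d+1; i <= d+nU; i++) {
      const int         iOff = ((i > bw) ? bw : i) - (i - d); /* offset of column d inside row i         */
      PetscScalar      *Ai   = ba + bi[i] + iOff;
      const PetscScalar Lid  = (Ai[0] /= Bdd);            /* L(i,d)                                      */
      for (int j = 1; j <= nU; j++)                       /* A(i,d+j) -= L(i,d) * U(d,d+j)               */
        Ai[j] -= Lid * ba[bi[d] + dOff + j];
    }
  }
}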
static PetscErrorCode MatSolve_SeqAIJCUSPARSEBAND(Mat,Vec,Vec);
static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSEBAND(Mat B,Mat A,const MatFactorInfo *info)
{
Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;
if (!cusparseTriFactors) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors");
Mat_SeqAIJCUSPARSE *cusparsestructA = (Mat_SeqAIJCUSPARSE*)A->spptr;
Mat_SeqAIJCUSPARSEMultStruct *matstructA;
CsrMatrix *matrixA;
PetscErrorCode ierr;
cudaError_t cerr;
const PetscInt n=A->rmap->n, *ic, *r;
const int *ai_d, *aj_d;
const PetscScalar *aa_d;
PetscScalar *ba_t = cusparseTriFactors->a_band_d;
int *bi_t = cusparseTriFactors->i_band_d;
PetscContainer container;
int Ni = 10, team_size=9, Nf, nVec=56, nconcurrent = 1, nsm = -1;
PetscFunctionBegin;
if (A->rmap->n == 0) {
PetscFunctionReturn(0);
}
// cusparse setup
if (!cusparsestructA) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparsestructA");
matstructA = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestructA->mat; // matstruct->cprowIndices
if (!matstructA) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing mat struct");
matrixA = (CsrMatrix*)matstructA->mat;
if (!matrixA) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing matrix cusparsestructA->mat->mat");
// factor: get Nf if available
ierr = PetscObjectQuery((PetscObject) A, "Nf", (PetscObject *) &container);CHKERRQ(ierr);
if (container) {
PetscInt *pNf=NULL;
ierr = PetscContainerGetPointer(container, (void **) &pNf);CHKERRQ(ierr);
Nf = (*pNf)%1000;
if ((*pNf)/1000>0) nconcurrent = (*pNf)/1000; // number of SMs to use
} else Nf = 1;
if (n%Nf) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"n % Nf != 0 %D %D",n,Nf);
// get data
ic = thrust::raw_pointer_cast(cusparseTriFactors->cpermIndices->data());
ai_d = thrust::raw_pointer_cast(matrixA->row_offsets->data());
aj_d = thrust::raw_pointer_cast(matrixA->column_indices->data());
aa_d = thrust::raw_pointer_cast(matrixA->values->data().get());
r = thrust::raw_pointer_cast(cusparseTriFactors->rpermIndices->data());
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
{
int bw = (2*n-1 - (int)(PetscSqrtReal(1+4*(n*n-b->nz))+PETSC_MACHINE_EPSILON))/2, bm1=bw-1,nl=n/Nf;
int gpuid;
cudaDeviceProp prop;
cudaGetDevice(&gpuid);
cudaGetDeviceProperties(&prop, gpuid);
#if PETSC_PKG_CUDA_VERSION_LT(11,0,0)
Ni = 1/nconcurrent;
Ni = 1;
#else
nsm = prop.multiProcessorCount;
Ni = nsm/Nf/nconcurrent;
#endif
team_size = bw/Ni + !!(bw%Ni);
nVec = PetscMin(bw, 1024/team_size);
ierr = PetscInfo5(A,"Matrix Bandwidth = %d, number SMs/block = %d, num concurency = %d, num fields = %d, numSMs/GPU = %d\n",bw,Ni,nconcurrent,Nf,nsm);CHKERRQ(ierr);
{
dim3 dimBlockTeam(nVec,team_size);
dim3 dimBlockLeague(Nf,Ni);
mat_lu_factor_band_copy_aij_aij<<<dimBlockLeague,dimBlockTeam>>>(n, bw, r, ic, ai_d, aj_d, aa_d, bi_t, ba_t);
CHECK_LAUNCH_ERROR(); // does a sync
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
void *kernelArgs[] = { (void*)&n, (void*)&bw, (void*)&bi_t, (void*)&ba_t};
cudaLaunchCooperativeKernel((void*)mat_lu_factor_band, dimBlockLeague, dimBlockTeam, kernelArgs, team_size*sizeof(PetscInt), NULL);
#else
mat_lu_factor_band<<<dimBlockLeague,dimBlockTeam,team_size*sizeof(PetscInt)>>>(n, bw, bi_t, ba_t);
#endif
CHECK_LAUNCH_ERROR(); // does a sync
#if defined(PETSC_USE_LOG)
ierr = PetscLogGpuFlops((PetscLogDouble)Nf*(bm1*(bm1 + 1)*(2*bm1 + 1)/3 + 2*(nl-bw)*bw*bw + nl*(nl+1)/2));CHKERRQ(ierr);
#endif
}
}
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
/* determine which version of MatSolve needs to be used. from MatLUFactorNumeric_AIJ_SeqAIJCUSPARSE */
B->ops->solve = MatSolve_SeqAIJCUSPARSEBAND;
B->ops->solvetranspose = NULL; // need transpose
B->ops->matsolve = NULL;
B->ops->matsolvetranspose = NULL;
PetscFunctionReturn(0);
}
static PetscErrorCode MatrixNfDestroy(void *ptr)
{
PetscInt *nf = (PetscInt *)ptr;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscFree(nf);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSEBAND(Mat B,Mat A,IS isrow,IS iscol,const MatFactorInfo *info)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data,*b;
IS isicol;
PetscErrorCode ierr;
cudaError_t cerr;
const PetscInt *ic,*ai=a->i,*aj=a->j;
PetscScalar *ba_t;
int *bi_t;
PetscInt i,n=A->rmap->n,Nf;
PetscInt nzBcsr,bwL,bwU;
PetscBool missing;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;
PetscContainer container;
PetscFunctionBegin;
if (A->rmap->N != A->cmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"matrix must be square");
ierr = MatMissingDiagonal(A,&missing,&i);CHKERRQ(ierr);
if (missing) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix is missing diagonal entry %D",i);
if (!cusparseTriFactors) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"!cusparseTriFactors");
ierr = MatGetOption(A,MAT_STRUCTURALLY_SYMMETRIC,&missing);CHKERRQ(ierr);
  if (!missing) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"only structurally symmetric matrices supported");
// factor: get Nf if available
ierr = PetscObjectQuery((PetscObject) A, "Nf", (PetscObject *) &container);CHKERRQ(ierr);
if (container) {
PetscInt *pNf=NULL;
ierr = PetscContainerGetPointer(container, (void **) &pNf);CHKERRQ(ierr);
Nf = (*pNf)%1000;
ierr = PetscContainerCreate(PETSC_COMM_SELF, &container);CHKERRQ(ierr);
ierr = PetscMalloc(sizeof(PetscInt), &pNf);CHKERRQ(ierr);
*pNf = Nf;
ierr = PetscContainerSetPointer(container, (void *)pNf);CHKERRQ(ierr);
ierr = PetscContainerSetUserDestroy(container, MatrixNfDestroy);CHKERRQ(ierr);
ierr = PetscObjectCompose((PetscObject)B, "Nf", (PetscObject) container);CHKERRQ(ierr);
ierr = PetscContainerDestroy(&container);CHKERRQ(ierr);
} else Nf = 1;
if (n%Nf) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"n % Nf != 0 %D %D",n,Nf);
ierr = ISInvertPermutation(iscol,PETSC_DECIDE,&isicol);CHKERRQ(ierr);
ierr = ISGetIndices(isicol,&ic);CHKERRQ(ierr);
ierr = MatSeqAIJSetPreallocation_SeqAIJ(B,MAT_SKIP_ALLOCATION,NULL);CHKERRQ(ierr);
ierr = PetscLogObjectParent((PetscObject)B,(PetscObject)isicol);CHKERRQ(ierr);
b = (Mat_SeqAIJ*)(B)->data;
/* get band widths, MatComputeBandwidth should take a reordering ic and do this */
bwL = bwU = 0;
for (int rwb=0; rwb<n; rwb++) {
const PetscInt rwa = ic[rwb], anz = ai[rwb+1] - ai[rwb], *ajtmp = aj + ai[rwb];
for (int j=0;j<anz;j++) {
PetscInt colb = ic[ajtmp[j]];
if (colb<rwa) { // L
if (rwa-colb > bwL) bwL = rwa-colb;
} else {
if (colb-rwa > bwU) bwU = colb-rwa;
}
}
}
ierr = ISRestoreIndices(isicol,&ic);CHKERRQ(ierr);
/* only support structurally symmetric, but it might work */
if (bwL!=bwU) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Only symmetric structure supported (now) W_L=%D W_U=%D",bwL,bwU);
ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr);
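  /* nonzero count of an n x n band matrix with half-bandwidth bwU on both sides of the diagonal:
     n*(2*bwU+1) for the full band minus bwU*(bwU+1) for the two clipped triangular corners,
     which simplifies to the expression used below, n + (2*n-1)*bwU - bwU*bwU */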
nzBcsr = n + (2*n-1)*bwU - bwU*bwU;
b->maxnz = b->nz = nzBcsr;
cusparseTriFactors->nnz = b->nz; // only meta data needed: n & nz
if (!cusparseTriFactors->workVector) { cusparseTriFactors->workVector = new THRUSTARRAY(n); }
  cerr = cudaMalloc(&ba_t,(b->nz+1)*sizeof(PetscScalar));CHKERRCUDA(cerr); // include a place for flops
cerr = cudaMalloc(&bi_t,(n+1)*sizeof(int));CHKERRCUDA(cerr);
cusparseTriFactors->a_band_d = ba_t;
cusparseTriFactors->i_band_d = bi_t;
/* In b structure: Free imax, ilen, old a, old j. Allocate solve_work, new a, new j */
ierr = PetscLogObjectMemory((PetscObject)B,(nzBcsr+1)*(sizeof(PetscInt)+sizeof(PetscScalar)));CHKERRQ(ierr);
{
dim3 dimBlockTeam(1,128);
dim3 dimBlockLeague(Nf,1);
mat_lu_factor_band_init_set_i<<<dimBlockLeague,dimBlockTeam>>>(n, bwU, bi_t);
}
CHECK_LAUNCH_ERROR(); // does a sync
// setup data
if (!cusparseTriFactors->rpermIndices) {
const PetscInt *r;
ierr = ISGetIndices(isrow,&r);CHKERRQ(ierr);
cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n);
cusparseTriFactors->rpermIndices->assign(r, r+n);
ierr = ISRestoreIndices(isrow,&r);CHKERRQ(ierr);
ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr);
}
/* upper triangular indices */
if (!cusparseTriFactors->cpermIndices) {
const PetscInt *c;
ierr = ISGetIndices(isicol,&c);CHKERRQ(ierr);
cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n);
cusparseTriFactors->cpermIndices->assign(c, c+n);
ierr = ISRestoreIndices(isicol,&c);CHKERRQ(ierr);
ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr);
}
/* put together the new matrix */
b->free_a = PETSC_FALSE;
b->free_ij = PETSC_FALSE;
b->singlemalloc = PETSC_FALSE;
b->ilen = NULL;
b->imax = NULL;
b->row = isrow;
b->col = iscol;
ierr = PetscObjectReference((PetscObject)isrow);CHKERRQ(ierr);
ierr = PetscObjectReference((PetscObject)iscol);CHKERRQ(ierr);
b->icol = isicol;
ierr = PetscMalloc1(n+1,&b->solve_work);CHKERRQ(ierr);
B->factortype = MAT_FACTOR_LU;
B->info.factor_mallocs = 0;
B->info.fill_ratio_given = 0;
if (ai[n]) {
B->info.fill_ratio_needed = ((PetscReal)(nzBcsr))/((PetscReal)ai[n]);
} else {
B->info.fill_ratio_needed = 0.0;
}
#if defined(PETSC_USE_INFO)
if (ai[n] != 0) {
PetscReal af = B->info.fill_ratio_needed;
ierr = PetscInfo1(A,"Band fill ratio %g\n",(double)af);CHKERRQ(ierr);
} else {
ierr = PetscInfo(A,"Empty matrix\n");CHKERRQ(ierr);
}
#endif
if (a->inode.size) {
ierr = PetscInfo(A,"Warning: using inodes in band solver.\n");CHKERRQ(ierr);
}
ierr = MatSeqAIJCheckInode_FactorLU(B);CHKERRQ(ierr);
B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSEBAND;
B->offloadmask = PETSC_OFFLOAD_GPU;
PetscFunctionReturn(0);
}
/* Use -pc_factor_mat_solver_type cusparseband */
PetscErrorCode MatFactorGetSolverType_seqaij_cusparse_band(Mat A,MatSolverType *type)
{
PetscFunctionBegin;
*type = MATSOLVERCUSPARSEBAND;
PetscFunctionReturn(0);
}
PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse_band(Mat A,MatFactorType ftype,Mat *B)
{
PetscErrorCode ierr;
PetscInt n = A->rmap->n;
PetscFunctionBegin;
ierr = MatCreate(PetscObjectComm((PetscObject)A),B);CHKERRQ(ierr);
ierr = MatSetSizes(*B,n,n,n,n);CHKERRQ(ierr);
(*B)->factortype = ftype;
(*B)->useordering = PETSC_TRUE;
ierr = MatSetType(*B,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
if (ftype == MAT_FACTOR_LU) {
ierr = MatSetBlockSizesFromMats(*B,A,A);CHKERRQ(ierr);
(*B)->ops->ilufactorsymbolic = MatILUFactorSymbolic_SeqAIJCUSPARSE;
(*B)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqAIJCUSPARSEBAND;
} else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Factor type not supported for CUSPARSEBAND Matrix Types");
ierr = MatSeqAIJSetPreallocation(*B,MAT_SKIP_ALLOCATION,NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)(*B),"MatFactorGetSolverType_C",MatFactorGetSolverType_seqaij_cusparse_band);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#define WARP_SIZE 32
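/* wreduce: tree reduction over one warp using __shfl_down_sync; after log2(WARP_SIZE) steps,
   lane 0 of the warp holds the sum of all 32 lane values (the other lanes hold partial sums only) */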
template <typename T>
__forceinline__ __device__
T wreduce(T a)
{
T b;
#pragma unroll
for (int i = WARP_SIZE/2; i >= 1; i = i >> 1) {
b = __shfl_down_sync(0xffffffff, a, i);
a += b;
}
return a;
}
// reduce in a block, returns result in thread 0
template <typename T, int BLOCK_SIZE>
__device__
T breduce(T a)
{
constexpr int NWARP = BLOCK_SIZE/WARP_SIZE;
__shared__ double buf[NWARP];
int wid = threadIdx.x / WARP_SIZE;
int laneid = threadIdx.x % WARP_SIZE;
T b = wreduce<T>(a);
if (laneid == 0)
buf[wid] = b;
__syncthreads();
if (wid == 0) {
if (threadIdx.x < NWARP)
a = buf[threadIdx.x];
else
a = 0;
for (int i = (NWARP+1)/2; i >= 1; i = i >> 1) {
a += __shfl_down_sync(0xffffffff, a, i);
}
}
return a;
}
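/*
  Usage sketch (hypothetical, not part of PETSc; assumes a 1-D block of exactly 256 threads):
  every thread contributes one partial value and only threadIdx.x == 0 receives the block-wide
  sum -- this is how mat_solve_band() below forms the row dot products of the triangular solves.
*/
__global__ void breduce_usage_example(const double *in, double *out)
{
  double v = in[blockIdx.x*blockDim.x + threadIdx.x]; /* one partial value per thread         */
  double s = breduce<double,256>(v);                  /* sum over the 256 threads of the block */
  if (threadIdx.x == 0) out[blockIdx.x] = s;          /* only thread 0 holds the full sum      */
}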
// Band LU solve kernel --- ba_csr bi_csr
template <int BLOCK_SIZE>
__global__
void __launch_bounds__(256,1)
mat_solve_band(const PetscInt n, const PetscInt bw, const PetscScalar ba_csr[], PetscScalar x[])
{
const PetscInt Nf = gridDim.x, nloc = n/Nf, field = blockIdx.x, start = field*nloc, end = start + nloc, chopnz = bw*(bw+1)/2, blocknz=(2*bw+1)*nloc, blocknz_0 = blocknz-chopnz;
const PetscScalar *pLi;
const int tid = threadIdx.x;
/* Next, solve L */
pLi = ba_csr + (field==0 ? 0 : blocknz_0 + (field-1)*blocknz + bw); // diagonal (0,0) in field
for (int glbDD=start, locDD = 0; glbDD<end; glbDD++, locDD++) {
const PetscInt col = locDD<bw ? start : (glbDD-bw);
PetscScalar t = 0;
for (int j=col+tid,idx=tid;j<glbDD;j+=blockDim.x,idx+=blockDim.x) {
t += pLi[idx]*x[j];
}
#if defined(PETSC_USE_COMPLEX)
PetscReal tr = PetscRealPartComplex(t), ti = PetscImaginaryPartComplex(t);
PetscScalar tt(breduce<PetscReal,BLOCK_SIZE>(tr), breduce<PetscReal,BLOCK_SIZE>(ti));
t = tt;
#else
t = breduce<PetscReal,BLOCK_SIZE>(t);
#endif
if (threadIdx.x == 0)
x[glbDD] -= t; // /1.0
__syncthreads();
// inc
pLi += glbDD-col; // get to diagonal
if (glbDD > n-1-bw) pLi += n-1-glbDD; // skip over U, only last block has funny offset
else pLi += bw;
pLi += 1; // skip to next row
if (field>0 && (locDD+1)<bw) pLi += bw-(locDD+1); // skip padding at beginning (ear)
}
/* Then, solve U */
pLi = ba_csr + Nf*blocknz - 2*chopnz - 1; // end of real data on block (diagonal)
if (field != Nf-1) pLi -= blocknz_0 + (Nf-2-field)*blocknz + bw; // diagonal of last local row
for (int glbDD=end-1, locDD = 0; glbDD >= start; glbDD--, locDD++) {
const PetscInt col = (locDD<bw) ? end-1 : glbDD+bw; // end of row in U
PetscScalar t = 0;
for (int j=col-tid,idx=tid;j>glbDD;j-=blockDim.x,idx+=blockDim.x) {
t += pLi[-idx]*x[j];
}
#if defined(PETSC_USE_COMPLEX)
PetscReal tr = PetscRealPartComplex(t), ti = PetscImaginaryPartComplex(t);
PetscScalar tt(breduce<PetscReal,BLOCK_SIZE>(tr), breduce<PetscReal,BLOCK_SIZE>(ti));
t = tt;
#else
t = breduce<PetscReal,BLOCK_SIZE>(PetscRealPart(t));
#endif
pLi -= col-glbDD; // diagonal
if (threadIdx.x == 0) {
x[glbDD] -= t;
x[glbDD] /= pLi[0];
}
__syncthreads();
// inc past L to start of previous U
pLi -= bw+1;
if (glbDD<bw) pLi += bw-glbDD; // overshot in top left corner
if (((locDD+1) < bw) && field != Nf-1) pLi -= (bw - (locDD+1)); // skip past right corner
}
}
static PetscErrorCode MatSolve_SeqAIJCUSPARSEBAND(Mat A,Vec bb,Vec xx)
{
const PetscScalar *barray;
PetscScalar *xarray;
thrust::device_ptr<const PetscScalar> bGPU;
thrust::device_ptr<PetscScalar> xGPU;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector;
PetscInt n=A->rmap->n, nz=cusparseTriFactors->nnz, bw=(2*n-1 - (int)(PetscSqrtReal(1+4*(n*n-nz))+PETSC_MACHINE_EPSILON))/2, Nf;
PetscErrorCode ierr;
cudaError_t cerr;
PetscContainer container;
PetscFunctionBegin;
if (A->rmap->n == 0) {
PetscFunctionReturn(0);
}
// factor: get Nf if available
ierr = PetscObjectQuery((PetscObject) A, "Nf", (PetscObject *) &container);CHKERRQ(ierr);
if (container) {
PetscInt *pNf=NULL;
ierr = PetscContainerGetPointer(container, (void **) &pNf);CHKERRQ(ierr);
Nf = (*pNf)%1000;
} else Nf = 1;
if (n%Nf) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"n % Nf != 0 %D %D",n,Nf);
/* Get the GPU pointers */
ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr);
xGPU = thrust::device_pointer_cast(xarray);
bGPU = thrust::device_pointer_cast(barray);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
/* First, reorder with the row permutation */
thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()),
thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->end()),
tempGPU->begin());
constexpr int block = 128;
mat_solve_band<block><<<Nf,block>>>(n,bw,cusparseTriFactors->a_band_d,tempGPU->data().get());
CHECK_LAUNCH_ERROR(); // does a sync
/* Last, reorder with the column permutation */
thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->begin()),
thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->end()),
xGPU);
ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
|
ad0543a4d1b62689bf0d748c7dcb25ba2de4504d.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
texture<float, 2, hipReadModeElementType> tex;
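// Rotate the image by theta: each thread handles one output pixel, shifts it to coordinates
// centred on the image, rotates by theta, and rescales by width/height because the texture is
// sampled in normalized coordinates; the +0.5 below moves the rotated point back into [0,1].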
__global__ void transformKernel(float *output, int width, int height, float theta) {
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
float u = (float)x - (float)width / 2;
float v = (float)y - (float)height / 2;
float tu = (u * cosf(theta) - v * sinf(theta)) / width;
float tv = (v * cosf(theta) + u * sinf(theta)) / height;
  if (x < width && y < height) // guard threads that fall outside the image
    output[y * width + x] = tex2D(tex, tu + 0.5, tv + 0.5);
}
int main() {
int width = 5, height = 5;
unsigned size = width * height * sizeof(float);
float *hData = (float *)malloc(size);
for (unsigned ii = 0; ii < width; ++ii)
for (unsigned jj = 0; jj < height; ++jj)
hData[ii * height + jj] = ii + jj;
hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
hipArray *cuArray;
hipMallocArray(&cuArray, &channelDesc, width, height);
hipMemcpyToArray(cuArray, 0, 0, hData, size, hipMemcpyHostToDevice);
tex.addressMode[0] = hipAddressModeWrap;
tex.addressMode[1] = hipAddressModeWrap;
tex.filterMode = hipFilterModeLinear;
tex.normalized = true;
hipBindTextureToArray(tex, cuArray, channelDesc);
float *dData;
hipMalloc(&dData, size);
dim3 block(8, 8, 1);
  dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y, 1); // round up so small images still get a block
hipLaunchKernelGGL(( transformKernel), dim3(grid), dim3(block), 0, 0, dData, width, height, 0.6);
return 0;
}
| ad0543a4d1b62689bf0d748c7dcb25ba2de4504d.cu | #include <cuda.h>
#include <stdio.h>
texture<float, 2, cudaReadModeElementType> tex;
__global__ void transformKernel(float *output, int width, int height, float theta) {
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
float u = (float)x - (float)width / 2;
float v = (float)y - (float)height / 2;
float tu = (u * cosf(theta) - v * sinf(theta)) / width;
float tv = (v * cosf(theta) + u * sinf(theta)) / height;
  if (x < width && y < height) // guard threads that fall outside the image
    output[y * width + x] = tex2D(tex, tu + 0.5, tv + 0.5);
}
int main() {
int width = 5, height = 5;
unsigned size = width * height * sizeof(float);
float *hData = (float *)malloc(size);
for (unsigned ii = 0; ii < width; ++ii)
for (unsigned jj = 0; jj < height; ++jj)
hData[ii * height + jj] = ii + jj;
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaArray *cuArray;
cudaMallocArray(&cuArray, &channelDesc, width, height);
cudaMemcpyToArray(cuArray, 0, 0, hData, size, cudaMemcpyHostToDevice);
tex.addressMode[0] = cudaAddressModeWrap;
tex.addressMode[1] = cudaAddressModeWrap;
tex.filterMode = cudaFilterModeLinear;
tex.normalized = true;
cudaBindTextureToArray(tex, cuArray, channelDesc);
float *dData;
cudaMalloc(&dData, size);
dim3 block(8, 8, 1);
dim3 grid(width / block.x, height / block.y, 1);;
transformKernel<<<grid, block>>>(dData, width, height, 0.6);
return 0;
}
|
4d1f50d3b85b989c22a5c5c21021735b82612a03.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2019-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <hip/hip_runtime.h>
#include <algorithm>
#include <cassert>
#include <memory>
#include <mutex>
#include <unordered_map>
#include <vector>
#include <utility>
#include "dali/kernels/imgproc/resample/resampling_filters.cuh"
#include "dali/kernels/imgproc/resample/resampling_windows.h"
#include "dali/core/mm/memory.h"
#include "dali/core/span.h"
namespace dali {
namespace kernels {
template <typename Function>
inline void InitFilter(ResamplingFilter &filter, Function F) {
for (int i = 0; i < filter.num_coeffs; i++)
filter.coeffs[i] = F(i);
}
void InitGaussianFilter(ResamplingFilter filter) {
InitFilter(filter, [&](int i) {
float x = 4 * (i - (filter.num_coeffs-1)*0.5f) / (filter.num_coeffs-1);
return expf(-x*x);
});
}
void InitLanczosFilter(ResamplingFilter filter, float a) {
InitFilter(filter, [&](int i) {
float x = 2 * a * (i - (filter.num_coeffs-1)*0.5f) / (filter.num_coeffs-1);
return LanczosWindow(x, a);
});
}
void InitCubicFilter(ResamplingFilter filter) {
InitFilter(filter, [&](int i) {
float x = 4 * (i - (filter.num_coeffs-1)*0.5f) / (filter.num_coeffs-1);
return CubicWindow(x);
});
}
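// Note: the analytic windows above are tabulated on uniform grids -- the Gaussian and cubic
// windows are sampled for x in [-2, 2] and the Lanczos window for x in [-a, a].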
enum FilterIdx {
Idx_Triangular = 0,
Idx_Gaussian,
Idx_Lanczos3,
Idx_Cubic
};
template <typename MemoryKind>
void InitFilters(ResamplingFilters &filters) {
const int lanczos_resolution = 32;
const int lanczos_a = 3;
const int triangular_size = 3;
const int gaussian_size = 65;
const int cubic_size = 129;
const int lanczos_size = (2*lanczos_a*lanczos_resolution + 1);
const int total_size = triangular_size + gaussian_size + cubic_size + lanczos_size;
constexpr bool need_staging =
!cuda::kind_has_property<MemoryKind, cuda::memory_access::host>::value;
using tmp_kind = std::conditional_t<need_staging, mm::memory_kind::host, MemoryKind>;
filters.filter_data = mm::alloc_raw_unique<float, tmp_kind>(total_size);
auto add_filter = [&](int size) {
float *base = filters.filters.empty()
? filters.filter_data.get()
: filters.filters.back().coeffs + filters.filters.back().num_coeffs;
filters.filters.push_back({ base, size, 1, (size - 1) * 0.5f});
};
add_filter(triangular_size);
add_filter(gaussian_size);
add_filter(lanczos_size);
add_filter(cubic_size);
assert(filters.filters.back().coeffs + filters.filters.back().num_coeffs -
filters.filter_data.get() <= total_size);
auto *tri_coeffs = filters.filters[Idx_Triangular].coeffs;
tri_coeffs[0] = 0;
tri_coeffs[1] = 1;
tri_coeffs[2] = 0;
InitGaussianFilter(filters.filters[Idx_Gaussian]);
InitLanczosFilter(filters.filters[Idx_Lanczos3], lanczos_a);
InitCubicFilter(filters.filters[Idx_Cubic]);
filters[2].rescale(6);
filters[3].rescale(4);
if (need_staging) {
auto filter_data_gpu = mm::alloc_raw_unique<float, mm::memory_kind::device>(total_size);
CUDA_CALL(hipMemcpy(filter_data_gpu.get(), filters.filter_data.get(),
total_size * sizeof(float), hipMemcpyHostToDevice));
ptrdiff_t diff = filter_data_gpu.get() - filters.filter_data.get();
filters.filter_data = std::move(filter_data_gpu);
for (auto &f : filters.filters)
f.coeffs += diff;
}
}
ResamplingFilter ResamplingFilters::Cubic() const noexcept {
return filters[Idx_Cubic];
}
ResamplingFilter ResamplingFilters::Gaussian(float sigma) const noexcept {
auto flt = filters[Idx_Gaussian];
flt.rescale(::max(1.0f, static_cast<float>(4*M_SQRT2)*sigma));
return flt;
}
ResamplingFilter ResamplingFilters::Lanczos3(float radius) const noexcept {
auto flt = filters[Idx_Lanczos3];
flt.rescale(2.0f * ::max(3.0f, radius));
return flt;
}
ResamplingFilter ResamplingFilters::Triangular(float radius) const noexcept {
auto flt = filters[Idx_Triangular];
flt.rescale(::max(1.0f, 2*radius));
return flt;
}
std::shared_ptr<ResamplingFilters> GetResamplingFilters() {
(void)mm::GetDefaultDeviceResource();
static std::mutex filter_mutex;
static std::vector<std::weak_ptr<ResamplingFilters>> filters;
std::lock_guard<std::mutex> lock(filter_mutex);
int device = 0;
if (hipGetDevice(&device) != hipSuccess)
return nullptr;
if (filters.empty()) {
int count;
hipGetDeviceCount(&count);
filters.resize(count);
}
auto ptr = filters[device].lock();
if (!ptr) {
ptr = std::make_shared<ResamplingFilters>();
InitFilters<mm::memory_kind::device>(*ptr);
filters[device] = ptr;
}
return ptr;
}
std::shared_ptr<ResamplingFilters> GetResamplingFiltersCPU() {
static std::once_flag once;
static std::shared_ptr<ResamplingFilters> cpu_filters;
std::call_once(once, []() {
(void)mm::GetDefaultResource<mm::memory_kind::host>();
cpu_filters = std::make_shared<ResamplingFilters>();
InitFilters<mm::memory_kind::host>(*cpu_filters);
});
return cpu_filters;
}
} // namespace kernels
} // namespace dali
| 4d1f50d3b85b989c22a5c5c21021735b82612a03.cu | // Copyright (c) 2019-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cuda_runtime.h>
#include <algorithm>
#include <cassert>
#include <memory>
#include <mutex>
#include <unordered_map>
#include <vector>
#include <utility>
#include "dali/kernels/imgproc/resample/resampling_filters.cuh"
#include "dali/kernels/imgproc/resample/resampling_windows.h"
#include "dali/core/mm/memory.h"
#include "dali/core/span.h"
namespace dali {
namespace kernels {
template <typename Function>
inline void InitFilter(ResamplingFilter &filter, Function F) {
for (int i = 0; i < filter.num_coeffs; i++)
filter.coeffs[i] = F(i);
}
void InitGaussianFilter(ResamplingFilter filter) {
InitFilter(filter, [&](int i) {
float x = 4 * (i - (filter.num_coeffs-1)*0.5f) / (filter.num_coeffs-1);
return expf(-x*x);
});
}
void InitLanczosFilter(ResamplingFilter filter, float a) {
InitFilter(filter, [&](int i) {
float x = 2 * a * (i - (filter.num_coeffs-1)*0.5f) / (filter.num_coeffs-1);
return LanczosWindow(x, a);
});
}
void InitCubicFilter(ResamplingFilter filter) {
InitFilter(filter, [&](int i) {
float x = 4 * (i - (filter.num_coeffs-1)*0.5f) / (filter.num_coeffs-1);
return CubicWindow(x);
});
}
enum FilterIdx {
Idx_Triangular = 0,
Idx_Gaussian,
Idx_Lanczos3,
Idx_Cubic
};
template <typename MemoryKind>
void InitFilters(ResamplingFilters &filters) {
const int lanczos_resolution = 32;
const int lanczos_a = 3;
const int triangular_size = 3;
const int gaussian_size = 65;
const int cubic_size = 129;
const int lanczos_size = (2*lanczos_a*lanczos_resolution + 1);
const int total_size = triangular_size + gaussian_size + cubic_size + lanczos_size;
constexpr bool need_staging =
!cuda::kind_has_property<MemoryKind, cuda::memory_access::host>::value;
using tmp_kind = std::conditional_t<need_staging, mm::memory_kind::host, MemoryKind>;
filters.filter_data = mm::alloc_raw_unique<float, tmp_kind>(total_size);
auto add_filter = [&](int size) {
float *base = filters.filters.empty()
? filters.filter_data.get()
: filters.filters.back().coeffs + filters.filters.back().num_coeffs;
filters.filters.push_back({ base, size, 1, (size - 1) * 0.5f});
};
add_filter(triangular_size);
add_filter(gaussian_size);
add_filter(lanczos_size);
add_filter(cubic_size);
assert(filters.filters.back().coeffs + filters.filters.back().num_coeffs -
filters.filter_data.get() <= total_size);
auto *tri_coeffs = filters.filters[Idx_Triangular].coeffs;
tri_coeffs[0] = 0;
tri_coeffs[1] = 1;
tri_coeffs[2] = 0;
InitGaussianFilter(filters.filters[Idx_Gaussian]);
InitLanczosFilter(filters.filters[Idx_Lanczos3], lanczos_a);
InitCubicFilter(filters.filters[Idx_Cubic]);
filters[2].rescale(6);
filters[3].rescale(4);
if (need_staging) {
auto filter_data_gpu = mm::alloc_raw_unique<float, mm::memory_kind::device>(total_size);
CUDA_CALL(cudaMemcpy(filter_data_gpu.get(), filters.filter_data.get(),
total_size * sizeof(float), cudaMemcpyHostToDevice));
ptrdiff_t diff = filter_data_gpu.get() - filters.filter_data.get();
filters.filter_data = std::move(filter_data_gpu);
for (auto &f : filters.filters)
f.coeffs += diff;
}
}
ResamplingFilter ResamplingFilters::Cubic() const noexcept {
return filters[Idx_Cubic];
}
ResamplingFilter ResamplingFilters::Gaussian(float sigma) const noexcept {
auto flt = filters[Idx_Gaussian];
flt.rescale(std::max(1.0f, static_cast<float>(4*M_SQRT2)*sigma));
return flt;
}
ResamplingFilter ResamplingFilters::Lanczos3(float radius) const noexcept {
auto flt = filters[Idx_Lanczos3];
flt.rescale(2.0f * std::max(3.0f, radius));
return flt;
}
ResamplingFilter ResamplingFilters::Triangular(float radius) const noexcept {
auto flt = filters[Idx_Triangular];
flt.rescale(std::max(1.0f, 2*radius));
return flt;
}
std::shared_ptr<ResamplingFilters> GetResamplingFilters() {
(void)mm::GetDefaultDeviceResource();
static std::mutex filter_mutex;
static std::vector<std::weak_ptr<ResamplingFilters>> filters;
std::lock_guard<std::mutex> lock(filter_mutex);
int device = 0;
if (cudaGetDevice(&device) != cudaSuccess)
return nullptr;
if (filters.empty()) {
int count;
cudaGetDeviceCount(&count);
filters.resize(count);
}
auto ptr = filters[device].lock();
if (!ptr) {
ptr = std::make_shared<ResamplingFilters>();
InitFilters<mm::memory_kind::device>(*ptr);
filters[device] = ptr;
}
return ptr;
}
std::shared_ptr<ResamplingFilters> GetResamplingFiltersCPU() {
static std::once_flag once;
static std::shared_ptr<ResamplingFilters> cpu_filters;
std::call_once(once, []() {
(void)mm::GetDefaultResource<mm::memory_kind::host>();
cpu_filters = std::make_shared<ResamplingFilters>();
InitFilters<mm::memory_kind::host>(*cpu_filters);
});
return cpu_filters;
}
} // namespace kernels
} // namespace dali
|
6b81d156a9328a9486bb4bff26b2e5a94906b7a9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
libcudann
Copyright (C) 2011 Luca Donati ([email protected])
*/
#include "CudaActivationFunctions.cuh"
#include <stdexcept>
#include <stdlib.h>
#include <stdio.h>
#define BLOCKSIZE 512
#define clip(x, lo, hi) (((x) < (lo)) ? (lo) : (((x) > (hi)) ? (hi) : (x)))
__global__ void actLinear(float * neurons, const int number){
//global thread index
const int g_tid = BLOCKSIZE * blockIdx.x + threadIdx.x;
if(g_tid<number)
neurons[g_tid]=neurons[g_tid];
}
__global__ void actSigmoid(float * neurons, const int number){
//global thread index
const int g_tid = BLOCKSIZE * blockIdx.x + threadIdx.x;
if(g_tid<number)
neurons[g_tid]=(1.0f/(1.0f+exp(-neurons[g_tid])));
}
__global__ void actTanh(float * neurons, const int number){
//global thread index
const int g_tid = BLOCKSIZE * blockIdx.x + threadIdx.x;
if(g_tid<number)
neurons[g_tid]=(2.0f/(1.0f+exp(-neurons[g_tid])))-1.0f;
}
__global__ void actRelu(float * neurons, const int number) {
//global thread index
const int g_tid = BLOCKSIZE * blockIdx.x + threadIdx.x;
if (g_tid<number)
neurons[g_tid] = neurons[g_tid] > 0 ? neurons[g_tid] : 0;
}
__global__ void derivLinear(float * deltas, const float * neurons, const int number){
//global thread index
const int g_tid = BLOCKSIZE * blockIdx.x + threadIdx.x;
if(g_tid<number){
deltas[g_tid]*=1;
}
}
__global__ void derivSigmoid(float * deltas, const float * neurons, const int number){
//global thread index
const int g_tid = BLOCKSIZE * blockIdx.x + threadIdx.x;
if(g_tid<number){
const float y=clip(neurons[g_tid],0.01f,0.99f);
deltas[g_tid]*=y*(1.0f-y);
}
}
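// Note: the activation value is clipped to [0.01, 0.99] before forming y*(1-y), presumably to
// keep the backpropagated derivative bounded away from zero when the sigmoid saturates;
// derivTanh below applies the analogous clip to [-0.98, 0.98].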
__global__ void derivTanh(float * deltas, const float * neurons, const int number){
//global thread index
const int g_tid = BLOCKSIZE * blockIdx.x + threadIdx.x;
if(g_tid<number){
const float y=clip(neurons[g_tid],-0.98f,0.98f);
deltas[g_tid]*=0.5f*(1.0f-(y*y));
}
}
__global__ void derivRelu(float * deltas, const float * neurons, const int number) {
//global thread index
const int g_tid = BLOCKSIZE * blockIdx.x + threadIdx.x;
if (g_tid<number) {
deltas[g_tid] *= neurons[g_tid] > 0 ? 1 : 0;
}
}
// computes the activation function for (number) elements of (neurons) and stores the results in (neurons)
void computeActFunct(float * neurons, const int number, const int funct) {
int numBlocks = number/BLOCKSIZE+1;
switch(funct){
case ACT_LINEAR: break;
case ACT_SIGMOID: actSigmoid << <numBlocks, BLOCKSIZE >> > (neurons, number); break;
case ACT_TANH: hipLaunchKernelGGL(( actTanh), dim3(numBlocks), dim3(BLOCKSIZE), 0, 0, neurons,number); break;
case ACT_RELU: hipLaunchKernelGGL(( actRelu), dim3(numBlocks), dim3(BLOCKSIZE), 0, 0, neurons, number); break;
default: throw std::runtime_error("Function not yet implemented"); break;
}
}
// computes the derivative of the activation function for (number) elements of (neurons) and multiplies the results into (deltas) in place
void computeDerivFunct(float * deltas, const float * neurons, const int number, const int funct){
int numBlocks = number/BLOCKSIZE+1;
switch(funct){
case ACT_LINEAR: break;
case ACT_SIGMOID: derivSigmoid << <numBlocks, BLOCKSIZE >> > (deltas, neurons, number); break;
case ACT_TANH: hipLaunchKernelGGL(( derivTanh), dim3(numBlocks), dim3(BLOCKSIZE), 0, 0, deltas,neurons,number); break;
case ACT_RELU: hipLaunchKernelGGL(( derivRelu), dim3(numBlocks), dim3(BLOCKSIZE), 0, 0, deltas,neurons,number); break;
default: throw std::runtime_error("Function not yet implemented"); break;
}
}
| 6b81d156a9328a9486bb4bff26b2e5a94906b7a9.cu | /*
libcudann
Copyright (C) 2011 Luca Donati ([email protected])
*/
#include "CudaActivationFunctions.cuh"
#include <stdexcept>
#include <stdlib.h>
#include <stdio.h>
#define BLOCKSIZE 512
#define clip(x, lo, hi) (((x) < (lo)) ? (lo) : (((x) > (hi)) ? (hi) : (x)))
__global__ void actLinear(float * neurons, const int number){
//global thread index
const int g_tid = BLOCKSIZE * blockIdx.x + threadIdx.x;
if(g_tid<number)
neurons[g_tid]=neurons[g_tid];
}
__global__ void actSigmoid(float * neurons, const int number){
//global thread index
const int g_tid = BLOCKSIZE * blockIdx.x + threadIdx.x;
if(g_tid<number)
neurons[g_tid]=(1.0f/(1.0f+exp(-neurons[g_tid])));
}
__global__ void actTanh(float * neurons, const int number){
//global thread index
const int g_tid = BLOCKSIZE * blockIdx.x + threadIdx.x;
if(g_tid<number)
neurons[g_tid]=(2.0f/(1.0f+exp(-neurons[g_tid])))-1.0f;
}
__global__ void actRelu(float * neurons, const int number) {
//global thread index
const int g_tid = BLOCKSIZE * blockIdx.x + threadIdx.x;
if (g_tid<number)
neurons[g_tid] = neurons[g_tid] > 0 ? neurons[g_tid] : 0;
}
__global__ void derivLinear(float * deltas, const float * neurons, const int number){
//global thread index
const int g_tid = BLOCKSIZE * blockIdx.x + threadIdx.x;
if(g_tid<number){
deltas[g_tid]*=1;
}
}
__global__ void derivSigmoid(float * deltas, const float * neurons, const int number){
//global thread index
const int g_tid = BLOCKSIZE * blockIdx.x + threadIdx.x;
if(g_tid<number){
const float y=clip(neurons[g_tid],0.01f,0.99f);
deltas[g_tid]*=y*(1.0f-y);
}
}
__global__ void derivTanh(float * deltas, const float * neurons, const int number){
//global thread index
const int g_tid = BLOCKSIZE * blockIdx.x + threadIdx.x;
if(g_tid<number){
const float y=clip(neurons[g_tid],-0.98f,0.98f);
deltas[g_tid]*=0.5f*(1.0f-(y*y));
}
}
__global__ void derivRelu(float * deltas, const float * neurons, const int number) {
//global thread index
const int g_tid = BLOCKSIZE * blockIdx.x + threadIdx.x;
if (g_tid<number) {
deltas[g_tid] *= neurons[g_tid] > 0 ? 1 : 0;
}
}
// computes the activation function for (number) elements of (neurons) and stores the results in (neurons)
void computeActFunct(float * neurons, const int number, const int funct) {
int numBlocks = number/BLOCKSIZE+1;
switch(funct){
case ACT_LINEAR: break;
case ACT_SIGMOID: actSigmoid << <numBlocks, BLOCKSIZE >> > (neurons, number); break;
case ACT_TANH: actTanh<<<numBlocks, BLOCKSIZE>>>(neurons,number); break;
case ACT_RELU: actRelu<<<numBlocks, BLOCKSIZE>>>(neurons, number); break;
default: throw std::runtime_error("Function not yet implemented"); break;
}
}
// computes the derivative of the activation function for (number) elements of (neurons) and multiplies the results into (deltas) in place
void computeDerivFunct(float * deltas, const float * neurons, const int number, const int funct){
int numBlocks = number/BLOCKSIZE+1;
switch(funct){
case ACT_LINEAR: break;
case ACT_SIGMOID: derivSigmoid << <numBlocks, BLOCKSIZE >> > (deltas, neurons, number); break;
case ACT_TANH: derivTanh<<<numBlocks, BLOCKSIZE>>>(deltas,neurons,number); break;
case ACT_RELU: derivRelu<<<numBlocks, BLOCKSIZE>>>(deltas,neurons,number); break;
default: throw std::runtime_error("Function not yet implemented"); break;
}
}
|
4cd485be5719726071772bf0237407d3bd928e96.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
All modification made by Cambricon Corporation: 2018 Cambricon Corporation
All rights reserved.
All other contributions:
Copyright (c) 2014--2018, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <algorithm>
#include <vector>
#include "caffe/layers/neuron_layer.hpp"
#include "caffe/layers/prelu_layer.hpp"
namespace caffe {
// CUDA kernel for forward
template <typename Dtype>
__global__ void PReLUForward(const int n, const int channels, const int dim,
const Dtype* in, Dtype* out, const Dtype* slope_data,
const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];
}
}
// CUDA kernel for bottom backward
template <typename Dtype>
__global__ void PReLUBackward(const int n, const int channels, const int dim,
const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff,
const Dtype* slope_data, const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
out_diff[index] = in_diff[index] * ((in_data[index] > 0)
+ (in_data[index] <= 0) * slope_data[c]);
}
}
// CUDA kernel for element-wise parameter backward
template <typename Dtype>
__global__ void PReLUParamBackward(const int n,
const int rows, const int rowPitch, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0);
for ( int k = 1; k < rows; k++ ) {
out_diff[index] += in_diff[index + k*rowPitch]
* in_data[index + k*rowPitch] * (in_data[index + k*rowPitch] <= 0);
}
}
}
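// PReLUParamBackward folds the batch dimension on the fly: each thread owns one (channel, pixel)
// position of the first image and accumulates in_diff*in_data over the remaining (rows-1) images
// spaced rowPitch apart, so the buffer reduced by the dot/gemv calls below only holds
// channels*dim entries instead of the full blob.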
template <typename Dtype>
void PReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
const int div_factor = channel_shared_ ? channels : 1;
// For in-place computation
if (top[0] == bottom[0]) {
caffe_copy(count, bottom_data, bottom_memory_.mutable_gpu_data());
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( PReLUForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, channels, dim, bottom_data, top_data, slope_data, div_factor);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
void PReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
const int count = bottom[0]->count();
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
// For in-place computation
if (top[0] == bottom[0]) {
bottom_data = bottom_memory_.gpu_data();
}
// Propagate to param
  // Since writing bottom diff will affect top diff if top and bottom blobs
  // are identical (in-place computation), we first compute param backward to
// keep top_diff unchanged.
if (this->param_propagate_down_[0]) {
Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff();
int cdim = channels * dim;
// compute element-wise diff
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( PReLUParamBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(cdim)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
cdim, bottom[0]->num(), top[0]->offset(1), top_diff ,
bottom_data ,
backward_buff_.mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
if (channel_shared_) {
Dtype dsum;
caffe_gpu_dot<Dtype>(channels * dim, backward_buff_.gpu_diff(),
multiplier_.gpu_data(), &dsum);
caffe_gpu_add_scalar(this->blobs_[0]->count(), Dtype(dsum), slope_diff);
} else {
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, dim, 1.,
backward_buff_.gpu_diff(), multiplier_.gpu_data(), 1.,
slope_diff);
}
}
// Propagate to bottom
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
int div_factor = channel_shared_ ? channels : 1;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( PReLUBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, channels, dim, top_diff, bottom_data, bottom_diff, slope_data,
div_factor);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(PReLULayer);
} // namespace caffe
| 4cd485be5719726071772bf0237407d3bd928e96.cu | /*
All modification made by Cambricon Corporation: © 2018 Cambricon Corporation
All rights reserved.
All other contributions:
Copyright (c) 2014--2018, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <algorithm>
#include <vector>
#include "caffe/layers/neuron_layer.hpp"
#include "caffe/layers/prelu_layer.hpp"
namespace caffe {
// CUDA kernel for forward
template <typename Dtype>
__global__ void PReLUForward(const int n, const int channels, const int dim,
const Dtype* in, Dtype* out, const Dtype* slope_data,
const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];
}
}
// CUDA kernel for bottom backward
template <typename Dtype>
__global__ void PReLUBackward(const int n, const int channels, const int dim,
const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff,
const Dtype* slope_data, const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
out_diff[index] = in_diff[index] * ((in_data[index] > 0)
+ (in_data[index] <= 0) * slope_data[c]);
}
}
// CUDA kernel for element-wise parameter backward
template <typename Dtype>
__global__ void PReLUParamBackward(const int n,
const int rows, const int rowPitch, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0);
for ( int k = 1; k < rows; k++ ) {
out_diff[index] += in_diff[index + k*rowPitch]
* in_data[index + k*rowPitch] * (in_data[index + k*rowPitch] <= 0);
}
}
}
template <typename Dtype>
void PReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
const int div_factor = channel_shared_ ? channels : 1;
// For in-place computation
if (top[0] == bottom[0]) {
caffe_copy(count, bottom_data, bottom_memory_.mutable_gpu_data());
}
// NOLINT_NEXT_LINE(whitespace/operators)
PReLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, channels, dim, bottom_data, top_data, slope_data, div_factor);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
void PReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
const int count = bottom[0]->count();
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
// For in-place computation
if (top[0] == bottom[0]) {
bottom_data = bottom_memory_.gpu_data();
}
// Propagate to param
  // Since writing bottom diff will affect top diff if top and bottom blobs
  // are identical (in-place computation), we first compute param backward to
// keep top_diff unchanged.
if (this->param_propagate_down_[0]) {
Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff();
int cdim = channels * dim;
// compute element-wise diff
// NOLINT_NEXT_LINE(whitespace/operators)
PReLUParamBackward<Dtype><<<CAFFE_GET_BLOCKS(cdim),
CAFFE_CUDA_NUM_THREADS>>>(
cdim, bottom[0]->num(), top[0]->offset(1), top_diff ,
bottom_data ,
backward_buff_.mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
if (channel_shared_) {
Dtype dsum;
caffe_gpu_dot<Dtype>(channels * dim, backward_buff_.gpu_diff(),
multiplier_.gpu_data(), &dsum);
caffe_gpu_add_scalar(this->blobs_[0]->count(), Dtype(dsum), slope_diff);
} else {
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, dim, 1.,
backward_buff_.gpu_diff(), multiplier_.gpu_data(), 1.,
slope_diff);
}
}
// Propagate to bottom
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
int div_factor = channel_shared_ ? channels : 1;
// NOLINT_NEXT_LINE(whitespace/operators)
PReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
count, channels, dim, top_diff, bottom_data, bottom_diff, slope_data,
div_factor);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(PReLULayer);
} // namespace caffe
|
acf6071a25cce6031e2f58ca589edf788e877c39.hip | // !!! This is a file automatically generated by hipify!!!
#ifdef __clang__
#include <hip/hip_runtime.h>
#endif
#include <hip/hip_complex.h>
#include "common.h"
#include "metrix.h"
/* 128x128 bins, 8x8-blocks 16x16 threads each.
BU - u-number of bin
BV - v-number of bin
*/
template <
bool is_half_gcf
>
__device__ __inline__ static void grid_kernel_gather(
const Pregridded uvo[]
, const complexd vis[]
, const hipDoubleComplex * gcf[]
, complexd _grid[]
, int num_of_vals
, int BU
, int BV
, int grid_size
, int bstep
){
__ACC(complexd, grid, grid_size);
int gu = blockIdx.x * blockDim.x + threadIdx.x + BU * bstep;
int gv = blockIdx.y * blockDim.y + threadIdx.y + BV * bstep;
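  // Gather formulation: this thread owns one (gu, gv) grid cell and scans all
  // visibilities, accumulating those whose GCF support window covers the cell.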
for (int i = 0; i < num_of_vals; i++){
int du, dv, supp;
du = gu - uvo[i].u;
dv = gv - uvo[i].v;
supp = uvo[i].gcf_layer_supp;
// We have input u and v translated by -supp/2!
if (du >= 0 && dv >= 0 && du < supp && dv < supp) {
complexd supportPixel;
#define __layeroff du * supp + dv
if (is_half_gcf) {
int index = uvo[i].gcf_layer_index;
// Negative index indicates that original w was mirrored
// and we shall negate the index to obtain correct
// offset *and* conjugate the result.
if (index < 0) {
supportPixel = gcf[-index][__layeroff];
supportPixel.y = - supportPixel.y;
} else {
supportPixel = gcf[index][__layeroff];
}
} else {
supportPixel = gcf[uvo[i].gcf_layer_index][__layeroff];
}
grid[gu][gv] = cuCfma(supportPixel, vis[i], grid[gu][gv]);
}
}
}
#define gridKernelGather(suff, ishalf) \
extern "C" __global__ void gridKernelGather##suff( \
const Pregridded uvo[] \
, const complexd vis[] \
, const hipDoubleComplex * gcf[] \
, complexd _grid[] \
, int num_of_vals \
, int BU \
, int BV \
, int grid_size \
, int bstep \
) { \
grid_kernel_gather<ishalf>( \
uvo \
, vis \
, gcf \
, _grid \
, num_of_vals \
, BU \
, BV \
, grid_size \
, bstep \
); \
}
gridKernelGather(HalfGCF, true)
gridKernelGather(FullGCF, false)
| acf6071a25cce6031e2f58ca589edf788e877c39.cu | #ifdef __clang__
#include <cuda.h>
#endif
#include <cuComplex.h>
#include "common.h"
#include "metrix.h"
/* 128x128 bins, 8x8-blocks 16x16 threads each.
BU - u-number of bin
BV - v-number of bin
*/
template <
bool is_half_gcf
>
__device__ __inline__ static void grid_kernel_gather(
const Pregridded uvo[]
, const complexd vis[]
, const cuDoubleComplex * gcf[]
, complexd _grid[]
, int num_of_vals
, int BU
, int BV
, int grid_size
, int bstep
){
__ACC(complexd, grid, grid_size);
int gu = blockIdx.x * blockDim.x + threadIdx.x + BU * bstep;
int gv = blockIdx.y * blockDim.y + threadIdx.y + BV * bstep;
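  // Gather formulation: this thread owns one (gu, gv) grid cell and scans all
  // visibilities, accumulating those whose GCF support window covers the cell.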
for (int i = 0; i < num_of_vals; i++){
int du, dv, supp;
du = gu - uvo[i].u;
dv = gv - uvo[i].v;
supp = uvo[i].gcf_layer_supp;
// We have input u and v translated by -supp/2!
if (du >= 0 && dv >= 0 && du < supp && dv < supp) {
complexd supportPixel;
#define __layeroff du * supp + dv
if (is_half_gcf) {
int index = uvo[i].gcf_layer_index;
// Negative index indicates that original w was mirrored
// and we shall negate the index to obtain correct
// offset *and* conjugate the result.
if (index < 0) {
supportPixel = gcf[-index][__layeroff];
supportPixel.y = - supportPixel.y;
} else {
supportPixel = gcf[index][__layeroff];
}
} else {
supportPixel = gcf[uvo[i].gcf_layer_index][__layeroff];
}
grid[gu][gv] = cuCfma(supportPixel, vis[i], grid[gu][gv]);
}
}
}
#define gridKernelGather(suff, ishalf) \
extern "C" __global__ void gridKernelGather##suff( \
const Pregridded uvo[] \
, const complexd vis[] \
, const cuDoubleComplex * gcf[] \
, complexd _grid[] \
, int num_of_vals \
, int BU \
, int BV \
, int grid_size \
, int bstep \
) { \
grid_kernel_gather<ishalf>( \
uvo \
, vis \
, gcf \
, _grid \
, num_of_vals \
, BU \
, BV \
, grid_size \
, bstep \
); \
}
gridKernelGather(HalfGCF, true)
gridKernelGather(FullGCF, false)
|
3c1a698b738b34e404534ac1ba50629734e74c3c.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"
#include "ShapeSphere.h"
#include "ShapeConvexPolygon.h"
#include "ShapePolyhedron.h"
#include "ShapeConvexPolyhedron.h"
#include "ShapeSpheropolyhedron.h"
#include "ShapeSpheropolygon.h"
#include "ShapeSimplePolygon.h"
#include "ShapeEllipsoid.h"
#include "ShapeFacetedSphere.h"
#include "ShapeSphinx.h"
#include "ShapeUnion.h"
namespace hpmc
{
namespace detail
{
//! HPMC kernels for ShapeSphere
template hipError_t gpu_hpmc_free_volume<ShapeSphere>(const hpmc_free_volume_args_t &args,
const typename ShapeSphere::param_type *d_params);
template hipError_t gpu_hpmc_update<ShapeSphere>(const hpmc_args_t& args,
const typename ShapeSphere::param_type *d_params);
template void gpu_hpmc_implicit_count_overlaps<ShapeSphere>(const hpmc_implicit_args_t& args,
const typename ShapeSphere::param_type *d_params);
template hipError_t gpu_hpmc_implicit_accept_reject<ShapeSphere>(const hpmc_implicit_args_t& args,
const typename ShapeSphere::param_type *d_params);
}; // end namespace detail
} // end namespace hpmc
| 3c1a698b738b34e404534ac1ba50629734e74c3c.cu | // Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"
#include "ShapeSphere.h"
#include "ShapeConvexPolygon.h"
#include "ShapePolyhedron.h"
#include "ShapeConvexPolyhedron.h"
#include "ShapeSpheropolyhedron.h"
#include "ShapeSpheropolygon.h"
#include "ShapeSimplePolygon.h"
#include "ShapeEllipsoid.h"
#include "ShapeFacetedSphere.h"
#include "ShapeSphinx.h"
#include "ShapeUnion.h"
namespace hpmc
{
namespace detail
{
//! HPMC kernels for ShapeSphere
template cudaError_t gpu_hpmc_free_volume<ShapeSphere>(const hpmc_free_volume_args_t &args,
const typename ShapeSphere::param_type *d_params);
template cudaError_t gpu_hpmc_update<ShapeSphere>(const hpmc_args_t& args,
const typename ShapeSphere::param_type *d_params);
template void gpu_hpmc_implicit_count_overlaps<ShapeSphere>(const hpmc_implicit_args_t& args,
const typename ShapeSphere::param_type *d_params);
template cudaError_t gpu_hpmc_implicit_accept_reject<ShapeSphere>(const hpmc_implicit_args_t& args,
const typename ShapeSphere::param_type *d_params);
}; // end namespace detail
} // end namespace hpmc
|
aae7f5397c9482bdf1c7ddef54f04a045a17e3cf.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <sys/mman.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <cuda_device_runtime_api.h>
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#include <cuda_occupancy.h>
#include "GoLgeneric.h"
#include "tictoc.h"
#include "defines.h"
__global__ void cuda_kernel(int * src, int * dst, size_t width, size_t height);
void die(char * message) {
printf("Error: %s\nExiting...\n", message);
exit(1);
}
void checkCuda() {
hipError_t status = hipPeekAtLastError();
if (status!=hipSuccess) {
fprintf(stderr, "%s: %s\n", hipGetErrorName(status), hipGetErrorString(status));
exit(2);
}
}
int main(int nargs, char ** args) {
tic();
struct inputParameters p;
if (parseInput(nargs, args, &p))
die("Input error");
printInputParameters(&p);
printf("[%f] initialization done.\n", toc());
//allocate data
size_t allocsize = p.width * p.height * sizeof(int);
int * data = (int*) aligned_alloc(16, allocsize);
if (!data)
die("Host allocation error");
printf("[%f] Memory allocated (%fMb).\n", toc(), (double)allocsize/1.0e6);
//Pin memory
mlock((void *)data, p.width * p.height * sizeof(int));
printf("[%f] Memory locked.\n", toc());
//Generate GOL
struct vector2u size;
size.x = p.width;
size.y = p.height;
if (generateMapInt(data, p.density, p.seed, size))
exit(3);
printf("[%f] Map generated.\n", toc());
    //initial count
size_t alive = countAliveInt(data, size);
printf("[%f] Alive: %zu\n", toc(), alive);
//Initializing nvidia (cuda)
int *cudaSrc, *cudaDst;
hipMalloc(&cudaSrc, allocsize);
checkCuda();
hipMalloc(&cudaDst, allocsize);
checkCuda();
printf("[%f] CUDA initialized and memory allocated.\n", toc());
//copy memory
tic2();
hipMemcpy(cudaSrc, data, allocsize, hipMemcpyHostToDevice);
double elaps = toc2();
checkCuda();
printf("[%f] Memory copy succesfull, speed=%fGb/s\n", toc(), GET_MEMSPEED(allocsize, elaps)/GBYTE);
//Invoke kernel for step times
dim3 blockDim;
blockDim.x = 10;
blockDim.y = 10;
tic2();
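    // Ping-pong between the two device buffers: each loop iteration advances two
    // generations (src->dst, then dst->src), so no intermediate copy is needed.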
for(size_t i = 0; i<p.steps/2; i++) {
hipLaunchKernelGGL(( cuda_kernel), dim3((p.width*p.height/100)+100), dim3(blockDim), 0, 0, cudaSrc, cudaDst, p.width, p.height);
hipLaunchKernelGGL(( cuda_kernel), dim3((p.width*p.height/100)+100), dim3(blockDim), 0, 0, cudaDst, cudaSrc, p.width, p.height);
}
elaps = toc2();
checkCuda();
printf("[%f] Execution succesfull, speed=%fGFLOPS\n", toc(), FLOPS_GOL_INT(p.width, p.height, p.steps, elaps)/GFLOPS);
printf("[%f] Execution succesfull, GByte/s=%f\n", toc(), MOPS_GOL_INT(4*p.width, p.height, p.steps, elaps)/GBYTE);
} | aae7f5397c9482bdf1c7ddef54f04a045a17e3cf.cu | #include <stdio.h>
#include <sys/mman.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_device_runtime_api.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <cuda_occupancy.h>
#include "GoLgeneric.h"
#include "tictoc.h"
#include "defines.h"
__global__ void cuda_kernel(int * src, int * dst, size_t width, size_t height);
void die(char * message) {
printf("Error: %s\nExiting...\n", message);
exit(1);
}
void checkCuda() {
cudaError_t status = cudaPeekAtLastError();
if (status!=cudaSuccess) {
fprintf(stderr, "%s: %s\n", cudaGetErrorName(status), cudaGetErrorString(status));
exit(2);
}
}
int main(int nargs, char ** args) {
tic();
struct inputParameters p;
if (parseInput(nargs, args, &p))
die("Input error");
printInputParameters(&p);
printf("[%f] initialization done.\n", toc());
//allocate data
size_t allocsize = p.width * p.height * sizeof(int);
int * data = (int*) aligned_alloc(16, allocsize);
if (!data)
die("Host allocation error");
printf("[%f] Memory allocated (%fMb).\n", toc(), (double)allocsize/1.0e6);
//Pin memory
mlock((void *)data, p.width * p.height * sizeof(int));
printf("[%f] Memory locked.\n", toc());
//Generate GOL
struct vector2u size;
size.x = p.width;
size.y = p.height;
if (generateMapInt(data, p.density, p.seed, size))
exit(3);
printf("[%f] Map generated.\n", toc());
    //initial count
size_t alive = countAliveInt(data, size);
printf("[%f] Alive: %zu\n", toc(), alive);
//Initializing nvidia (cuda)
int *cudaSrc, *cudaDst;
cudaMalloc(&cudaSrc, allocsize);
checkCuda();
cudaMalloc(&cudaDst, allocsize);
checkCuda();
printf("[%f] CUDA initialized and memory allocated.\n", toc());
//copy memory
tic2();
cudaMemcpy(cudaSrc, data, allocsize, cudaMemcpyHostToDevice);
double elaps = toc2();
checkCuda();
printf("[%f] Memory copy succesfull, speed=%fGb/s\n", toc(), GET_MEMSPEED(allocsize, elaps)/GBYTE);
//Invoke kernel for step times
dim3 blockDim;
blockDim.x = 10;
blockDim.y = 10;
tic2();
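    // Ping-pong between the two device buffers: each loop iteration advances two
    // generations (src->dst, then dst->src), so no intermediate copy is needed.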
for(size_t i = 0; i<p.steps/2; i++) {
cuda_kernel<<<(p.width*p.height/100)+100, blockDim>>>(cudaSrc, cudaDst, p.width, p.height);
cuda_kernel<<<(p.width*p.height/100)+100, blockDim>>>(cudaDst, cudaSrc, p.width, p.height);
}
elaps = toc2();
checkCuda();
printf("[%f] Execution succesfull, speed=%fGFLOPS\n", toc(), FLOPS_GOL_INT(p.width, p.height, p.steps, elaps)/GFLOPS);
printf("[%f] Execution succesfull, GByte/s=%f\n", toc(), MOPS_GOL_INT(4*p.width, p.height, p.steps, elaps)/GBYTE);
} |
d37d1a956c43f250f60b4e6d2ffc36b5510e1122.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
/*
* This sample demonstrates how 2D convolutions
* with very large kernel sizes
* can be efficiently implemented
* using FFT transformations.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <hipfft.h>
#include <cutil_inline.h>
typedef float2 Complex;
////////////////////////////////////////////////////////////////////////////////
// Helper functions
////////////////////////////////////////////////////////////////////////////////
//Round a / b to nearest higher integer value
int iDivUp(int a, int b){
return (a % b != 0) ? (a / b + 1) : (a / b);
}
//Align a to nearest higher multiple of b
int iAlignUp(int a, int b){
return (a % b != 0) ? (a - a % b + b) : a;
}
////////////////////////////////////////////////////////////////////////////////
// Reference straightforward CPU convolution
////////////////////////////////////////////////////////////////////////////////
extern "C" void convolutionCPU(
Complex *h_Result,
Complex *h_Data,
Complex *h_Kernel,
int dataW,
int dataH,
int kernelW,
int kernelH,
int kernelX,
int kernelY
);
////////////////////////////////////////////////////////////////////////////////
// Padding kernels
////////////////////////////////////////////////////////////////////////////////
#include "convolutionFFT2D_kernel.cuh"
////////////////////////////////////////////////////////////////////////////////
// Data configuration
////////////////////////////////////////////////////////////////////////////////
int calculateFFTsize(int dataSize){
//Highest non-zero bit position of dataSize
int hiBit;
    //Nearest lower and higher power-of-two numbers for dataSize
unsigned int lowPOT, hiPOT;
//Align data size to a multiple of half-warp
//in order to have each line starting at properly aligned addresses
//for coalesced global memory writes in padKernel() and padData()
dataSize = iAlignUp(dataSize, 16);
//Find highest non-zero bit
for(hiBit = 31; hiBit >= 0; hiBit--)
if(dataSize & (1U << hiBit)) break;
//No need to align, if already power of two
lowPOT = 1U << hiBit;
if(lowPOT == dataSize) return dataSize;
//Align to a nearest higher power of two, if the size is small enough,
//else align only to a nearest higher multiple of 512,
//in order to save computation and memory bandwidth
hiPOT = 1U << (hiBit + 1);
if(hiPOT <= 1024)
return hiPOT;
else
return iAlignUp(dataSize, 512);
}
//Kernel dimensions
const int KERNEL_W = 7;
const int KERNEL_H = 7;
//Kernel center position
const int KERNEL_X = 1;
const int KERNEL_Y = 6;
//Width and height of padding for "clamp to border" addressing mode
const int PADDING_W = KERNEL_W - 1;
const int PADDING_H = KERNEL_H - 1;
//Input data dimension
#if 1
const int DATA_W = 200;
const int DATA_H = 200;
#else
const int DATA_W = 1000;
const int DATA_H = 1000;
#endif
//Derive FFT size from data and kernel dimensions
const int FFT_W = calculateFFTsize(DATA_W + PADDING_W);
const int FFT_H = calculateFFTsize(DATA_H + PADDING_H);
const int FFT_SIZE = FFT_W * FFT_H * sizeof(Complex);
const int KERNEL_SIZE = KERNEL_W * KERNEL_H * sizeof(Complex);
const int DATA_SIZE = DATA_W * DATA_H * sizeof(Complex);
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv){
Complex
*h_Kernel,
*h_Data,
*h_ResultCPU,
*h_ResultGPU;
hipArray
*a_Kernel,
*a_Data;
hipChannelFormatDesc float2tex
= hipCreateChannelDesc<float2>();
Complex
*d_PaddedKernel,
*d_PaddedData;
hipfftHandle FFTplan;
Complex
rCPU, rGPU;
double
max_delta_ref, delta, ref, sum_delta2, sum_ref2, L2norm;
int i, x, y;
unsigned int hTimer;
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if ( cutCheckCmdLineFlag(argc, (const char **)argv, "device"))
cutilDeviceInit(argc, argv);
else
hipSetDevice( cutGetMaxGflopsDeviceId() );
cutilCheckError( cutCreateTimer(&hTimer) );
printf("Input data size : %i x %i\n", DATA_W, DATA_H );
printf("Convolution kernel size : %i x %i\n", KERNEL_W, KERNEL_H );
printf("Padded image size : %i x %i\n", DATA_W + PADDING_W, DATA_H + PADDING_H);
printf("Aligned padded image size : %i x %i\n", FFT_W, FFT_H );
printf("Allocating memory...\n");
h_Kernel = (Complex *)malloc(KERNEL_SIZE);
h_Data = (Complex *)malloc(DATA_SIZE);
h_ResultCPU = (Complex *)malloc(DATA_SIZE);
h_ResultGPU = (Complex *)malloc(FFT_SIZE);
cutilSafeCall( hipMallocArray(&a_Kernel, &float2tex, KERNEL_W, KERNEL_H) );
cutilSafeCall( hipMallocArray(&a_Data, &float2tex, DATA_W, DATA_H) );
cutilSafeCall( hipMalloc((void **)&d_PaddedKernel, FFT_SIZE) );
cutilSafeCall( hipMalloc((void **)&d_PaddedData, FFT_SIZE) );
printf("Generating random input data...\n");
srand(2007);
for(i = 0; i < (KERNEL_W * KERNEL_H); i++){
h_Kernel[i].x = (float)rand() / (float)RAND_MAX;
h_Kernel[i].y = 0;
}
for(i = 0; i < (DATA_W * DATA_H); i++){
h_Data[i].x = (float)rand() / (float)RAND_MAX;
h_Data[i].y = 0;
}
printf("Creating FFT plan for %i x %i...\n", FFT_W, FFT_H);
cufftSafeCall( hipfftPlan2d(&FFTplan, FFT_H, FFT_W, HIPFFT_C2C) );
printf("Uploading to GPU and padding convolution kernel and input data...\n");
printf("...initializing padded kernel and data storage with zeroes...\n");
cutilSafeCall( hipMemset(d_PaddedKernel, 0, FFT_SIZE) );
cutilSafeCall( hipMemset(d_PaddedData, 0, FFT_SIZE) );
printf("...copying input data and convolution kernel from host to CUDA arrays\n");
cutilSafeCall( hipMemcpyToArray(a_Kernel, 0, 0, h_Kernel, KERNEL_SIZE, hipMemcpyHostToDevice) );
cutilSafeCall( hipMemcpyToArray(a_Data, 0, 0, h_Data, DATA_SIZE, hipMemcpyHostToDevice) );
printf("...binding CUDA arrays to texture references\n");
cutilSafeCall( hipBindTextureToArray(texKernel, a_Kernel) );
cutilSafeCall( hipBindTextureToArray(texData, a_Data) );
//Block width should be a multiple of maximum coalesced write size
//for coalesced memory writes in padKernel() and padData()
dim3 threadBlock(16, 12);
dim3 kernelBlockGrid(iDivUp(KERNEL_W, threadBlock.x), iDivUp(KERNEL_H, threadBlock.y));
dim3 dataBlockGrid(iDivUp(FFT_W, threadBlock.x), iDivUp(FFT_H, threadBlock.y));
printf("...padding convolution kernel\n");
hipLaunchKernelGGL(( padKernel), dim3(kernelBlockGrid), dim3(threadBlock), 0, 0,
d_PaddedKernel,
FFT_W,
FFT_H,
KERNEL_W,
KERNEL_H,
KERNEL_X,
KERNEL_Y
);
cutilCheckMsg("padKernel() execution failed\n");
printf("...padding input data array\n");
hipLaunchKernelGGL(( padData), dim3(dataBlockGrid), dim3(threadBlock), 0, 0,
d_PaddedData,
FFT_W,
FFT_H,
DATA_W,
DATA_H,
KERNEL_W,
KERNEL_H,
KERNEL_X,
KERNEL_Y
);
cutilCheckMsg("padData() execution failed\n");
//Not including kernel transformation into time measurement,
//since convolution kernel is not changed very frequently
printf("Transforming convolution kernel...\n");
cufftSafeCall( hipfftExecC2C(FFTplan, (hipfftComplex *)d_PaddedKernel, (hipfftComplex *)d_PaddedKernel, HIPFFT_FORWARD) );
printf("Running GPU FFT convolution...\n");
cutilSafeCall( hipDeviceSynchronize() );
cutilCheckError( cutResetTimer(hTimer) );
cutilCheckError( cutStartTimer(hTimer) );
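    //FFT-based convolution: forward-transform the padded data, multiply it element-wise
    //by the already-transformed kernel spectrum (with normalization), then inverse-transform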
cufftSafeCall( hipfftExecC2C(FFTplan, (hipfftComplex *)d_PaddedData, (hipfftComplex *)d_PaddedData, HIPFFT_FORWARD) );
hipLaunchKernelGGL(( modulateAndNormalize), dim3(16), dim3(128), 0, 0,
d_PaddedData,
d_PaddedKernel,
FFT_W * FFT_H
);
cutilCheckMsg("modulateAndNormalize() execution failed\n");
cufftSafeCall( hipfftExecC2C(FFTplan, (hipfftComplex *)d_PaddedData, (hipfftComplex *)d_PaddedData, HIPFFT_BACKWARD) );
cutilSafeCall( hipDeviceSynchronize() );
cutilCheckError( cutStopTimer(hTimer) );
double gpuTime = cutGetTimerValue(hTimer);
printf("GPU time: %f msecs. //%f MPix/s\n", gpuTime, DATA_W * DATA_H * 1e-6 / (gpuTime * 0.001) );
printf("Reading back GPU FFT results...\n");
cutilSafeCall( hipMemcpy(h_ResultGPU, d_PaddedData, FFT_SIZE, hipMemcpyDeviceToHost) );
printf("Checking GPU results...\n");
printf("...running reference CPU convolution\n");
convolutionCPU(
h_ResultCPU,
h_Data,
h_Kernel,
DATA_W,
DATA_H,
KERNEL_W,
KERNEL_H,
KERNEL_X,
KERNEL_Y
);
printf("...comparing the results\n");
sum_delta2 = 0;
sum_ref2 = 0;
max_delta_ref = 0;
for(y = 0; y < DATA_H; y++)
for(x = 0; x < DATA_W; x++){
rCPU = h_ResultCPU[y * DATA_W + x];
rGPU = h_ResultGPU[y * FFT_W + x];
delta = (rCPU.x - rGPU.x) * (rCPU.x - rGPU.x) + (rCPU.y - rGPU.y) * (rCPU.y - rGPU.y);
ref = rCPU.x * rCPU.x + rCPU.y * rCPU.y;
if((delta / ref) > max_delta_ref) max_delta_ref = delta / ref;
sum_delta2 += delta;
sum_ref2 += ref;
}
L2norm = sqrt(sum_delta2 / sum_ref2);
printf("Max delta / CPU value %E\n", sqrt(max_delta_ref));
printf("L2 norm: %E\n", L2norm);
printf((L2norm < 1e-6) ? "TEST PASSED\n" : "TEST FAILED\n");
printf("Shutting down...\n");
cutilSafeCall( hipUnbindTexture(texData) );
cutilSafeCall( hipUnbindTexture(texKernel) );
cufftSafeCall( hipfftDestroy(FFTplan) );
cutilSafeCall( hipFree(d_PaddedData) );
cutilSafeCall( hipFree(d_PaddedKernel) );
cutilSafeCall( hipFreeArray(a_Data) );
cutilSafeCall( hipFreeArray(a_Kernel) );
free(h_ResultGPU);
free(h_ResultCPU);
free(h_Data);
free(h_Kernel);
hipDeviceReset();
cutilExit(argc, argv);
}
| d37d1a956c43f250f60b4e6d2ffc36b5510e1122.cu | /*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
/*
* This sample demonstrates how 2D convolutions
* with very large kernel sizes
* can be efficiently implemented
* using FFT transformations.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cufft.h>
#include <cutil_inline.h>
typedef float2 Complex;
////////////////////////////////////////////////////////////////////////////////
// Helper functions
////////////////////////////////////////////////////////////////////////////////
//Round a / b to nearest higher integer value
int iDivUp(int a, int b){
return (a % b != 0) ? (a / b + 1) : (a / b);
}
//Align a to nearest higher multiple of b
int iAlignUp(int a, int b){
return (a % b != 0) ? (a - a % b + b) : a;
}
////////////////////////////////////////////////////////////////////////////////
// Reference straightforward CPU convolution
////////////////////////////////////////////////////////////////////////////////
extern "C" void convolutionCPU(
Complex *h_Result,
Complex *h_Data,
Complex *h_Kernel,
int dataW,
int dataH,
int kernelW,
int kernelH,
int kernelX,
int kernelY
);
////////////////////////////////////////////////////////////////////////////////
// Padding kernels
////////////////////////////////////////////////////////////////////////////////
#include "convolutionFFT2D_kernel.cuh"
////////////////////////////////////////////////////////////////////////////////
// Data configuration
////////////////////////////////////////////////////////////////////////////////
int calculateFFTsize(int dataSize){
//Highest non-zero bit position of dataSize
int hiBit;
    //Nearest lower and higher power-of-two numbers for dataSize
unsigned int lowPOT, hiPOT;
//Align data size to a multiple of half-warp
//in order to have each line starting at properly aligned addresses
//for coalesced global memory writes in padKernel() and padData()
dataSize = iAlignUp(dataSize, 16);
//Find highest non-zero bit
for(hiBit = 31; hiBit >= 0; hiBit--)
if(dataSize & (1U << hiBit)) break;
//No need to align, if already power of two
lowPOT = 1U << hiBit;
if(lowPOT == dataSize) return dataSize;
//Align to a nearest higher power of two, if the size is small enough,
//else align only to a nearest higher multiple of 512,
//in order to save computation and memory bandwidth
hiPOT = 1U << (hiBit + 1);
if(hiPOT <= 1024)
return hiPOT;
else
return iAlignUp(dataSize, 512);
}
//Kernel dimensions
const int KERNEL_W = 7;
const int KERNEL_H = 7;
//Kernel center position
const int KERNEL_X = 1;
const int KERNEL_Y = 6;
//Width and height of padding for "clamp to border" addressing mode
const int PADDING_W = KERNEL_W - 1;
const int PADDING_H = KERNEL_H - 1;
//Input data dimension
#if 1
const int DATA_W = 200;
const int DATA_H = 200;
#else
const int DATA_W = 1000;
const int DATA_H = 1000;
#endif
//Derive FFT size from data and kernel dimensions
const int FFT_W = calculateFFTsize(DATA_W + PADDING_W);
const int FFT_H = calculateFFTsize(DATA_H + PADDING_H);
const int FFT_SIZE = FFT_W * FFT_H * sizeof(Complex);
const int KERNEL_SIZE = KERNEL_W * KERNEL_H * sizeof(Complex);
const int DATA_SIZE = DATA_W * DATA_H * sizeof(Complex);
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv){
Complex
*h_Kernel,
*h_Data,
*h_ResultCPU,
*h_ResultGPU;
cudaArray
*a_Kernel,
*a_Data;
cudaChannelFormatDesc float2tex
= cudaCreateChannelDesc<float2>();
Complex
*d_PaddedKernel,
*d_PaddedData;
cufftHandle FFTplan;
Complex
rCPU, rGPU;
double
max_delta_ref, delta, ref, sum_delta2, sum_ref2, L2norm;
int i, x, y;
unsigned int hTimer;
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if ( cutCheckCmdLineFlag(argc, (const char **)argv, "device"))
cutilDeviceInit(argc, argv);
else
cudaSetDevice( cutGetMaxGflopsDeviceId() );
cutilCheckError( cutCreateTimer(&hTimer) );
printf("Input data size : %i x %i\n", DATA_W, DATA_H );
printf("Convolution kernel size : %i x %i\n", KERNEL_W, KERNEL_H );
printf("Padded image size : %i x %i\n", DATA_W + PADDING_W, DATA_H + PADDING_H);
printf("Aligned padded image size : %i x %i\n", FFT_W, FFT_H );
printf("Allocating memory...\n");
h_Kernel = (Complex *)malloc(KERNEL_SIZE);
h_Data = (Complex *)malloc(DATA_SIZE);
h_ResultCPU = (Complex *)malloc(DATA_SIZE);
h_ResultGPU = (Complex *)malloc(FFT_SIZE);
cutilSafeCall( cudaMallocArray(&a_Kernel, &float2tex, KERNEL_W, KERNEL_H) );
cutilSafeCall( cudaMallocArray(&a_Data, &float2tex, DATA_W, DATA_H) );
cutilSafeCall( cudaMalloc((void **)&d_PaddedKernel, FFT_SIZE) );
cutilSafeCall( cudaMalloc((void **)&d_PaddedData, FFT_SIZE) );
printf("Generating random input data...\n");
srand(2007);
for(i = 0; i < (KERNEL_W * KERNEL_H); i++){
h_Kernel[i].x = (float)rand() / (float)RAND_MAX;
h_Kernel[i].y = 0;
}
for(i = 0; i < (DATA_W * DATA_H); i++){
h_Data[i].x = (float)rand() / (float)RAND_MAX;
h_Data[i].y = 0;
}
printf("Creating FFT plan for %i x %i...\n", FFT_W, FFT_H);
cufftSafeCall( cufftPlan2d(&FFTplan, FFT_H, FFT_W, CUFFT_C2C) );
printf("Uploading to GPU and padding convolution kernel and input data...\n");
printf("...initializing padded kernel and data storage with zeroes...\n");
cutilSafeCall( cudaMemset(d_PaddedKernel, 0, FFT_SIZE) );
cutilSafeCall( cudaMemset(d_PaddedData, 0, FFT_SIZE) );
printf("...copying input data and convolution kernel from host to CUDA arrays\n");
cutilSafeCall( cudaMemcpyToArray(a_Kernel, 0, 0, h_Kernel, KERNEL_SIZE, cudaMemcpyHostToDevice) );
cutilSafeCall( cudaMemcpyToArray(a_Data, 0, 0, h_Data, DATA_SIZE, cudaMemcpyHostToDevice) );
printf("...binding CUDA arrays to texture references\n");
cutilSafeCall( cudaBindTextureToArray(texKernel, a_Kernel) );
cutilSafeCall( cudaBindTextureToArray(texData, a_Data) );
//Block width should be a multiple of maximum coalesced write size
//for coalesced memory writes in padKernel() and padData()
dim3 threadBlock(16, 12);
dim3 kernelBlockGrid(iDivUp(KERNEL_W, threadBlock.x), iDivUp(KERNEL_H, threadBlock.y));
dim3 dataBlockGrid(iDivUp(FFT_W, threadBlock.x), iDivUp(FFT_H, threadBlock.y));
printf("...padding convolution kernel\n");
padKernel<<<kernelBlockGrid, threadBlock>>>(
d_PaddedKernel,
FFT_W,
FFT_H,
KERNEL_W,
KERNEL_H,
KERNEL_X,
KERNEL_Y
);
cutilCheckMsg("padKernel() execution failed\n");
printf("...padding input data array\n");
padData<<<dataBlockGrid, threadBlock>>>(
d_PaddedData,
FFT_W,
FFT_H,
DATA_W,
DATA_H,
KERNEL_W,
KERNEL_H,
KERNEL_X,
KERNEL_Y
);
cutilCheckMsg("padData() execution failed\n");
//Not including kernel transformation into time measurement,
//since convolution kernel is not changed very frequently
printf("Transforming convolution kernel...\n");
cufftSafeCall( cufftExecC2C(FFTplan, (cufftComplex *)d_PaddedKernel, (cufftComplex *)d_PaddedKernel, CUFFT_FORWARD) );
printf("Running GPU FFT convolution...\n");
cutilSafeCall( cudaThreadSynchronize() );
cutilCheckError( cutResetTimer(hTimer) );
cutilCheckError( cutStartTimer(hTimer) );
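    //FFT-based convolution: forward-transform the padded data, multiply it element-wise
    //by the already-transformed kernel spectrum (with normalization), then inverse-transform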
cufftSafeCall( cufftExecC2C(FFTplan, (cufftComplex *)d_PaddedData, (cufftComplex *)d_PaddedData, CUFFT_FORWARD) );
modulateAndNormalize<<<16, 128>>>(
d_PaddedData,
d_PaddedKernel,
FFT_W * FFT_H
);
cutilCheckMsg("modulateAndNormalize() execution failed\n");
cufftSafeCall( cufftExecC2C(FFTplan, (cufftComplex *)d_PaddedData, (cufftComplex *)d_PaddedData, CUFFT_INVERSE) );
cutilSafeCall( cudaThreadSynchronize() );
cutilCheckError( cutStopTimer(hTimer) );
double gpuTime = cutGetTimerValue(hTimer);
printf("GPU time: %f msecs. //%f MPix/s\n", gpuTime, DATA_W * DATA_H * 1e-6 / (gpuTime * 0.001) );
printf("Reading back GPU FFT results...\n");
cutilSafeCall( cudaMemcpy(h_ResultGPU, d_PaddedData, FFT_SIZE, cudaMemcpyDeviceToHost) );
printf("Checking GPU results...\n");
printf("...running reference CPU convolution\n");
convolutionCPU(
h_ResultCPU,
h_Data,
h_Kernel,
DATA_W,
DATA_H,
KERNEL_W,
KERNEL_H,
KERNEL_X,
KERNEL_Y
);
printf("...comparing the results\n");
sum_delta2 = 0;
sum_ref2 = 0;
max_delta_ref = 0;
for(y = 0; y < DATA_H; y++)
for(x = 0; x < DATA_W; x++){
rCPU = h_ResultCPU[y * DATA_W + x];
rGPU = h_ResultGPU[y * FFT_W + x];
delta = (rCPU.x - rGPU.x) * (rCPU.x - rGPU.x) + (rCPU.y - rGPU.y) * (rCPU.y - rGPU.y);
ref = rCPU.x * rCPU.x + rCPU.y * rCPU.y;
if((delta / ref) > max_delta_ref) max_delta_ref = delta / ref;
sum_delta2 += delta;
sum_ref2 += ref;
}
L2norm = sqrt(sum_delta2 / sum_ref2);
printf("Max delta / CPU value %E\n", sqrt(max_delta_ref));
printf("L2 norm: %E\n", L2norm);
printf((L2norm < 1e-6) ? "TEST PASSED\n" : "TEST FAILED\n");
printf("Shutting down...\n");
cutilSafeCall( cudaUnbindTexture(texData) );
cutilSafeCall( cudaUnbindTexture(texKernel) );
cufftSafeCall( cufftDestroy(FFTplan) );
cutilSafeCall( cudaFree(d_PaddedData) );
cutilSafeCall( cudaFree(d_PaddedKernel) );
cutilSafeCall( cudaFreeArray(a_Data) );
cutilSafeCall( cudaFreeArray(a_Kernel) );
free(h_ResultGPU);
free(h_ResultCPU);
free(h_Data);
free(h_Kernel);
cudaThreadExit();
cutilExit(argc, argv);
}
|
6b68bceef0c44a2d432d0ebba63b8d18fa5d8d9a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <time.h>
#include "parallel.hip"
#include <unistd.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
typedef struct {
int n_inputs;
int n_hidden;
int n_outputs;
float *out_input;
float *out_hidden;
float *out_output;
float *changes_input_hidden;
float *changes_hidden_output;
float *w_input_hidden;
float *w_hidden_output;
} NeuralNet;
typedef struct {
int *result;
int *data;
} Pattern;
void buildLayer(float *arr, int n, float initial) {
int i=0;
while(i < n){
*arr = initial;
arr++;
i++;
}
}
float* buildWeightsLayer(int outer_n, int inner_n, float seed) {
int total = outer_n * inner_n;
float *w = (float *)malloc(sizeof(float) * total);
for(int i=0; i < total; i++) {
if (seed == -1) {
w[i] = ((float)rand()/(float)RAND_MAX);
} else {
w[i] = seed;
}
}
return w;
}
NeuralNet buildNeuralNet(int n_inputs, int n_outputs, int n_hidden) {
float *out_input = (float *)malloc(sizeof(float) * (n_inputs + 1));
float *out_hidden = (float *)malloc(sizeof(float) * n_hidden);
float *out_output = (float *)malloc(sizeof(float) * n_outputs);
buildLayer(out_input, n_inputs + 1, 1.0f);
buildLayer(out_hidden, n_hidden, 1.0f);
buildLayer(out_output, n_outputs, 1.0f);
// Build changes layer
float *changes_input_hidden = buildWeightsLayer(n_inputs + 1, n_hidden, 0.0f);
float *changes_hidden_output = buildWeightsLayer(n_hidden, n_outputs, 0.0f);
// Build weight matrix
float *w_input_hidden = buildWeightsLayer(n_inputs + 1, n_hidden, -1.0f);
float *w_hidden_output = buildWeightsLayer(n_hidden, n_outputs, -1.0f);
NeuralNet nn;
nn.n_inputs = n_inputs + 1;
nn.n_outputs = n_outputs;
nn.n_hidden = n_hidden;
nn.out_input = out_input;
nn.out_hidden = out_hidden;
nn.out_output = out_output;
nn.changes_input_hidden = changes_input_hidden;
nn.changes_hidden_output = changes_hidden_output;
nn.w_input_hidden = w_input_hidden;
nn.w_hidden_output = w_hidden_output;
return nn;
}
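// Derivative of the activation expressed in terms of its output y: 1 - y^2,
// i.e. the tanh derivative, despite the "sigmoid" name.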
float dsigmoid(float y) {
return 1.0 - pow(y,2.0f);
}
void update_pattern(Pattern pattern, NeuralNet nn) {
if (DEBUG) {
printf("\n ***** LAYER UPDATE *****\n");
}
// Write inputs
int i;
for(i=0; i < nn.n_inputs -1; i++) {
nn.out_input[i] = pattern.data[i];
}
// Run parallel update
update_layer(nn.out_input, nn.out_hidden, nn.n_inputs, nn.n_hidden, nn.w_input_hidden);
update_layer(nn.out_hidden, nn.out_output, nn.n_hidden, nn.n_outputs, nn.w_hidden_output);
if (DEBUG) {
printf("\n ***** END LAYER UPDATE *****\n");
}
}
float back_propagate_network(Pattern p, NeuralNet n) {
if (DEBUG) {
printf("\n ***** BACK PROPAGATE *****\n");
}
int i, j;
float *output_delta = (float*)malloc(sizeof(float) * n.n_outputs);
float *hidden_delta = (float*)malloc(sizeof(float) * n.n_hidden);
// Calculate output delta
for (i=0; i < n.n_outputs; i++) {
float error = p.result[i] - n.out_output[i];
output_delta[i] = dsigmoid(n.out_output[i]) * error;
}
// Calculate hidden delta
for(i=0; i < n.n_hidden; i++) {
float error = 0.0f;
for (j=0; j < n.n_outputs; j++) {
error += output_delta[j] * n.w_hidden_output[i * n.n_outputs + j];
}
hidden_delta[i] = dsigmoid(n.out_hidden[i]) * error;
}
// Set hidden-output weights
setWeightsForLayers(n.w_hidden_output, n.changes_hidden_output, output_delta, n.out_hidden, n.n_hidden, n.n_outputs);
if (DEBUG) {
printf("\nHidden-Output weights\n");
drawMatrix(n.w_hidden_output, n.n_outputs, n.n_hidden);
_sleep(1);
}
setWeightsForLayers(n.w_input_hidden, n.changes_input_hidden, hidden_delta, n.out_input, n.n_inputs, n.n_hidden);
if (DEBUG) {
printf("\nInput-Hidden weights\n");
drawMatrix(n.w_input_hidden, n.n_hidden, n.n_inputs);
_sleep(1);
}
// Calculate error
float error = 0.0f;
for (i=0; i < n.n_outputs; i++) {
error = error + 0.5f * pow(p.result[i] - n.out_output[i], 2);
}
if (DEBUG) {
printf("\n ***** Error for this pattern is: %f *****\n", error);
_sleep(2);
}
return error;
}
void train_network(Pattern *patterns, int n_patterns, int n_iterations, NeuralNet nn) {
int i, j;
for (i=0; i < n_iterations; i++) {
float error = 0;
for (j=0; j < n_patterns; j++) {
update_pattern(patterns[j], nn);
error += back_propagate_network(patterns[j], nn);
}
if (i % 10 == 0) {
printf("Error is: %-.5f\n", error);
if (DEBUG) _sleep(2);
}
}
}
Pattern makePatternSingleOutput(int *data, int result) {
Pattern p;
p.data = data;
p.result = (int *)malloc(sizeof(int));
p.result[0] = result;
return p;
}
int main() {
srand((unsigned)time(NULL));
int n_inputs = 2;
int n_hidden = 4;
int n_outputs = 1;
// Build output layer
NeuralNet nn = buildNeuralNet(n_inputs, n_outputs, n_hidden);
// Build training samples
int _p1[] = {0,0};
Pattern p1 = makePatternSingleOutput(_p1, 0);
int _p2[] = {0,1};
Pattern p2 = makePatternSingleOutput(_p2, 1);
int _p3[] = {1,1};
Pattern p3 = makePatternSingleOutput(_p3, 1);
int _p4[] = {1,0};
Pattern p4 = makePatternSingleOutput(_p4, 1);
Pattern patterns[] = {p3, p2, p1, p4};
// Train the network
train_network(patterns, 4, 1000, nn);
printf("\n\nTesting the network\n");
update_pattern(p2, nn);
for (int i=0; i < nn.n_outputs; i++) {
printf("Output: %f, expected: %i\n", nn.out_output[i], p2.result[i]);
}
hipDeviceReset();
return 0;
}
| 6b68bceef0c44a2d432d0ebba63b8d18fa5d8d9a.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <time.h>
#include "parallel.cu"
#include <unistd.h>
#include <string.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
typedef struct {
int n_inputs;
int n_hidden;
int n_outputs;
float *out_input;
float *out_hidden;
float *out_output;
float *changes_input_hidden;
float *changes_hidden_output;
float *w_input_hidden;
float *w_hidden_output;
} NeuralNet;
typedef struct {
int *result;
int *data;
} Pattern;
void buildLayer(float *arr, int n, float initial) {
int i=0;
while(i < n){
*arr = initial;
arr++;
i++;
}
}
float* buildWeightsLayer(int outer_n, int inner_n, float seed) {
int total = outer_n * inner_n;
float *w = (float *)malloc(sizeof(float) * total);
for(int i=0; i < total; i++) {
if (seed == -1) {
w[i] = ((float)rand()/(float)RAND_MAX);
} else {
w[i] = seed;
}
}
return w;
}
NeuralNet buildNeuralNet(int n_inputs, int n_outputs, int n_hidden) {
float *out_input = (float *)malloc(sizeof(float) * (n_inputs + 1));
float *out_hidden = (float *)malloc(sizeof(float) * n_hidden);
float *out_output = (float *)malloc(sizeof(float) * n_outputs);
buildLayer(out_input, n_inputs + 1, 1.0f);
buildLayer(out_hidden, n_hidden, 1.0f);
buildLayer(out_output, n_outputs, 1.0f);
// Build changes layer
float *changes_input_hidden = buildWeightsLayer(n_inputs + 1, n_hidden, 0.0f);
float *changes_hidden_output = buildWeightsLayer(n_hidden, n_outputs, 0.0f);
// Build weight matrix
float *w_input_hidden = buildWeightsLayer(n_inputs + 1, n_hidden, -1.0f);
float *w_hidden_output = buildWeightsLayer(n_hidden, n_outputs, -1.0f);
NeuralNet nn;
nn.n_inputs = n_inputs + 1;
nn.n_outputs = n_outputs;
nn.n_hidden = n_hidden;
nn.out_input = out_input;
nn.out_hidden = out_hidden;
nn.out_output = out_output;
nn.changes_input_hidden = changes_input_hidden;
nn.changes_hidden_output = changes_hidden_output;
nn.w_input_hidden = w_input_hidden;
nn.w_hidden_output = w_hidden_output;
return nn;
}
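// Derivative of the activation expressed in terms of its output y: 1 - y^2,
// i.e. the tanh derivative, despite the "sigmoid" name.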
float dsigmoid(float y) {
return 1.0 - pow(y,2.0f);
}
void update_pattern(Pattern pattern, NeuralNet nn) {
if (DEBUG) {
printf("\n ***** LAYER UPDATE *****\n");
}
// Write inputs
int i;
for(i=0; i < nn.n_inputs -1; i++) {
nn.out_input[i] = pattern.data[i];
}
// Run parallel update
update_layer(nn.out_input, nn.out_hidden, nn.n_inputs, nn.n_hidden, nn.w_input_hidden);
update_layer(nn.out_hidden, nn.out_output, nn.n_hidden, nn.n_outputs, nn.w_hidden_output);
if (DEBUG) {
printf("\n ***** END LAYER UPDATE *****\n");
}
}
float back_propagate_network(Pattern p, NeuralNet n) {
if (DEBUG) {
printf("\n ***** BACK PROPAGATE *****\n");
}
int i, j;
float *output_delta = (float*)malloc(sizeof(float) * n.n_outputs);
float *hidden_delta = (float*)malloc(sizeof(float) * n.n_hidden);
// Calculate output delta
for (i=0; i < n.n_outputs; i++) {
float error = p.result[i] - n.out_output[i];
output_delta[i] = dsigmoid(n.out_output[i]) * error;
}
// Calculate hidden delta
for(i=0; i < n.n_hidden; i++) {
float error = 0.0f;
for (j=0; j < n.n_outputs; j++) {
error += output_delta[j] * n.w_hidden_output[i * n.n_outputs + j];
}
hidden_delta[i] = dsigmoid(n.out_hidden[i]) * error;
}
// Set hidden-output weights
setWeightsForLayers(n.w_hidden_output, n.changes_hidden_output, output_delta, n.out_hidden, n.n_hidden, n.n_outputs);
if (DEBUG) {
printf("\nHidden-Output weights\n");
drawMatrix(n.w_hidden_output, n.n_outputs, n.n_hidden);
_sleep(1);
}
setWeightsForLayers(n.w_input_hidden, n.changes_input_hidden, hidden_delta, n.out_input, n.n_inputs, n.n_hidden);
if (DEBUG) {
printf("\nInput-Hidden weights\n");
drawMatrix(n.w_input_hidden, n.n_hidden, n.n_inputs);
_sleep(1);
}
// Calculate error
float error = 0.0f;
for (i=0; i < n.n_outputs; i++) {
error = error + 0.5f * pow(p.result[i] - n.out_output[i], 2);
}
if (DEBUG) {
printf("\n ***** Error for this pattern is: %f *****\n", error);
_sleep(2);
}
return error;
}
void train_network(Pattern *patterns, int n_patterns, int n_iterations, NeuralNet nn) {
int i, j;
for (i=0; i < n_iterations; i++) {
float error = 0;
for (j=0; j < n_patterns; j++) {
update_pattern(patterns[j], nn);
error += back_propagate_network(patterns[j], nn);
}
if (i % 10 == 0) {
printf("Error is: %-.5f\n", error);
if (DEBUG) _sleep(2);
}
}
}
Pattern makePatternSingleOutput(int *data, int result) {
Pattern p;
p.data = data;
p.result = (int *)malloc(sizeof(int));
p.result[0] = result;
return p;
}
int main() {
srand((unsigned)time(NULL));
int n_inputs = 2;
int n_hidden = 4;
int n_outputs = 1;
// Build output layer
NeuralNet nn = buildNeuralNet(n_inputs, n_outputs, n_hidden);
// Build training samples
int _p1[] = {0,0};
Pattern p1 = makePatternSingleOutput(_p1, 0);
int _p2[] = {0,1};
Pattern p2 = makePatternSingleOutput(_p2, 1);
int _p3[] = {1,1};
Pattern p3 = makePatternSingleOutput(_p3, 1);
int _p4[] = {1,0};
Pattern p4 = makePatternSingleOutput(_p4, 1);
Pattern patterns[] = {p3, p2, p1, p4};
// Train the network
train_network(patterns, 4, 1000, nn);
printf("\n\nTesting the network\n");
update_pattern(p2, nn);
for (int i=0; i < nn.n_outputs; i++) {
printf("Output: %f, expected: %i\n", nn.out_output[i], p2.result[i]);
}
cudaDeviceReset();
return 0;
}
|
e634b3cf2949c2c81ea99baa34757b30ec9db73c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
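// Element-wise vector addition: each thread handles one index; the bounds check
// covers the final partial block when n is not a multiple of the block size.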
__global__ void vecAdd(double *a, double *b, double *c, int n) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < n)
c[id] = a[id] + b[id];
}
int main(int argc, char *argv[]) {
int n = 100000;
double *h_a, *h_b, *h_c;
double *d_a;
double *d_b;
double *d_c;
size_t bytes = n * sizeof(double);
h_a = (double *)malloc(bytes);
h_b = (double *)malloc(bytes);
h_c = (double *)malloc(bytes);
hipMalloc(&d_a, bytes);
hipMalloc(&d_b, bytes);
hipMalloc(&d_c, bytes);
for ( int i = 0; i < n; i++) {
h_a[i] = sin(i) * sin(i);
h_b[i] = cos(i) * cos(i);
}
hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice);
int blockSize, gridSize;
blockSize = 1024;
gridSize = (int)ceil((float)n / blockSize);
hipLaunchKernelGGL(( vecAdd), dim3(gridSize), dim3(blockSize), 0, 0, d_a, d_b, d_c, n);
hipMemcpy(h_c, d_c, bytes, hipMemcpyDeviceToHost);
double sum = 0;
for (int i = 0; i < n; i++)
sum += h_c[i];
printf("final result: %f\n", sum / n);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
free(h_a);
free(h_b);
free(h_c);
return 0;
} | e634b3cf2949c2c81ea99baa34757b30ec9db73c.cu | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
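// Element-wise vector addition: each thread handles one index; the bounds check
// covers the final partial block when n is not a multiple of the block size.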
__global__ void vecAdd(double *a, double *b, double *c, int n) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < n)
c[id] = a[id] + b[id];
}
int main(int argc, char *argv[]) {
int n = 100000;
double *h_a, *h_b, *h_c;
double *d_a;
double *d_b;
double *d_c;
size_t bytes = n * sizeof(double);
h_a = (double *)malloc(bytes);
h_b = (double *)malloc(bytes);
h_c = (double *)malloc(bytes);
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
for ( int i = 0; i < n; i++) {
h_a[i] = sin(i) * sin(i);
h_b[i] = cos(i) * cos(i);
}
cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
int blockSize, gridSize;
blockSize = 1024;
gridSize = (int)ceil((float)n / blockSize);
vecAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
double sum = 0;
for (int i = 0; i < n; i++)
sum += h_c[i];
printf("final result: %f\n", sum / n);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
free(h_a);
free(h_b);
free(h_c);
return 0;
} |
2bf991fb15809368b5602c6086f17f1cf4457d86.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Yan haixu
*/
#include <stdio.h>
#include "../parser.h"
template<unsigned int blocksize>
__device__ void warpReduce(volatile float* sdata , int tid) {
if (blocksize >= 64) sdata[tid] += sdata[tid + 32];
if (blocksize >= 32) sdata[tid] += sdata[tid + 16];
if (blocksize >= 16) sdata[tid] += sdata[tid + 8];
if (blocksize >= 8) sdata[tid] += sdata[tid + 4];
if (blocksize >= 4) sdata[tid] += sdata[tid + 2];
if (blocksize >= 2) sdata[tid] += sdata[tid + 1];
}
template<unsigned int blocksize>
__global__ void
multiple_thread_kernel(float* g_odata, float* g_idata)
{
extern __shared__ float sdata[];
// each thread loads one element from global to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * ( blocksize * 2 ) + threadIdx.x;
unsigned int n = 12; // number of elements
sdata[tid] = 0;
while (i < n) {
sdata[tid] += g_idata[i] + g_idata[i+blocksize];
i += 4;
}
__syncthreads();
// do reduction in shared memory
if (blocksize >= 512) {
if (tid < 256) { sdata[tid] += sdata[tid + 256] ;} __syncthreads();}
if (blocksize >= 256) {
if (tid < 128) { sdata[tid] += sdata[tid + 128] ;} __syncthreads();}
if (blocksize >= 128) {
if (tid < 64) { sdata[tid] += sdata[tid + 64] ;} __syncthreads();}
if (tid < 32) warpReduce<blocksize>(sdata , tid);
// write result for this block to global memory
if (tid == 0 ) g_odata[0] = sdata[0];
}
void parser::total_sum( matrix& C) {
float* dev_a;
hipMalloc(&dev_a, A.row * A.col * sizeof(float));
hipMemcpy(dev_a, A.elements, A.row * A.col * sizeof(float), hipMemcpyHostToDevice);
float* dev_c;
hipMalloc(&dev_c, C.row * sizeof(float));
// switch (threads)
hipLaunchKernelGGL(( multiple_thread_kernel<2>), dim3(1) , dim3(2) , 32 * sizeof(float), 0, dev_c, dev_a);
hipDeviceSynchronize();
hipMemcpy(C.elements, dev_c, C.row * sizeof(float), hipMemcpyDeviceToHost);
hipFree(dev_a);
hipFree(dev_c);
return;
} | 2bf991fb15809368b5602c6086f17f1cf4457d86.cu | /*
* Yan haixu
*/
#include <stdio.h>
#include "../parser.h"
template<unsigned int blocksize>
__device__ void warpReduce(volatile float* sdata , int tid) {
if (blocksize >= 64) sdata[tid] += sdata[tid + 32];
if (blocksize >= 32) sdata[tid] += sdata[tid + 16];
if (blocksize >= 16) sdata[tid] += sdata[tid + 8];
if (blocksize >= 8) sdata[tid] += sdata[tid + 4];
if (blocksize >= 4) sdata[tid] += sdata[tid + 2];
if (blocksize >= 2) sdata[tid] += sdata[tid + 1];
}
template<unsigned int blocksize>
__global__ void
multiple_thread_kernel(float* g_odata, float* g_idata)
{
extern __shared__ float sdata[];
// each thread loads one element from global to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * ( blocksize * 2 ) + threadIdx.x;
unsigned int n = 12; // number of elements
sdata[tid] = 0;
while (i < n) {
sdata[tid] += g_idata[i] + g_idata[i+blocksize];
i += 4;
}
__syncthreads();
// do reduction in shared memory
if (blocksize >= 512) {
if (tid < 256) { sdata[tid] += sdata[tid + 256] ;} __syncthreads();}
if (blocksize >= 256) {
if (tid < 128) { sdata[tid] += sdata[tid + 128] ;} __syncthreads();}
if (blocksize >= 128) {
if (tid < 64) { sdata[tid] += sdata[tid + 64] ;} __syncthreads();}
if (tid < 32) warpReduce<blocksize>(sdata , tid);
// write result for this block to global memory
if (tid == 0 ) g_odata[0] = sdata[0];
}
void parser::total_sum( matrix& C) {
float* dev_a;
cudaMalloc(&dev_a, A.row * A.col * sizeof(float));
cudaMemcpy(dev_a, A.elements, A.row * A.col * sizeof(float), cudaMemcpyHostToDevice);
float* dev_c;
cudaMalloc(&dev_c, C.row * sizeof(float));
// switch (threads)
multiple_thread_kernel<2><<< 1 , 2 , 32 * sizeof(float)>>>(dev_c, dev_a);
cudaDeviceSynchronize();
cudaMemcpy(C.elements, dev_c, C.row * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(dev_a);
cudaFree(dev_c);
return;
} |
09a912e739e5836641d37ec9d3bee6ca79ac1c35.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuVector_td_test_kernels.h"
#include "check_CUDA.h"
#include "vector_td_utilities.h"
#include "cuNDArray.h"
#include "cudaDeviceManager.h"
#include "thrust/device_vector.h"
using namespace Gadgetron;
template<class T, unsigned int D> __global__ void abs_kernel(vector_td<T,D>* data, unsigned int size){
const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if (idx < size) data[idx] = abs(data[idx]);
}
template<class T, unsigned int D> void Gadgetron::test_abs(cuNDArray< vector_td<T,D> >* data){
dim3 dimBlock(::min(cudaDeviceManager::Instance()->max_griddim(),(int)data->get_number_of_elements()));
dim3 dimGrid((dimBlock.x-1)/data->get_number_of_elements()+1);
hipLaunchKernelGGL(( abs_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, data->get_data_ptr(),data->get_number_of_elements());
hipDeviceSynchronize();
CHECK_FOR_CUDA_ERROR();
}
template<typename T, unsigned int D>
struct test_norm_functor : public thrust::unary_function<T,vector_td<T,D> >
{
__host__ __device__ T operator()(const vector_td<T,D> &x) const {return norm(x);}
};
template<class T, unsigned int D> thrust::device_vector<T> Gadgetron::test_norm(cuNDArray< vector_td<T,D> >* data){
thrust::device_vector<T> out(data->get_number_of_elements());
thrust::transform(data->begin(),data->end(),out.begin(),test_norm_functor<T,D>());
hipDeviceSynchronize();
CHECK_FOR_CUDA_ERROR();
return out;
}
template<typename T, unsigned int D>
struct test_min_functor : public thrust::unary_function<T,vector_td<T,D> >
{
__host__ __device__ T operator()(const vector_td<T,D> &x) const {return min(x);}
};
template<class T, unsigned int D> thrust::device_vector<T> Gadgetron::test_min(cuNDArray< vector_td<T,D> >* data){
thrust::device_vector<T> out(data->get_number_of_elements());
thrust::transform(data->begin(),data->end(),out.begin(),test_min_functor<T,D>());
hipDeviceSynchronize();
CHECK_FOR_CUDA_ERROR();
return out;
}
template<typename T, unsigned int D>
struct test_max_functor : public thrust::unary_function<T,vector_td<T,D> >
{
__host__ __device__ T operator()(const vector_td<T,D> &x) const {return max(x);}
};
template<class T, unsigned int D> thrust::device_vector<T> Gadgetron::test_max(cuNDArray< vector_td<T,D> >* data){
thrust::device_vector<T> out(data->get_number_of_elements());
thrust::transform(data->begin(),data->end(),out.begin(),test_max_functor<T,D>());
hipDeviceSynchronize();
CHECK_FOR_CUDA_ERROR();
return out;
}
template<typename T, unsigned int D>
struct test_amin_functor : public thrust::binary_function<vector_td<T,D>, vector_td<T,D>, vector_td<T,D> >
{
__host__ __device__ vector_td<T,D> operator()(const vector_td<T,D> &x, const vector_td<T,D> &y) const {return amin(x,y);}
};
template<class T, unsigned int D> boost::shared_ptr<cuNDArray<vector_td<T,D> > > Gadgetron::test_amin(cuNDArray< vector_td<T,D> >* data1, cuNDArray< vector_td<T,D> >* data2){
boost::shared_ptr<cuNDArray<vector_td<T,D> > > out( new cuNDArray<vector_td<T,D> >(data1->get_dimensions()));
thrust::transform(data1->begin(),data1->end(),data2->begin(),out->begin(),test_amin_functor<T,D>());
return out;
}
template<typename T, unsigned int D>
struct test_amax_functor : public thrust::binary_function<vector_td<T,D>, vector_td<T,D>, vector_td<T,D> >
{
__host__ __device__ vector_td<T,D> operator()(const vector_td<T,D> &x, const vector_td<T,D> &y) const {return amax(x,y);}
};
template<class T, unsigned int D> boost::shared_ptr<cuNDArray<vector_td<T,D> > > Gadgetron::test_amax(cuNDArray< vector_td<T,D> >* data1, cuNDArray< vector_td<T,D> >* data2){
boost::shared_ptr<cuNDArray<vector_td<T,D> > > out( new cuNDArray<vector_td<T,D> >(data1->get_dimensions()));
thrust::transform(data1->begin(),data1->end(),data2->begin(),out->begin(),test_amax_functor<T,D>());
return out;
}
template<typename T, unsigned int D>
class test_amin2_functor : public thrust::unary_function<vector_td<T,D>, vector_td<T,D> >
{
public:
test_amin2_functor(T _val): val(_val){};
__host__ __device__ vector_td<T,D> operator()(const vector_td<T,D> &x) const {return amin(x,val);}
T val;
};
template<class T, unsigned int D> boost::shared_ptr<cuNDArray<vector_td<T,D> > > Gadgetron::test_amin2(cuNDArray< vector_td<T,D> >* data1, T val){
boost::shared_ptr<cuNDArray<vector_td<T,D> > > out( new cuNDArray<vector_td<T,D> >(data1->get_dimensions()));
thrust::transform(data1->begin(),data1->end(),out->begin(),test_amin2_functor<T,D>(val));
return out;
}
template<typename T, unsigned int D>
class test_amax2_functor : public thrust::unary_function<vector_td<T,D>, vector_td<T,D> >
{
public:
test_amax2_functor(T _val): val(_val){};
__host__ __device__ vector_td<T,D> operator()(const vector_td<T,D> &x) const {return amax(x,val);}
T val;
};
template<class T, unsigned int D> boost::shared_ptr<cuNDArray<vector_td<T,D> > > Gadgetron::test_amax2(cuNDArray< vector_td<T,D> >* data1, T val){
boost::shared_ptr<cuNDArray<vector_td<T,D> > > out( new cuNDArray<vector_td<T,D> >(data1->get_dimensions()));
thrust::transform(data1->begin(),data1->end(),out->begin(),test_amax2_functor<T,D>(val));
return out;
}
template<class T, unsigned int D> void Gadgetron::vector_fill(cuNDArray< vector_td<T,D> >* data, vector_td<T,D> val){
thrust::fill(data->begin(),data->end(),val);
}
template void Gadgetron::test_abs<float,1>(cuNDArray< vector_td<float,1> > *);
template void Gadgetron::test_abs<float,2>(cuNDArray< vector_td<float,2> > *);
template void Gadgetron::test_abs<float,3>(cuNDArray< vector_td<float,3> > *);
template void Gadgetron::test_abs<float,4>(cuNDArray< vector_td<float,4> > *);
template void Gadgetron::test_abs<double,1>(cuNDArray< vector_td<double,1> > *);
template void Gadgetron::test_abs<double,2>(cuNDArray< vector_td<double,2> > *);
template void Gadgetron::test_abs<double,3>(cuNDArray< vector_td<double,3> > *);
template void Gadgetron::test_abs<double,4>(cuNDArray< vector_td<double,4> > *);
template thrust::device_vector<float> Gadgetron::test_norm<float,1>(cuNDArray< vector_td<float,1> > *);
template thrust::device_vector<float> Gadgetron::test_norm<float,2>(cuNDArray< vector_td<float,2> > *);
template thrust::device_vector<float> Gadgetron::test_norm<float,3>(cuNDArray< vector_td<float,3> > *);
template thrust::device_vector<float> Gadgetron::test_norm<float,4>(cuNDArray< vector_td<float,4> > *);
template thrust::device_vector<double> Gadgetron::test_norm<double,1>(cuNDArray< vector_td<double,1> > *);
template thrust::device_vector<double> Gadgetron::test_norm<double,2>(cuNDArray< vector_td<double,2> > *);
template thrust::device_vector<double> Gadgetron::test_norm<double,3>(cuNDArray< vector_td<double,3> > *);
template thrust::device_vector<double> Gadgetron::test_norm<double,4>(cuNDArray< vector_td<double,4> > *);
template thrust::device_vector<float> Gadgetron::test_min<float,1>(cuNDArray< vector_td<float,1> > *);
template thrust::device_vector<float> Gadgetron::test_min<float,2>(cuNDArray< vector_td<float,2> > *);
template thrust::device_vector<float> Gadgetron::test_min<float,3>(cuNDArray< vector_td<float,3> > *);
template thrust::device_vector<float> Gadgetron::test_min<float,4>(cuNDArray< vector_td<float,4> > *);
template thrust::device_vector<double> Gadgetron::test_min<double,1>(cuNDArray< vector_td<double,1> > *);
template thrust::device_vector<double> Gadgetron::test_min<double,2>(cuNDArray< vector_td<double,2> > *);
template thrust::device_vector<double> Gadgetron::test_min<double,3>(cuNDArray< vector_td<double,3> > *);
template thrust::device_vector<double> Gadgetron::test_min<double,4>(cuNDArray< vector_td<double,4> > *);
template thrust::device_vector<float> Gadgetron::test_max<float,1>(cuNDArray< vector_td<float,1> > *);
template thrust::device_vector<float> Gadgetron::test_max<float,2>(cuNDArray< vector_td<float,2> > *);
template thrust::device_vector<float> Gadgetron::test_max<float,3>(cuNDArray< vector_td<float,3> > *);
template thrust::device_vector<float> Gadgetron::test_max<float,4>(cuNDArray< vector_td<float,4> > *);
template thrust::device_vector<double> Gadgetron::test_max<double,1>(cuNDArray< vector_td<double,1> > *);
template thrust::device_vector<double> Gadgetron::test_max<double,2>(cuNDArray< vector_td<double,2> > *);
template thrust::device_vector<double> Gadgetron::test_max<double,3>(cuNDArray< vector_td<double,3> > *);
template thrust::device_vector<double> Gadgetron::test_max<double,4>(cuNDArray< vector_td<double,4> > *);
template boost::shared_ptr<cuNDArray<vector_td<float,1> > > Gadgetron::test_amin<float,1>(cuNDArray< vector_td<float,1> > *,cuNDArray< vector_td<float,1> > *);
template boost::shared_ptr<cuNDArray<vector_td<float,2> > > Gadgetron::test_amin<float,2>(cuNDArray< vector_td<float,2> > *, cuNDArray< vector_td<float,2> > *);
template boost::shared_ptr<cuNDArray<vector_td<float,3> > > Gadgetron::test_amin<float,3>(cuNDArray< vector_td<float,3> > *, cuNDArray< vector_td<float,3> > *);
template boost::shared_ptr<cuNDArray<vector_td<float,4> > > Gadgetron::test_amin<float,4>(cuNDArray< vector_td<float,4> > *, cuNDArray< vector_td<float,4> > *);
template boost::shared_ptr<cuNDArray<vector_td<double,1> > > Gadgetron::test_amin<double,1>(cuNDArray< vector_td<double,1> > *, cuNDArray< vector_td<double,1> > *);
template boost::shared_ptr<cuNDArray<vector_td<double,2> > > Gadgetron::test_amin<double,2>(cuNDArray< vector_td<double,2> > *, cuNDArray< vector_td<double,2> > *);
template boost::shared_ptr<cuNDArray<vector_td<double,3> > > Gadgetron::test_amin<double,3>(cuNDArray< vector_td<double,3> > *, cuNDArray< vector_td<double,3> > *);
template boost::shared_ptr<cuNDArray<vector_td<double,4> > > Gadgetron::test_amin<double,4>(cuNDArray< vector_td<double,4> > *, cuNDArray< vector_td<double,4> > *);
template boost::shared_ptr<cuNDArray<vector_td<float,1> > > Gadgetron::test_amin2<float,1>(cuNDArray< vector_td<float,1> > *, float );
template boost::shared_ptr<cuNDArray<vector_td<float,2> > > Gadgetron::test_amin2<float,2>(cuNDArray< vector_td<float,2> > *, float);
template boost::shared_ptr<cuNDArray<vector_td<float,3> > > Gadgetron::test_amin2<float,3>(cuNDArray< vector_td<float,3> > *, float);
template boost::shared_ptr<cuNDArray<vector_td<float,4> > > Gadgetron::test_amin2<float,4>(cuNDArray< vector_td<float,4> > *, float);
template boost::shared_ptr<cuNDArray<vector_td<double,1> > > Gadgetron::test_amin2<double,1>(cuNDArray< vector_td<double,1> > *, double);
template boost::shared_ptr<cuNDArray<vector_td<double,2> > > Gadgetron::test_amin2<double,2>(cuNDArray< vector_td<double,2> > *, double);
template boost::shared_ptr<cuNDArray<vector_td<double,3> > > Gadgetron::test_amin2<double,3>(cuNDArray< vector_td<double,3> > *, double);
template boost::shared_ptr<cuNDArray<vector_td<double,4> > > Gadgetron::test_amin2<double,4>(cuNDArray< vector_td<double,4> > *, double);
template boost::shared_ptr<cuNDArray<vector_td<float,1> > > Gadgetron::test_amax<float,1>(cuNDArray< vector_td<float,1> > *,cuNDArray< vector_td<float,1> > *);
template boost::shared_ptr<cuNDArray<vector_td<float,2> > > Gadgetron::test_amax<float,2>(cuNDArray< vector_td<float,2> > *, cuNDArray< vector_td<float,2> > *);
template boost::shared_ptr<cuNDArray<vector_td<float,3> > > Gadgetron::test_amax<float,3>(cuNDArray< vector_td<float,3> > *, cuNDArray< vector_td<float,3> > *);
template boost::shared_ptr<cuNDArray<vector_td<float,4> > > Gadgetron::test_amax<float,4>(cuNDArray< vector_td<float,4> > *, cuNDArray< vector_td<float,4> > *);
template boost::shared_ptr<cuNDArray<vector_td<double,1> > > Gadgetron::test_amax<double,1>(cuNDArray< vector_td<double,1> > *, cuNDArray< vector_td<double,1> > *);
template boost::shared_ptr<cuNDArray<vector_td<double,2> > > Gadgetron::test_amax<double,2>(cuNDArray< vector_td<double,2> > *, cuNDArray< vector_td<double,2> > *);
template boost::shared_ptr<cuNDArray<vector_td<double,3> > > Gadgetron::test_amax<double,3>(cuNDArray< vector_td<double,3> > *, cuNDArray< vector_td<double,3> > *);
template boost::shared_ptr<cuNDArray<vector_td<double,4> > > Gadgetron::test_amax<double,4>(cuNDArray< vector_td<double,4> > *, cuNDArray< vector_td<double,4> > *);
template boost::shared_ptr<cuNDArray<vector_td<float,1> > > Gadgetron::test_amax2<float,1>(cuNDArray< vector_td<float,1> > *, float );
template boost::shared_ptr<cuNDArray<vector_td<float,2> > > Gadgetron::test_amax2<float,2>(cuNDArray< vector_td<float,2> > *, float);
template boost::shared_ptr<cuNDArray<vector_td<float,3> > > Gadgetron::test_amax2<float,3>(cuNDArray< vector_td<float,3> > *, float);
template boost::shared_ptr<cuNDArray<vector_td<float,4> > > Gadgetron::test_amax2<float,4>(cuNDArray< vector_td<float,4> > *, float);
template boost::shared_ptr<cuNDArray<vector_td<double,1> > > Gadgetron::test_amax2<double,1>(cuNDArray< vector_td<double,1> > *, double);
template boost::shared_ptr<cuNDArray<vector_td<double,2> > > Gadgetron::test_amax2<double,2>(cuNDArray< vector_td<double,2> > *, double);
template boost::shared_ptr<cuNDArray<vector_td<double,3> > > Gadgetron::test_amax2<double,3>(cuNDArray< vector_td<double,3> > *, double);
template boost::shared_ptr<cuNDArray<vector_td<double,4> > > Gadgetron::test_amax2<double,4>(cuNDArray< vector_td<double,4> > *, double);
template void Gadgetron::vector_fill<float,1>(cuNDArray< vector_td<float,1> > *, vector_td<float,1>);
template void Gadgetron::vector_fill<float,2>(cuNDArray< vector_td<float,2> > *, vector_td<float,2>);
template void Gadgetron::vector_fill<float,3>(cuNDArray< vector_td<float,3> > *, vector_td<float,3>);
template void Gadgetron::vector_fill<float,4>(cuNDArray< vector_td<float,4> > *, vector_td<float,4>);
template void Gadgetron::vector_fill<double,1>(cuNDArray< vector_td<double,1> > *, vector_td<double,1>);
template void Gadgetron::vector_fill<double,2>(cuNDArray< vector_td<double,2> > *, vector_td<double,2>);
template void Gadgetron::vector_fill<double,3>(cuNDArray< vector_td<double,3> > *, vector_td<double,3>);
template void Gadgetron::vector_fill<double,4>(cuNDArray< vector_td<double,4> > *, vector_td<double,4>);
| 09a912e739e5836641d37ec9d3bee6ca79ac1c35.cu | #include "cuVector_td_test_kernels.h"
#include "check_CUDA.h"
#include "vector_td_utilities.h"
#include "cuNDArray.h"
#include "cudaDeviceManager.h"
#include "thrust/device_vector.h"
using namespace Gadgetron;
template<class T, unsigned int D> __global__ void abs_kernel(vector_td<T,D>* data, unsigned int size){
const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if (idx < size) data[idx] = abs(data[idx]);
}
template<class T, unsigned int D> void Gadgetron::test_abs(cuNDArray< vector_td<T,D> >* data){
dim3 dimBlock(std::min(cudaDeviceManager::Instance()->max_griddim(),(int)data->get_number_of_elements()));
dim3 dimGrid((dimBlock.x-1)/data->get_number_of_elements()+1);
abs_kernel<<<dimGrid,dimBlock>>>(data->get_data_ptr(),data->get_number_of_elements());
cudaThreadSynchronize();
CHECK_FOR_CUDA_ERROR();
}
template<typename T, unsigned int D>
struct test_norm_functor : public thrust::unary_function<T,vector_td<T,D> >
{
__host__ __device__ T operator()(const vector_td<T,D> &x) const {return norm(x);}
};
template<class T, unsigned int D> thrust::device_vector<T> Gadgetron::test_norm(cuNDArray< vector_td<T,D> >* data){
thrust::device_vector<T> out(data->get_number_of_elements());
thrust::transform(data->begin(),data->end(),out.begin(),test_norm_functor<T,D>());
cudaThreadSynchronize();
CHECK_FOR_CUDA_ERROR();
return out;
}
template<typename T, unsigned int D>
struct test_min_functor : public thrust::unary_function<T,vector_td<T,D> >
{
__host__ __device__ T operator()(const vector_td<T,D> &x) const {return min(x);}
};
template<class T, unsigned int D> thrust::device_vector<T> Gadgetron::test_min(cuNDArray< vector_td<T,D> >* data){
thrust::device_vector<T> out(data->get_number_of_elements());
thrust::transform(data->begin(),data->end(),out.begin(),test_min_functor<T,D>());
cudaThreadSynchronize();
CHECK_FOR_CUDA_ERROR();
return out;
}
template<typename T, unsigned int D>
struct test_max_functor : public thrust::unary_function<T,vector_td<T,D> >
{
__host__ __device__ T operator()(const vector_td<T,D> &x) const {return max(x);}
};
template<class T, unsigned int D> thrust::device_vector<T> Gadgetron::test_max(cuNDArray< vector_td<T,D> >* data){
thrust::device_vector<T> out(data->get_number_of_elements());
thrust::transform(data->begin(),data->end(),out.begin(),test_max_functor<T,D>());
cudaThreadSynchronize();
CHECK_FOR_CUDA_ERROR();
return out;
}
template<typename T, unsigned int D>
struct test_amin_functor : public thrust::binary_function<vector_td<T,D>, vector_td<T,D>, vector_td<T,D> >
{
__host__ __device__ vector_td<T,D> operator()(const vector_td<T,D> &x, const vector_td<T,D> &y) const {return amin(x,y);}
};
template<class T, unsigned int D> boost::shared_ptr<cuNDArray<vector_td<T,D> > > Gadgetron::test_amin(cuNDArray< vector_td<T,D> >* data1, cuNDArray< vector_td<T,D> >* data2){
boost::shared_ptr<cuNDArray<vector_td<T,D> > > out( new cuNDArray<vector_td<T,D> >(data1->get_dimensions()));
thrust::transform(data1->begin(),data1->end(),data2->begin(),out->begin(),test_amin_functor<T,D>());
return out;
}
template<typename T, unsigned int D>
struct test_amax_functor : public thrust::binary_function<vector_td<T,D>, vector_td<T,D>, vector_td<T,D> >
{
__host__ __device__ vector_td<T,D> operator()(const vector_td<T,D> &x, const vector_td<T,D> &y) const {return amax(x,y);}
};
template<class T, unsigned int D> boost::shared_ptr<cuNDArray<vector_td<T,D> > > Gadgetron::test_amax(cuNDArray< vector_td<T,D> >* data1, cuNDArray< vector_td<T,D> >* data2){
boost::shared_ptr<cuNDArray<vector_td<T,D> > > out( new cuNDArray<vector_td<T,D> >(data1->get_dimensions()));
thrust::transform(data1->begin(),data1->end(),data2->begin(),out->begin(),test_amax_functor<T,D>());
return out;
}
template<typename T, unsigned int D>
class test_amin2_functor : public thrust::unary_function<vector_td<T,D>, vector_td<T,D> >
{
public:
test_amin2_functor(T _val): val(_val){};
__host__ __device__ vector_td<T,D> operator()(const vector_td<T,D> &x) const {return amin(x,val);}
T val;
};
template<class T, unsigned int D> boost::shared_ptr<cuNDArray<vector_td<T,D> > > Gadgetron::test_amin2(cuNDArray< vector_td<T,D> >* data1, T val){
boost::shared_ptr<cuNDArray<vector_td<T,D> > > out( new cuNDArray<vector_td<T,D> >(data1->get_dimensions()));
thrust::transform(data1->begin(),data1->end(),out->begin(),test_amin2_functor<T,D>(val));
return out;
}
template<typename T, unsigned int D>
class test_amax2_functor : public thrust::unary_function<vector_td<T,D>, vector_td<T,D> >
{
public:
test_amax2_functor(T _val): val(_val){};
__host__ __device__ vector_td<T,D> operator()(const vector_td<T,D> &x) const {return amax(x,val);}
T val;
};
template<class T, unsigned int D> boost::shared_ptr<cuNDArray<vector_td<T,D> > > Gadgetron::test_amax2(cuNDArray< vector_td<T,D> >* data1, T val){
boost::shared_ptr<cuNDArray<vector_td<T,D> > > out( new cuNDArray<vector_td<T,D> >(data1->get_dimensions()));
thrust::transform(data1->begin(),data1->end(),out->begin(),test_amax2_functor<T,D>(val));
return out;
}
template<class T, unsigned int D> void Gadgetron::vector_fill(cuNDArray< vector_td<T,D> >* data, vector_td<T,D> val){
thrust::fill(data->begin(),data->end(),val);
}
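// Explicit template instantiations for float and double with vector dimensions 1 through 4.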
template void Gadgetron::test_abs<float,1>(cuNDArray< vector_td<float,1> > *);
template void Gadgetron::test_abs<float,2>(cuNDArray< vector_td<float,2> > *);
template void Gadgetron::test_abs<float,3>(cuNDArray< vector_td<float,3> > *);
template void Gadgetron::test_abs<float,4>(cuNDArray< vector_td<float,4> > *);
template void Gadgetron::test_abs<double,1>(cuNDArray< vector_td<double,1> > *);
template void Gadgetron::test_abs<double,2>(cuNDArray< vector_td<double,2> > *);
template void Gadgetron::test_abs<double,3>(cuNDArray< vector_td<double,3> > *);
template void Gadgetron::test_abs<double,4>(cuNDArray< vector_td<double,4> > *);
template thrust::device_vector<float> Gadgetron::test_norm<float,1>(cuNDArray< vector_td<float,1> > *);
template thrust::device_vector<float> Gadgetron::test_norm<float,2>(cuNDArray< vector_td<float,2> > *);
template thrust::device_vector<float> Gadgetron::test_norm<float,3>(cuNDArray< vector_td<float,3> > *);
template thrust::device_vector<float> Gadgetron::test_norm<float,4>(cuNDArray< vector_td<float,4> > *);
template thrust::device_vector<double> Gadgetron::test_norm<double,1>(cuNDArray< vector_td<double,1> > *);
template thrust::device_vector<double> Gadgetron::test_norm<double,2>(cuNDArray< vector_td<double,2> > *);
template thrust::device_vector<double> Gadgetron::test_norm<double,3>(cuNDArray< vector_td<double,3> > *);
template thrust::device_vector<double> Gadgetron::test_norm<double,4>(cuNDArray< vector_td<double,4> > *);
template thrust::device_vector<float> Gadgetron::test_min<float,1>(cuNDArray< vector_td<float,1> > *);
template thrust::device_vector<float> Gadgetron::test_min<float,2>(cuNDArray< vector_td<float,2> > *);
template thrust::device_vector<float> Gadgetron::test_min<float,3>(cuNDArray< vector_td<float,3> > *);
template thrust::device_vector<float> Gadgetron::test_min<float,4>(cuNDArray< vector_td<float,4> > *);
template thrust::device_vector<double> Gadgetron::test_min<double,1>(cuNDArray< vector_td<double,1> > *);
template thrust::device_vector<double> Gadgetron::test_min<double,2>(cuNDArray< vector_td<double,2> > *);
template thrust::device_vector<double> Gadgetron::test_min<double,3>(cuNDArray< vector_td<double,3> > *);
template thrust::device_vector<double> Gadgetron::test_min<double,4>(cuNDArray< vector_td<double,4> > *);
template thrust::device_vector<float> Gadgetron::test_max<float,1>(cuNDArray< vector_td<float,1> > *);
template thrust::device_vector<float> Gadgetron::test_max<float,2>(cuNDArray< vector_td<float,2> > *);
template thrust::device_vector<float> Gadgetron::test_max<float,3>(cuNDArray< vector_td<float,3> > *);
template thrust::device_vector<float> Gadgetron::test_max<float,4>(cuNDArray< vector_td<float,4> > *);
template thrust::device_vector<double> Gadgetron::test_max<double,1>(cuNDArray< vector_td<double,1> > *);
template thrust::device_vector<double> Gadgetron::test_max<double,2>(cuNDArray< vector_td<double,2> > *);
template thrust::device_vector<double> Gadgetron::test_max<double,3>(cuNDArray< vector_td<double,3> > *);
template thrust::device_vector<double> Gadgetron::test_max<double,4>(cuNDArray< vector_td<double,4> > *);
template boost::shared_ptr<cuNDArray<vector_td<float,1> > > Gadgetron::test_amin<float,1>(cuNDArray< vector_td<float,1> > *,cuNDArray< vector_td<float,1> > *);
template boost::shared_ptr<cuNDArray<vector_td<float,2> > > Gadgetron::test_amin<float,2>(cuNDArray< vector_td<float,2> > *, cuNDArray< vector_td<float,2> > *);
template boost::shared_ptr<cuNDArray<vector_td<float,3> > > Gadgetron::test_amin<float,3>(cuNDArray< vector_td<float,3> > *, cuNDArray< vector_td<float,3> > *);
template boost::shared_ptr<cuNDArray<vector_td<float,4> > > Gadgetron::test_amin<float,4>(cuNDArray< vector_td<float,4> > *, cuNDArray< vector_td<float,4> > *);
template boost::shared_ptr<cuNDArray<vector_td<double,1> > > Gadgetron::test_amin<double,1>(cuNDArray< vector_td<double,1> > *, cuNDArray< vector_td<double,1> > *);
template boost::shared_ptr<cuNDArray<vector_td<double,2> > > Gadgetron::test_amin<double,2>(cuNDArray< vector_td<double,2> > *, cuNDArray< vector_td<double,2> > *);
template boost::shared_ptr<cuNDArray<vector_td<double,3> > > Gadgetron::test_amin<double,3>(cuNDArray< vector_td<double,3> > *, cuNDArray< vector_td<double,3> > *);
template boost::shared_ptr<cuNDArray<vector_td<double,4> > > Gadgetron::test_amin<double,4>(cuNDArray< vector_td<double,4> > *, cuNDArray< vector_td<double,4> > *);
template boost::shared_ptr<cuNDArray<vector_td<float,1> > > Gadgetron::test_amin2<float,1>(cuNDArray< vector_td<float,1> > *, float );
template boost::shared_ptr<cuNDArray<vector_td<float,2> > > Gadgetron::test_amin2<float,2>(cuNDArray< vector_td<float,2> > *, float);
template boost::shared_ptr<cuNDArray<vector_td<float,3> > > Gadgetron::test_amin2<float,3>(cuNDArray< vector_td<float,3> > *, float);
template boost::shared_ptr<cuNDArray<vector_td<float,4> > > Gadgetron::test_amin2<float,4>(cuNDArray< vector_td<float,4> > *, float);
template boost::shared_ptr<cuNDArray<vector_td<double,1> > > Gadgetron::test_amin2<double,1>(cuNDArray< vector_td<double,1> > *, double);
template boost::shared_ptr<cuNDArray<vector_td<double,2> > > Gadgetron::test_amin2<double,2>(cuNDArray< vector_td<double,2> > *, double);
template boost::shared_ptr<cuNDArray<vector_td<double,3> > > Gadgetron::test_amin2<double,3>(cuNDArray< vector_td<double,3> > *, double);
template boost::shared_ptr<cuNDArray<vector_td<double,4> > > Gadgetron::test_amin2<double,4>(cuNDArray< vector_td<double,4> > *, double);
template boost::shared_ptr<cuNDArray<vector_td<float,1> > > Gadgetron::test_amax<float,1>(cuNDArray< vector_td<float,1> > *,cuNDArray< vector_td<float,1> > *);
template boost::shared_ptr<cuNDArray<vector_td<float,2> > > Gadgetron::test_amax<float,2>(cuNDArray< vector_td<float,2> > *, cuNDArray< vector_td<float,2> > *);
template boost::shared_ptr<cuNDArray<vector_td<float,3> > > Gadgetron::test_amax<float,3>(cuNDArray< vector_td<float,3> > *, cuNDArray< vector_td<float,3> > *);
template boost::shared_ptr<cuNDArray<vector_td<float,4> > > Gadgetron::test_amax<float,4>(cuNDArray< vector_td<float,4> > *, cuNDArray< vector_td<float,4> > *);
template boost::shared_ptr<cuNDArray<vector_td<double,1> > > Gadgetron::test_amax<double,1>(cuNDArray< vector_td<double,1> > *, cuNDArray< vector_td<double,1> > *);
template boost::shared_ptr<cuNDArray<vector_td<double,2> > > Gadgetron::test_amax<double,2>(cuNDArray< vector_td<double,2> > *, cuNDArray< vector_td<double,2> > *);
template boost::shared_ptr<cuNDArray<vector_td<double,3> > > Gadgetron::test_amax<double,3>(cuNDArray< vector_td<double,3> > *, cuNDArray< vector_td<double,3> > *);
template boost::shared_ptr<cuNDArray<vector_td<double,4> > > Gadgetron::test_amax<double,4>(cuNDArray< vector_td<double,4> > *, cuNDArray< vector_td<double,4> > *);
template boost::shared_ptr<cuNDArray<vector_td<float,1> > > Gadgetron::test_amax2<float,1>(cuNDArray< vector_td<float,1> > *, float );
template boost::shared_ptr<cuNDArray<vector_td<float,2> > > Gadgetron::test_amax2<float,2>(cuNDArray< vector_td<float,2> > *, float);
template boost::shared_ptr<cuNDArray<vector_td<float,3> > > Gadgetron::test_amax2<float,3>(cuNDArray< vector_td<float,3> > *, float);
template boost::shared_ptr<cuNDArray<vector_td<float,4> > > Gadgetron::test_amax2<float,4>(cuNDArray< vector_td<float,4> > *, float);
template boost::shared_ptr<cuNDArray<vector_td<double,1> > > Gadgetron::test_amax2<double,1>(cuNDArray< vector_td<double,1> > *, double);
template boost::shared_ptr<cuNDArray<vector_td<double,2> > > Gadgetron::test_amax2<double,2>(cuNDArray< vector_td<double,2> > *, double);
template boost::shared_ptr<cuNDArray<vector_td<double,3> > > Gadgetron::test_amax2<double,3>(cuNDArray< vector_td<double,3> > *, double);
template boost::shared_ptr<cuNDArray<vector_td<double,4> > > Gadgetron::test_amax2<double,4>(cuNDArray< vector_td<double,4> > *, double);
template void Gadgetron::vector_fill<float,1>(cuNDArray< vector_td<float,1> > *, vector_td<float,1>);
template void Gadgetron::vector_fill<float,2>(cuNDArray< vector_td<float,2> > *, vector_td<float,2>);
template void Gadgetron::vector_fill<float,3>(cuNDArray< vector_td<float,3> > *, vector_td<float,3>);
template void Gadgetron::vector_fill<float,4>(cuNDArray< vector_td<float,4> > *, vector_td<float,4>);
template void Gadgetron::vector_fill<double,1>(cuNDArray< vector_td<double,1> > *, vector_td<double,1>);
template void Gadgetron::vector_fill<double,2>(cuNDArray< vector_td<double,2> > *, vector_td<double,2>);
template void Gadgetron::vector_fill<double,3>(cuNDArray< vector_td<double,3> > *, vector_td<double,3>);
template void Gadgetron::vector_fill<double,4>(cuNDArray< vector_td<double,4> > *, vector_td<double,4>);
|
49f030640a1638f17e7a73bb620fc8c25ff9e02e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* LA-CC-16080
Copyright © 2016 Priscilla Kelly and Los Alamos National Laboratory. All Rights Reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY Priscilla Kelly and Los Alamos National Laboratory "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Priscilla Kelly <[email protected]>
GPULife applyRules_CUDA
*/
#include "stdio.h"
#include "stdlib.h"
#define maxThread 32
/*******************************************/
/* Cuda kernel to apply the rules of Life */
/*******************************************/
__global__ void applyRules(int row,int col,int *update, int *hold) {
int threadMax = blockDim.x;
int blockID = blockIdx.x;
int threadID = threadIdx.x;
    int linID = blockID*threadMax + threadID; // global 1D thread index over interior cells
int elements = (row-2)*(col-2);
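    // Map the linear index to an interior cell: i is used as the row offset and j as the
    // column offset below, so this decomposition is only consistent for a square interior
    // (row - 2 == col - 2), which is assumed here.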
int i = linID%(col-2);
int j = linID/(row-2);
int loc = col + i*col + j + 1;
if (linID < elements) {
int liveCells = 0;
int n, s, e, w, nw, ne, sw, se; // location in halo
n = loc-col;
nw = n-1;
ne = n+1;
w = loc-1;
e = loc+1;
s = loc+col;
sw = s-1;
se = s+1;
liveCells = hold[nw] + hold[n] + hold[ne]
+ hold[w] + hold[e]
+ hold[sw] + hold[s] + hold[se];
// Apply Rules
if (hold[loc] == 0) {
if (liveCells == 3) {
update[loc] = 1; // reproduction
} else {
update[loc] = 0; // remain dead
}
} else {
if (liveCells < 2){
update[loc] = 0; // under population
} else {
if (liveCells < 4) {
update[loc] = 1; // survivor
} else {
update[loc] = 0; // over population
}
}
}
}
}
/*******************************************/
/* Cuda kernel to upload N/S halo elements */
/*******************************************/
__global__ void add_NS_Halo(int row,int col,int *haloMat,int *subMat) {
int b = blockIdx.x;
int t = threadIdx.x;
// add North portion
if (b == 0) {
subMat[t] = haloMat[t];
}
// add South portion
if (b == 1) {
int subLoc = col*(row-1)+t;
int haloLoc = (row+col)+t;
subMat[subLoc] = haloMat[haloLoc];
}
}
/*******************************************/
/* Cuda kernel to upload E/W halo elements */
/*******************************************/
__global__ void add_EW_Halo(int row,int col,int *haloMat,int *subMat) {
int b = blockIdx.x;
int t = threadIdx.x;
// add East portion
if (b == 0) {
int subLoc = col-1+(col)*t;
int haloLoc = col+t;
subMat[subLoc] = haloMat[haloLoc];
}
// add the West portion
if (b == 1) {
int subLoc = (col)*t;
int haloLoc = 2*row+col+t;
subMat[subLoc] = haloMat[haloLoc];
}
}
/*******************************************/
/* Cuda kernel to get N/S halo elements */
/*******************************************/
__global__ void get_NS_Halo(int row,int col,int *haloMat,int *subMat) {
int b = blockIdx.x;
int t = threadIdx.x;
// add North portion
if (b == 0) {
haloMat[t]=subMat[t+col];
}
// add South portion
if (b == 1) {
int subLoc = col*(row-2)+t;
int haloLoc = (row+col)+t;
haloMat[haloLoc]=subMat[subLoc];
}
}
/*******************************************/
/* Cuda kernel to get E/W halo elements */
/*******************************************/
__global__ void get_EW_Halo(int row,int col,int *haloMat,int *subMat) {
int b = blockIdx.x;
int t = threadIdx.x;
// add East portion
if (b == 0) {
int subLoc = (col-2)+col*t;
int haloLoc = col+t;
haloMat[haloLoc]=subMat[subLoc];
}
// add the West portion
if (b == 1) {
int subLoc = 1+col*t;
int haloLoc = col+2*row+t;
haloMat[haloLoc]=subMat[subLoc] ;
}
}
/***************************************/
/* External c subroutine for CUDA */
/***************************************/
extern "C" void call_cuda_applyRules(int flag,int rows, int cols,int *halo, int *halo_dev, int *update, int *hold) {
/**************************************************/
/* Get the values to exchange over MPI */
/**************************************************/
if (flag == 0) {
int haloSize = sizeof(int)*2*(rows+cols);
hipError_t err = hipSuccess;
// Add the North and South rows to hold
hipLaunchKernelGGL(( get_NS_Halo), dim3(2), dim3(cols), 0, 0, rows,cols,halo_dev,hold);
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "Failed to launch Kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipDeviceSynchronize();
// Add the East and West columns to hold
hipLaunchKernelGGL(( get_EW_Halo), dim3(2), dim3(rows), 0, 0, rows,cols,halo_dev,hold);
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "Failed to launch Kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipDeviceSynchronize();
err = hipMemcpy(halo,halo_dev,haloSize,hipMemcpyDeviceToHost);
return;
}
/*****************************************************/
/* Update hold with halo, then apply rules to update */
/*****************************************************/
if (flag == 1) {
int haloSize = sizeof(int)*2*(rows+cols);
hipError_t err = hipSuccess;
// Copy updated halo to GPU
err = hipMemcpy(halo_dev,halo,haloSize,hipMemcpyHostToDevice);
// Add the North and South rows to hold
hipLaunchKernelGGL(( add_NS_Halo), dim3(2), dim3(cols), 0, 0, rows,cols,halo_dev,hold);
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "Failed to launch Kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipDeviceSynchronize();
// Add the East and West columns to hold
hipLaunchKernelGGL(( add_EW_Halo), dim3(2), dim3(rows), 0, 0, rows,cols,halo_dev,hold);
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "Failed to launch Kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipDeviceSynchronize();
// Apply the Rules
int N = (rows-2)*(cols-2);
int threadCnt;
int blockCnt;
//threads per block
threadCnt = maxThread;
//blocks
int check = N/maxThread;
if (check == 0) {
blockCnt = 1;
} else {
blockCnt = check + 1;
}
hipLaunchKernelGGL(( applyRules), dim3(blockCnt), dim3(threadCnt), 0, 0, rows,cols,update,hold);
        err = hipGetLastError();
        if (err != hipSuccess) {
            fprintf(stderr, "Failed to launch Kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipDeviceSynchronize();
return;
}
}
| 49f030640a1638f17e7a73bb620fc8c25ff9e02e.cu | /* LA-CC-16080
Copyright © 2016 Priscilla Kelly and Los Alamos National Laboratory. All Rights Reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY Priscilla Kelly and Los Alamos National Laboratory "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Priscilla Kelly <[email protected]>
GPULife applyRules_CUDA
*/
#include "stdio.h"
#include "stdlib.h"
#define maxThread 32
/*******************************************/
/* Cuda kernel to apply the rules of Life */
/*******************************************/
__global__ void applyRules(int row,int col,int *update, int *hold) {
int threadMax = blockDim.x;
int blockID = blockIdx.x;
int threadID = threadIdx.x;
    int linID = blockID*threadMax + threadID; // global 1D thread index over interior cells
int elements = (row-2)*(col-2);
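    // Map the linear index to an interior cell: i is used as the row offset and j as the
    // column offset below, so this decomposition is only consistent for a square interior
    // (row - 2 == col - 2), which is assumed here.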
int i = linID%(col-2);
int j = linID/(row-2);
int loc = col + i*col + j + 1;
if (linID < elements) {
int liveCells = 0;
int n, s, e, w, nw, ne, sw, se; // location in halo
n = loc-col;
nw = n-1;
ne = n+1;
w = loc-1;
e = loc+1;
s = loc+col;
sw = s-1;
se = s+1;
liveCells = hold[nw] + hold[n] + hold[ne]
+ hold[w] + hold[e]
+ hold[sw] + hold[s] + hold[se];
// Apply Rules
if (hold[loc] == 0) {
if (liveCells == 3) {
update[loc] = 1; // reproduction
} else {
update[loc] = 0; // remain dead
}
} else {
if (liveCells < 2){
update[loc] = 0; // under population
} else {
if (liveCells < 4) {
update[loc] = 1; // survivor
} else {
update[loc] = 0; // over population
}
}
}
}
}
/*******************************************/
/* Cuda kernel to upload N/S halo elements */
/*******************************************/
__global__ void add_NS_Halo(int row,int col,int *haloMat,int *subMat) {
int b = blockIdx.x;
int t = threadIdx.x;
// add North portion
if (b == 0) {
subMat[t] = haloMat[t];
}
// add South portion
if (b == 1) {
int subLoc = col*(row-1)+t;
int haloLoc = (row+col)+t;
subMat[subLoc] = haloMat[haloLoc];
}
}
/*******************************************/
/* Cuda kernel to upload E/W halo elements */
/*******************************************/
__global__ void add_EW_Halo(int row,int col,int *haloMat,int *subMat) {
int b = blockIdx.x;
int t = threadIdx.x;
// add East portion
if (b == 0) {
int subLoc = col-1+(col)*t;
int haloLoc = col+t;
subMat[subLoc] = haloMat[haloLoc];
}
// add the West portion
if (b == 1) {
int subLoc = (col)*t;
int haloLoc = 2*row+col+t;
subMat[subLoc] = haloMat[haloLoc];
}
}
/*******************************************/
/* Cuda kernel to get N/S halo elements */
/*******************************************/
__global__ void get_NS_Halo(int row,int col,int *haloMat,int *subMat) {
int b = blockIdx.x;
int t = threadIdx.x;
// add North portion
if (b == 0) {
haloMat[t]=subMat[t+col];
}
// add South portion
if (b == 1) {
int subLoc = col*(row-2)+t;
int haloLoc = (row+col)+t;
haloMat[haloLoc]=subMat[subLoc];
}
}
/*******************************************/
/* Cuda kernel to get E/W halo elements */
/*******************************************/
__global__ void get_EW_Halo(int row,int col,int *haloMat,int *subMat) {
int b = blockIdx.x;
int t = threadIdx.x;
// add East portion
if (b == 0) {
int subLoc = (col-2)+col*t;
int haloLoc = col+t;
haloMat[haloLoc]=subMat[subLoc];
}
// add the West portion
if (b == 1) {
int subLoc = 1+col*t;
int haloLoc = col+2*row+t;
haloMat[haloLoc]=subMat[subLoc] ;
}
}
/***************************************/
/* External c subroutine for CUDA */
/***************************************/
extern "C" void call_cuda_applyRules(int flag,int rows, int cols,int *halo, int *halo_dev, int *update, int *hold) {
/**************************************************/
/* Get the values to exchange over MPI */
/**************************************************/
if (flag == 0) {
int haloSize = sizeof(int)*2*(rows+cols);
cudaError_t err = cudaSuccess;
// Add the North and South rows to hold
get_NS_Halo<<<2, cols>>>(rows,cols,halo_dev,hold);
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "Failed to launch Kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaDeviceSynchronize();
// Add the East and West columns to hold
get_EW_Halo<<<2, rows>>>(rows,cols,halo_dev,hold);
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "Failed to launch Kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaDeviceSynchronize();
err = cudaMemcpy(halo,halo_dev,haloSize,cudaMemcpyDeviceToHost);
return;
}
/*****************************************************/
/* Update hold with halo, then apply rules to update */
/*****************************************************/
if (flag == 1) {
int haloSize = sizeof(int)*2*(rows+cols);
cudaError_t err = cudaSuccess;
// Copy updated halo to GPU
err = cudaMemcpy(halo_dev,halo,haloSize,cudaMemcpyHostToDevice);
// Add the North and South rows to hold
add_NS_Halo<<<2, cols>>>(rows,cols,halo_dev,hold);
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "Failed to launch Kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaDeviceSynchronize();
// Add the East and West columns to hold
add_EW_Halo<<<2, rows>>>(rows,cols,halo_dev,hold);
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "Failed to launch Kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaDeviceSynchronize();
// Apply the Rules
int N = (rows-2)*(cols-2);
int threadCnt;
int blockCnt;
//threads per block
threadCnt = maxThread;
//blocks
int check = N/maxThread;
if (check == 0) {
blockCnt = 1;
} else {
blockCnt = check + 1;
}
applyRules<<<blockCnt, threadCnt>>>(rows,cols,update,hold);
        err = cudaGetLastError();
        if (err != cudaSuccess) {
            fprintf(stderr, "Failed to launch Kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaDeviceSynchronize();
return;
}
}
|
0b553d134d9bbf23684ac5f770f77fd028c6647e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "integral_image_op.h"
namespace caffe2 {
namespace {
__global__ void RowPassKernel(
int count,
int rows_out,
int cols_out,
int chans,
const float* in,
float* out) {
CUDA_1D_KERNEL_LOOP(i, count) {
// Figure out which row, channel, and batch element we're processing
int row = i % rows_out;
int chan = (i / rows_out) % chans;
int ind = i / rows_out / chans;
// Input is (H, W) and output is (H + 1, W + 1)
int rows_in = rows_out - 1;
int cols_in = cols_out - 1;
// Row pointer to input data
// Input data is shift (-1, -1) relative to output data, hence row - 1
const float* row_in_data =
in + cols_in * ((row - 1) + rows_in * (chan + ind * chans));
// Row pointer to output data
float* row_out_data =
out + cols_out * (row + rows_out * (chan + ind * chans));
// The first row and first column of the output is all zeros
row_out_data[0] = 0.;
if (row == 0) {
for (int i = 1; i < cols_out; ++i) {
row_out_data[i] = 0.;
}
} else {
for (int i = 1; i < cols_out; ++i) {
// Recall that input data is shift (-1, -1) relative to the output,
// hence i - 1
row_out_data[i] = row_out_data[i - 1] + row_in_data[i - 1];
}
}
}
}
__global__ void RowPassGradientKernel(
int count,
int rows_out,
int cols_out,
int chans,
const float* in,
float* out) {
CUDA_1D_KERNEL_LOOP(i, count) {
// Figure out which row, channel, and batch element we're processing
int row = i % rows_out;
int chan = (i / rows_out) % chans;
int ind = i / rows_out / chans;
// Input in (H + 1, W + 1) and output is (H + 1, W)
int rows_in = rows_out;
int cols_in = cols_out + 1;
// Col pointer to input data
const float* row_in_data =
in + cols_in * (row + rows_in * (chan + ind * chans));
// Col pointer to output data
float* row_out_data =
out + cols_out * (row + rows_out * (chan + ind * chans));
row_out_data[0] = row_in_data[0];
for (int i = 1; i < cols_out; ++i) {
row_out_data[i] = row_out_data[i - 1] + row_in_data[i];
}
}
}
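// Second pass: in-place cumulative sum down each column of the row-pass output,
// which completes the 2D integral image.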
__global__ void
ColPassKernel(int count, int rows_out, int cols_out, int chans, float* out) {
CUDA_1D_KERNEL_LOOP(i, count) {
// Figure out which col, channel, and batch element we're processing
int col = i % cols_out;
int chan = (i / cols_out) % chans;
int ind = i / cols_out / chans;
float* col_out_data =
out + col + cols_out * rows_out * (chan + ind * chans);
for (int i = 1; i < rows_out; ++i) {
col_out_data[i * cols_out] += col_out_data[(i - 1) * cols_out];
}
}
}
__global__ void ColPassGradientKernel(
int count,
int rows_out,
int cols_out,
int chans,
const float* in,
float* out) {
CUDA_1D_KERNEL_LOOP(i, count) {
// Figure out which col, channel, and batch element we're processing
int col = i % cols_out;
int chan = (i / cols_out) % chans;
int ind = i / cols_out / chans;
// Input is (H + 1, W) and output is (H, W)
int rows_in = rows_out + 1;
int cols_in = cols_out;
// Col pointer to input data
const float* col_in_data =
in + col + cols_in * rows_in * (chan + ind * chans);
// Col pointer to output data
float* col_out_data =
out + col + cols_out * rows_out * (chan + ind * chans);
col_out_data[0] = col_in_data[0];
for (int i = 1; i < rows_out; ++i) {
col_out_data[i * cols_out] =
col_out_data[(i - 1) * cols_out] + col_in_data[i * cols_in];
}
}
}
} // namespace
template <>
bool IntegralImageOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
  CAFFE_ENFORCE(X.ndim() == 4, "Only supports 4D tensors for the moment");
// Input is (N, C, H, W)
// Output is (N, C, H + 1, W + 1)
vector<TIndex> out_shape(X.dims());
out_shape[2] += 1; // H + 1 output size
out_shape[3] += 1; // W + 1 output size
Y->Resize(out_shape);
const int chans = X.dim32(1);
const int rows_out = Y->dim32(2);
const int cols_out = Y->dim32(3);
// Integral image over rows of input X
const int row_pass_size = X.dim32(0) * chans * rows_out;
hipLaunchKernelGGL(( RowPassKernel),
dim3(CAFFE_GET_BLOCKS(row_pass_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
row_pass_size,
rows_out,
cols_out,
chans,
X.data<float>(),
Y->mutable_data<float>());
// Integral image over columns of the integral image over rows
const int col_pass_size = X.dim32(0) * chans * cols_out;
hipLaunchKernelGGL(( ColPassKernel),
dim3(CAFFE_GET_BLOCKS(col_pass_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
col_pass_size, rows_out, cols_out, chans, Y->mutable_data<float>());
return true;
}
template <>
bool IntegralImageGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Original input to "forward" op
auto& dY = Input(1); // Gradient of net w.r.t. output of "forward" op
// (aka "gradOutput")
auto* dX = Output(0); // Gradient of net w.r.t. input to "forward" op
// (aka "gradInput")
dX->ResizeLike(X);
// Row pass reduces shape of dY from (N, C, H + 1, W + 1)
// to (N, C, H + 1, W)
// Col pass reduces shape to (N, C, H, W)
vector<TIndex> row_pass_shape(dY.dims());
row_pass_shape[3] -= 1;
row_pass_buffer_.Resize(row_pass_shape);
const int chans = row_pass_buffer_.dim32(1);
const int rows_out = row_pass_buffer_.dim32(2);
const int cols_out = row_pass_buffer_.dim32(3);
// Integral image over rows of input X
const int row_pass_size = X.dim32(0) * chans * rows_out;
hipLaunchKernelGGL(( RowPassGradientKernel),
dim3(CAFFE_GET_BLOCKS(row_pass_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
row_pass_size,
rows_out,
cols_out,
chans,
dY.data<float>(),
row_pass_buffer_.mutable_data<float>());
// Integral image over columns of the integral image over rows
const int col_pass_size = X.dim32(0) * chans * cols_out;
hipLaunchKernelGGL(( ColPassGradientKernel),
dim3(CAFFE_GET_BLOCKS(col_pass_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
col_pass_size,
rows_out - 1,
cols_out,
chans,
row_pass_buffer_.data<float>(),
dX->mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(IntegralImage, IntegralImageOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
IntegralImageGradient,
IntegralImageGradientOp<float, CUDAContext>);
} // namespace caffe2
| 0b553d134d9bbf23684ac5f770f77fd028c6647e.cu | #include "caffe2/core/context_gpu.h"
#include "integral_image_op.h"
namespace caffe2 {
namespace {
__global__ void RowPassKernel(
int count,
int rows_out,
int cols_out,
int chans,
const float* in,
float* out) {
CUDA_1D_KERNEL_LOOP(i, count) {
// Figure out which row, channel, and batch element we're processing
int row = i % rows_out;
int chan = (i / rows_out) % chans;
int ind = i / rows_out / chans;
// Input is (H, W) and output is (H + 1, W + 1)
int rows_in = rows_out - 1;
int cols_in = cols_out - 1;
// Row pointer to input data
// Input data is shift (-1, -1) relative to output data, hence row - 1
const float* row_in_data =
in + cols_in * ((row - 1) + rows_in * (chan + ind * chans));
// Row pointer to output data
float* row_out_data =
out + cols_out * (row + rows_out * (chan + ind * chans));
// The first row and first column of the output is all zeros
row_out_data[0] = 0.;
if (row == 0) {
for (int i = 1; i < cols_out; ++i) {
row_out_data[i] = 0.;
}
} else {
for (int i = 1; i < cols_out; ++i) {
// Recall that input data is shift (-1, -1) relative to the output,
// hence i - 1
row_out_data[i] = row_out_data[i - 1] + row_in_data[i - 1];
}
}
}
}
__global__ void RowPassGradientKernel(
int count,
int rows_out,
int cols_out,
int chans,
const float* in,
float* out) {
CUDA_1D_KERNEL_LOOP(i, count) {
// Figure out which row, channel, and batch element we're processing
int row = i % rows_out;
int chan = (i / rows_out) % chans;
int ind = i / rows_out / chans;
// Input in (H + 1, W + 1) and output is (H + 1, W)
int rows_in = rows_out;
int cols_in = cols_out + 1;
// Col pointer to input data
const float* row_in_data =
in + cols_in * (row + rows_in * (chan + ind * chans));
// Col pointer to output data
float* row_out_data =
out + cols_out * (row + rows_out * (chan + ind * chans));
row_out_data[0] = row_in_data[0];
for (int i = 1; i < cols_out; ++i) {
row_out_data[i] = row_out_data[i - 1] + row_in_data[i];
}
}
}
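// Second pass: in-place cumulative sum down each column of the row-pass output,
// which completes the 2D integral image.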
__global__ void
ColPassKernel(int count, int rows_out, int cols_out, int chans, float* out) {
CUDA_1D_KERNEL_LOOP(i, count) {
// Figure out which col, channel, and batch element we're processing
int col = i % cols_out;
int chan = (i / cols_out) % chans;
int ind = i / cols_out / chans;
float* col_out_data =
out + col + cols_out * rows_out * (chan + ind * chans);
for (int i = 1; i < rows_out; ++i) {
col_out_data[i * cols_out] += col_out_data[(i - 1) * cols_out];
}
}
}
__global__ void ColPassGradientKernel(
int count,
int rows_out,
int cols_out,
int chans,
const float* in,
float* out) {
CUDA_1D_KERNEL_LOOP(i, count) {
// Figure out which col, channel, and batch element we're processing
int col = i % cols_out;
int chan = (i / cols_out) % chans;
int ind = i / cols_out / chans;
// Input is (H + 1, W) and output is (H, W)
int rows_in = rows_out + 1;
int cols_in = cols_out;
// Col pointer to input data
const float* col_in_data =
in + col + cols_in * rows_in * (chan + ind * chans);
// Col pointer to output data
float* col_out_data =
out + col + cols_out * rows_out * (chan + ind * chans);
col_out_data[0] = col_in_data[0];
for (int i = 1; i < rows_out; ++i) {
col_out_data[i * cols_out] =
col_out_data[(i - 1) * cols_out] + col_in_data[i * cols_in];
}
}
}
} // namespace
template <>
bool IntegralImageOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
  CAFFE_ENFORCE(X.ndim() == 4, "Only supports 4D tensors for the moment");
// Input is (N, C, H, W)
// Output is (N, C, H + 1, W + 1)
vector<TIndex> out_shape(X.dims());
out_shape[2] += 1; // H + 1 output size
out_shape[3] += 1; // W + 1 output size
Y->Resize(out_shape);
const int chans = X.dim32(1);
const int rows_out = Y->dim32(2);
const int cols_out = Y->dim32(3);
// Integral image over rows of input X
const int row_pass_size = X.dim32(0) * chans * rows_out;
RowPassKernel<<<
CAFFE_GET_BLOCKS(row_pass_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
row_pass_size,
rows_out,
cols_out,
chans,
X.data<float>(),
Y->mutable_data<float>());
// Integral image over columns of the integral image over rows
const int col_pass_size = X.dim32(0) * chans * cols_out;
ColPassKernel<<<
CAFFE_GET_BLOCKS(col_pass_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
col_pass_size, rows_out, cols_out, chans, Y->mutable_data<float>());
return true;
}
template <>
bool IntegralImageGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Original input to "forward" op
auto& dY = Input(1); // Gradient of net w.r.t. output of "forward" op
// (aka "gradOutput")
auto* dX = Output(0); // Gradient of net w.r.t. input to "forward" op
// (aka "gradInput")
dX->ResizeLike(X);
// Row pass reduces shape of dY from (N, C, H + 1, W + 1)
// to (N, C, H + 1, W)
// Col pass reduces shape to (N, C, H, W)
vector<TIndex> row_pass_shape(dY.dims());
row_pass_shape[3] -= 1;
row_pass_buffer_.Resize(row_pass_shape);
const int chans = row_pass_buffer_.dim32(1);
const int rows_out = row_pass_buffer_.dim32(2);
const int cols_out = row_pass_buffer_.dim32(3);
// Integral image over rows of input X
const int row_pass_size = X.dim32(0) * chans * rows_out;
RowPassGradientKernel<<<
CAFFE_GET_BLOCKS(row_pass_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
row_pass_size,
rows_out,
cols_out,
chans,
dY.data<float>(),
row_pass_buffer_.mutable_data<float>());
// Integral image over columns of the integral image over rows
const int col_pass_size = X.dim32(0) * chans * cols_out;
ColPassGradientKernel<<<
CAFFE_GET_BLOCKS(col_pass_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
col_pass_size,
rows_out - 1,
cols_out,
chans,
row_pass_buffer_.data<float>(),
dX->mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(IntegralImage, IntegralImageOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
IntegralImageGradient,
IntegralImageGradientOp<float, CUDAContext>);
} // namespace caffe2
|
5e9097439743340d3ff036f5823b4b9ed541c44f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <unistd.h>
#define ALLOC_SIZE 1024
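// OFFSET, ACCESS_DIR and the R/W selector are not defined in this file; they are
// presumably supplied at compile time (e.g. -DOFFSET=1 -DACCESS_DIR=1 -DR) to select
// the access offset, direction, and read/write mode under test.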
__global__ void access_offset_kernel() {
int* devMem = (int*)malloc(ALLOC_SIZE*sizeof(int));
#ifdef R
volatile int i = devMem[OFFSET+ACCESS_DIR*(ALLOC_SIZE-1)];
#elif W
devMem[OFFSET+ACCESS_DIR*(ALLOC_SIZE-1)] = 42;
#endif
free(devMem);
}
int main(int argc, char** argv) {
    hipDeviceSetLimit(hipLimitMallocHeapSize, ALLOC_SIZE*4*sizeof(int)); // hipDeviceSetLimit is the HIP counterpart of cudaThreadSetLimit
hipLaunchKernelGGL(( access_offset_kernel), dim3(1),dim3(1), 0, 0, );
hipDeviceReset();
return 0;
}
| 5e9097439743340d3ff036f5823b4b9ed541c44f.cu | #include <stdio.h>
#include <unistd.h>
#define ALLOC_SIZE 1024
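// OFFSET, ACCESS_DIR and the R/W selector are not defined in this file; they are
// presumably supplied at compile time (e.g. -DOFFSET=1 -DACCESS_DIR=1 -DR) to select
// the access offset, direction, and read/write mode under test.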
__global__ void access_offset_kernel() {
int* devMem = (int*)malloc(ALLOC_SIZE*sizeof(int));
#ifdef R
volatile int i = devMem[OFFSET+ACCESS_DIR*(ALLOC_SIZE-1)];
#elif W
devMem[OFFSET+ACCESS_DIR*(ALLOC_SIZE-1)] = 42;
#endif
free(devMem);
}
int main(int argc, char** argv) {
cudaThreadSetLimit(cudaLimitMallocHeapSize, ALLOC_SIZE*4*sizeof(int));
access_offset_kernel<<<1,1>>>();
cudaDeviceReset();
return 0;
}
|
5e4639a45d0f8c391113613e905d4ab470573071.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2015 Chen-Yu Lee ([email protected])
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/util/math_functions.hpp"
//#include <iostream>
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__device__ inline Dtype sigmoid_gpu(Dtype x) {
return 1. / (1. + exp(-x));
}
//======================================================================
// GPU forward
//======================================================================
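// PoolForward: for each output cell, compute a gating value g = sigmoid(window responses
// weighted by the learned mask weight1), then emit g * max-pool + (1 - g) * ave-pool
// over the same k x k window.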
template <typename Dtype>
__global__ void PoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int ksize, const int stride,
const Dtype* weight1, Dtype* top_data) {
//-------------------------------------------------
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride;
int hend = min(hstart + ksize, height);
int wstart = pw * stride;
int wend = min(wstart + ksize, width);
int w_idx = 0;
bottom_data += (n * channels + c) * height * width;
Dtype maxval = -FLT_MAX;
Dtype aveval = 0;
Dtype resp_split = Dtype(0.);
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
resp_split += bottom_data[h * width + w] * weight1[w_idx];
maxval = max(maxval, bottom_data[h * width + w]);
aveval += bottom_data[h * width + w];
w_idx++;
}
}
// max pool result is saved in maxval
// ave pool result is saved in aveval / poolsize_data[index];
// poolsize_data[index] = (hend - hstart) * (wend - wstart);
Dtype gating_value = sigmoid_gpu(resp_split);
// compute gated max-ave output value and save to top_data[index]
top_data[index] = gating_value * maxval +
(Dtype(1.) - gating_value) * aveval / ( (hend - hstart) * (wend - wstart) );
}
}
template <typename Dtype>
void GatedMaxAveLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* weight1 = this->blobs_[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
// first compute splitting probabilities
hipLaunchKernelGGL(( PoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), CHANNELS_,
HEIGHT_, WIDTH_, POOLED_HEIGHT_, POOLED_WIDTH_, KSIZE_, STRIDE_,
weight1, top_data);
CUDA_POST_KERNEL_CHECK;
}
//======================================================================
// GPU backward
//======================================================================
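// Im2col_channel unrolls each pooling window into a column of col_data (im2col layout);
// it is not referenced by the forward/backward passes in this file.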
template <typename Dtype>
__global__ void Im2col_channel(const int nthreads, const int channels,
const int height, const int width, const int pooled_height, const int pooled_width,
const int ksize, const int stride,
const Dtype* bottom_data, Dtype* col_data) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
// note that index is for top data matrix
if (index < nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride;
int wstart = pw * stride;
int hend = hstart + ksize;
int wend = wstart + ksize;
int w_idx = 0;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
col_data[w_idx * nthreads + index] = (h >= 0 && w >=0 && h < height && w < width) ?
bottom_data[h * width + w] : 0;
w_idx++;
}
}
}
}
template <typename Dtype>
__global__ void PoolBackward(const int nthreads, const Dtype* top_diff,
const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int ksize, const int stride, const Dtype* weight1,
const Dtype* bottom_data, Dtype* bottom_diff, Dtype* weight1_diff_data, const int count) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
// note that index is for top data matrix
if (index < nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride;
int hend = min(hstart + ksize, height);
int wstart = pw * stride;
int wend = min(wstart + ksize, width);
//int bottom_offset = (n * channels + c) * height * width;
int w_idx = 0;
bottom_data += (n * channels + c) * height * width;
// recompute max and ave pooling results to save memory
Dtype maxval = -FLT_MAX;
Dtype aveval = 0;
Dtype resp_split = Dtype(0.);
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
resp_split += bottom_data[h * width + w] * weight1[w_idx];
maxval = max(maxval, bottom_data[h * width + w]);
aveval += bottom_data[h * width + w];
w_idx++;
}
}
Dtype split_p1 = sigmoid_gpu(resp_split);
Dtype split_p2 = Dtype(1.) - split_p1;
Dtype top_data1 = maxval;
int poolsize_data = (hend - hstart) * (wend - wstart);
Dtype top_data2 = aveval / poolsize_data;
// precompute temporary value
Dtype temp = split_p1 * split_p2 * (top_data1 - top_data2);
//-------------------------------------------------------
w_idx = 0;
bottom_diff += (n * channels + c) * height * width;
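    // The += accumulation below assumes pooling windows do not overlap (stride >= ksize);
    // overlapping windows would make these updates race across threads without atomics.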
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
// gradient w.r.t weight1 = g(1-g)*bottom_data*[max-ave]
weight1_diff_data[w_idx*count + index] = temp * bottom_data[h * width + w] * top_diff[index];
// gradient w.r.t bottom_data if needed
//bottom_diff_data[(bottom_offset + h * width + w) * (ksize*ksize) + w_idx] =
// ( temp * weight1[w_idx] + split_p1 * (bottom_data[h * width + w] == top_data1) + split_p2 / poolsize_data ) * top_diff[index];
bottom_diff[h * width + w] += ( temp * weight1[w_idx] + split_p1 * (bottom_data[h * width + w] == top_data1) + split_p2 / poolsize_data ) * top_diff[index];
w_idx++;
}
}
}
}
template <typename Dtype>
void GatedMaxAveLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
//if (!propagate_down) {
// return Dtype(0.);
//}
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
//-----------------------------------------------------------
const Dtype* weight1 = this->blobs_[0]->gpu_data();
Dtype* weight1_diff = this->blobs_[0]->mutable_gpu_diff();
//Dtype* bottom_diff_data = bottom_diff_buffer_.mutable_gpu_data();
Dtype* weight1_diff_data = weight1_diff_buffer_.mutable_gpu_data();
int count = top[0]->count();
int bottom_count = bottom[0]->count();
//-----------------------------------------------------------
//CUDA_CHECK(hipMemset(weight1_diff, 0, sizeof(Dtype) * this->blobs_[0]->count()));
CUDA_CHECK(hipMemset(bottom_diff, 0, sizeof(Dtype) * bottom[0]->count()));
//CUDA_CHECK(hipMemset(bottom_diff_data, 0, sizeof(Dtype) * (KSIZE_*KSIZE_) * bottom[0]->count()));
CUDA_CHECK(hipMemset(weight1_diff_data, 0, sizeof(Dtype) * (KSIZE_*KSIZE_) * top[0]->count()));
//-----------------------------------------------------------
hipLaunchKernelGGL(( PoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, CHANNELS_, HEIGHT_, WIDTH_, POOLED_HEIGHT_, POOLED_WIDTH_,
KSIZE_, STRIDE_, weight1, bottom_data,
bottom_diff, weight1_diff_data, count);
// accumulate correct gradients for bottom_diff
//caffe_gpu_gemv<Dtype>(CblasNoTrans, bottom_count, (KSIZE_*KSIZE_), 1.,
// bottom_diff_data, reinterpret_cast<const Dtype*>(bias_multiplier2_->gpu_data()),
// 0., bottom_diff);
// accumulate correct gradients for weight1_diff
caffe_gpu_gemv<Dtype>(CblasNoTrans, (KSIZE_*KSIZE_), count, 1.,
weight1_diff_data, reinterpret_cast<const Dtype*>(bias_multiplier1_->gpu_data()),
0., weight1_diff);
CUDA_POST_KERNEL_CHECK;
//return Dtype(0.);
}
//INSTANTIATE_CLASS(GatedMaxAveLayer);
INSTANTIATE_LAYER_GPU_FUNCS(GatedMaxAveLayer);
}
| 5e4639a45d0f8c391113613e905d4ab470573071.cu | // Copyright 2015 Chen-Yu Lee ([email protected])
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/util/math_functions.hpp"
//#include <iostream>
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__device__ inline Dtype sigmoid_gpu(Dtype x) {
return 1. / (1. + exp(-x));
}
//======================================================================
// GPU forward
//======================================================================
template <typename Dtype>
__global__ void PoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int ksize, const int stride,
const Dtype* weight1, Dtype* top_data) {
//-------------------------------------------------
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride;
int hend = min(hstart + ksize, height);
int wstart = pw * stride;
int wend = min(wstart + ksize, width);
int w_idx = 0;
bottom_data += (n * channels + c) * height * width;
Dtype maxval = -FLT_MAX;
Dtype aveval = 0;
Dtype resp_split = Dtype(0.);
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
resp_split += bottom_data[h * width + w] * weight1[w_idx];
maxval = max(maxval, bottom_data[h * width + w]);
aveval += bottom_data[h * width + w];
w_idx++;
}
}
// max pool result is saved in maxval
// ave pool result is saved in aveval / poolsize_data[index];
// poolsize_data[index] = (hend - hstart) * (wend - wstart);
Dtype gating_value = sigmoid_gpu(resp_split);
// compute gated max-ave output value and save to top_data[index]
top_data[index] = gating_value * maxval +
(Dtype(1.) - gating_value) * aveval / ( (hend - hstart) * (wend - wstart) );
}
}
template <typename Dtype>
void GatedMaxAveLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* weight1 = this->blobs_[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
// first compute splitting probabilities
PoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), CHANNELS_,
HEIGHT_, WIDTH_, POOLED_HEIGHT_, POOLED_WIDTH_, KSIZE_, STRIDE_,
weight1, top_data);
CUDA_POST_KERNEL_CHECK;
}
//======================================================================
// GPU backward
//======================================================================
template <typename Dtype>
__global__ void Im2col_channel(const int nthreads, const int channels,
const int height, const int width, const int pooled_height, const int pooled_width,
const int ksize, const int stride,
const Dtype* bottom_data, Dtype* col_data) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
// note that index is for top data matrix
if (index < nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride;
int wstart = pw * stride;
int hend = hstart + ksize;
int wend = wstart + ksize;
int w_idx = 0;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
col_data[w_idx * nthreads + index] = (h >= 0 && w >=0 && h < height && w < width) ?
bottom_data[h * width + w] : 0;
w_idx++;
}
}
}
}
template <typename Dtype>
__global__ void PoolBackward(const int nthreads, const Dtype* top_diff,
const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int ksize, const int stride, const Dtype* weight1,
const Dtype* bottom_data, Dtype* bottom_diff, Dtype* weight1_diff_data, const int count) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
// note that index is for top data matrix
if (index < nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride;
int hend = min(hstart + ksize, height);
int wstart = pw * stride;
int wend = min(wstart + ksize, width);
//int bottom_offset = (n * channels + c) * height * width;
int w_idx = 0;
bottom_data += (n * channels + c) * height * width;
// recompute max and ave pooling results to save memory
Dtype maxval = -FLT_MAX;
Dtype aveval = 0;
Dtype resp_split = Dtype(0.);
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
resp_split += bottom_data[h * width + w] * weight1[w_idx];
maxval = max(maxval, bottom_data[h * width + w]);
aveval += bottom_data[h * width + w];
w_idx++;
}
}
Dtype split_p1 = sigmoid_gpu(resp_split);
Dtype split_p2 = Dtype(1.) - split_p1;
Dtype top_data1 = maxval;
int poolsize_data = (hend - hstart) * (wend - wstart);
Dtype top_data2 = aveval / poolsize_data;
// precompute temporary value
Dtype temp = split_p1 * split_p2 * (top_data1 - top_data2);
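    // With top = g*max + (1-g)*ave and g = sigmoid(sum_k w_k*x_k), the derivative
    // of top w.r.t. the gating pre-activation is g*(1-g)*(max - ave); that is the
    // factor stored in temp and reused for both gradients below.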
//-------------------------------------------------------
w_idx = 0;
bottom_diff += (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
// gradient w.r.t weight1 = g(1-g)*bottom_data*[max-ave]
weight1_diff_data[w_idx*count + index] = temp * bottom_data[h * width + w] * top_diff[index];
// gradient w.r.t bottom_data if needed
//bottom_diff_data[(bottom_offset + h * width + w) * (ksize*ksize) + w_idx] =
// ( temp * weight1[w_idx] + split_p1 * (bottom_data[h * width + w] == top_data1) + split_p2 / poolsize_data ) * top_diff[index];
bottom_diff[h * width + w] += ( temp * weight1[w_idx] + split_p1 * (bottom_data[h * width + w] == top_data1) + split_p2 / poolsize_data ) * top_diff[index];
w_idx++;
}
}
}
}
template <typename Dtype>
void GatedMaxAveLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
//if (!propagate_down) {
// return Dtype(0.);
//}
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
//-----------------------------------------------------------
const Dtype* weight1 = this->blobs_[0]->gpu_data();
Dtype* weight1_diff = this->blobs_[0]->mutable_gpu_diff();
//Dtype* bottom_diff_data = bottom_diff_buffer_.mutable_gpu_data();
Dtype* weight1_diff_data = weight1_diff_buffer_.mutable_gpu_data();
int count = top[0]->count();
int bottom_count = bottom[0]->count();
//-----------------------------------------------------------
//CUDA_CHECK(cudaMemset(weight1_diff, 0, sizeof(Dtype) * this->blobs_[0]->count()));
CUDA_CHECK(cudaMemset(bottom_diff, 0, sizeof(Dtype) * bottom[0]->count()));
//CUDA_CHECK(cudaMemset(bottom_diff_data, 0, sizeof(Dtype) * (KSIZE_*KSIZE_) * bottom[0]->count()));
CUDA_CHECK(cudaMemset(weight1_diff_data, 0, sizeof(Dtype) * (KSIZE_*KSIZE_) * top[0]->count()));
//-----------------------------------------------------------
PoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, CHANNELS_, HEIGHT_, WIDTH_, POOLED_HEIGHT_, POOLED_WIDTH_,
KSIZE_, STRIDE_, weight1, bottom_data,
bottom_diff, weight1_diff_data, count);
// accumulate correct gradients for bottom_diff
//caffe_gpu_gemv<Dtype>(CblasNoTrans, bottom_count, (KSIZE_*KSIZE_), 1.,
// bottom_diff_data, reinterpret_cast<const Dtype*>(bias_multiplier2_->gpu_data()),
// 0., bottom_diff);
// accumulate correct gradients for weight1_diff
caffe_gpu_gemv<Dtype>(CblasNoTrans, (KSIZE_*KSIZE_), count, 1.,
weight1_diff_data, reinterpret_cast<const Dtype*>(bias_multiplier1_->gpu_data()),
0., weight1_diff);
CUDA_POST_KERNEL_CHECK;
//return Dtype(0.);
}
//INSTANTIATE_CLASS(GatedMaxAveLayer);
INSTANTIATE_LAYER_GPU_FUNCS(GatedMaxAveLayer);
}
|
e48a2e5bc78720673f8a6c4d5c05a29a43d7870b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "rocblas.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <stdio.h>
#include <iostream>
#include "numer_kernels.h"
#include "numer_blas.h"
#include "numer_cublas_wrappers.h"
#include "numer_ml.h"
extern template class BlasWrapper<float>;
extern template class BlasWrapper<double>;
extern template class NumErBlas<float>;
extern template class NumErBlas<double>;
extern template class Kernels<float>;
extern template class Kernels<double>;
template<typename T>
void Ml<T>::gradient_descent(hipblasHandle_t handle, thrust::device_vector<T> *d_theta, thrust::device_vector<T> *d_x, thrust::device_vector<T> *d_y, const unsigned int num_features, const unsigned int num_samples)
{
BlasWrapper<T> bw;
/*Kernels<T> kernels;
NumErBlas<T> blas;*/
int lda,ldb,ldc;
// Create a handle for CUBLAS
//hipblasStatus_t res;
//Grad = (1/m)* ( X * (sigmoid(Theta*X) - Y) )
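  // Dimension note (as implied by the lda/ldb/ldc arguments below): Theta is
  // 1 x num_features, X is stored column-major as num_samples x num_features,
  // and H = sigmoid(Theta*X^T) is 1 x num_samples, so the second GEMM yields
  // the 1 x num_features gradient (1/m)*(H - Y)*X.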
// tmp1 = gemm(1*Theta*X + 0*H)
lda=1,ldb=num_samples, ldc=1;
thrust::device_vector<T> d_tmp1(num_samples, 0.0);
const T alf = 1.0;
const T bet = 0.0;
//const float *_alpha = &alf;
//const float *_beta = &bet;
bw.cublasGemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, 1, num_samples, num_features, &alf, thrust::raw_pointer_cast(d_theta->data()), lda, thrust::raw_pointer_cast(d_x->data()), ldb, &bet, thrust::raw_pointer_cast(d_tmp1.data()), ldc);
//% H=sigmoid(Theta*X)
thrust::device_vector<T> d_h(d_y->size());
thrust::transform(d_tmp1.begin(), d_tmp1.end(), d_h.begin(), typename Kernels<T>::sigmoid());
//%H - Y
thrust::transform(d_y->begin(), d_y->end(), d_h.begin(), d_h.begin(), typename NumErBlas<T>::saxpy_functor(-1.0));
const T alpha2 = 1.0/num_samples;
//const float *alpha2 = &alf2;
bw.cublasGemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1, num_features, num_samples, &alpha2, thrust::raw_pointer_cast(d_h.data()), lda, thrust::raw_pointer_cast(d_x->data()), ldb, &bet, thrust::raw_pointer_cast(d_theta->data()), ldc);
}
template<typename T>
void Ml<T>::numer_gd(thrust::device_vector<T> *d_theta, thrust::device_vector<T> *d_x, thrust::device_vector<T> *d_y, const unsigned int num_features, const unsigned int num_samples)
{
// Create a handle for CUBLAS
hipblasHandle_t handle;
hipblasCreate(&handle);
gradient_descent(handle, d_theta, d_x, d_y, num_features, num_samples);
// Destroy the handle
hipblasDestroy(handle);
}
template<typename T>
void Ml<T>::numer_gd_learn(thrust::device_vector<T> *d_theta, thrust::device_vector<T> *d_x, thrust::device_vector<T> *d_y, const unsigned int num_features, const unsigned int num_samples, const float learning_rate, const unsigned int iterations){
//NumErBlas<T> blas;
hipblasHandle_t handle;
hipblasCreate(&handle);
thrust::device_vector<T> d_theta_tmp = *d_theta;
for(int i=0; i<iterations; i++){
gradient_descent(handle, d_theta, d_x, d_y, num_features, num_samples);
thrust::transform(d_theta->begin(), d_theta->end(), d_theta_tmp.begin(), d_theta_tmp.begin(), typename NumErBlas<T>::saxpy_functor(-learning_rate));
*d_theta = d_theta_tmp;
}
// Destroy the handle
hipblasDestroy(handle);
}
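// Usage sketch (illustrative only; assumes Ml<float> is default-constructible and
// that d_x / d_y already hold num_features*num_samples inputs and num_samples
// labels on the device -- none of this is declared in this file):
//   thrust::device_vector<float> d_theta(num_features, 0.0f);
//   Ml<float> ml;
//   ml.numer_gd_learn(&d_theta, &d_x, &d_y, num_features, num_samples,
//                     0.01f /*learning rate*/, 1000 /*iterations*/);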
template class Ml<float>;
template class Ml<double>; | e48a2e5bc78720673f8a6c4d5c05a29a43d7870b.cu | #include "cuda.h"
#include "cublas_v2.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <stdio.h>
#include <iostream>
#include "numer_kernels.h"
#include "numer_blas.h"
#include "numer_cublas_wrappers.h"
#include "numer_ml.h"
extern template class BlasWrapper<float>;
extern template class BlasWrapper<double>;
extern template class NumErBlas<float>;
extern template class NumErBlas<double>;
extern template class Kernels<float>;
extern template class Kernels<double>;
template<typename T>
void Ml<T>::gradient_descent(cublasHandle_t handle, thrust::device_vector<T> *d_theta, thrust::device_vector<T> *d_x, thrust::device_vector<T> *d_y, const unsigned int num_features, const unsigned int num_samples)
{
BlasWrapper<T> bw;
/*Kernels<T> kernels;
NumErBlas<T> blas;*/
int lda,ldb,ldc;
// Create a handle for CUBLAS
//cublasStatus_t res;
//Grad = (1/m)* ( X * (sigmoid(Theta*X) - Y) )
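  // Dimension note (as implied by the lda/ldb/ldc arguments below): Theta is
  // 1 x num_features, X is stored column-major as num_samples x num_features,
  // and H = sigmoid(Theta*X^T) is 1 x num_samples, so the second GEMM yields
  // the 1 x num_features gradient (1/m)*(H - Y)*X.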
// tmp1 = gemm(1*Theta*X + 0*H)
lda=1,ldb=num_samples, ldc=1;
thrust::device_vector<T> d_tmp1(num_samples, 0.0);
const T alf = 1.0;
const T bet = 0.0;
//const float *_alpha = &alf;
//const float *_beta = &bet;
bw.cublasGemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, 1, num_samples, num_features, &alf, thrust::raw_pointer_cast(d_theta->data()), lda, thrust::raw_pointer_cast(d_x->data()), ldb, &bet, thrust::raw_pointer_cast(d_tmp1.data()), ldc);
//% H=sigmoid(Theta*X)
thrust::device_vector<T> d_h(d_y->size());
thrust::transform(d_tmp1.begin(), d_tmp1.end(), d_h.begin(), typename Kernels<T>::sigmoid());
//%H - Y
thrust::transform(d_y->begin(), d_y->end(), d_h.begin(), d_h.begin(), typename NumErBlas<T>::saxpy_functor(-1.0));
const T alpha2 = 1.0/num_samples;
//const float *alpha2 = &alf2;
bw.cublasGemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, 1, num_features, num_samples, &alpha2, thrust::raw_pointer_cast(d_h.data()), lda, thrust::raw_pointer_cast(d_x->data()), ldb, &bet, thrust::raw_pointer_cast(d_theta->data()), ldc);
}
template<typename T>
void Ml<T>::numer_gd(thrust::device_vector<T> *d_theta, thrust::device_vector<T> *d_x, thrust::device_vector<T> *d_y, const unsigned int num_features, const unsigned int num_samples)
{
// Create a handle for CUBLAS
cublasHandle_t handle;
cublasCreate(&handle);
gradient_descent(handle, d_theta, d_x, d_y, num_features, num_samples);
// Destroy the handle
cublasDestroy(handle);
}
template<typename T>
void Ml<T>::numer_gd_learn(thrust::device_vector<T> *d_theta, thrust::device_vector<T> *d_x, thrust::device_vector<T> *d_y, const unsigned int num_features, const unsigned int num_samples, const float learning_rate, const unsigned int iterations){
//NumErBlas<T> blas;
cublasHandle_t handle;
cublasCreate(&handle);
thrust::device_vector<T> d_theta_tmp = *d_theta;
for(int i=0; i<iterations; i++){
gradient_descent(handle, d_theta, d_x, d_y, num_features, num_samples);
thrust::transform(d_theta->begin(), d_theta->end(), d_theta_tmp.begin(), d_theta_tmp.begin(), typename NumErBlas<T>::saxpy_functor(-learning_rate));
*d_theta = d_theta_tmp;
}
// Destroy the handle
cublasDestroy(handle);
}
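// Usage sketch (illustrative only; assumes Ml<float> is default-constructible and
// that d_x / d_y already hold num_features*num_samples inputs and num_samples
// labels on the device -- none of this is declared in this file):
//   thrust::device_vector<float> d_theta(num_features, 0.0f);
//   Ml<float> ml;
//   ml.numer_gd_learn(&d_theta, &d_x, &d_y, num_features, num_samples,
//                     0.01f /*learning rate*/, 1000 /*iterations*/);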
template class Ml<float>;
template class Ml<double>; |
ac0e984bb1cea8bd69b2fdce0678a45a0c479b2e.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <math.h>
#include "CUDAParticleTracking.h"
#include "TrackCUDA.h"
#define _nparams 5
#define _length 0
#define _fieldX 1
#define _fieldY 2
#define _apertureX 3
#define _apertureY 4
extern Beam* d_BeamParameters;
extern Particle* d_ParticleArray;
extern int blocksPerGrid;
__global__ void OrbitCorrectorMap(Component* orbitcorrector, Beam* beam, Particle* ptcle)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
const double brho = beam[_rigidity];
const double beta0 = beam[_beta];
const double ds = orbitcorrector[_length];
const double kx = orbitcorrector[_fieldX] / brho;
const double ky = orbitcorrector[_fieldY] / brho;
const double ax = orbitcorrector[_apertureX];
const double ay = orbitcorrector[_apertureY];
if(ptcle[_f(i)])
{
double x0 = ptcle[_x(i)];
double px0 = ptcle[_px(i)];
double y0 = ptcle[_y(i)];
double py0 = ptcle[_py(i)];
double dp0 = ptcle[_dp(i)];
double ct0 = ptcle[_ct(i)];
double d1 = sqrt(1 + 2*dp0/beta0 + dp0*dp0);
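        // d1 = P/P0, the total momentum normalised to the reference momentum,
        // assuming the usual convention dp0 = (E - E0)/(P0*c) for the energy deviation.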
double x1 = x0 + ds*px0/d1 - ds*ds*ky/d1/2;
double px1 = px0 - ds*ky;
double y1 = y0 + ds*py0/d1 + ds*ds*kx/d1/2;
double py1 = py0 + ds*kx;
double f1 = ds*(1/beta0 + dp0)/d1/d1/d1/2;
double c0 = ct0 + ds/beta0 - ds*(1/beta0 + dp0)/d1 -
ds*ds*f1*(kx*kx + ky*ky)/3;
double ct1 = c0 + ds*f1*(ky*px0 - kx*py0) - f1*(px0*px0 + py0*py0);
ptcle[_x(i)] = x1;
ptcle[_px(i)] = px1;
ptcle[_y(i)] = y1;
ptcle[_py(i)] = py1;
ptcle[_ct(i)] = ct1;
        // Advance the path length, then collimate against the elliptical aperture
ptcle[_s(i)] += ds;
if( (ax>0) & (ay>0) )
if((x1/ax)*(x1/ax) + (y1/ay)*(y1/ay) > 1)
ptcle[_f(i)] = 0;
}
if(i==0)
beam[_globaltime] += ds / (beta0 * SpeedOfLight);
}
extern "C" __host__ void TrackOrbitCorrectorCUDA(OrbitCorrectorParameters_t orbitcorrector)
{
size_t cptParamsListSize = _nparams*sizeof(Component);
Component* d_ComponentParameters;
Component parameters[_nparams];
parameters[_length] = orbitcorrector.length;
parameters[_fieldX] = orbitcorrector.fieldX;
parameters[_fieldY] = orbitcorrector.fieldY;
parameters[_apertureX] = orbitcorrector.apertureX;
parameters[_apertureY] = orbitcorrector.apertureY;
hipMalloc((void**)&d_ComponentParameters, cptParamsListSize);
hipMemcpy(d_ComponentParameters, parameters, cptParamsListSize, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( OrbitCorrectorMap), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_ComponentParameters, d_BeamParameters, d_ParticleArray);
hipFree(d_ComponentParameters);
}
extern "C" __host__ void CopyOrbitCorrectorCUDA(OrbitCorrectorParameters_t orbitcorrector)
{
size_t cptParamsListSize = _nparams*sizeof(Component);
Component* d_ComponentParameters;
Component parameters[_nparams];
parameters[_length] = orbitcorrector.length;
parameters[_fieldX] = orbitcorrector.fieldX;
parameters[_fieldY] = orbitcorrector.fieldY;
parameters[_apertureX] = orbitcorrector.apertureX;
parameters[_apertureY] = orbitcorrector.apertureY;
hipMalloc((void**)&d_ComponentParameters, cptParamsListSize);
hipMemcpy(d_ComponentParameters, parameters, cptParamsListSize, hipMemcpyHostToDevice);
AppendComponent(&OrbitCorrectorMap, d_ComponentParameters);
} | ac0e984bb1cea8bd69b2fdce0678a45a0c479b2e.cu | #include <cuda_runtime.h>
#include <math.h>
#include "CUDAParticleTracking.h"
#include "TrackCUDA.h"
#define _nparams 5
#define _length 0
#define _fieldX 1
#define _fieldY 2
#define _apertureX 3
#define _apertureY 4
extern Beam* d_BeamParameters;
extern Particle* d_ParticleArray;
extern int blocksPerGrid;
__global__ void OrbitCorrectorMap(Component* orbitcorrector, Beam* beam, Particle* ptcle)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
const double brho = beam[_rigidity];
const double beta0 = beam[_beta];
const double ds = orbitcorrector[_length];
const double kx = orbitcorrector[_fieldX] / brho;
const double ky = orbitcorrector[_fieldY] / brho;
const double ax = orbitcorrector[_apertureX];
const double ay = orbitcorrector[_apertureY];
if(ptcle[_f(i)])
{
double x0 = ptcle[_x(i)];
double px0 = ptcle[_px(i)];
double y0 = ptcle[_y(i)];
double py0 = ptcle[_py(i)];
double dp0 = ptcle[_dp(i)];
double ct0 = ptcle[_ct(i)];
double d1 = sqrt(1 + 2*dp0/beta0 + dp0*dp0);
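        // d1 = P/P0, the total momentum normalised to the reference momentum,
        // assuming the usual convention dp0 = (E - E0)/(P0*c) for the energy deviation.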
double x1 = x0 + ds*px0/d1 - ds*ds*ky/d1/2;
double px1 = px0 - ds*ky;
double y1 = y0 + ds*py0/d1 + ds*ds*kx/d1/2;
double py1 = py0 + ds*kx;
double f1 = ds*(1/beta0 + dp0)/d1/d1/d1/2;
double c0 = ct0 + ds/beta0 - ds*(1/beta0 + dp0)/d1 -
ds*ds*f1*(kx*kx + ky*ky)/3;
double ct1 = c0 + ds*f1*(ky*px0 - kx*py0) - f1*(px0*px0 + py0*py0);
ptcle[_x(i)] = x1;
ptcle[_px(i)] = px1;
ptcle[_y(i)] = y1;
ptcle[_py(i)] = py1;
ptcle[_ct(i)] = ct1;
        // Advance the path length, then collimate against the elliptical aperture
ptcle[_s(i)] += ds;
if( (ax>0) & (ay>0) )
if((x1/ax)*(x1/ax) + (y1/ay)*(y1/ay) > 1)
ptcle[_f(i)] = 0;
}
if(i==0)
beam[_globaltime] += ds / (beta0 * SpeedOfLight);
}
extern "C" __host__ void TrackOrbitCorrectorCUDA(OrbitCorrectorParameters_t orbitcorrector)
{
size_t cptParamsListSize = _nparams*sizeof(Component);
Component* d_ComponentParameters;
Component parameters[_nparams];
parameters[_length] = orbitcorrector.length;
parameters[_fieldX] = orbitcorrector.fieldX;
parameters[_fieldY] = orbitcorrector.fieldY;
parameters[_apertureX] = orbitcorrector.apertureX;
parameters[_apertureY] = orbitcorrector.apertureY;
cudaMalloc((void**)&d_ComponentParameters, cptParamsListSize);
cudaMemcpy(d_ComponentParameters, parameters, cptParamsListSize, cudaMemcpyHostToDevice);
OrbitCorrectorMap<<<blocksPerGrid, threadsPerBlock>>>(d_ComponentParameters, d_BeamParameters, d_ParticleArray);
cudaFree(d_ComponentParameters);
}
extern "C" __host__ void CopyOrbitCorrectorCUDA(OrbitCorrectorParameters_t orbitcorrector)
{
size_t cptParamsListSize = _nparams*sizeof(Component);
Component* d_ComponentParameters;
Component parameters[_nparams];
parameters[_length] = orbitcorrector.length;
parameters[_fieldX] = orbitcorrector.fieldX;
parameters[_fieldY] = orbitcorrector.fieldY;
parameters[_apertureX] = orbitcorrector.apertureX;
parameters[_apertureY] = orbitcorrector.apertureY;
cudaMalloc((void**)&d_ComponentParameters, cptParamsListSize);
cudaMemcpy(d_ComponentParameters, parameters, cptParamsListSize, cudaMemcpyHostToDevice);
AppendComponent(&OrbitCorrectorMap, d_ComponentParameters);
} |
9a591b2d099ba48b4c9478ad492e4c1dfefe4475.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel1_ba2;
int xdim0_update_halo_kernel1_ba2_h = -1;
__constant__ int ydim0_update_halo_kernel1_ba2;
int ydim0_update_halo_kernel1_ba2_h = -1;
__constant__ int xdim1_update_halo_kernel1_ba2;
int xdim1_update_halo_kernel1_ba2_h = -1;
__constant__ int ydim1_update_halo_kernel1_ba2;
int ydim1_update_halo_kernel1_ba2_h = -1;
__constant__ int xdim2_update_halo_kernel1_ba2;
int xdim2_update_halo_kernel1_ba2_h = -1;
__constant__ int ydim2_update_halo_kernel1_ba2;
int ydim2_update_halo_kernel1_ba2_h = -1;
__constant__ int xdim3_update_halo_kernel1_ba2;
int xdim3_update_halo_kernel1_ba2_h = -1;
__constant__ int ydim3_update_halo_kernel1_ba2;
int ydim3_update_halo_kernel1_ba2_h = -1;
__constant__ int xdim4_update_halo_kernel1_ba2;
int xdim4_update_halo_kernel1_ba2_h = -1;
__constant__ int ydim4_update_halo_kernel1_ba2;
int ydim4_update_halo_kernel1_ba2_h = -1;
__constant__ int xdim5_update_halo_kernel1_ba2;
int xdim5_update_halo_kernel1_ba2_h = -1;
__constant__ int ydim5_update_halo_kernel1_ba2;
int ydim5_update_halo_kernel1_ba2_h = -1;
__constant__ int xdim6_update_halo_kernel1_ba2;
int xdim6_update_halo_kernel1_ba2_h = -1;
__constant__ int ydim6_update_halo_kernel1_ba2;
int ydim6_update_halo_kernel1_ba2_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel1_ba2 * (y) + \
xdim0_update_halo_kernel1_ba2 * ydim0_update_halo_kernel1_ba2 * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel1_ba2 * (y) + \
xdim1_update_halo_kernel1_ba2 * ydim1_update_halo_kernel1_ba2 * (z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_update_halo_kernel1_ba2 * (y) + \
xdim2_update_halo_kernel1_ba2 * ydim2_update_halo_kernel1_ba2 * (z))
#define OPS_ACC3(x, y, z) \
(x + xdim3_update_halo_kernel1_ba2 * (y) + \
xdim3_update_halo_kernel1_ba2 * ydim3_update_halo_kernel1_ba2 * (z))
#define OPS_ACC4(x, y, z) \
(x + xdim4_update_halo_kernel1_ba2 * (y) + \
xdim4_update_halo_kernel1_ba2 * ydim4_update_halo_kernel1_ba2 * (z))
#define OPS_ACC5(x, y, z) \
(x + xdim5_update_halo_kernel1_ba2 * (y) + \
xdim5_update_halo_kernel1_ba2 * ydim5_update_halo_kernel1_ba2 * (z))
#define OPS_ACC6(x, y, z) \
(x + xdim6_update_halo_kernel1_ba2 * (y) + \
xdim6_update_halo_kernel1_ba2 * ydim6_update_halo_kernel1_ba2 * (z))
// user function
__device__
inline void
update_halo_kernel1_ba2(double *density0, double *density1, double *energy0,
double *energy1, double *pressure,
double *viscosity, double *soundspeed,
const int *fields) {
if (fields[FIELD_DENSITY0] == 1)
density0[OPS_ACC0(0, 0, 0)] = density0[OPS_ACC0(0, 0, 3)];
if (fields[FIELD_DENSITY1] == 1)
density1[OPS_ACC1(0, 0, 0)] = density1[OPS_ACC1(0, 0, 3)];
if (fields[FIELD_ENERGY0] == 1)
energy0[OPS_ACC2(0, 0, 0)] = energy0[OPS_ACC2(0, 0, 3)];
if (fields[FIELD_ENERGY1] == 1)
energy1[OPS_ACC3(0, 0, 0)] = energy1[OPS_ACC3(0, 0, 3)];
if (fields[FIELD_PRESSURE] == 1)
pressure[OPS_ACC4(0, 0, 0)] = pressure[OPS_ACC4(0, 0, 3)];
if (fields[FIELD_VISCOSITY] == 1)
viscosity[OPS_ACC5(0, 0, 0)] = viscosity[OPS_ACC5(0, 0, 3)];
if (fields[FIELD_SOUNDSPEED] == 1)
soundspeed[OPS_ACC6(0, 0, 0)] = soundspeed[OPS_ACC6(0, 0, 3)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
__global__ void
ops_update_halo_kernel1_ba2(double *__restrict arg0, double *__restrict arg1,
double *__restrict arg2, double *__restrict arg3,
double *__restrict arg4, double *__restrict arg5,
double *__restrict arg6, const int *__restrict arg7,
int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel1_ba2 +
idx_z * 1 * 1 * xdim0_update_halo_kernel1_ba2 *
ydim0_update_halo_kernel1_ba2;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel1_ba2 +
idx_z * 1 * 1 * xdim1_update_halo_kernel1_ba2 *
ydim1_update_halo_kernel1_ba2;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_update_halo_kernel1_ba2 +
idx_z * 1 * 1 * xdim2_update_halo_kernel1_ba2 *
ydim2_update_halo_kernel1_ba2;
arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_update_halo_kernel1_ba2 +
idx_z * 1 * 1 * xdim3_update_halo_kernel1_ba2 *
ydim3_update_halo_kernel1_ba2;
arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_update_halo_kernel1_ba2 +
idx_z * 1 * 1 * xdim4_update_halo_kernel1_ba2 *
ydim4_update_halo_kernel1_ba2;
arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_update_halo_kernel1_ba2 +
idx_z * 1 * 1 * xdim5_update_halo_kernel1_ba2 *
ydim5_update_halo_kernel1_ba2;
arg6 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim6_update_halo_kernel1_ba2 +
idx_z * 1 * 1 * xdim6_update_halo_kernel1_ba2 *
ydim6_update_halo_kernel1_ba2;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel1_ba2(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
}
}
// host stub function
void ops_par_loop_update_halo_kernel1_ba2(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4,
ops_arg arg5, ops_arg arg6,
ops_arg arg7) {
// Timing
double t1, t2, c1, c2;
ops_arg args[8] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 8, range, 64))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(64, "update_halo_kernel1_ba2");
OPS_kernels[64].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel1_ba2_h ||
ydim0 != ydim0_update_halo_kernel1_ba2_h ||
xdim1 != xdim1_update_halo_kernel1_ba2_h ||
ydim1 != ydim1_update_halo_kernel1_ba2_h ||
xdim2 != xdim2_update_halo_kernel1_ba2_h ||
ydim2 != ydim2_update_halo_kernel1_ba2_h ||
xdim3 != xdim3_update_halo_kernel1_ba2_h ||
ydim3 != ydim3_update_halo_kernel1_ba2_h ||
xdim4 != xdim4_update_halo_kernel1_ba2_h ||
ydim4 != ydim4_update_halo_kernel1_ba2_h ||
xdim5 != xdim5_update_halo_kernel1_ba2_h ||
ydim5 != ydim5_update_halo_kernel1_ba2_h ||
xdim6 != xdim6_update_halo_kernel1_ba2_h ||
ydim6 != ydim6_update_halo_kernel1_ba2_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel1_ba2, &xdim0, sizeof(int));
xdim0_update_halo_kernel1_ba2_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel1_ba2, &ydim0, sizeof(int));
ydim0_update_halo_kernel1_ba2_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel1_ba2, &xdim1, sizeof(int));
xdim1_update_halo_kernel1_ba2_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel1_ba2, &ydim1, sizeof(int));
ydim1_update_halo_kernel1_ba2_h = ydim1;
hipMemcpyToSymbol(xdim2_update_halo_kernel1_ba2, &xdim2, sizeof(int));
xdim2_update_halo_kernel1_ba2_h = xdim2;
hipMemcpyToSymbol(ydim2_update_halo_kernel1_ba2, &ydim2, sizeof(int));
ydim2_update_halo_kernel1_ba2_h = ydim2;
hipMemcpyToSymbol(xdim3_update_halo_kernel1_ba2, &xdim3, sizeof(int));
xdim3_update_halo_kernel1_ba2_h = xdim3;
hipMemcpyToSymbol(ydim3_update_halo_kernel1_ba2, &ydim3, sizeof(int));
ydim3_update_halo_kernel1_ba2_h = ydim3;
hipMemcpyToSymbol(xdim4_update_halo_kernel1_ba2, &xdim4, sizeof(int));
xdim4_update_halo_kernel1_ba2_h = xdim4;
hipMemcpyToSymbol(ydim4_update_halo_kernel1_ba2, &ydim4, sizeof(int));
ydim4_update_halo_kernel1_ba2_h = ydim4;
hipMemcpyToSymbol(xdim5_update_halo_kernel1_ba2, &xdim5, sizeof(int));
xdim5_update_halo_kernel1_ba2_h = xdim5;
hipMemcpyToSymbol(ydim5_update_halo_kernel1_ba2, &ydim5, sizeof(int));
ydim5_update_halo_kernel1_ba2_h = ydim5;
hipMemcpyToSymbol(xdim6_update_halo_kernel1_ba2, &xdim6, sizeof(int));
xdim6_update_halo_kernel1_ba2_h = xdim6;
hipMemcpyToSymbol(ydim6_update_halo_kernel1_ba2, &ydim6, sizeof(int));
ydim6_update_halo_kernel1_ba2_h = ydim6;
}
int *arg7h = (int *)arg7.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg7.data = OPS_consts_h + consts_bytes;
arg7.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg7.data)[d] = arg7h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
int dat4 = args[4].dat->elem_size;
int dat5 = args[5].dat->elem_size;
int dat6 = args[6].dat->elem_size;
char *p_a[8];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] -
args[2].dat->base[1] - d_m[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] -
d_m[2]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[3].dat->d_m[d];
#endif
int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] -
args[3].dat->base[0] - d_m[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] -
args[3].dat->base[1] - d_m[1]);
base3 = base3 +
dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] -
d_m[2]);
p_a[3] = (char *)args[3].data_d + base3;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[4].dat->d_m[d];
#endif
int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] -
args[4].dat->base[0] - d_m[0]);
base4 = base4 +
dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] -
args[4].dat->base[1] - d_m[1]);
base4 = base4 +
dat4 * args[4].dat->size[0] * args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] -
d_m[2]);
p_a[4] = (char *)args[4].data_d + base4;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[5].dat->d_m[d];
#endif
int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] -
args[5].dat->base[0] - d_m[0]);
base5 = base5 +
dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] -
args[5].dat->base[1] - d_m[1]);
base5 = base5 +
dat5 * args[5].dat->size[0] * args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] -
d_m[2]);
p_a[5] = (char *)args[5].data_d + base5;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[6].dat->d_m[d];
#endif
int base6 = dat6 * 1 * (start[0] * args[6].stencil->stride[0] -
args[6].dat->base[0] - d_m[0]);
base6 = base6 +
dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1] -
args[6].dat->base[1] - d_m[1]);
base6 = base6 +
dat6 * args[6].dat->size[0] * args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2] - args[6].dat->base[2] -
d_m[2]);
p_a[6] = (char *)args[6].data_d + base6;
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args, 8, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[64].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel1_ba2), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (int *)arg7.data_d,
x_size, y_size, z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[64].time += t1 - t2;
}
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
ops_set_halo_dirtybit3(&args[2], range);
ops_set_halo_dirtybit3(&args[3], range);
ops_set_halo_dirtybit3(&args[4], range);
ops_set_halo_dirtybit3(&args[5], range);
ops_set_halo_dirtybit3(&args[6], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[64].mpi_time += t2 - t1;
OPS_kernels[64].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[64].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[64].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[64].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[64].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[64].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[64].transfer += ops_compute_transfer(dim, start, end, &arg6);
}
}
| 9a591b2d099ba48b4c9478ad492e4c1dfefe4475.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel1_ba2;
int xdim0_update_halo_kernel1_ba2_h = -1;
__constant__ int ydim0_update_halo_kernel1_ba2;
int ydim0_update_halo_kernel1_ba2_h = -1;
__constant__ int xdim1_update_halo_kernel1_ba2;
int xdim1_update_halo_kernel1_ba2_h = -1;
__constant__ int ydim1_update_halo_kernel1_ba2;
int ydim1_update_halo_kernel1_ba2_h = -1;
__constant__ int xdim2_update_halo_kernel1_ba2;
int xdim2_update_halo_kernel1_ba2_h = -1;
__constant__ int ydim2_update_halo_kernel1_ba2;
int ydim2_update_halo_kernel1_ba2_h = -1;
__constant__ int xdim3_update_halo_kernel1_ba2;
int xdim3_update_halo_kernel1_ba2_h = -1;
__constant__ int ydim3_update_halo_kernel1_ba2;
int ydim3_update_halo_kernel1_ba2_h = -1;
__constant__ int xdim4_update_halo_kernel1_ba2;
int xdim4_update_halo_kernel1_ba2_h = -1;
__constant__ int ydim4_update_halo_kernel1_ba2;
int ydim4_update_halo_kernel1_ba2_h = -1;
__constant__ int xdim5_update_halo_kernel1_ba2;
int xdim5_update_halo_kernel1_ba2_h = -1;
__constant__ int ydim5_update_halo_kernel1_ba2;
int ydim5_update_halo_kernel1_ba2_h = -1;
__constant__ int xdim6_update_halo_kernel1_ba2;
int xdim6_update_halo_kernel1_ba2_h = -1;
__constant__ int ydim6_update_halo_kernel1_ba2;
int ydim6_update_halo_kernel1_ba2_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel1_ba2 * (y) + \
xdim0_update_halo_kernel1_ba2 * ydim0_update_halo_kernel1_ba2 * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel1_ba2 * (y) + \
xdim1_update_halo_kernel1_ba2 * ydim1_update_halo_kernel1_ba2 * (z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_update_halo_kernel1_ba2 * (y) + \
xdim2_update_halo_kernel1_ba2 * ydim2_update_halo_kernel1_ba2 * (z))
#define OPS_ACC3(x, y, z) \
(x + xdim3_update_halo_kernel1_ba2 * (y) + \
xdim3_update_halo_kernel1_ba2 * ydim3_update_halo_kernel1_ba2 * (z))
#define OPS_ACC4(x, y, z) \
(x + xdim4_update_halo_kernel1_ba2 * (y) + \
xdim4_update_halo_kernel1_ba2 * ydim4_update_halo_kernel1_ba2 * (z))
#define OPS_ACC5(x, y, z) \
(x + xdim5_update_halo_kernel1_ba2 * (y) + \
xdim5_update_halo_kernel1_ba2 * ydim5_update_halo_kernel1_ba2 * (z))
#define OPS_ACC6(x, y, z) \
(x + xdim6_update_halo_kernel1_ba2 * (y) + \
xdim6_update_halo_kernel1_ba2 * ydim6_update_halo_kernel1_ba2 * (z))
// user function
__device__
inline void
update_halo_kernel1_ba2(double *density0, double *density1, double *energy0,
double *energy1, double *pressure,
double *viscosity, double *soundspeed,
const int *fields) {
if (fields[FIELD_DENSITY0] == 1)
density0[OPS_ACC0(0, 0, 0)] = density0[OPS_ACC0(0, 0, 3)];
if (fields[FIELD_DENSITY1] == 1)
density1[OPS_ACC1(0, 0, 0)] = density1[OPS_ACC1(0, 0, 3)];
if (fields[FIELD_ENERGY0] == 1)
energy0[OPS_ACC2(0, 0, 0)] = energy0[OPS_ACC2(0, 0, 3)];
if (fields[FIELD_ENERGY1] == 1)
energy1[OPS_ACC3(0, 0, 0)] = energy1[OPS_ACC3(0, 0, 3)];
if (fields[FIELD_PRESSURE] == 1)
pressure[OPS_ACC4(0, 0, 0)] = pressure[OPS_ACC4(0, 0, 3)];
if (fields[FIELD_VISCOSITY] == 1)
viscosity[OPS_ACC5(0, 0, 0)] = viscosity[OPS_ACC5(0, 0, 3)];
if (fields[FIELD_SOUNDSPEED] == 1)
soundspeed[OPS_ACC6(0, 0, 0)] = soundspeed[OPS_ACC6(0, 0, 3)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
__global__ void
ops_update_halo_kernel1_ba2(double *__restrict arg0, double *__restrict arg1,
double *__restrict arg2, double *__restrict arg3,
double *__restrict arg4, double *__restrict arg5,
double *__restrict arg6, const int *__restrict arg7,
int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel1_ba2 +
idx_z * 1 * 1 * xdim0_update_halo_kernel1_ba2 *
ydim0_update_halo_kernel1_ba2;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel1_ba2 +
idx_z * 1 * 1 * xdim1_update_halo_kernel1_ba2 *
ydim1_update_halo_kernel1_ba2;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_update_halo_kernel1_ba2 +
idx_z * 1 * 1 * xdim2_update_halo_kernel1_ba2 *
ydim2_update_halo_kernel1_ba2;
arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_update_halo_kernel1_ba2 +
idx_z * 1 * 1 * xdim3_update_halo_kernel1_ba2 *
ydim3_update_halo_kernel1_ba2;
arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_update_halo_kernel1_ba2 +
idx_z * 1 * 1 * xdim4_update_halo_kernel1_ba2 *
ydim4_update_halo_kernel1_ba2;
arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_update_halo_kernel1_ba2 +
idx_z * 1 * 1 * xdim5_update_halo_kernel1_ba2 *
ydim5_update_halo_kernel1_ba2;
arg6 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim6_update_halo_kernel1_ba2 +
idx_z * 1 * 1 * xdim6_update_halo_kernel1_ba2 *
ydim6_update_halo_kernel1_ba2;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel1_ba2(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
}
}
// host stub function
void ops_par_loop_update_halo_kernel1_ba2(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4,
ops_arg arg5, ops_arg arg6,
ops_arg arg7) {
// Timing
double t1, t2, c1, c2;
ops_arg args[8] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 8, range, 64))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(64, "update_halo_kernel1_ba2");
OPS_kernels[64].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel1_ba2_h ||
ydim0 != ydim0_update_halo_kernel1_ba2_h ||
xdim1 != xdim1_update_halo_kernel1_ba2_h ||
ydim1 != ydim1_update_halo_kernel1_ba2_h ||
xdim2 != xdim2_update_halo_kernel1_ba2_h ||
ydim2 != ydim2_update_halo_kernel1_ba2_h ||
xdim3 != xdim3_update_halo_kernel1_ba2_h ||
ydim3 != ydim3_update_halo_kernel1_ba2_h ||
xdim4 != xdim4_update_halo_kernel1_ba2_h ||
ydim4 != ydim4_update_halo_kernel1_ba2_h ||
xdim5 != xdim5_update_halo_kernel1_ba2_h ||
ydim5 != ydim5_update_halo_kernel1_ba2_h ||
xdim6 != xdim6_update_halo_kernel1_ba2_h ||
ydim6 != ydim6_update_halo_kernel1_ba2_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel1_ba2, &xdim0, sizeof(int));
xdim0_update_halo_kernel1_ba2_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel1_ba2, &ydim0, sizeof(int));
ydim0_update_halo_kernel1_ba2_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel1_ba2, &xdim1, sizeof(int));
xdim1_update_halo_kernel1_ba2_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel1_ba2, &ydim1, sizeof(int));
ydim1_update_halo_kernel1_ba2_h = ydim1;
cudaMemcpyToSymbol(xdim2_update_halo_kernel1_ba2, &xdim2, sizeof(int));
xdim2_update_halo_kernel1_ba2_h = xdim2;
cudaMemcpyToSymbol(ydim2_update_halo_kernel1_ba2, &ydim2, sizeof(int));
ydim2_update_halo_kernel1_ba2_h = ydim2;
cudaMemcpyToSymbol(xdim3_update_halo_kernel1_ba2, &xdim3, sizeof(int));
xdim3_update_halo_kernel1_ba2_h = xdim3;
cudaMemcpyToSymbol(ydim3_update_halo_kernel1_ba2, &ydim3, sizeof(int));
ydim3_update_halo_kernel1_ba2_h = ydim3;
cudaMemcpyToSymbol(xdim4_update_halo_kernel1_ba2, &xdim4, sizeof(int));
xdim4_update_halo_kernel1_ba2_h = xdim4;
cudaMemcpyToSymbol(ydim4_update_halo_kernel1_ba2, &ydim4, sizeof(int));
ydim4_update_halo_kernel1_ba2_h = ydim4;
cudaMemcpyToSymbol(xdim5_update_halo_kernel1_ba2, &xdim5, sizeof(int));
xdim5_update_halo_kernel1_ba2_h = xdim5;
cudaMemcpyToSymbol(ydim5_update_halo_kernel1_ba2, &ydim5, sizeof(int));
ydim5_update_halo_kernel1_ba2_h = ydim5;
cudaMemcpyToSymbol(xdim6_update_halo_kernel1_ba2, &xdim6, sizeof(int));
xdim6_update_halo_kernel1_ba2_h = xdim6;
cudaMemcpyToSymbol(ydim6_update_halo_kernel1_ba2, &ydim6, sizeof(int));
ydim6_update_halo_kernel1_ba2_h = ydim6;
}
int *arg7h = (int *)arg7.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg7.data = OPS_consts_h + consts_bytes;
arg7.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg7.data)[d] = arg7h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
int dat4 = args[4].dat->elem_size;
int dat5 = args[5].dat->elem_size;
int dat6 = args[6].dat->elem_size;
char *p_a[8];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] -
args[2].dat->base[1] - d_m[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] -
d_m[2]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[3].dat->d_m[d];
#endif
int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] -
args[3].dat->base[0] - d_m[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] -
args[3].dat->base[1] - d_m[1]);
base3 = base3 +
dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] -
d_m[2]);
p_a[3] = (char *)args[3].data_d + base3;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[4].dat->d_m[d];
#endif
int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] -
args[4].dat->base[0] - d_m[0]);
base4 = base4 +
dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] -
args[4].dat->base[1] - d_m[1]);
base4 = base4 +
dat4 * args[4].dat->size[0] * args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] -
d_m[2]);
p_a[4] = (char *)args[4].data_d + base4;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[5].dat->d_m[d];
#endif
int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] -
args[5].dat->base[0] - d_m[0]);
base5 = base5 +
dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] -
args[5].dat->base[1] - d_m[1]);
base5 = base5 +
dat5 * args[5].dat->size[0] * args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] -
d_m[2]);
p_a[5] = (char *)args[5].data_d + base5;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[6].dat->d_m[d];
#endif
int base6 = dat6 * 1 * (start[0] * args[6].stencil->stride[0] -
args[6].dat->base[0] - d_m[0]);
base6 = base6 +
dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1] -
args[6].dat->base[1] - d_m[1]);
base6 = base6 +
dat6 * args[6].dat->size[0] * args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2] - args[6].dat->base[2] -
d_m[2]);
p_a[6] = (char *)args[6].data_d + base6;
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args, 8, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[64].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel1_ba2<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (int *)arg7.data_d,
x_size, y_size, z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[64].time += t1 - t2;
}
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
ops_set_halo_dirtybit3(&args[2], range);
ops_set_halo_dirtybit3(&args[3], range);
ops_set_halo_dirtybit3(&args[4], range);
ops_set_halo_dirtybit3(&args[5], range);
ops_set_halo_dirtybit3(&args[6], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[64].mpi_time += t2 - t1;
OPS_kernels[64].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[64].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[64].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[64].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[64].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[64].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[64].transfer += ops_compute_transfer(dim, start, end, &arg6);
}
}
|
a41bab94b581e11c8458aeab6f7b6bbca2d2f564.hip | // !!! This is a file automatically generated by hipify!!!
/*
  This Code is provided to be freely used, distributed, or modified.
However it comes without warranty of any kind.
Matt Wheeler 2011
Convenience functions to solve a linear system Ax = B and calculate
the trace of a matrix. All functions use the GPU.
*/
#include <rocblas.h>
#include <cutil_inline.h>
#include "util.h"
#include "Block.h"
void cholesky_cuda(DATA_TYPE*,int,int );
//Uses Cholesky decomposition to solve d_C * x = d_B (i.e. x = d_C^{-1} * d_B)
//Overwrites both d_C and d_B so watch out!
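//After the (optional) factorization, two triangular solves are applied with the
//lower-triangular Cholesky factor L held in d_C: first L*Y = d_B, then L^T*X = Y,
//so the solution X ends up in d_B.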
void leftLinCholSovle(DATA_TYPE *d_C, DATA_TYPE *d_B, const int& size, const int& cols,
const bool& needsChol) {
if(needsChol) {
DATA_TYPE *h_C = new DATA_TYPE[size*size];
cutilSafeCall( hipMemcpy(h_C, d_C, size*size*sizeof(DATA_TYPE), hipMemcpyDeviceToHost) );
cholesky_cuda(h_C, size, BLOCK_N);
cutilSafeCall( hipMemcpy(d_C, h_C, size*size*sizeof(DATA_TYPE), hipMemcpyHostToDevice) );
delete [] h_C;
}
#ifdef DOUBLE_PRECISION
hipblasDtrsm('L','L','N','N',size,cols,1.,d_C,size,d_B,size);
hipblasDtrsm('L','L','T','N',size,cols,1.,d_C,size,d_B,size);
#else
hipblasStrsm('L','L','N','N',size,cols,1.,d_C,size,d_B,size);
hipblasStrsm('L','L','T','N',size,cols,1.,d_C,size,d_B,size);
#endif
}
//Calculates the trace of a size-by-size matrix
DATA_TYPE trace(DATA_TYPE *mat, const int& size) {
DATA_TYPE *h_ones = new DATA_TYPE[size], *d_ones,
ans;
int i;
cutilSafeCall( hipMalloc(&d_ones, size*sizeof(DATA_TYPE)) );
for(i = 0; i < size; i++)
h_ones[i] = 1.;
cutilSafeCall( hipMemcpy(d_ones, h_ones, size*sizeof(DATA_TYPE), hipMemcpyHostToDevice) );
#ifdef DOUBLE_PRECISION
ans = hipblasDdot(size,mat,size+1,d_ones,1);
#else
ans = hipblasSdot(size,mat,size+1,d_ones,1);
#endif
cutilSafeCall( hipFree(d_ones) );
delete [] h_ones;
return ans;
}
//Calculates the trace of the product of two size-by-size matrices
DATA_TYPE trace(DATA_TYPE *matA, DATA_TYPE *matB, const int& size) {
DATA_TYPE *mat,
ans = 0.;
//Which method is faster depends on the number of cols/rows
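    //Small matrices: form the full product A*B on the device and take its trace.
    //Large matrices: accumulate dot(row i of A, column i of B) over i, which gives
    //trace(A*B) without allocating the size*size temporary.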
if(size <= 2048) {
cutilSafeCall( hipMalloc(&mat, size*size*sizeof(DATA_TYPE)) );
#ifdef DOUBLE_PRECISION
hipblasDgemm('N','N',size,size,size,1.,matA,size,matB,size,0,mat,size);
#else
hipblasSgemm('N','N',size,size,size,1.,matA,size,matB,size,0,mat,size);
#endif
ans = trace(mat,size);
cutilSafeCall( hipFree(mat) );
} else {
int i;
for(i = 0; i < size; i++)
#ifdef DOUBLE_PRECISION
ans += hipblasDdot(size,&matA[i],size,&matB[i*size],1);
#else
ans += hipblasSdot(size,&matA[i],size,&matB[i*size],1);
#endif
}
return ans;
}
| a41bab94b581e11c8458aeab6f7b6bbca2d2f564.cu | /*
  This Code is provided to be freely used, distributed, or modified.
However it comes without warranty of any kind.
Matt Wheeler 2011
Convenience functions to solve a linear system Ax = B and calculate
the trace of a matrix. All functions use the GPU.
*/
#include <cublas.h>
#include <cutil_inline.h>
#include "util.h"
#include "Block.h"
void cholesky_cuda(DATA_TYPE*,int,int );
//Uses Cholesky decomposition to solve d_C * x = d_B (i.e. x = d_C^{-1} * d_B)
//Overwrites both d_C and d_B so watch out!
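//After the (optional) factorization, two triangular solves are applied with the
//lower-triangular Cholesky factor L held in d_C: first L*Y = d_B, then L^T*X = Y,
//so the solution X ends up in d_B.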
void leftLinCholSovle(DATA_TYPE *d_C, DATA_TYPE *d_B, const int& size, const int& cols,
const bool& needsChol) {
if(needsChol) {
DATA_TYPE *h_C = new DATA_TYPE[size*size];
cutilSafeCall( cudaMemcpy(h_C, d_C, size*size*sizeof(DATA_TYPE), cudaMemcpyDeviceToHost) );
cholesky_cuda(h_C, size, BLOCK_N);
cutilSafeCall( cudaMemcpy(d_C, h_C, size*size*sizeof(DATA_TYPE), cudaMemcpyHostToDevice) );
delete [] h_C;
}
#ifdef DOUBLE_PRECISION
cublasDtrsm('L','L','N','N',size,cols,1.,d_C,size,d_B,size);
cublasDtrsm('L','L','T','N',size,cols,1.,d_C,size,d_B,size);
#else
cublasStrsm('L','L','N','N',size,cols,1.,d_C,size,d_B,size);
cublasStrsm('L','L','T','N',size,cols,1.,d_C,size,d_B,size);
#endif
}
//Calculates the trace of a size-by-size matrix
DATA_TYPE trace(DATA_TYPE *mat, const int& size) {
DATA_TYPE *h_ones = new DATA_TYPE[size], *d_ones,
ans;
int i;
cutilSafeCall( cudaMalloc(&d_ones, size*sizeof(DATA_TYPE)) );
for(i = 0; i < size; i++)
h_ones[i] = 1.;
cutilSafeCall( cudaMemcpy(d_ones, h_ones, size*sizeof(DATA_TYPE), cudaMemcpyHostToDevice) );
#ifdef DOUBLE_PRECISION
ans = cublasDdot(size,mat,size+1,d_ones,1);
#else
ans = cublasSdot(size,mat,size+1,d_ones,1);
#endif
cutilSafeCall( cudaFree(d_ones) );
delete [] h_ones;
return ans;
}
//Calculates the trace of the product of two size-by-size matrices
DATA_TYPE trace(DATA_TYPE *matA, DATA_TYPE *matB, const int& size) {
DATA_TYPE *mat,
ans = 0.;
//Which method is faster depends on the number of cols/rows
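  //The large-size branch uses trace(A*B) = sum_i dot(row_i(A), col_i(B)),
  //so the full product never has to be formed.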
if(size <= 2048) {
cutilSafeCall( cudaMalloc(&mat, size*size*sizeof(DATA_TYPE)) );
#ifdef DOUBLE_PRECISION
cublasDgemm('N','N',size,size,size,1.,matA,size,matB,size,0,mat,size);
#else
cublasSgemm('N','N',size,size,size,1.,matA,size,matB,size,0,mat,size);
#endif
ans = trace(mat,size);
cutilSafeCall( cudaFree(mat) );
} else {
int i;
for(i = 0; i < size; i++)
#ifdef DOUBLE_PRECISION
ans += cublasDdot(size,&matA[i],size,&matB[i*size],1);
#else
ans += cublasSdot(size,&matA[i],size,&matB[i*size],1);
#endif
}
return ans;
}
|
746f5ca870330c6edaff9094638bcfcc5afb0dc5.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/hip/HIPBlas.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/native/hip/vol2col.cuh>
namespace at {
namespace native {
namespace {
static inline void slow_conv_transpose3d_shape_check(
const Tensor& input,
const Tensor& grad_output,
const Tensor& weight,
const Tensor& bias,
int kernel_depth,
int kernel_width,
int kernel_height,
int stride_depth,
int stride_width,
int stride_height,
int padding_depth,
int padding_width,
int padding_height,
int dilation_depth,
int dilation_width,
int dilation_height,
int output_padding_depth,
int output_padding_width,
int output_padding_height,
int weight_nullable) {
TORCH_CHECK(
input.numel() != 0 && (input.dim() == 4 || input.dim() == 5),
"non-empty 4D or 5D (batch mode) tensor expected for input, but got: ",
input.sizes());
TORCH_CHECK(
stride_depth > 0 && stride_width > 0 && stride_height > 0,
"stride should be greater than zero, but got stride_depth: ",
stride_depth,
" stride_height: ",
stride_height,
" stride_width: ",
stride_width);
TORCH_CHECK(
dilation_depth > 0 && dilation_width > 0 && dilation_height > 0,
"dilation should be greater than zero, but got dilation_depth: ",
dilation_depth,
", dilation_height: ",
dilation_height,
", dilation_width: ",
dilation_width);
TORCH_CHECK(
(output_padding_depth < stride_depth ||
output_padding_depth < dilation_depth) &&
(output_padding_width < stride_width ||
output_padding_width < dilation_width) &&
(output_padding_height < stride_height ||
output_padding_height < dilation_height),
"output padding must be smaller than either stride or dilation,",
" but got output_padding_depth: ",
output_padding_depth,
" output_padding_height: ",
output_padding_height,
" output_padding_width: ",
output_padding_width,
" stride_depth: ",
stride_depth,
" stride_height: ",
stride_height,
" stride_width: ",
stride_width,
" dilation_depth: ",
dilation_depth,
" dilation_height: ",
dilation_height,
" dilation_width: ",
dilation_width);
// number of input & output planes and kernel size is indirectly defined by
// the weight tensor
if (weight.defined()) {
TORCH_CHECK(
weight.numel() != 0 && weight.dim() == 5,
"non-empty 5D (n_output_plane x n_input_plane ",
"x kernel_depth x kernel_height x kernel_width) tensor ",
"expected for weight, but got: ",
weight.sizes());
if (bias.defined()) {
check_dim_size(bias, 1, 0, weight.size(1));
}
} else if (!weight_nullable) {
AT_ERROR("weight tensor is expected to be non-nullable");
}
int ndim = input.dim();
int dimf = 0;
int dimd = 1;
int dimh = 2;
int dimw = 3;
if (ndim == 5) {
dimf++;
dimd++;
dimh++;
dimw++;
}
if (weight.defined()) {
const int64_t n_input_plane = weight.size(0);
check_dim_size(input, ndim, dimf, n_input_plane);
}
int64_t input_width = input.size(dimw);
int64_t input_height = input.size(dimh);
int64_t input_depth = input.size(dimd);
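  // Transposed-convolution output size per spatial dim:
  // out = (in - 1) * stride - 2 * padding + dilation * (kernel - 1) + 1 + output_padding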
int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth +
(dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth;
int64_t output_height = (input_height - 1) * stride_height -
2 * padding_height + (dilation_height * (kernel_height - 1) + 1) +
output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
if (output_depth < 1 || output_width < 1 || output_height < 1) {
AT_ERROR(
"Given input size per channel: (",
input_depth,
" x ",
input_height,
" x ",
input_width,
"). Calculated output size per channel: (",
output_depth,
" x ",
output_height,
" x ",
output_width,
"). Output size is too small");
}
if (grad_output.defined()) {
if (weight.defined()) {
const int64_t n_output_plane = weight.size(1);
check_dim_size(grad_output, ndim, dimf, n_output_plane);
} else if (bias.defined()) {
const int64_t n_output_plane = bias.size(0);
check_dim_size(grad_output, ndim, dimf, n_output_plane);
}
check_dim_size(grad_output, ndim, dimd, output_depth);
check_dim_size(grad_output, ndim, dimh, output_height);
check_dim_size(grad_output, ndim, dimw, output_width);
}
}
void slow_conv_transpose3d_out_cuda_template(
Tensor& output,
const Tensor& input_,
const Tensor& weight_,
IntArrayRef kernel_size,
const Tensor& bias,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation) {
TORCH_CHECK(
kernel_size.size() == 3,
"It is expected kernel_size equals to 3, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 3,
"It is expected dilation equals to 3, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 3,
"It is expected padding equals to 3, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 3,
"It is expected stride equals to 3, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 3,
"It is expected stride equals to 3, but got size ",
output_padding.size());
int64_t kernel_depth = kernel_size[0];
int64_t kernel_height = kernel_size[1];
int64_t kernel_width = kernel_size[2];
int64_t dilation_depth = dilation[0];
int64_t dilation_height = dilation[1];
int64_t dilation_width = dilation[2];
int64_t padding_depth = padding[0];
int64_t padding_height = padding[1];
int64_t padding_width = padding[2];
int64_t stride_depth = stride[0];
int64_t stride_height = stride[1];
int64_t stride_width = stride[2];
int64_t output_padding_depth = output_padding[0];
int64_t output_padding_height = output_padding[1];
int64_t output_padding_width = output_padding[2];
int n_input_plane = weight_.size(0);
int n_output_plane = weight_.size(1);
TensorArg input_arg{input_, "input", 1}, output_arg{output, "output", 2},
weight_arg{weight_, "weight", 3}, bias_arg{bias, "bias", 4};
checkAllSameGPU(
"slow_conv_transpose3d_out_cuda",
{input_arg, output_arg, weight_arg, bias_arg});
slow_conv_transpose3d_shape_check(
input_,
Tensor(),
weight_,
bias,
kernel_depth,
kernel_width,
kernel_height,
stride_depth,
stride_width,
stride_height,
padding_depth,
padding_width,
padding_height,
dilation_depth,
dilation_width,
dilation_height,
output_padding_depth,
output_padding_width,
output_padding_height,
0);
TORCH_CHECK(
!bias.defined() || bias.is_contiguous(),
"bias tensor has to be contiguous");
Tensor input = input_.contiguous();
Tensor weight = weight_.contiguous();
int is_batch = false;
if (input.dim() == 4) {
// Force batch
is_batch = true;
input.resize_(
{1, input.size(0), input.size(1), input.size(2), input.size(3)});
}
int64_t input_width = input.size(4);
int64_t input_height = input.size(3);
int64_t input_depth = input.size(2);
int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth +
(dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth;
int64_t output_height = (input_height - 1) * stride_height -
2 * padding_height + (dilation_height * (kernel_height - 1) + 1) +
output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Resize output
output.resize_(
{batch_size, n_output_plane, output_depth, output_height, output_width});
// Create temporary columns
Tensor columns = at::empty({n_output_plane * kernel_width * kernel_height * kernel_depth,
input_depth * input_height * input_width}, input.options());
// Define a buffer of ones, for bias accumulation
Tensor ones = bias.defined() ? at::ones({output_depth, output_height, output_width}, input_.options()) : Tensor();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(), "slow_conv_transpose3d_out_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
// Helpers
Tensor input_n;
Tensor output_n;
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
        // Matrix multiply per output:
input_n = input.select(0, elt);
output_n = output.select(0, elt);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m =
weight.size(1) * weight.size(2) * weight.size(3) * weight.size(4);
int64_t n = columns.size(1);
int64_t k = weight.size(0);
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
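        // In row-major terms this computes columns = W^T @ input_n, with W viewed as
        // [n_input_plane, n_output_plane*kD*kH*kW] and input_n as [n_input_plane, iD*iH*iW];
        // col2vol below then scatter-adds these per-kernel-offset contributions into output_n.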
at::cuda::blas::gemm<scalar_t>(
'n',
't',
n,
m,
k,
static_cast<scalar_t>(1),
input_n.data_ptr<scalar_t>(),
n,
weight.data_ptr<scalar_t>(),
m,
static_cast<scalar_t>(0),
columns.data_ptr<scalar_t>(),
n);
// Unpack columns back into input:
at::native::col2vol<scalar_t, accscalar_t>(
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
columns.data_ptr<scalar_t>(),
n_output_plane,
output_depth,
output_height,
output_width,
input_depth,
input_height,
input_width,
kernel_depth,
kernel_height,
kernel_width,
padding_depth,
padding_height,
padding_width,
stride_depth,
stride_height,
stride_width,
dilation_depth,
dilation_height,
dilation_width,
output_n.data_ptr<scalar_t>());
// Do Bias after:
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m_ = n_output_plane;
int64_t n_ = output_depth * output_height * output_width;
int64_t k_ = 1;
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
if (bias.defined()) {
at::cuda::blas::gemm<scalar_t>(
't',
'n',
n_,
m_,
k_,
static_cast<scalar_t>(1),
ones.data_ptr<scalar_t>(),
k_,
bias.data_ptr<scalar_t>(),
k_,
static_cast<scalar_t>(1),
output_n.data_ptr<scalar_t>(),
n_);
}
}
// Resize output
if (is_batch) {
output.resize_(
{n_output_plane, output_depth, output_height, output_width});
input.resize_(
{n_input_plane, input_depth, input_height, input_width});
}
});
}
void slow_conv_transpose3d_backward_out_cuda_template(
const Tensor& input_,
const Tensor& grad_output_,
Tensor& grad_input,
const Tensor& weight_,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation) {
TORCH_CHECK(
kernel_size.size() == 3,
"It is expected kernel_size equals to 3, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 3,
"It is expected dilation equals to 3, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 3,
"It is expected padding equals to 3, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 3,
"It is expected stride equals to 3, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 3,
"It is expected stride equals to 3, but got size ",
output_padding.size());
int n_input_plane = weight_.size(0);
int n_output_plane = weight_.size(1);
int64_t kernel_depth = kernel_size[0];
int64_t kernel_height = kernel_size[1];
int64_t kernel_width = kernel_size[2];
int64_t dilation_depth = dilation[0];
int64_t dilation_height = dilation[1];
int64_t dilation_width = dilation[2];
int64_t padding_depth = padding[0];
int64_t padding_height = padding[1];
int64_t padding_width = padding[2];
int64_t stride_depth = stride[0];
int64_t stride_height = stride[1];
int64_t stride_width = stride[2];
int64_t output_padding_depth = output_padding[0];
int64_t output_padding_height = output_padding[1];
int64_t output_padding_width = output_padding[2];
TensorArg input_arg{input_, "input", 1},
grad_output_arg{grad_output_, "grad_output", 2},
weight_arg{weight_, "weight", 3},
grad_input_arg{grad_input, "grad_input", 4};
checkAllSameGPU(
"slow_conv_transpose3d_backward_out_cuda",
{input_arg,
grad_output_arg,
weight_arg,
grad_input_arg});
slow_conv_transpose3d_shape_check(
input_,
grad_output_,
weight_,
Tensor(),
kernel_depth,
kernel_width,
kernel_height,
stride_depth,
stride_width,
stride_height,
padding_depth,
padding_width,
padding_height,
dilation_depth,
dilation_width,
dilation_height,
output_padding_depth,
output_padding_width,
output_padding_height,
0);
Tensor input = input_.contiguous();
Tensor grad_output = grad_output_.contiguous();
Tensor weight = weight_.contiguous();
bool is_batch = false;
if (input.dim() == 4) {
// Force batch
is_batch = true;
input.resize_(
{1, input.size(0), input.size(1), input.size(2), input.size(3)});
grad_output.resize_({1,
grad_output.size(0),
grad_output.size(1),
grad_output.size(2),
grad_output.size(3)});
}
int64_t input_width = input.size(4);
int64_t input_height = input.size(3);
int64_t input_depth = input.size(2);
int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth +
(dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth;
int64_t output_height = (input_height - 1) * stride_height -
2 * padding_height + (dilation_height * (kernel_height - 1) + 1) +
output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Resize output
grad_input.resize_(
{batch_size, n_input_plane, input_depth, input_height, input_width});
// Create temporary columns
bool need_columns = (kernel_depth != 1 || kernel_height != 1 || kernel_width != 1 ||
stride_depth != 1 || stride_height != 1 || stride_width != 1 ||
dilation_depth != 1 || dilation_height != 1 ||
dilation_width != 1 || padding_depth != 0 ||
padding_height != 0 || padding_width != 0);
Tensor grad_columns = need_columns ? at::empty({n_output_plane * kernel_width * kernel_height * kernel_depth,
input_depth * input_height * input_width}, input.options()) : Tensor();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(), "slow_conv_transpose3d_backward_out_cuda", [&] {
// Helpers
Tensor grad_input_n;
Tensor grad_output_n;
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
        // Matrix multiply per sample:
grad_input_n = grad_input.select(0, elt);
grad_output_n = grad_output.select(0, elt);
if (need_columns) {
// Extract columns:
at::native::vol2col<scalar_t>(
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_output_n.data_ptr<scalar_t>(),
n_output_plane,
output_depth,
output_height,
output_width,
input_depth,
input_height,
input_width,
kernel_depth,
kernel_height,
kernel_width,
padding_depth,
padding_height,
padding_width,
stride_depth,
stride_height,
stride_width,
dilation_depth,
dilation_height,
dilation_width,
grad_columns.data_ptr<scalar_t>());
}
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m = weight.size(0);
int64_t n = input_depth * input_height * input_width;
int64_t k =
weight.size(1) * weight.size(2) * weight.size(3) * weight.size(4);
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
auto gemm_in_ptr = need_columns ? grad_columns.data_ptr<scalar_t>()
: grad_output_n.data_ptr<scalar_t>();
at::cuda::blas::gemm<scalar_t>(
'n',
'n',
n,
m,
k,
static_cast<scalar_t>(1),
gemm_in_ptr,
n,
weight.data_ptr<scalar_t>(),
k,
static_cast<scalar_t>(0),
grad_input_n.data_ptr<scalar_t>(),
n);
}
// Resize output
if (is_batch) {
grad_output.resize_(
{n_output_plane, output_depth, output_height, output_width});
input.resize_(
{n_input_plane, input_depth, input_height, input_width});
grad_input.resize_(
{n_input_plane, input_depth, input_height, input_width});
}
});
}
void slow_conv_transpose3d_acc_grad_parameters_cuda(
const Tensor& input_,
const Tensor& grad_output_,
Tensor& grad_weight,
Tensor& grad_bias,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
int scale_) {
TORCH_CHECK(
kernel_size.size() == 3,
"It is expected kernel_size equals to 3, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 3,
"It is expected dilation equals to 3, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 3,
"It is expected padding equals to 3, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 3,
"It is expected stride equals to 3, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 3,
"It is expected stride equals to 3, but got size ",
output_padding.size());
int64_t kernel_depth = kernel_size[0];
int64_t kernel_height = kernel_size[1];
int64_t kernel_width = kernel_size[2];
int64_t dilation_depth = dilation[0];
int64_t dilation_height = dilation[1];
int64_t dilation_width = dilation[2];
int64_t padding_depth = padding[0];
int64_t padding_height = padding[1];
int64_t padding_width = padding[2];
int64_t stride_depth = stride[0];
int64_t stride_height = stride[1];
int64_t stride_width = stride[2];
int64_t output_padding_depth = output_padding[0];
int64_t output_padding_height = output_padding[1];
int64_t output_padding_width = output_padding[2];
TensorArg input_arg{input_, "input", 1},
grad_output_arg{grad_output_, "grad_output", 2},
grad_weight_arg{grad_weight, "grad_weight", 3},
grad_bias_arg{grad_bias, "grad_bias", 4};
checkAllSameGPU(
"slow_conv_transpose3d_acc_grad_parameters_cuda",
{input_arg,
grad_output_arg,
grad_weight_arg,
grad_bias_arg});
slow_conv_transpose3d_shape_check(
input_,
grad_output_,
grad_weight,
grad_bias,
kernel_depth,
kernel_width,
kernel_height,
stride_depth,
stride_width,
stride_height,
padding_depth,
padding_width,
padding_height,
dilation_depth,
dilation_width,
dilation_height,
output_padding_depth,
output_padding_width,
output_padding_height,
1);
int n_output_plane;
if (grad_weight.defined()) {
n_output_plane = grad_weight.size(1);
} else if (grad_bias.defined()) {
n_output_plane = grad_bias.size(0);
} else {
return;
}
if (grad_weight.defined()) {
TORCH_CHECK(
grad_weight.is_contiguous(), "grad_weight needs to be contiguous");
}
if (grad_bias.defined()) {
TORCH_CHECK(grad_bias.is_contiguous(), "grad_bias needs to be contiguous");
}
Tensor input = input_.contiguous();
Tensor grad_output = grad_output_.contiguous();
bool is_batch = false;
if (input.dim() == 4) {
// Force batch
is_batch = true;
input.resize_(
{1, input.size(0), input.size(1), input.size(2), input.size(3)});
grad_output.resize_({1,
grad_output.size(0),
grad_output.size(1),
grad_output.size(2),
grad_output.size(3)});
}
int64_t input_width = input.size(4);
int64_t input_height = input.size(3);
int64_t input_depth = input.size(2);
int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth +
(dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth;
int64_t output_height = (input_height - 1) * stride_height -
2 * padding_height + (dilation_height * (kernel_height - 1) + 1) +
output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Create temporary columns
bool need_columns = (kernel_depth != 1 || kernel_height != 1 || kernel_width != 1 ||
stride_depth != 1 || stride_height != 1 || stride_width != 1 ||
dilation_depth != 1 || dilation_height != 1 ||
dilation_width != 1 || padding_depth != 0 ||
padding_height != 0 || padding_width != 0);
Tensor columns = need_columns ? at::empty({n_output_plane * kernel_width * kernel_height * kernel_depth,
input_depth * input_height * input_width}, input.options()) : Tensor();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(),
"slow_conv_transpose3d_acc_grad_parameters_cuda",
[&] {
// Helpers
Tensor input_n;
Tensor grad_output_n;
scalar_t scale = static_cast<scalar_t>(scale_);
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
        // Matrix multiply per output:
grad_output_n = grad_output.select(0, elt);
// Do Weight:
if (grad_weight.defined()) {
          // Matrix multiply per output:
input_n = input.select(0, elt);
if (need_columns) {
// Extract columns:
at::native::vol2col<scalar_t>(
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_output_n.data_ptr<scalar_t>(),
n_output_plane,
output_depth,
output_height,
output_width,
input_depth,
input_height,
input_width,
kernel_depth,
kernel_height,
kernel_width,
padding_depth,
padding_height,
padding_width,
stride_depth,
stride_height,
stride_width,
dilation_depth,
dilation_height,
dilation_width,
columns.data_ptr<scalar_t>());
}
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t n = n_output_plane * kernel_width * kernel_height * kernel_depth;
int64_t m = input_n.size(0); // n_input_plane
int64_t k = input_depth * input_height * input_width;
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
auto gemm_in_ptr = need_columns ? columns.data_ptr<scalar_t>() : grad_output_n.data_ptr<scalar_t>();
at::cuda::blas::gemm<scalar_t>(
't',
'n',
n,
m,
k,
scale,
gemm_in_ptr,
k,
input_n.data_ptr<scalar_t>(),
k,
static_cast<scalar_t>(1),
grad_weight.data_ptr<scalar_t>(),
n);
}
}
if (grad_bias.defined()) {
at::sum_out(grad_bias, grad_output, IntArrayRef{0, 2, 3, 4});
}
// Resize
if (is_batch) {
grad_output.resize_(
{n_output_plane, output_depth, output_height, output_width});
input.resize_(
{input.size(1), input_depth, input_height, input_width});
}
});
}
} // namespace
Tensor& slow_conv_transpose3d_out_cuda(const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size, const c10::optional<Tensor>& bias_opt,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
Tensor& output) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt);
const Tensor& bias = *bias_maybe_owned;
slow_conv_transpose3d_out_cuda_template(
output,
input,
weight,
kernel_size,
bias,
stride,
padding,
output_padding,
dilation);
return output;
}
Tensor slow_conv_transpose3d_cuda(
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size, const c10::optional<Tensor>& bias_opt,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt);
const Tensor& bias = *bias_maybe_owned;
Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
slow_conv_transpose3d_out_cuda_template(
output,
input,
weight,
kernel_size,
bias,
stride,
padding,
output_padding,
dilation);
return output;
}
std::tuple<Tensor&, Tensor&, Tensor&> slow_conv_transpose3d_backward_out_cuda(const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
Tensor& grad_input,
Tensor& grad_weight,
Tensor& grad_bias) {
if (grad_input.defined()) {
slow_conv_transpose3d_backward_out_cuda_template(
input,
grad_output,
grad_input,
weight,
kernel_size,
stride,
padding,
output_padding,
dilation);
}
if (grad_weight.defined()) {
grad_weight.resize_(weight.sizes());
grad_weight.zero_();
}
if (grad_bias.defined()) {
grad_bias.resize_({weight.size(1)});
grad_bias.zero_();
}
if (grad_weight.defined() || grad_bias.defined()) {
slow_conv_transpose3d_acc_grad_parameters_cuda(
input,
grad_output,
grad_weight,
grad_bias,
kernel_size,
stride,
padding,
output_padding,
dilation,
1);
}
return std::tuple<Tensor&, Tensor&, Tensor&>(
grad_input, grad_weight, grad_bias);
}
std::tuple<Tensor, Tensor, Tensor> slow_conv_transpose3d_backward_cuda(
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
std::array<bool, 3> output_mask) {
Tensor grad_input;
Tensor grad_weight;
Tensor grad_bias;
if (output_mask[0]) {
grad_input = at::empty({0}, grad_output.options());
} else {
grad_input = Tensor();
}
if (output_mask[1]) {
grad_weight = at::empty({0}, grad_output.options());
} else {
grad_weight = Tensor();
}
if (output_mask[2]) {
grad_bias = at::empty({0}, grad_output.options());
} else {
grad_bias = Tensor();
}
if (grad_input.defined()) {
slow_conv_transpose3d_backward_out_cuda_template(
input,
grad_output,
grad_input,
weight,
kernel_size,
stride,
padding,
output_padding,
dilation);
}
if (grad_weight.defined()) {
grad_weight.resize_(weight.sizes());
grad_weight.zero_();
}
if (grad_bias.defined()) {
grad_bias.resize_({weight.size(1)});
grad_bias.zero_();
}
if (grad_weight.defined() || grad_bias.defined()) {
slow_conv_transpose3d_acc_grad_parameters_cuda(
input,
grad_output,
grad_weight,
grad_bias,
kernel_size,
stride,
padding,
output_padding,
dilation,
1);
}
return std::tuple<Tensor, Tensor, Tensor>(grad_input, grad_weight, grad_bias);
}
} // namespace native
} // namespace at
| 746f5ca870330c6edaff9094638bcfcc5afb0dc5.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/cuda/CUDABlas.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/native/cuda/vol2col.cuh>
namespace at {
namespace native {
namespace {
static inline void slow_conv_transpose3d_shape_check(
const Tensor& input,
const Tensor& grad_output,
const Tensor& weight,
const Tensor& bias,
int kernel_depth,
int kernel_width,
int kernel_height,
int stride_depth,
int stride_width,
int stride_height,
int padding_depth,
int padding_width,
int padding_height,
int dilation_depth,
int dilation_width,
int dilation_height,
int output_padding_depth,
int output_padding_width,
int output_padding_height,
int weight_nullable) {
TORCH_CHECK(
input.numel() != 0 && (input.dim() == 4 || input.dim() == 5),
"non-empty 4D or 5D (batch mode) tensor expected for input, but got: ",
input.sizes());
TORCH_CHECK(
stride_depth > 0 && stride_width > 0 && stride_height > 0,
"stride should be greater than zero, but got stride_depth: ",
stride_depth,
" stride_height: ",
stride_height,
" stride_width: ",
stride_width);
TORCH_CHECK(
dilation_depth > 0 && dilation_width > 0 && dilation_height > 0,
"dilation should be greater than zero, but got dilation_depth: ",
dilation_depth,
", dilation_height: ",
dilation_height,
", dilation_width: ",
dilation_width);
TORCH_CHECK(
(output_padding_depth < stride_depth ||
output_padding_depth < dilation_depth) &&
(output_padding_width < stride_width ||
output_padding_width < dilation_width) &&
(output_padding_height < stride_height ||
output_padding_height < dilation_height),
"output padding must be smaller than either stride or dilation,",
" but got output_padding_depth: ",
output_padding_depth,
" output_padding_height: ",
output_padding_height,
" output_padding_width: ",
output_padding_width,
" stride_depth: ",
stride_depth,
" stride_height: ",
stride_height,
" stride_width: ",
stride_width,
" dilation_depth: ",
dilation_depth,
" dilation_height: ",
dilation_height,
" dilation_width: ",
dilation_width);
// number of input & output planes and kernel size is indirectly defined by
// the weight tensor
if (weight.defined()) {
TORCH_CHECK(
weight.numel() != 0 && weight.dim() == 5,
"non-empty 5D (n_output_plane x n_input_plane ",
"x kernel_depth x kernel_height x kernel_width) tensor ",
"expected for weight, but got: ",
weight.sizes());
if (bias.defined()) {
check_dim_size(bias, 1, 0, weight.size(1));
}
} else if (!weight_nullable) {
AT_ERROR("weight tensor is expected to be non-nullable");
}
int ndim = input.dim();
int dimf = 0;
int dimd = 1;
int dimh = 2;
int dimw = 3;
if (ndim == 5) {
dimf++;
dimd++;
dimh++;
dimw++;
}
if (weight.defined()) {
const int64_t n_input_plane = weight.size(0);
check_dim_size(input, ndim, dimf, n_input_plane);
}
int64_t input_width = input.size(dimw);
int64_t input_height = input.size(dimh);
int64_t input_depth = input.size(dimd);
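  // Transposed-convolution output size per spatial dim:
  // out = (in - 1) * stride - 2 * padding + dilation * (kernel - 1) + 1 + output_padding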
int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth +
(dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth;
int64_t output_height = (input_height - 1) * stride_height -
2 * padding_height + (dilation_height * (kernel_height - 1) + 1) +
output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
if (output_depth < 1 || output_width < 1 || output_height < 1) {
AT_ERROR(
"Given input size per channel: (",
input_depth,
" x ",
input_height,
" x ",
input_width,
"). Calculated output size per channel: (",
output_depth,
" x ",
output_height,
" x ",
output_width,
"). Output size is too small");
}
if (grad_output.defined()) {
if (weight.defined()) {
const int64_t n_output_plane = weight.size(1);
check_dim_size(grad_output, ndim, dimf, n_output_plane);
} else if (bias.defined()) {
const int64_t n_output_plane = bias.size(0);
check_dim_size(grad_output, ndim, dimf, n_output_plane);
}
check_dim_size(grad_output, ndim, dimd, output_depth);
check_dim_size(grad_output, ndim, dimh, output_height);
check_dim_size(grad_output, ndim, dimw, output_width);
}
}
void slow_conv_transpose3d_out_cuda_template(
Tensor& output,
const Tensor& input_,
const Tensor& weight_,
IntArrayRef kernel_size,
const Tensor& bias,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation) {
TORCH_CHECK(
kernel_size.size() == 3,
"It is expected kernel_size equals to 3, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 3,
"It is expected dilation equals to 3, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 3,
"It is expected padding equals to 3, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 3,
"It is expected stride equals to 3, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 3,
"It is expected stride equals to 3, but got size ",
output_padding.size());
int64_t kernel_depth = kernel_size[0];
int64_t kernel_height = kernel_size[1];
int64_t kernel_width = kernel_size[2];
int64_t dilation_depth = dilation[0];
int64_t dilation_height = dilation[1];
int64_t dilation_width = dilation[2];
int64_t padding_depth = padding[0];
int64_t padding_height = padding[1];
int64_t padding_width = padding[2];
int64_t stride_depth = stride[0];
int64_t stride_height = stride[1];
int64_t stride_width = stride[2];
int64_t output_padding_depth = output_padding[0];
int64_t output_padding_height = output_padding[1];
int64_t output_padding_width = output_padding[2];
int n_input_plane = weight_.size(0);
int n_output_plane = weight_.size(1);
TensorArg input_arg{input_, "input", 1}, output_arg{output, "output", 2},
weight_arg{weight_, "weight", 3}, bias_arg{bias, "bias", 4};
checkAllSameGPU(
"slow_conv_transpose3d_out_cuda",
{input_arg, output_arg, weight_arg, bias_arg});
slow_conv_transpose3d_shape_check(
input_,
Tensor(),
weight_,
bias,
kernel_depth,
kernel_width,
kernel_height,
stride_depth,
stride_width,
stride_height,
padding_depth,
padding_width,
padding_height,
dilation_depth,
dilation_width,
dilation_height,
output_padding_depth,
output_padding_width,
output_padding_height,
0);
TORCH_CHECK(
!bias.defined() || bias.is_contiguous(),
"bias tensor has to be contiguous");
Tensor input = input_.contiguous();
Tensor weight = weight_.contiguous();
int is_batch = false;
if (input.dim() == 4) {
// Force batch
is_batch = true;
input.resize_(
{1, input.size(0), input.size(1), input.size(2), input.size(3)});
}
int64_t input_width = input.size(4);
int64_t input_height = input.size(3);
int64_t input_depth = input.size(2);
int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth +
(dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth;
int64_t output_height = (input_height - 1) * stride_height -
2 * padding_height + (dilation_height * (kernel_height - 1) + 1) +
output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Resize output
output.resize_(
{batch_size, n_output_plane, output_depth, output_height, output_width});
// Create temporary columns
Tensor columns = at::empty({n_output_plane * kernel_width * kernel_height * kernel_depth,
input_depth * input_height * input_width}, input.options());
// Define a buffer of ones, for bias accumulation
Tensor ones = bias.defined() ? at::ones({output_depth, output_height, output_width}, input_.options()) : Tensor();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(), "slow_conv_transpose3d_out_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
// Helpers
Tensor input_n;
Tensor output_n;
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
        // Matrix multiply per output:
input_n = input.select(0, elt);
output_n = output.select(0, elt);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m =
weight.size(1) * weight.size(2) * weight.size(3) * weight.size(4);
int64_t n = columns.size(1);
int64_t k = weight.size(0);
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
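        // In row-major terms this computes columns = W^T @ input_n, with W viewed as
        // [n_input_plane, n_output_plane*kD*kH*kW] and input_n as [n_input_plane, iD*iH*iW];
        // col2vol below then scatter-adds these per-kernel-offset contributions into output_n.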
at::cuda::blas::gemm<scalar_t>(
'n',
't',
n,
m,
k,
static_cast<scalar_t>(1),
input_n.data_ptr<scalar_t>(),
n,
weight.data_ptr<scalar_t>(),
m,
static_cast<scalar_t>(0),
columns.data_ptr<scalar_t>(),
n);
// Unpack columns back into input:
at::native::col2vol<scalar_t, accscalar_t>(
at::cuda::getCurrentCUDAStream(),
columns.data_ptr<scalar_t>(),
n_output_plane,
output_depth,
output_height,
output_width,
input_depth,
input_height,
input_width,
kernel_depth,
kernel_height,
kernel_width,
padding_depth,
padding_height,
padding_width,
stride_depth,
stride_height,
stride_width,
dilation_depth,
dilation_height,
dilation_width,
output_n.data_ptr<scalar_t>());
// Do Bias after:
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m_ = n_output_plane;
int64_t n_ = output_depth * output_height * output_width;
int64_t k_ = 1;
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
if (bias.defined()) {
at::cuda::blas::gemm<scalar_t>(
't',
'n',
n_,
m_,
k_,
static_cast<scalar_t>(1),
ones.data_ptr<scalar_t>(),
k_,
bias.data_ptr<scalar_t>(),
k_,
static_cast<scalar_t>(1),
output_n.data_ptr<scalar_t>(),
n_);
}
}
// Resize output
if (is_batch) {
output.resize_(
{n_output_plane, output_depth, output_height, output_width});
input.resize_(
{n_input_plane, input_depth, input_height, input_width});
}
});
}
void slow_conv_transpose3d_backward_out_cuda_template(
const Tensor& input_,
const Tensor& grad_output_,
Tensor& grad_input,
const Tensor& weight_,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation) {
TORCH_CHECK(
kernel_size.size() == 3,
"It is expected kernel_size equals to 3, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 3,
"It is expected dilation equals to 3, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 3,
"It is expected padding equals to 3, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 3,
"It is expected stride equals to 3, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 3,
"It is expected stride equals to 3, but got size ",
output_padding.size());
int n_input_plane = weight_.size(0);
int n_output_plane = weight_.size(1);
int64_t kernel_depth = kernel_size[0];
int64_t kernel_height = kernel_size[1];
int64_t kernel_width = kernel_size[2];
int64_t dilation_depth = dilation[0];
int64_t dilation_height = dilation[1];
int64_t dilation_width = dilation[2];
int64_t padding_depth = padding[0];
int64_t padding_height = padding[1];
int64_t padding_width = padding[2];
int64_t stride_depth = stride[0];
int64_t stride_height = stride[1];
int64_t stride_width = stride[2];
int64_t output_padding_depth = output_padding[0];
int64_t output_padding_height = output_padding[1];
int64_t output_padding_width = output_padding[2];
TensorArg input_arg{input_, "input", 1},
grad_output_arg{grad_output_, "grad_output", 2},
weight_arg{weight_, "weight", 3},
grad_input_arg{grad_input, "grad_input", 4};
checkAllSameGPU(
"slow_conv_transpose3d_backward_out_cuda",
{input_arg,
grad_output_arg,
weight_arg,
grad_input_arg});
slow_conv_transpose3d_shape_check(
input_,
grad_output_,
weight_,
Tensor(),
kernel_depth,
kernel_width,
kernel_height,
stride_depth,
stride_width,
stride_height,
padding_depth,
padding_width,
padding_height,
dilation_depth,
dilation_width,
dilation_height,
output_padding_depth,
output_padding_width,
output_padding_height,
0);
Tensor input = input_.contiguous();
Tensor grad_output = grad_output_.contiguous();
Tensor weight = weight_.contiguous();
bool is_batch = false;
if (input.dim() == 4) {
// Force batch
is_batch = true;
input.resize_(
{1, input.size(0), input.size(1), input.size(2), input.size(3)});
grad_output.resize_({1,
grad_output.size(0),
grad_output.size(1),
grad_output.size(2),
grad_output.size(3)});
}
int64_t input_width = input.size(4);
int64_t input_height = input.size(3);
int64_t input_depth = input.size(2);
int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth +
(dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth;
int64_t output_height = (input_height - 1) * stride_height -
2 * padding_height + (dilation_height * (kernel_height - 1) + 1) +
output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Resize output
grad_input.resize_(
{batch_size, n_input_plane, input_depth, input_height, input_width});
// Create temporary columns
bool need_columns = (kernel_depth != 1 || kernel_height != 1 || kernel_width != 1 ||
stride_depth != 1 || stride_height != 1 || stride_width != 1 ||
dilation_depth != 1 || dilation_height != 1 ||
dilation_width != 1 || padding_depth != 0 ||
padding_height != 0 || padding_width != 0);
Tensor grad_columns = need_columns ? at::empty({n_output_plane * kernel_width * kernel_height * kernel_depth,
input_depth * input_height * input_width}, input.options()) : Tensor();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(), "slow_conv_transpose3d_backward_out_cuda", [&] {
// Helpers
Tensor grad_input_n;
Tensor grad_output_n;
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
        // Matrix multiply per sample:
grad_input_n = grad_input.select(0, elt);
grad_output_n = grad_output.select(0, elt);
if (need_columns) {
// Extract columns:
at::native::vol2col<scalar_t>(
at::cuda::getCurrentCUDAStream(),
grad_output_n.data_ptr<scalar_t>(),
n_output_plane,
output_depth,
output_height,
output_width,
input_depth,
input_height,
input_width,
kernel_depth,
kernel_height,
kernel_width,
padding_depth,
padding_height,
padding_width,
stride_depth,
stride_height,
stride_width,
dilation_depth,
dilation_height,
dilation_width,
grad_columns.data_ptr<scalar_t>());
}
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m = weight.size(0);
int64_t n = input_depth * input_height * input_width;
int64_t k =
weight.size(1) * weight.size(2) * weight.size(3) * weight.size(4);
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
auto gemm_in_ptr = need_columns ? grad_columns.data_ptr<scalar_t>()
: grad_output_n.data_ptr<scalar_t>();
at::cuda::blas::gemm<scalar_t>(
'n',
'n',
n,
m,
k,
static_cast<scalar_t>(1),
gemm_in_ptr,
n,
weight.data_ptr<scalar_t>(),
k,
static_cast<scalar_t>(0),
grad_input_n.data_ptr<scalar_t>(),
n);
}
// Resize output
if (is_batch) {
grad_output.resize_(
{n_output_plane, output_depth, output_height, output_width});
input.resize_(
{n_input_plane, input_depth, input_height, input_width});
grad_input.resize_(
{n_input_plane, input_depth, input_height, input_width});
}
});
}
void slow_conv_transpose3d_acc_grad_parameters_cuda(
const Tensor& input_,
const Tensor& grad_output_,
Tensor& grad_weight,
Tensor& grad_bias,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
int scale_) {
TORCH_CHECK(
kernel_size.size() == 3,
"It is expected kernel_size equals to 3, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 3,
"It is expected dilation equals to 3, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 3,
"It is expected padding equals to 3, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 3,
"It is expected stride equals to 3, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 3,
"It is expected stride equals to 3, but got size ",
output_padding.size());
int64_t kernel_depth = kernel_size[0];
int64_t kernel_height = kernel_size[1];
int64_t kernel_width = kernel_size[2];
int64_t dilation_depth = dilation[0];
int64_t dilation_height = dilation[1];
int64_t dilation_width = dilation[2];
int64_t padding_depth = padding[0];
int64_t padding_height = padding[1];
int64_t padding_width = padding[2];
int64_t stride_depth = stride[0];
int64_t stride_height = stride[1];
int64_t stride_width = stride[2];
int64_t output_padding_depth = output_padding[0];
int64_t output_padding_height = output_padding[1];
int64_t output_padding_width = output_padding[2];
TensorArg input_arg{input_, "input", 1},
grad_output_arg{grad_output_, "grad_output", 2},
grad_weight_arg{grad_weight, "grad_weight", 3},
grad_bias_arg{grad_bias, "grad_bias", 4};
checkAllSameGPU(
"slow_conv_transpose3d_acc_grad_parameters_cuda",
{input_arg,
grad_output_arg,
grad_weight_arg,
grad_bias_arg});
slow_conv_transpose3d_shape_check(
input_,
grad_output_,
grad_weight,
grad_bias,
kernel_depth,
kernel_width,
kernel_height,
stride_depth,
stride_width,
stride_height,
padding_depth,
padding_width,
padding_height,
dilation_depth,
dilation_width,
dilation_height,
output_padding_depth,
output_padding_width,
output_padding_height,
1);
int n_output_plane;
if (grad_weight.defined()) {
n_output_plane = grad_weight.size(1);
} else if (grad_bias.defined()) {
n_output_plane = grad_bias.size(0);
} else {
return;
}
if (grad_weight.defined()) {
TORCH_CHECK(
grad_weight.is_contiguous(), "grad_weight needs to be contiguous");
}
if (grad_bias.defined()) {
TORCH_CHECK(grad_bias.is_contiguous(), "grad_bias needs to be contiguous");
}
Tensor input = input_.contiguous();
Tensor grad_output = grad_output_.contiguous();
bool is_batch = false;
if (input.dim() == 4) {
// Force batch
is_batch = true;
input.resize_(
{1, input.size(0), input.size(1), input.size(2), input.size(3)});
grad_output.resize_({1,
grad_output.size(0),
grad_output.size(1),
grad_output.size(2),
grad_output.size(3)});
}
int64_t input_width = input.size(4);
int64_t input_height = input.size(3);
int64_t input_depth = input.size(2);
int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth +
(dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth;
int64_t output_height = (input_height - 1) * stride_height -
2 * padding_height + (dilation_height * (kernel_height - 1) + 1) +
output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Create temporary columns
bool need_columns = (kernel_depth != 1 || kernel_height != 1 || kernel_width != 1 ||
stride_depth != 1 || stride_height != 1 || stride_width != 1 ||
dilation_depth != 1 || dilation_height != 1 ||
dilation_width != 1 || padding_depth != 0 ||
padding_height != 0 || padding_width != 0);
Tensor columns = need_columns ? at::empty({n_output_plane * kernel_width * kernel_height * kernel_depth,
input_depth * input_height * input_width}, input.options()) : Tensor();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(),
"slow_conv_transpose3d_acc_grad_parameters_cuda",
[&] {
// Helpers
Tensor input_n;
Tensor grad_output_n;
scalar_t scale = static_cast<scalar_t>(scale_);
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
        // Matrix multiply per output:
grad_output_n = grad_output.select(0, elt);
// Do Weight:
if (grad_weight.defined()) {
          // Matrix multiply per output:
input_n = input.select(0, elt);
if (need_columns) {
// Extract columns:
at::native::vol2col<scalar_t>(
at::cuda::getCurrentCUDAStream(),
grad_output_n.data_ptr<scalar_t>(),
n_output_plane,
output_depth,
output_height,
output_width,
input_depth,
input_height,
input_width,
kernel_depth,
kernel_height,
kernel_width,
padding_depth,
padding_height,
padding_width,
stride_depth,
stride_height,
stride_width,
dilation_depth,
dilation_height,
dilation_width,
columns.data_ptr<scalar_t>());
}
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t n = n_output_plane * kernel_width * kernel_height * kernel_depth;
int64_t m = input_n.size(0); // n_input_plane
int64_t k = input_depth * input_height * input_width;
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
auto gemm_in_ptr = need_columns ? columns.data_ptr<scalar_t>() : grad_output_n.data_ptr<scalar_t>();
at::cuda::blas::gemm<scalar_t>(
't',
'n',
n,
m,
k,
scale,
gemm_in_ptr,
k,
input_n.data_ptr<scalar_t>(),
k,
static_cast<scalar_t>(1),
grad_weight.data_ptr<scalar_t>(),
n);
}
}
if (grad_bias.defined()) {
at::sum_out(grad_bias, grad_output, IntArrayRef{0, 2, 3, 4});
}
// Resize
if (is_batch) {
grad_output.resize_(
{n_output_plane, output_depth, output_height, output_width});
input.resize_(
{input.size(1), input_depth, input_height, input_width});
}
});
}
} // namespace
Tensor& slow_conv_transpose3d_out_cuda(const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size, const c10::optional<Tensor>& bias_opt,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
Tensor& output) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt);
const Tensor& bias = *bias_maybe_owned;
slow_conv_transpose3d_out_cuda_template(
output,
input,
weight,
kernel_size,
bias,
stride,
padding,
output_padding,
dilation);
return output;
}
Tensor slow_conv_transpose3d_cuda(
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size, const c10::optional<Tensor>& bias_opt,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt);
const Tensor& bias = *bias_maybe_owned;
Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
slow_conv_transpose3d_out_cuda_template(
output,
input,
weight,
kernel_size,
bias,
stride,
padding,
output_padding,
dilation);
return output;
}
std::tuple<Tensor&, Tensor&, Tensor&> slow_conv_transpose3d_backward_out_cuda(const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
Tensor& grad_input,
Tensor& grad_weight,
Tensor& grad_bias) {
if (grad_input.defined()) {
slow_conv_transpose3d_backward_out_cuda_template(
input,
grad_output,
grad_input,
weight,
kernel_size,
stride,
padding,
output_padding,
dilation);
}
if (grad_weight.defined()) {
grad_weight.resize_(weight.sizes());
grad_weight.zero_();
}
if (grad_bias.defined()) {
grad_bias.resize_({weight.size(1)});
grad_bias.zero_();
}
if (grad_weight.defined() || grad_bias.defined()) {
slow_conv_transpose3d_acc_grad_parameters_cuda(
input,
grad_output,
grad_weight,
grad_bias,
kernel_size,
stride,
padding,
output_padding,
dilation,
1);
}
return std::tuple<Tensor&, Tensor&, Tensor&>(
grad_input, grad_weight, grad_bias);
}
std::tuple<Tensor, Tensor, Tensor> slow_conv_transpose3d_backward_cuda(
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
std::array<bool, 3> output_mask) {
Tensor grad_input;
Tensor grad_weight;
Tensor grad_bias;
if (output_mask[0]) {
grad_input = at::empty({0}, grad_output.options());
} else {
grad_input = Tensor();
}
if (output_mask[1]) {
grad_weight = at::empty({0}, grad_output.options());
} else {
grad_weight = Tensor();
}
if (output_mask[2]) {
grad_bias = at::empty({0}, grad_output.options());
} else {
grad_bias = Tensor();
}
if (grad_input.defined()) {
slow_conv_transpose3d_backward_out_cuda_template(
input,
grad_output,
grad_input,
weight,
kernel_size,
stride,
padding,
output_padding,
dilation);
}
if (grad_weight.defined()) {
grad_weight.resize_(weight.sizes());
grad_weight.zero_();
}
if (grad_bias.defined()) {
grad_bias.resize_({weight.size(1)});
grad_bias.zero_();
}
if (grad_weight.defined() || grad_bias.defined()) {
slow_conv_transpose3d_acc_grad_parameters_cuda(
input,
grad_output,
grad_weight,
grad_bias,
kernel_size,
stride,
padding,
output_padding,
dilation,
1);
}
return std::tuple<Tensor, Tensor, Tensor>(grad_input, grad_weight, grad_bias);
}
} // namespace native
} // namespace at
|
092fec3828f1ff95eec7a065880902cae0615db9.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdlib>
#include <cfloat>
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <rocblas.h>
#include <cusolverDn.h>
#include "struct.h"
#include "constants.h"
void assignObjgrdStructMemory(long long &, fcndata &, double *);
void objgrd(double *, double *, double *, fcndata &);
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[])
{
mxInitGPU();
fcndata fcnObj = {0};
mxGPUArray const *ctlGrpStk;
mxGPUArray const *cenIniMat, *difIniMat, *grpNdeVec, *grpIfoMat, *wgtGrpVec;
mxGPUArray const *vfdElmVtxMat, *vfdElmIfoMat;
mxGPUArray const *tgtCenPosMat, *tgtUniDirMat, *tgtElmVolVec;
mxGPUArray *grdGrpVec;
ctlGrpStk = mxGPUCreateFromMxArray(prhs[ 0]);
cenIniMat = mxGPUCreateFromMxArray(prhs[ 1]);
difIniMat = mxGPUCreateFromMxArray(prhs[ 2]);
grpNdeVec = mxGPUCreateFromMxArray(prhs[ 3]);
grpIfoMat = mxGPUCreateFromMxArray(prhs[ 4]);
wgtGrpVec = mxGPUCreateFromMxArray(prhs[ 5]);
vfdElmVtxMat = mxGPUCreateFromMxArray(prhs[ 6]);
vfdElmIfoMat = mxGPUCreateFromMxArray(prhs[ 7]);
tgtCenPosMat = mxGPUCreateFromMxArray(prhs[ 8]);
tgtUniDirMat = mxGPUCreateFromMxArray(prhs[ 9]);
tgtElmVolVec = mxGPUCreateFromMxArray(prhs[10]);
fcnObj.vfd.cenKnlType = mxGetScalar(prhs[11]);
fcnObj.vfd.cenKnlWidth = mxGetScalar(prhs[12]);
fcnObj.vfd.dirKnlType = mxGetScalar(prhs[13]);
fcnObj.vfd.dirKnlWidth = mxGetScalar(prhs[14]);
fcnObj.prm.knlOrder = mxGetScalar(prhs[15]);
fcnObj.prm.knlWidth = mxGetScalar(prhs[16]);
fcnObj.prm.knlEps = mxGetScalar(prhs[17]);
fcnObj.prm.timeStp = mxGetScalar(prhs[18]);
fcnObj.prm.timeNum = mxGetScalar(prhs[19]);
fcnObj.prm.tgtWgt = mxGetScalar(prhs[20]);
fcnObj.prm.rgdGrpNum = mxGPUGetNumberOfElements(wgtGrpVec);
mwSize const ndim = 1;
mwSize const grdDims[1] = {(mwSize) fcnObj.prm.rgdGrpNum * RGDDOF * (fcnObj.prm.timeNum - 1)};
grdGrpVec = mxGPUCreateGPUArray(ndim, grdDims, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
// ---
double *d_ctlGrpStk = (double *) mxGPUGetDataReadOnly(ctlGrpStk);
fcnObj.prm.d_cenIniMat = (double *) mxGPUGetDataReadOnly(cenIniMat);
fcnObj.prm.d_difIniMat = (double *) mxGPUGetDataReadOnly(difIniMat);
fcnObj.prm.d_grpNdeVec = (int *) mxGPUGetDataReadOnly(grpNdeVec);
fcnObj.prm.d_grpIfoMat = (int *) mxGPUGetDataReadOnly(grpIfoMat);
fcnObj.prm.d_wgtGrpVec = (double *) mxGPUGetDataReadOnly(wgtGrpVec);
fcnObj.elm.d_vfdElmVtxMat = (int *) mxGPUGetDataReadOnly(vfdElmVtxMat);
fcnObj.elm.d_vfdElmIfoMat = (int *) mxGPUGetDataReadOnly(vfdElmIfoMat);
fcnObj.tgt.d_cenPosMat = (double *) mxGPUGetDataReadOnly(tgtCenPosMat);
fcnObj.tgt.d_uniDirMat = (double *) mxGPUGetDataReadOnly(tgtUniDirMat);
fcnObj.tgt.d_elmVolVec = (double *) mxGPUGetDataReadOnly(tgtElmVolVec);
double *d_grdGrpVec = (double *) mxGPUGetData(grdGrpVec);
mwSize const *vfdElmDims = mxGPUGetDimensions(vfdElmVtxMat);
mwSize const *tgtElmDims = mxGPUGetDimensions(tgtCenPosMat);
fcnObj.prm.rgdNdeNum = mxGPUGetNumberOfElements(grpNdeVec);
fcnObj.prm.vfdNdeNum = fcnObj.prm.rgdNdeNum;
fcnObj.prm.vfdElmNum = vfdElmDims[0];
fcnObj.tgt.tgtElmNum = tgtElmDims[0];
// ---
int rgdGrpNum = fcnObj.prm.rgdGrpNum;
int rgdNdeNum = fcnObj.prm.rgdNdeNum;
int vfdElmNum = fcnObj.prm.vfdElmNum;
int timeNum = fcnObj.prm.timeNum;
long long gpuAloDblMemCnt = rgdGrpNum * (timeNum - 1)
+ rgdNdeNum * ( rgdNdeNum * 2 + DIMNUM * timeNum + DIMNUM * (timeNum - 1) * 2
+ RGDDOF * (timeNum - 1) + RGDDOF * timeNum)
+ vfdElmNum * (DIMNUM * 2 + 2) + fcnObj.tgt.tgtElmNum
+ rgdNdeNum * (DIMNUM * 2 + RGDDOF * (timeNum - 1) + RGDDOF * 5)
+ vfdElmNum * DIMNUM * 2
+ SUMBLKDIM;
double *gpuDblSpace;
hipError_t error = hipMalloc((void **) &gpuDblSpace, sizeof(double) * gpuAloDblMemCnt);
if ( error != hipSuccess )
mexErrMsgIdAndTxt("objgrd2D:hipMalloc", "Fail to allocate device memory.");
hipMalloc((void **) &(fcnObj.d_status), sizeof(int));
long long gpuAsgDblMemCnt;
assignObjgrdStructMemory(gpuAsgDblMemCnt, fcnObj, gpuDblSpace);
if ( gpuAsgDblMemCnt != gpuAloDblMemCnt )
{
mexErrMsgIdAndTxt("objgrd2D:memAssign",
"Assigned device double memory (%lld) mismatches the allocated memory (%lld).",
gpuAsgDblMemCnt, gpuAloDblMemCnt);
}
// ---
hipblasCreate(&(fcnObj.blasHdl));
hipsolverDnCreate(&(fcnObj.solvHdl));
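  // Query the workspace size the dense solver needs for a Cholesky factorization (potrf)
  // of the rgdNdeNum-by-rgdNdeNum rigid kernel matrix; the factorization itself is
  // presumably performed later inside objgrd().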
hipsolverDnDpotrf_bufferSize(fcnObj.solvHdl, HIPBLAS_FILL_MODE_LOWER,
fcnObj.prm.rgdNdeNum, fcnObj.d_rgdKnlMat,
fcnObj.prm.rgdNdeNum, &(fcnObj.h_Lwork));
hipMalloc((void **) &(fcnObj.d_workspace), sizeof(double) * fcnObj.h_Lwork);
// ---
double h_objVal;
objgrd(&h_objVal, d_grdGrpVec, d_ctlGrpStk, fcnObj);
plhs[0] = mxCreateDoubleScalar(h_objVal);
plhs[1] = mxGPUCreateMxArrayOnGPU(grdGrpVec);
// ---
//
mxGPUDestroyGPUArray(ctlGrpStk);
mxGPUDestroyGPUArray(cenIniMat);
mxGPUDestroyGPUArray(difIniMat);
mxGPUDestroyGPUArray(grpNdeVec);
mxGPUDestroyGPUArray(grpIfoMat);
mxGPUDestroyGPUArray(wgtGrpVec);
mxGPUDestroyGPUArray(vfdElmVtxMat);
mxGPUDestroyGPUArray(vfdElmIfoMat);
mxGPUDestroyGPUArray(tgtCenPosMat);
mxGPUDestroyGPUArray(tgtUniDirMat);
mxGPUDestroyGPUArray(tgtElmVolVec);
mxGPUDestroyGPUArray(grdGrpVec);
mxFree((void *) vfdElmDims);
mxFree((void *) tgtElmDims);
hipFree(gpuDblSpace);
hipFree(fcnObj.d_status);
hipFree(fcnObj.d_workspace);
hipblasDestroy(fcnObj.blasHdl);
hipsolverDnDestroy(fcnObj.solvHdl);
return;
}
| 092fec3828f1ff95eec7a065880902cae0615db9.cu | #include <cstdio>
#include <cstdlib>
#include <cfloat>
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cublas_v2.h>
#include <cusolverDn.h>
#include "struct.h"
#include "constants.h"
void assignObjgrdStructMemory(long long &, fcndata &, double *);
void objgrd(double *, double *, double *, fcndata &);
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[])
{
mxInitGPU();
fcndata fcnObj = {0};
mxGPUArray const *ctlGrpStk;
mxGPUArray const *cenIniMat, *difIniMat, *grpNdeVec, *grpIfoMat, *wgtGrpVec;
mxGPUArray const *vfdElmVtxMat, *vfdElmIfoMat;
mxGPUArray const *tgtCenPosMat, *tgtUniDirMat, *tgtElmVolVec;
mxGPUArray *grdGrpVec;
ctlGrpStk = mxGPUCreateFromMxArray(prhs[ 0]);
cenIniMat = mxGPUCreateFromMxArray(prhs[ 1]);
difIniMat = mxGPUCreateFromMxArray(prhs[ 2]);
grpNdeVec = mxGPUCreateFromMxArray(prhs[ 3]);
grpIfoMat = mxGPUCreateFromMxArray(prhs[ 4]);
wgtGrpVec = mxGPUCreateFromMxArray(prhs[ 5]);
vfdElmVtxMat = mxGPUCreateFromMxArray(prhs[ 6]);
vfdElmIfoMat = mxGPUCreateFromMxArray(prhs[ 7]);
tgtCenPosMat = mxGPUCreateFromMxArray(prhs[ 8]);
tgtUniDirMat = mxGPUCreateFromMxArray(prhs[ 9]);
tgtElmVolVec = mxGPUCreateFromMxArray(prhs[10]);
fcnObj.vfd.cenKnlType = mxGetScalar(prhs[11]);
fcnObj.vfd.cenKnlWidth = mxGetScalar(prhs[12]);
fcnObj.vfd.dirKnlType = mxGetScalar(prhs[13]);
fcnObj.vfd.dirKnlWidth = mxGetScalar(prhs[14]);
fcnObj.prm.knlOrder = mxGetScalar(prhs[15]);
fcnObj.prm.knlWidth = mxGetScalar(prhs[16]);
fcnObj.prm.knlEps = mxGetScalar(prhs[17]);
fcnObj.prm.timeStp = mxGetScalar(prhs[18]);
fcnObj.prm.timeNum = mxGetScalar(prhs[19]);
fcnObj.prm.tgtWgt = mxGetScalar(prhs[20]);
fcnObj.prm.rgdGrpNum = mxGPUGetNumberOfElements(wgtGrpVec);
mwSize const ndim = 1;
mwSize const grdDims[1] = {(mwSize) fcnObj.prm.rgdGrpNum * RGDDOF * (fcnObj.prm.timeNum - 1)};
grdGrpVec = mxGPUCreateGPUArray(ndim, grdDims, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
// ---
double *d_ctlGrpStk = (double *) mxGPUGetDataReadOnly(ctlGrpStk);
fcnObj.prm.d_cenIniMat = (double *) mxGPUGetDataReadOnly(cenIniMat);
fcnObj.prm.d_difIniMat = (double *) mxGPUGetDataReadOnly(difIniMat);
fcnObj.prm.d_grpNdeVec = (int *) mxGPUGetDataReadOnly(grpNdeVec);
fcnObj.prm.d_grpIfoMat = (int *) mxGPUGetDataReadOnly(grpIfoMat);
fcnObj.prm.d_wgtGrpVec = (double *) mxGPUGetDataReadOnly(wgtGrpVec);
fcnObj.elm.d_vfdElmVtxMat = (int *) mxGPUGetDataReadOnly(vfdElmVtxMat);
fcnObj.elm.d_vfdElmIfoMat = (int *) mxGPUGetDataReadOnly(vfdElmIfoMat);
fcnObj.tgt.d_cenPosMat = (double *) mxGPUGetDataReadOnly(tgtCenPosMat);
fcnObj.tgt.d_uniDirMat = (double *) mxGPUGetDataReadOnly(tgtUniDirMat);
fcnObj.tgt.d_elmVolVec = (double *) mxGPUGetDataReadOnly(tgtElmVolVec);
double *d_grdGrpVec = (double *) mxGPUGetData(grdGrpVec);
mwSize const *vfdElmDims = mxGPUGetDimensions(vfdElmVtxMat);
mwSize const *tgtElmDims = mxGPUGetDimensions(tgtCenPosMat);
fcnObj.prm.rgdNdeNum = mxGPUGetNumberOfElements(grpNdeVec);
fcnObj.prm.vfdNdeNum = fcnObj.prm.rgdNdeNum;
fcnObj.prm.vfdElmNum = vfdElmDims[0];
fcnObj.tgt.tgtElmNum = tgtElmDims[0];
// ---
int rgdGrpNum = fcnObj.prm.rgdGrpNum;
int rgdNdeNum = fcnObj.prm.rgdNdeNum;
int vfdElmNum = fcnObj.prm.vfdElmNum;
int timeNum = fcnObj.prm.timeNum;
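  // Total number of doubles to reserve in a single pooled device allocation;
  // assignObjgrdStructMemory must account for exactly this many (checked below).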
long long gpuAloDblMemCnt = rgdGrpNum * (timeNum - 1)
+ rgdNdeNum * ( rgdNdeNum * 2 + DIMNUM * timeNum + DIMNUM * (timeNum - 1) * 2
+ RGDDOF * (timeNum - 1) + RGDDOF * timeNum)
+ vfdElmNum * (DIMNUM * 2 + 2) + fcnObj.tgt.tgtElmNum
+ rgdNdeNum * (DIMNUM * 2 + RGDDOF * (timeNum - 1) + RGDDOF * 5)
+ vfdElmNum * DIMNUM * 2
+ SUMBLKDIM;
double *gpuDblSpace;
cudaError_t error = cudaMalloc((void **) &gpuDblSpace, sizeof(double) * gpuAloDblMemCnt);
if ( error != cudaSuccess )
mexErrMsgIdAndTxt("objgrd2D:cudaMalloc", "Fail to allocate device memory.");
cudaMalloc((void **) &(fcnObj.d_status), sizeof(int));
long long gpuAsgDblMemCnt;
assignObjgrdStructMemory(gpuAsgDblMemCnt, fcnObj, gpuDblSpace);
if ( gpuAsgDblMemCnt != gpuAloDblMemCnt )
{
mexErrMsgIdAndTxt("objgrd2D:memAssign",
"Assigned device double memory (%lld) mismatches the allocated memory (%lld).",
gpuAsgDblMemCnt, gpuAloDblMemCnt);
}
// ---
cublasCreate(&(fcnObj.blasHdl));
cusolverDnCreate(&(fcnObj.solvHdl));
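  // Query the workspace size required by the Cholesky factorization (potrf) of
  // d_rgdKnlMat (rgdNdeNum x rgdNdeNum), then allocate that workspace below.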
cusolverDnDpotrf_bufferSize(fcnObj.solvHdl, CUBLAS_FILL_MODE_LOWER,
fcnObj.prm.rgdNdeNum, fcnObj.d_rgdKnlMat,
fcnObj.prm.rgdNdeNum, &(fcnObj.h_Lwork));
cudaMalloc((void **) &(fcnObj.d_workspace), sizeof(double) * fcnObj.h_Lwork);
// ---
double h_objVal;
objgrd(&h_objVal, d_grdGrpVec, d_ctlGrpStk, fcnObj);
plhs[0] = mxCreateDoubleScalar(h_objVal);
plhs[1] = mxGPUCreateMxArrayOnGPU(grdGrpVec);
// ---
//
mxGPUDestroyGPUArray(ctlGrpStk);
mxGPUDestroyGPUArray(cenIniMat);
mxGPUDestroyGPUArray(difIniMat);
mxGPUDestroyGPUArray(grpNdeVec);
mxGPUDestroyGPUArray(grpIfoMat);
mxGPUDestroyGPUArray(wgtGrpVec);
mxGPUDestroyGPUArray(vfdElmVtxMat);
mxGPUDestroyGPUArray(vfdElmIfoMat);
mxGPUDestroyGPUArray(tgtCenPosMat);
mxGPUDestroyGPUArray(tgtUniDirMat);
mxGPUDestroyGPUArray(tgtElmVolVec);
mxGPUDestroyGPUArray(grdGrpVec);
mxFree((void *) vfdElmDims);
mxFree((void *) tgtElmDims);
cudaFree(gpuDblSpace);
cudaFree(fcnObj.d_status);
cudaFree(fcnObj.d_workspace);
cublasDestroy(fcnObj.blasHdl);
cusolverDnDestroy(fcnObj.solvHdl);
return;
}
|
2be5250703ab7bdc714941c0447b4c638db51eb6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include "decimal.h"
#include "colordefs.h"
#include "text.h"
#include <iostream>
#define bilinearInterpolate(c00,c01,c10,c11,w1,w2,w3,w4) (w1*c00+w2*c01+w3*c10+w4*c11)
#define CUDA_THREADS_PER_BLOCK 1024 // Optimal value (calculated)
#if 0==1
#define __shared__
#define __global__
#define __device__
#define __host__
#endif
void cudaLog(const char *str)
{
std::cout<<str<<std::endl;
}
__global__ void device_bilinearResize(int origImgWidth,int origImgHeight,int newImgWidth,decimal_t origRatio,int blocksPerLine,uint32_t *origImgData,uint32_t *newImgData)
{
// Each block is responsible for 1024 px of a single row. Do not use shared memory, as we cannot predict the distances between two lines we need in the source image.
int threadId=threadIdx.x;
int blockId=blockIdx.x;
int newY=(blockId-(blockId%blocksPerLine))/blocksPerLine;
int blockIdInLine=blockId-newY*blocksPerLine;
int newX=blockIdInLine*CUDA_THREADS_PER_BLOCK+threadId;
if(newX>=newImgWidth)
return;
// y-specific:
decimal_t oldYRF;
int oldY;
decimal_t oldYF;
decimal_t yDiff;
decimal_t yDiffR;
oldYRF=origRatio*(decimal_t)newY;
oldY=(int)floor(oldYRF);
oldYF=(decimal_t)oldY;
yDiff=oldYRF-oldYF;
yDiffR=1.0-yDiff;
// x-specific:
decimal_t oldXRF=origRatio*(decimal_t)newX;
int oldX=(int)floor(oldXRF);
decimal_t oldXF=(decimal_t)oldX;
decimal_t xDiff=oldXRF-oldXF;
decimal_t xDiffR=1.0-xDiff;
// Each thread is responsible for a single pixel in the new image
uint32_t c00,c01,c10,c11;
int xLim=origImgWidth-1;
int yLim=origImgHeight-1;
const int currentLineOffset= oldY*origImgWidth;
const int lineBelowOffset=(oldY==yLim?oldY:oldY+1)*origImgWidth;
int oldXPlusOne=(oldX==xLim?oldX:oldX+1);
c00=origImgData[currentLineOffset+oldX];
c10=origImgData[currentLineOffset+oldXPlusOne];
c01=origImgData[lineBelowOffset+oldX];
c11=origImgData[lineBelowOffset+oldXPlusOne];
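	// Bilinear weights: each of the four neighbouring source pixels contributes in
	// proportion to the area of the opposite sub-rectangle (xDiffR/yDiffR are 1 - fractional offsets).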
decimal_t w1=xDiffR*yDiffR;
decimal_t w2=xDiff*yDiffR;
decimal_t w3=xDiffR*yDiff;
decimal_t w4=xDiff*yDiff;
uint32_t newAlpha=bilinearInterpolate(getAlpha(c00),getAlpha(c01),getAlpha(c10),getAlpha(c11),w1,w2,w3,w4);
uint32_t newRed=bilinearInterpolate(getRed(c00),getRed(c01),getRed(c10),getRed(c11),w1,w2,w3,w4);
uint32_t newGreen=bilinearInterpolate(getGreen(c00),getGreen(c01),getGreen(c10),getGreen(c11),w1,w2,w3,w4);
uint32_t newBlue=bilinearInterpolate(getBlue(c00),getBlue(c01),getBlue(c10),getBlue(c11),w1,w2,w3,w4);
newImgData[newY*newImgWidth+newX]=getColor(newAlpha,newRed,newGreen,newBlue);
}
__global__ void device_nearestNeighborResize(int origImgWidth,int newImgWidth,decimal_t origRatio,int blocksPerLine,uint32_t *origImgData,uint32_t *newImgData)
{
// Each block is responsible for 1024 px of a single row. Do not use shared memory, as we cannot predict the distances between two lines we need in the source image.
int threadId=threadIdx.x;
int blockId=blockIdx.x;
int newY=(blockId-(blockId%blocksPerLine))/blocksPerLine;
int blockIdInLine=blockId-newY*blocksPerLine;
int newX=blockIdInLine*CUDA_THREADS_PER_BLOCK+threadId;
if(newX>=newImgWidth)
return;
int oldX=(int)round(origRatio*((decimal_t)newX));
int oldY=(int)round(origRatio*((decimal_t)newY));
newImgData[newY*newImgWidth+newX]=origImgData[oldY*origImgWidth+oldX];
}
uint32_t *cudaNearestNeighborResize(uint32_t *origImageData,int origImgWidth,int origImgHeight,int newImgWidth,int newImgHeight,decimal_t resizeRatio)
{
int blocksPerLine=ceil(floatDiv(newImgWidth,CUDA_THREADS_PER_BLOCK));
int totalNumBlocks=blocksPerLine*newImgHeight; // Each block is responsible for a single line
if(totalNumBlocks>65535)
return 0;
uint32_t *device_origImageData_in;
uint32_t *device_newImageData_out;
size_t origImageSize=origImgWidth*origImgHeight*sizeof(uint32_t);
size_t newImageSize=newImgWidth*newImgHeight*sizeof(uint32_t);
hipMalloc(&device_origImageData_in,origImageSize);
hipMalloc(&device_newImageData_out,newImageSize);
hipMemcpy(device_origImageData_in,origImageData,origImageSize,hipMemcpyHostToDevice);
decimal_t origRatio=decimalDiv(1.0,resizeRatio);
hipLaunchKernelGGL(( device_nearestNeighborResize), dim3(totalNumBlocks),dim3(CUDA_THREADS_PER_BLOCK), 0, 0, origImgWidth,newImgWidth,origRatio,blocksPerLine,device_origImageData_in,device_newImageData_out);
uint32_t *newImageData=(uint32_t*)malloc(newImageSize);
hipMemcpy(newImageData,device_newImageData_out,newImageSize,hipMemcpyDeviceToHost);
hipFree(device_newImageData_out);
hipFree(device_origImageData_in);
return newImageData;
}
uint32_t *cudaBilinearResize(uint32_t *origImageData,int origImgWidth,int origImgHeight,int newImgWidth,int newImgHeight,decimal_t resizeRatio)
{
int blocksPerLine=ceil(floatDiv(newImgWidth,CUDA_THREADS_PER_BLOCK));
int totalNumBlocks=blocksPerLine*newImgHeight; // Each block is responsible for a single line
if(totalNumBlocks>65535)
return 0;
uint32_t *device_origImageData_in;
uint32_t *device_newImageData_out;
size_t origImageSize=origImgWidth*origImgHeight*sizeof(uint32_t);
size_t newImageSize=newImgWidth*newImgHeight*sizeof(uint32_t);
hipMalloc(&device_origImageData_in,origImageSize);
hipMalloc(&device_newImageData_out,newImageSize);
hipMemcpy(device_origImageData_in,origImageData,origImageSize,hipMemcpyHostToDevice);
decimal_t origRatio=decimalDiv(1.0,resizeRatio);
hipLaunchKernelGGL(( device_bilinearResize), dim3(totalNumBlocks),dim3(CUDA_THREADS_PER_BLOCK), 0, 0, origImgWidth,origImgHeight,newImgWidth,origRatio,blocksPerLine,device_origImageData_in,device_newImageData_out);
uint32_t *newImageData=(uint32_t*)malloc(newImageSize);
hipMemcpy(newImageData,device_newImageData_out,newImageSize,hipMemcpyDeviceToHost);
hipFree(device_newImageData_out);
hipFree(device_origImageData_in);
return newImageData;
}
| 2be5250703ab7bdc714941c0447b4c638db51eb6.cu | #include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include "decimal.h"
#include "colordefs.h"
#include "text.h"
#include <iostream>
#define bilinearInterpolate(c00,c01,c10,c11,w1,w2,w3,w4) (w1*c00+w2*c01+w3*c10+w4*c11)
#define CUDA_THREADS_PER_BLOCK 1024 // Optimal value (calculated)
#if 0==1
#define __shared__
#define __global__
#define __device__
#define __host__
#endif
void cudaLog(const char *str)
{
std::cout<<str<<std::endl;
}
__global__ void device_bilinearResize(int origImgWidth,int origImgHeight,int newImgWidth,decimal_t origRatio,int blocksPerLine,uint32_t *origImgData,uint32_t *newImgData)
{
// Each block is responsible for 1024 px of a single row. Do not use shared memory, as we cannot predict the distances between two lines we need in the source image.
int threadId=threadIdx.x;
int blockId=blockIdx.x;
int newY=(blockId-(blockId%blocksPerLine))/blocksPerLine;
int blockIdInLine=blockId-newY*blocksPerLine;
int newX=blockIdInLine*CUDA_THREADS_PER_BLOCK+threadId;
if(newX>=newImgWidth)
return;
// y-specific:
decimal_t oldYRF;
int oldY;
decimal_t oldYF;
decimal_t yDiff;
decimal_t yDiffR;
oldYRF=origRatio*(decimal_t)newY;
oldY=(int)floor(oldYRF);
oldYF=(decimal_t)oldY;
yDiff=oldYRF-oldYF;
yDiffR=1.0-yDiff;
// x-specific:
decimal_t oldXRF=origRatio*(decimal_t)newX;
int oldX=(int)floor(oldXRF);
decimal_t oldXF=(decimal_t)oldX;
decimal_t xDiff=oldXRF-oldXF;
decimal_t xDiffR=1.0-xDiff;
// Each thread is responsible for a single pixel in the new image
uint32_t c00,c01,c10,c11;
int xLim=origImgWidth-1;
int yLim=origImgHeight-1;
const int currentLineOffset= oldY*origImgWidth;
const int lineBelowOffset=(oldY==yLim?oldY:oldY+1)*origImgWidth;
int oldXPlusOne=(oldX==xLim?oldX:oldX+1);
c00=origImgData[currentLineOffset+oldX];
c10=origImgData[currentLineOffset+oldXPlusOne];
c01=origImgData[lineBelowOffset+oldX];
c11=origImgData[lineBelowOffset+oldXPlusOne];
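	// Bilinear weights: each of the four neighbouring source pixels contributes in
	// proportion to the area of the opposite sub-rectangle (xDiffR/yDiffR are 1 - fractional offsets).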
decimal_t w1=xDiffR*yDiffR;
decimal_t w2=xDiff*yDiffR;
decimal_t w3=xDiffR*yDiff;
decimal_t w4=xDiff*yDiff;
uint32_t newAlpha=bilinearInterpolate(getAlpha(c00),getAlpha(c01),getAlpha(c10),getAlpha(c11),w1,w2,w3,w4);
uint32_t newRed=bilinearInterpolate(getRed(c00),getRed(c01),getRed(c10),getRed(c11),w1,w2,w3,w4);
uint32_t newGreen=bilinearInterpolate(getGreen(c00),getGreen(c01),getGreen(c10),getGreen(c11),w1,w2,w3,w4);
uint32_t newBlue=bilinearInterpolate(getBlue(c00),getBlue(c01),getBlue(c10),getBlue(c11),w1,w2,w3,w4);
newImgData[newY*newImgWidth+newX]=getColor(newAlpha,newRed,newGreen,newBlue);
}
__global__ void device_nearestNeighborResize(int origImgWidth,int newImgWidth,decimal_t origRatio,int blocksPerLine,uint32_t *origImgData,uint32_t *newImgData)
{
// Each block is responsible for 1024 px of a single row. Do not use shared memory, as we cannot predict the distances between two lines we need in the source image.
int threadId=threadIdx.x;
int blockId=blockIdx.x;
int newY=(blockId-(blockId%blocksPerLine))/blocksPerLine;
int blockIdInLine=blockId-newY*blocksPerLine;
int newX=blockIdInLine*CUDA_THREADS_PER_BLOCK+threadId;
if(newX>=newImgWidth)
return;
int oldX=(int)round(origRatio*((decimal_t)newX));
int oldY=(int)round(origRatio*((decimal_t)newY));
newImgData[newY*newImgWidth+newX]=origImgData[oldY*origImgWidth+oldX];
}
uint32_t *cudaNearestNeighborResize(uint32_t *origImageData,int origImgWidth,int origImgHeight,int newImgWidth,int newImgHeight,decimal_t resizeRatio)
{
int blocksPerLine=ceil(floatDiv(newImgWidth,CUDA_THREADS_PER_BLOCK));
int totalNumBlocks=blocksPerLine*newImgHeight; // Each block is responsible for a single line
if(totalNumBlocks>65535)
return 0;
uint32_t *device_origImageData_in;
uint32_t *device_newImageData_out;
size_t origImageSize=origImgWidth*origImgHeight*sizeof(uint32_t);
size_t newImageSize=newImgWidth*newImgHeight*sizeof(uint32_t);
cudaMalloc(&device_origImageData_in,origImageSize);
cudaMalloc(&device_newImageData_out,newImageSize);
cudaMemcpy(device_origImageData_in,origImageData,origImageSize,cudaMemcpyHostToDevice);
decimal_t origRatio=decimalDiv(1.0,resizeRatio);
device_nearestNeighborResize<<<totalNumBlocks,CUDA_THREADS_PER_BLOCK>>>(origImgWidth,newImgWidth,origRatio,blocksPerLine,device_origImageData_in,device_newImageData_out);
uint32_t *newImageData=(uint32_t*)malloc(newImageSize);
cudaMemcpy(newImageData,device_newImageData_out,newImageSize,cudaMemcpyDeviceToHost);
cudaFree(device_newImageData_out);
cudaFree(device_origImageData_in);
return newImageData;
}
uint32_t *cudaBilinearResize(uint32_t *origImageData,int origImgWidth,int origImgHeight,int newImgWidth,int newImgHeight,decimal_t resizeRatio)
{
int blocksPerLine=ceil(floatDiv(newImgWidth,CUDA_THREADS_PER_BLOCK));
int totalNumBlocks=blocksPerLine*newImgHeight; // Each block is responsible for a single line
if(totalNumBlocks>65535)
return 0;
uint32_t *device_origImageData_in;
uint32_t *device_newImageData_out;
size_t origImageSize=origImgWidth*origImgHeight*sizeof(uint32_t);
size_t newImageSize=newImgWidth*newImgHeight*sizeof(uint32_t);
cudaMalloc(&device_origImageData_in,origImageSize);
cudaMalloc(&device_newImageData_out,newImageSize);
cudaMemcpy(device_origImageData_in,origImageData,origImageSize,cudaMemcpyHostToDevice);
decimal_t origRatio=decimalDiv(1.0,resizeRatio);
device_bilinearResize<<<totalNumBlocks,CUDA_THREADS_PER_BLOCK>>>(origImgWidth,origImgHeight,newImgWidth,origRatio,blocksPerLine,device_origImageData_in,device_newImageData_out);
uint32_t *newImageData=(uint32_t*)malloc(newImageSize);
cudaMemcpy(newImageData,device_newImageData_out,newImageSize,cudaMemcpyDeviceToHost);
cudaFree(device_newImageData_out);
cudaFree(device_origImageData_in);
return newImageData;
}
|
45fbd7ab54dfca271d9dc1614bb52511042c52e5.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
//#include <cutil.h>
// Includes
//#include <stdio.h>
// includes, project
//#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 1024
//#define ITERATIONS 40
//#include "../include/ContAcq-IntClk.h"
// Variables
double* h_A;
double* h_B;
double* h_C;
double* d_A;
double* d_B;
double* d_C;
//bool noprompt = false;
//unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(double*, int);
//void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const double* A, const double* B, double* C, unsigned long long N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
double Value1;
 double Value2 = 0.0; // initialize to avoid reading an indeterminate value in the loop below
double Value3;
double Value;
double I1=A[i];
double I2=B[i];
#pragma unroll 1000
 // Excessive multiplication operations
for(unsigned long long k=0; k<N;k++) {
Value1=I1*I2;
Value3=I1*I2;
Value1*=Value2;
Value1*=Value2;
Value2=Value3*Value1;
Value1=Value2*Value3;
// Value1=I1*I2;
// Value3=Value1*I1;
// Value2=Value3*Value1;
// Value3*=Value2;
// Value1*=Value2;
// Value3*=Value1;
}
__syncthreads();
Value=Value1;
C[i]=Value*Value2;
__syncthreads();
}
int main(int argc, char** argv)
{
unsigned long long iterations;
unsigned blocks;
if (argc != 3){
fprintf(stderr,"usage: %s #iterations #cores\n",argv[0]);
exit(1);
}
else {
iterations = atoll(argv[1]);
blocks = atoi(argv[2]);
}
printf("Power Microbenchmarks with iterations %llu\n",iterations);
int N = THREADS_PER_BLOCK*blocks;
size_t size = N * sizeof(double);
// Allocate input vectors h_A and h_B in host memory
h_A = (double*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (double*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (double*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( hipMalloc((void**)&d_A, size) );
checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
printf("after\n");
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
hipEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(blocks,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
checkCudaErrors(hipEventRecord(start));
hipLaunchKernelGGL(( PowerKernal2), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, iterations);
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop));
printf("execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
hipDeviceSynchronize();
/*CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( hipDeviceSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif*/
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
hipFree(d_A);
if (d_B)
hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random double entries.
void RandomInit(double* data, int n)
{
for (int i = 0; i < n; ++i){
 data[i] = rand() / (double)RAND_MAX; // cast avoids integer division, which would round almost every value to 0
}
}
| 45fbd7ab54dfca271d9dc1614bb52511042c52e5.cu | #include <stdio.h>
#include <stdlib.h>
//#include <cutil.h>
// Includes
//#include <stdio.h>
// includes, project
//#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 1024
//#define ITERATIONS 40
//#include "../include/ContAcq-IntClk.h"
// Variables
double* h_A;
double* h_B;
double* h_C;
double* d_A;
double* d_B;
double* d_C;
//bool noprompt = false;
//unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(double*, int);
//void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const double* A, const double* B, double* C, unsigned long long N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
double Value1;
 double Value2 = 0.0; // initialize to avoid reading an indeterminate value in the loop below
double Value3;
double Value;
double I1=A[i];
double I2=B[i];
#pragma unroll 1000
 // Excessive multiplication operations
for(unsigned long long k=0; k<N;k++) {
Value1=I1*I2;
Value3=I1*I2;
Value1*=Value2;
Value1*=Value2;
Value2=Value3*Value1;
Value1=Value2*Value3;
// Value1=I1*I2;
// Value3=Value1*I1;
// Value2=Value3*Value1;
// Value3*=Value2;
// Value1*=Value2;
// Value3*=Value1;
}
__syncthreads();
Value=Value1;
C[i]=Value*Value2;
__syncthreads();
}
int main(int argc, char** argv)
{
unsigned long long iterations;
unsigned blocks;
if (argc != 3){
fprintf(stderr,"usage: %s #iterations #cores\n",argv[0]);
exit(1);
}
else {
iterations = atoll(argv[1]);
blocks = atoi(argv[2]);
}
printf("Power Microbenchmarks with iterations %llu\n",iterations);
int N = THREADS_PER_BLOCK*blocks;
size_t size = N * sizeof(double);
// Allocate input vectors h_A and h_B in host memory
h_A = (double*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (double*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (double*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
printf("after\n");
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
cudaEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(blocks,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
checkCudaErrors(cudaEventRecord(start));
PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, iterations);
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop));
printf("execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
cudaThreadSynchronize();
/*CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( cudaThreadSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( cudaDeviceSynchronize() );
#endif*/
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
cudaFree(d_A);
if (d_B)
cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random double entries.
void RandomInit(double* data, int n)
{
for (int i = 0; i < n; ++i){
 data[i] = rand() / (double)RAND_MAX; // cast avoids integer division, which would round almost every value to 0
}
}
|
f1d631e124f085984115c31a83a8cd4636245030.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "corrector.cuh"
#include <math.h>
#include <stdio.h>
__global__ void corrector(float* uStar, float* vStar, float* uCorr, float* vCorr, float* P, int N, float dx, float dy, float dt){
int i = threadIdx.x, j = threadIdx.y, bx = blockIdx.x, by = blockIdx.y, dim = blockDim.x;
int col = bx*dim + i;
int row = by*dim + j;
int index = row*(N+1) + col;
int indexV = row*N + col;
int indexP = row*(N+1) + col;
if (index >= N*(N+1))
return;
bool boundCheckU = ((index > N) && (index < N*(N+1)-N-1) && (index%(N+1) !=0) && (index%(N+1)!=N));
bool boundCheckV = ((indexV > N-1) && (indexV < N*(N+1)-N) && (indexV%N != 0) && (indexV%N != N-1));
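	// Correction step: subtract dt times the one-sided pressure difference (a discrete
	// pressure gradient) from the intermediate (starred) velocities.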
	if(boundCheckU){
		uCorr[index] = uStar[index] - dt/dx*(P[indexP+N+1]-P[indexP]);
	}
	if(boundCheckV){
		vCorr[indexV] = vStar[indexV] - dt/dy*(P[indexP+1]-P[indexP]);
	}
	// No __syncthreads() needed: the kernel uses no shared memory, and calling it under
	// divergent control flow (some threads return early above) is undefined behaviour.
}
| f1d631e124f085984115c31a83a8cd4636245030.cu | #include "corrector.cuh"
#include <math.h>
#include <stdio.h>
__global__ void corrector(float* uStar, float* vStar, float* uCorr, float* vCorr, float* P, int N, float dx, float dy, float dt){
int i = threadIdx.x, j = threadIdx.y, bx = blockIdx.x, by = blockIdx.y, dim = blockDim.x;
int col = bx*dim + i;
int row = by*dim + j;
int index = row*(N+1) + col;
int indexV = row*N + col;
int indexP = row*(N+1) + col;
if (index >= N*(N+1))
return;
bool boundCheckU = ((index > N) && (index < N*(N+1)-N-1) && (index%(N+1) !=0) && (index%(N+1)!=N));
bool boundCheckV = ((indexV > N-1) && (indexV < N*(N+1)-N) && (indexV%N != 0) && (indexV%N != N-1));
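	// Correction step: subtract dt times the one-sided pressure difference (a discrete
	// pressure gradient) from the intermediate (starred) velocities.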
	if(boundCheckU){
		uCorr[index] = uStar[index] - dt/dx*(P[indexP+N+1]-P[indexP]);
	}
	if(boundCheckV){
		vCorr[indexV] = vStar[indexV] - dt/dy*(P[indexP+1]-P[indexP]);
	}
	// No __syncthreads() needed: the kernel uses no shared memory, and calling it under
	// divergent control flow (some threads return early above) is undefined behaviour.
}
|
721edc5ce138d8e99642c5ed0f4356ee898dfb6a.hip | // !!! This is a file automatically generated by hipify!!!
#include"CUDARender.cuh"
#include"cuda.h"
#include<iostream>
#include<hipfft.h>
#include"rocblas.h"
#include<stdio.h>
#include<stdlib.h>
bool CUDARender::Init(HDC dc)
{
MainDC = GetDC(hwnd);
MemoryDC = CreateCompatibleDC(MainDC);
BITMAPINFO ScreenDesc{};
ScreenDesc.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
ScreenDesc.bmiHeader.biWidth = WIDTH;
ScreenDesc.bmiHeader.biHeight = -HEIGHT;
ScreenDesc.bmiHeader.biPlanes = 1;
ScreenDesc.bmiHeader.biBitCount = BITS_PER_PIXEL;
ScreenDesc.bmiHeader.biCompression = BI_RGB;
DIBitmap = CreateDIBSection(MemoryDC, &ScreenDesc, DIB_RGB_COLORS, (LPVOID*)&ScreenBits, NULL, NULL);
OldDIBitmap = (HBITMAP)SelectObject(MemoryDC, DIBitmap);
Timer = time(NULL);
return true;
}
bool CUDARender::Update()
{
// Clear(VColor(0.0f,0.0f,0.0f));
if (RenderDone)
return false;
Vector3 LowLeftCorner(-2.0f, -1.5f, -1.0f);
Hittable* List[5];
Camera Cam(Vector3(-2, 2, 1), Vector3(0, 0, -1), Vector3(0, 1, 0), 60, ASPECTRATIO);
float R = cos(M_PI / 4);
List[0] = new Sphere(Vector3(0, 0, -1), 0.5f, new Lambertian(Vector3(0.1f, 0.2f, 0.5f)));
List[1] = new Sphere(Vector3(0, -100.5f, -1), 100.0f, new Lambertian(Vector3(0.8f, 0.8f, 0.0f)));
List[2] = new Sphere(Vector3(1, 0, -1), 0.5f, new Metal(Vector3(0.8f, 0.6f, 0.2f), 0.3f));
List[3] = new Sphere(Vector3(-1, 0, -1), 0.5f, new Dielectric(1.5f));
List[4] = new Sphere(Vector3(-1, 0, -1), -0.45f, new Dielectric(1.5f));
Hittable* World = new HittableList(List, 5);
	// Set the horizontal/vertical vectors to the desired viewport aspect ratio.
	// Since my viewport is (800, 600), a 4:3 ratio is used.
Vector3 Horizontal(4.0f, 0.0f, 0.0f);
Vector3 Vertical(0.0f, 3.0f, 0.0f);
Vector3 Origin(0.0f, 0.0f, 0.0f);
//OutImage << "P3\n" << WIDTH << " " << HEIGHT << "\n255\n";
for (int y = HEIGHT - 1; y >= 0; y--)
{
for (int x = 0; x < WIDTH; x++)
{
Vector3 color(0.0f, 0.0f, 0.0f);
for (int s = 0; s < SAMPLE; s++)
{
float u = float(x + random_double()) / float(WIDTH);
float v = float(y + random_double()) / float(HEIGHT);
Ray ray = Cam.GetRay(u, v);
color += Color(ray, World, 0);
}
color /= float(SAMPLE);
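			// Approximate gamma correction (gamma = 2): take the square root of each
			// channel before scaling to [0, 255].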
color = Vector3(sqrt(color[0]), sqrt(color[1]), sqrt(color[2]));
int ir = int(255.99*color[0]);
int ig = int(255.99*color[1]);
int ib = int(255.99*color[2]);
ScreenColors[y][x].SetRGB(ir, ig, ib);
}
}
ElapsedTime = (double)(time(NULL) - Timer);
//OutImage.close();
return true;
}
bool CUDARender::Render()
{
SwapBuffer();
if (RenderDone)
return false;
for (int y = 0; y < HEIGHT; y++)
{
for (int x = 0; x < WIDTH; x++)
{
CurrentColor.SetRGB(ScreenColors[HEIGHT - y][x]);
SetPixel(x, y);
}
}
printf("%f", ElapsedTime);
RenderDone = true;
return true;
}
bool CUDARender::Release()
{
SelectObject(MemoryDC, OldDIBitmap);
DeleteObject(DIBitmap);
ReleaseDC(hwnd, MemoryDC);
return true;
}
Vector3 CUDARender::Color(const Ray & ray, Hittable * World, int depth)
{
HitRecord rec;
if (World->hit(ray, 0.001f, FLT_MAX, rec))
{
Ray scattered;
Vector3 attenuation;
if (depth < 50 && rec.matptr->Scatter(ray, rec, attenuation, scattered))
return attenuation * Color(scattered, World, depth + 1);
else
return Vector3(0, 0, 0);
Vector3 Target = rec.p + rec.normal + RandomInUnitSphere();
return 0.5f* Color(Ray(rec.p, Target - rec.p), World, 0);
}
else
{
Vector3 Direction = unit_vector(ray.Direction());
float t = 0.5f * (Direction.y() + 1.0f);
return (1.0f - t) * Vector3(1.0f, 1.0f, 1.0f) + t * Vector3(0.5f, 0.7f, 1.0f);
}
}
float CUDARender::HitSphere(const Vector3 & center, float radius, const Ray & ray)
{
Vector3 oc = ray.Origin() - center;
float a = dot(ray.Direction(), ray.Direction());
float b = 2.0f * dot(oc, ray.Direction());
float c = dot(oc, oc) - radius * radius;
float discriminant = b * b - 4 * a*c;
if (discriminant < 0)
return -1.0;
else
return (-b - sqrt(discriminant)) / (2.0*a);
}
void CUDARender::Clear(VColor ClearColor)
{
UINT Offset = 0;
while (Offset < BytesPerScanline)
{
*((DWORD*)(ScreenBits + Offset)) = *(DWORD*)ClearColor.GetRGB(); // 0x00000000
Offset += BYTES_PER_PIXEL;
}
Offset = BytesPerScanline;
for (int i = 0; i < HEIGHT - 1; i++)
{
memcpy(ScreenBits + Offset, ScreenBits, BytesPerScanline);
Offset += BytesPerScanline;
}
}
void CUDARender::SwapBuffer()
{
BitBlt(MainDC, 0, 0, WIDTH, HEIGHT, MemoryDC, 0, 0, SRCCOPY);
}
void CUDARender::SetPixel(UINT x, UINT y)
{
if (!IsInScreen(x, y))
return;
int Offset = (y * BytesPerScanline) + (x * BYTES_PER_PIXEL);
*(ScreenBits + Offset + 0) = CurrentColor._0;
*(ScreenBits + Offset + 1) = CurrentColor._1;
*(ScreenBits + Offset + 2) = CurrentColor._2;
}
bool CUDARender::IsInScreen(UINT x, UINT y)
{
if (x > WIDTH || x < 0 || y > HEIGHT || y < 0)
return false;
return true;
}
CUDARender::CUDARender()
{
}
CUDARender::~CUDARender()
{
}
| 721edc5ce138d8e99642c5ed0f4356ee898dfb6a.cu | #include"CUDARender.cuh"
#include"cuda.h"
#include<iostream>
#include<cufft.h>
#include"cublas_v2.h"
#include<stdio.h>
#include<stdlib.h>
bool CUDARender::Init(HDC dc)
{
MainDC = GetDC(hwnd);
MemoryDC = CreateCompatibleDC(MainDC);
BITMAPINFO ScreenDesc{};
ScreenDesc.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
ScreenDesc.bmiHeader.biWidth = WIDTH;
ScreenDesc.bmiHeader.biHeight = -HEIGHT;
ScreenDesc.bmiHeader.biPlanes = 1;
ScreenDesc.bmiHeader.biBitCount = BITS_PER_PIXEL;
ScreenDesc.bmiHeader.biCompression = BI_RGB;
DIBitmap = CreateDIBSection(MemoryDC, &ScreenDesc, DIB_RGB_COLORS, (LPVOID*)&ScreenBits, NULL, NULL);
OldDIBitmap = (HBITMAP)SelectObject(MemoryDC, DIBitmap);
Timer = time(NULL);
return true;
}
bool CUDARender::Update()
{
// Clear(VColor(0.0f,0.0f,0.0f));
if (RenderDone)
return false;
Vector3 LowLeftCorner(-2.0f, -1.5f, -1.0f);
Hittable* List[5];
Camera Cam(Vector3(-2, 2, 1), Vector3(0, 0, -1), Vector3(0, 1, 0), 60, ASPECTRATIO);
float R = cos(M_PI / 4);
List[0] = new Sphere(Vector3(0, 0, -1), 0.5f, new Lambertian(Vector3(0.1f, 0.2f, 0.5f)));
List[1] = new Sphere(Vector3(0, -100.5f, -1), 100.0f, new Lambertian(Vector3(0.8f, 0.8f, 0.0f)));
List[2] = new Sphere(Vector3(1, 0, -1), 0.5f, new Metal(Vector3(0.8f, 0.6f, 0.2f), 0.3f));
List[3] = new Sphere(Vector3(-1, 0, -1), 0.5f, new Dielectric(1.5f));
List[4] = new Sphere(Vector3(-1, 0, -1), -0.45f, new Dielectric(1.5f));
Hittable* World = new HittableList(List, 5);
	// Set the horizontal/vertical vectors to the desired viewport aspect ratio.
	// Since my viewport is (800, 600), a 4:3 ratio is used.
Vector3 Horizontal(4.0f, 0.0f, 0.0f);
Vector3 Vertical(0.0f, 3.0f, 0.0f);
Vector3 Origin(0.0f, 0.0f, 0.0f);
//OutImage << "P3\n" << WIDTH << " " << HEIGHT << "\n255\n";
for (int y = HEIGHT - 1; y >= 0; y--)
{
for (int x = 0; x < WIDTH; x++)
{
Vector3 color(0.0f, 0.0f, 0.0f);
for (int s = 0; s < SAMPLE; s++)
{
float u = float(x + random_double()) / float(WIDTH);
float v = float(y + random_double()) / float(HEIGHT);
Ray ray = Cam.GetRay(u, v);
color += Color(ray, World, 0);
}
color /= float(SAMPLE);
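			// Approximate gamma correction (gamma = 2): take the square root of each
			// channel before scaling to [0, 255].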
color = Vector3(sqrt(color[0]), sqrt(color[1]), sqrt(color[2]));
int ir = int(255.99*color[0]);
int ig = int(255.99*color[1]);
int ib = int(255.99*color[2]);
ScreenColors[y][x].SetRGB(ir, ig, ib);
}
}
ElapsedTime = (double)(time(NULL) - Timer);
//OutImage.close();
return true;
}
bool CUDARender::Render()
{
SwapBuffer();
if (RenderDone)
return false;
for (int y = 0; y < HEIGHT; y++)
{
for (int x = 0; x < WIDTH; x++)
{
CurrentColor.SetRGB(ScreenColors[HEIGHT - y][x]);
SetPixel(x, y);
}
}
printf("%f", ElapsedTime);
RenderDone = true;
return true;
}
bool CUDARender::Release()
{
SelectObject(MemoryDC, OldDIBitmap);
DeleteObject(DIBitmap);
ReleaseDC(hwnd, MemoryDC);
return true;
}
Vector3 CUDARender::Color(const Ray & ray, Hittable * World, int depth)
{
HitRecord rec;
if (World->hit(ray, 0.001f, FLT_MAX, rec))
{
Ray scattered;
Vector3 attenuation;
if (depth < 50 && rec.matptr->Scatter(ray, rec, attenuation, scattered))
return attenuation * Color(scattered, World, depth + 1);
else
return Vector3(0, 0, 0);
Vector3 Target = rec.p + rec.normal + RandomInUnitSphere();
return 0.5f* Color(Ray(rec.p, Target - rec.p), World, 0);
}
else
{
Vector3 Direction = unit_vector(ray.Direction());
float t = 0.5f * (Direction.y() + 1.0f);
return (1.0f - t) * Vector3(1.0f, 1.0f, 1.0f) + t * Vector3(0.5f, 0.7f, 1.0f);
}
}
float CUDARender::HitSphere(const Vector3 & center, float radius, const Ray & ray)
{
Vector3 oc = ray.Origin() - center;
float a = dot(ray.Direction(), ray.Direction());
float b = 2.0f * dot(oc, ray.Direction());
float c = dot(oc, oc) - radius * radius;
float discriminant = b * b - 4 * a*c;
if (discriminant < 0)
return -1.0;
else
return (-b - sqrt(discriminant)) / (2.0*a);
}
void CUDARender::Clear(VColor ClearColor)
{
UINT Offset = 0;
while (Offset < BytesPerScanline)
{
*((DWORD*)(ScreenBits + Offset)) = *(DWORD*)ClearColor.GetRGB(); // 0x00000000
Offset += BYTES_PER_PIXEL;
}
Offset = BytesPerScanline;
for (int i = 0; i < HEIGHT - 1; i++)
{
memcpy(ScreenBits + Offset, ScreenBits, BytesPerScanline);
Offset += BytesPerScanline;
}
}
void CUDARender::SwapBuffer()
{
BitBlt(MainDC, 0, 0, WIDTH, HEIGHT, MemoryDC, 0, 0, SRCCOPY);
}
void CUDARender::SetPixel(UINT x, UINT y)
{
if (!IsInScreen(x, y))
return;
int Offset = (y * BytesPerScanline) + (x * BYTES_PER_PIXEL);
*(ScreenBits + Offset + 0) = CurrentColor._0;
*(ScreenBits + Offset + 1) = CurrentColor._1;
*(ScreenBits + Offset + 2) = CurrentColor._2;
}
bool CUDARender::IsInScreen(UINT x, UINT y)
{
if (x > WIDTH || x < 0 || y > HEIGHT || y < 0)
return false;
return true;
}
CUDARender::CUDARender()
{
}
CUDARender::~CUDARender()
{
}
|
0ac423d1e73a1decb7b4108a0acad05c4be5fcfa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*****************************************************************************
*
* File: okuyama_host.cu
* Author: Alex Stivala
* Created: February 2011
*
* $Id: okuyama_host.cu 224 2011-04-13 06:09:10Z astivala $
*
* CUDA host code for multiple-source shortest path implementation using
* CUDA based on:
*
* Okuyama, Ino, Hagihara 2008 "A Task Parallel Algorithm for Computing
* the costs of All-Pairs Shortest Paths on the CUDA-compatible GPU"
* Intl. Symp. Parallel Distributed Processing with Applications. 284-291
*
* Developed on CUDA 2.3.
 * Requires device compute capability at least 1.2 (uses 64 bit atomic
* functions).
*
****************************************************************************/
#include <assert.h>
#include <cutil_inline.h> /* CUDA SDK */
#include "sssp.h"
#include "okuyama_kernels.h"
#include "okuyama_host.h"
#undef TIMER_DEBUG
/*
* okuyama_sssp() - multiple-source shortest path by Okuyama et al algorithm
*
* Parameters:
 * Va, Ea, Wa - graph in packed adjacency list representation
 * num_nodes - number of nodes (elements in Va)
* num_edges - number of edges (elements in Ea, Wa)
* start_nodes - array of source nodes
* num_start_nodes - number of source nodes (elements in start_nodes)
* distances (OUT) - 2d array of shortest costs from sources to each node
 * distances[i*num_nodes+j] is cost from source j to node i
* predecessors (OUT) - 2d array (as above)
* of predecessor nodes for each node
*
* Return value:
* None.
*
 * Each block is responsible for all start nodes (single source shortest path
* problems) for a single vertex.
*
*/
void okuyama_sssp(int Va[], int Ea[], float Wa[],
int num_nodes, int num_edges,
int start_nodes[], int num_start_nodes,
float *distances, int *predecessors)
{
int *d_Va = NULL, *d_Ea;
float *d_Wa;
bool *d_Ma;
float *d_Ca;
volatile cost_node_pair_t *d_Ua;
int *d_start_nodes;
int *d_Pa;
unsigned int hTimer;
double copytime, runtime;
// assert(num_nodes + 1 < MAX_CONSTANT_NODES); // TODO handle this if too many
// using constant memory for this makes little improvement anyway
dim3 dimBlock(num_start_nodes); // threads per block
dim3 dimGrid(num_nodes); // blocks (per grid)
if (num_start_nodes > 512) // FIXME some rule for this
dimBlock = dim3(512); // kernel will handle multiple start nodes per thread
if (num_nodes > 65535) // FIXME some rule for this - 65535 is Fermi max */
dimGrid = dim3(65535);
fprintf(stdout, "Execution configuration: Grid = (%d,%d,%d) Block = (%d,%d,%d)\n", dimGrid.x,dimGrid.y,dimGrid.z, dimBlock.x,dimBlock.y,dimBlock.z);
  // allocate arrays for packed adjacency list format and
  // copy graph in packed adjacency list format to device
// also start nodes list
cutilSafeCall( hipMalloc((void **)&d_Va, (num_nodes+1)*sizeof(int)) );
cutilSafeCall( hipMalloc((void **)&d_Ea, num_edges*sizeof(int)) );
cutilSafeCall( hipMalloc((void **)&d_Wa, num_edges*sizeof(float)) );
cutilSafeCall( hipMalloc((void **)&d_start_nodes, num_start_nodes*sizeof(int)) );
printf("%d nodes (%d KB) %d edges (%d KB)\n",
num_nodes,
( (num_nodes+1)*sizeof(int) ) / 1024,
num_edges,
( num_edges*sizeof(int) + num_edges*sizeof(float) ) / 1024);
cutilCheckError( cutCreateTimer(&hTimer) );
cutilCheckError( cutResetTimer(hTimer) );
cutilCheckError( cutStartTimer(hTimer) );
cutilSafeCall( hipMemcpy(d_Va, Va, (num_nodes+1)*sizeof(int),
hipMemcpyHostToDevice) );
// copy Va to constant memory
// cutilSafeCall( hipMemcpyToSymbol("c_Va", Va, (num_nodes+1)*sizeof(int)) );
// and others to global memory
cutilSafeCall( hipMemcpy(d_Ea, Ea, num_edges*sizeof(int),
hipMemcpyHostToDevice) );
cutilSafeCall( hipMemcpy(d_Wa, Wa, num_edges*sizeof(float),
hipMemcpyHostToDevice) );
cutilSafeCall( hipMemcpy(d_start_nodes, start_nodes, num_start_nodes*sizeof(int),
hipMemcpyHostToDevice) );
cutilCheckError( cutStopTimer(hTimer) );
copytime = cutGetTimerValue(hTimer);
printf("time to copy %d nodes %d edges (total %d KB) to device: %f ms\n",
num_nodes, num_edges,
( (num_nodes+1)*sizeof(int) + num_edges*sizeof(int) +
num_edges*sizeof(float) ) / 1024,
copytime);
// allocate arrays for modification set, cost, updated cost
// and predecessor arrays
cutilSafeCall( hipMalloc((void **)&d_Ma,
num_nodes*num_start_nodes*sizeof(bool)) );
cutilSafeCall( hipMalloc((void **)&d_Ca, num_nodes*num_start_nodes*
sizeof(float)) );
cutilSafeCall( hipMalloc((void **)&d_Ua, num_nodes*num_start_nodes*
sizeof(cost_node_pair_t)) );
cutilSafeCall( hipMalloc((void**)&d_Pa, num_nodes*num_start_nodes*
sizeof(int)) );
// initialize the modification set, cost, updated cost arrays on device
hipLaunchKernelGGL(( okuyama_init_mask_cost_update_arrays), dim3(dimGrid), dim3(dimBlock), 0, 0, d_Ma, d_Ca,
(cost_node_pair_t *)d_Ua,
d_start_nodes,
num_nodes,
num_start_nodes,
d_Pa);
CUT_CHECK_ERROR("Kernel execution failed (okuyama_init_mask_cost_update_arrays)");
cutilSafeCall( hipDeviceSynchronize() );
cutilCheckError( cutResetTimer(hTimer) );
cutilCheckError( cutStartTimer(hTimer) );
  // execute scatter kernel followed by update kernel while there are still modified nodes
unsigned int update_count = 1;
unsigned int iter_count = 0;
do
{
#ifdef TIMER_DEBUG
unsigned int tdhTimer;
cutilCheckError( cutCreateTimer(&tdhTimer) );
cutilCheckError( cutResetTimer(tdhTimer) );
cutilCheckError( cutStartTimer(tdhTimer) );
#endif /* TIMER_DEBUG */
hipLaunchKernelGGL(( okuyama_scatter_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_Va,//NULL,//[use c_Va now] d_Va,
d_Ea, d_Wa,
d_Ma, d_Ca, d_Ua,
d_start_nodes,
num_nodes,
num_start_nodes,
d_Pa);
CUT_CHECK_ERROR("Kernel execution failed (okuyama_scatter_kernel)");
cutilSafeCall( hipDeviceSynchronize() );
#ifdef TIMER_DEBUG
cutilCheckError( cutStopTimer(tdhTimer) );
double scatter_time = cutGetTimerValue(tdhTimer);
fprintf(stderr, "okuyama_scatter_kernel time: %f ms\n", scatter_time);
#endif /* TIMER_DEBUG */
#ifdef TIMER_DEBUG
cutilCheckError( cutResetTimer(tdhTimer) );
cutilCheckError( cutStartTimer(tdhTimer) );
#endif /* TIMER_DEBUG */
hipLaunchKernelGGL(( okuyama_update_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
d_Ma, d_Ca,
(cost_node_pair_t *)d_Ua,
d_start_nodes,
num_nodes,
num_start_nodes,
d_Pa);
CUT_CHECK_ERROR("Kernel execution failed (okuyama_update_kernel)");
cutilSafeCall( hipDeviceSynchronize() );
#ifdef TIMER_DEBUG
cutilCheckError( cutStopTimer(tdhTimer) );
double update_time = cutGetTimerValue(tdhTimer);
fprintf(stderr, "okuyama_update_kernel time: %f ms\n", update_time);
#endif /* TIMER_DEBUG */
hipMemcpyFromSymbol(&update_count,"d_okuyama_update_count",
sizeof(unsigned int));
#ifdef DEBUG
fprintf(stderr, "okuyama_update_count = %d\n", update_count);
printf("iter_count = %d\n", iter_count);
#endif /* DEBUG */
iter_count++;
}
while (update_count > 0);
cutilCheckError( cutStopTimer(hTimer) );
runtime = cutGetTimerValue(hTimer);
printf("time to run %d iterations on device: %f ms\n",
iter_count, runtime);
// get the final costs and predecessor nodes back from the device
cutilSafeCall( hipMemcpy(distances, d_Ca,
num_nodes*num_start_nodes*sizeof(float),
hipMemcpyDeviceToHost) );
cutilSafeCall( hipMemcpy(predecessors, d_Pa,
num_nodes*num_start_nodes*sizeof(int),
hipMemcpyDeviceToHost) );
// free device memory
cutilSafeCall( hipFree(d_Pa) );
cutilSafeCall( hipFree(d_start_nodes) );
cutilSafeCall( hipFree(d_Va) );
cutilSafeCall( hipFree(d_Ea) );
cutilSafeCall( hipFree(d_Wa) );
cutilSafeCall( hipFree(d_Ca) );
cutilSafeCall( hipFree((void *)d_Ua) );
cutilSafeCall( hipFree(d_Ma) );
}
| 0ac423d1e73a1decb7b4108a0acad05c4be5fcfa.cu | /*****************************************************************************
*
* File: okuyama_host.cu
* Author: Alex Stivala
* Created: February 2011
*
* $Id: okuyama_host.cu 224 2011-04-13 06:09:10Z astivala $
*
* CUDA host code for multiple-source shortest path implementation using
* CUDA based on:
*
* Okuyama, Ino, Hagihara 2008 "A Task Parallel Algorithm for Computing
* the costs of All-Pairs Shortest Paths on the CUDA-compatible GPU"
* Intl. Symp. Parallel Distributed Processing with Applications. 284-291
*
* Developed on CUDA 2.3.
 * Requires device compute capability at least 1.2 (uses 64 bit atomic
* functions).
*
****************************************************************************/
#include <assert.h>
#include <cutil_inline.h> /* CUDA SDK */
#include "sssp.h"
#include "okuyama_kernels.h"
#include "okuyama_host.h"
#undef TIMER_DEBUG
/*
* okuyama_sssp() - multiple-source shortest path by Okuyama et al algorithm
*
* Parameters:
 * Va, Ea, Wa - graph in packed adjacency list representation
 * num_nodes - number of nodes (elements in Va)
* num_edges - number of edges (elements in Ea, Wa)
* start_nodes - array of source nodes
* num_start_nodes - number of source nodes (elements in start_nodes)
* distances (OUT) - 2d array of shortest costs from sources to each node
 * distances[i*num_nodes+j] is cost from source j to node i
* predecessors (OUT) - 2d array (as above)
* of predecessor nodes for each node
*
* Return value:
* None.
*
 * Each block is responsible for all start nodes (single source shortest path
* problems) for a single vertex.
*
*/
void okuyama_sssp(int Va[], int Ea[], float Wa[],
int num_nodes, int num_edges,
int start_nodes[], int num_start_nodes,
float *distances, int *predecessors)
{
int *d_Va = NULL, *d_Ea;
float *d_Wa;
bool *d_Ma;
float *d_Ca;
volatile cost_node_pair_t *d_Ua;
int *d_start_nodes;
int *d_Pa;
unsigned int hTimer;
double copytime, runtime;
// assert(num_nodes + 1 < MAX_CONSTANT_NODES); // TODO handle this if too many
// using constant memory for this makes little improvement anyway
dim3 dimBlock(num_start_nodes); // threads per block
dim3 dimGrid(num_nodes); // blocks (per grid)
if (num_start_nodes > 512) // FIXME some rule for this
dimBlock = dim3(512); // kernel will handle multiple start nodes per thread
if (num_nodes > 65535) // FIXME some rule for this - 65535 is Fermi max */
dimGrid = dim3(65535);
fprintf(stdout, "Execution configuration: Grid = (%d,%d,%d) Block = (%d,%d,%d)\n", dimGrid.x,dimGrid.y,dimGrid.z, dimBlock.x,dimBlock.y,dimBlock.z);
  // allocate arrays for packed adjacency list format and
  // copy graph in packed adjacency list format to device
// also start nodes list
cutilSafeCall( cudaMalloc((void **)&d_Va, (num_nodes+1)*sizeof(int)) );
cutilSafeCall( cudaMalloc((void **)&d_Ea, num_edges*sizeof(int)) );
cutilSafeCall( cudaMalloc((void **)&d_Wa, num_edges*sizeof(float)) );
cutilSafeCall( cudaMalloc((void **)&d_start_nodes, num_start_nodes*sizeof(int)) );
printf("%d nodes (%d KB) %d edges (%d KB)\n",
num_nodes,
( (num_nodes+1)*sizeof(int) ) / 1024,
num_edges,
( num_edges*sizeof(int) + num_edges*sizeof(float) ) / 1024);
cutilCheckError( cutCreateTimer(&hTimer) );
cutilCheckError( cutResetTimer(hTimer) );
cutilCheckError( cutStartTimer(hTimer) );
cutilSafeCall( cudaMemcpy(d_Va, Va, (num_nodes+1)*sizeof(int),
cudaMemcpyHostToDevice) );
// copy Va to constant memory
// cutilSafeCall( cudaMemcpyToSymbol("c_Va", Va, (num_nodes+1)*sizeof(int)) );
// and others to global memory
cutilSafeCall( cudaMemcpy(d_Ea, Ea, num_edges*sizeof(int),
cudaMemcpyHostToDevice) );
cutilSafeCall( cudaMemcpy(d_Wa, Wa, num_edges*sizeof(float),
cudaMemcpyHostToDevice) );
cutilSafeCall( cudaMemcpy(d_start_nodes, start_nodes, num_start_nodes*sizeof(int),
cudaMemcpyHostToDevice) );
cutilCheckError( cutStopTimer(hTimer) );
copytime = cutGetTimerValue(hTimer);
printf("time to copy %d nodes %d edges (total %d KB) to device: %f ms\n",
num_nodes, num_edges,
( (num_nodes+1)*sizeof(int) + num_edges*sizeof(int) +
num_edges*sizeof(float) ) / 1024,
copytime);
// allocate arrays for modification set, cost, updated cost
// and predecessor arrays
cutilSafeCall( cudaMalloc((void **)&d_Ma,
num_nodes*num_start_nodes*sizeof(bool)) );
cutilSafeCall( cudaMalloc((void **)&d_Ca, num_nodes*num_start_nodes*
sizeof(float)) );
cutilSafeCall( cudaMalloc((void **)&d_Ua, num_nodes*num_start_nodes*
sizeof(cost_node_pair_t)) );
cutilSafeCall( cudaMalloc((void**)&d_Pa, num_nodes*num_start_nodes*
sizeof(int)) );
// initialize the modification set, cost, updated cost arrays on device
okuyama_init_mask_cost_update_arrays<<<dimGrid, dimBlock>>>(d_Ma, d_Ca,
(cost_node_pair_t *)d_Ua,
d_start_nodes,
num_nodes,
num_start_nodes,
d_Pa);
CUT_CHECK_ERROR("Kernel execution failed (okuyama_init_mask_cost_update_arrays)");
cutilSafeCall( cudaThreadSynchronize() );
cutilCheckError( cutResetTimer(hTimer) );
cutilCheckError( cutStartTimer(hTimer) );
  // execute scatter kernel followed by update kernel while there are still modified nodes
unsigned int update_count = 1;
unsigned int iter_count = 0;
do
{
#ifdef TIMER_DEBUG
unsigned int tdhTimer;
cutilCheckError( cutCreateTimer(&tdhTimer) );
cutilCheckError( cutResetTimer(tdhTimer) );
cutilCheckError( cutStartTimer(tdhTimer) );
#endif /* TIMER_DEBUG */
okuyama_scatter_kernel<<<dimGrid, dimBlock>>>(d_Va,//NULL,//[use c_Va now] d_Va,
d_Ea, d_Wa,
d_Ma, d_Ca, d_Ua,
d_start_nodes,
num_nodes,
num_start_nodes,
d_Pa);
CUT_CHECK_ERROR("Kernel execution failed (okuyama_scatter_kernel)");
cutilSafeCall( cudaThreadSynchronize() );
#ifdef TIMER_DEBUG
cutilCheckError( cutStopTimer(tdhTimer) );
double scatter_time = cutGetTimerValue(tdhTimer);
fprintf(stderr, "okuyama_scatter_kernel time: %f ms\n", scatter_time);
#endif /* TIMER_DEBUG */
#ifdef TIMER_DEBUG
cutilCheckError( cutResetTimer(tdhTimer) );
cutilCheckError( cutStartTimer(tdhTimer) );
#endif /* TIMER_DEBUG */
okuyama_update_kernel<<<dimGrid, dimBlock>>>(
d_Ma, d_Ca,
(cost_node_pair_t *)d_Ua,
d_start_nodes,
num_nodes,
num_start_nodes,
d_Pa);
CUT_CHECK_ERROR("Kernel execution failed (okuyama_update_kernel)");
cutilSafeCall( cudaThreadSynchronize() );
#ifdef TIMER_DEBUG
cutilCheckError( cutStopTimer(tdhTimer) );
double update_time = cutGetTimerValue(tdhTimer);
fprintf(stderr, "okuyama_update_kernel time: %f ms\n", update_time);
#endif /* TIMER_DEBUG */
cudaMemcpyFromSymbol(&update_count,"d_okuyama_update_count",
sizeof(unsigned int));
#ifdef DEBUG
fprintf(stderr, "okuyama_update_count = %d\n", update_count);
printf("iter_count = %d\n", iter_count);
#endif /* DEBUG */
iter_count++;
}
while (update_count > 0);
cutilCheckError( cutStopTimer(hTimer) );
runtime = cutGetTimerValue(hTimer);
printf("time to run %d iterations on device: %f ms\n",
iter_count, runtime);
// get the final costs and predecessor nodes back from the device
cutilSafeCall( cudaMemcpy(distances, d_Ca,
num_nodes*num_start_nodes*sizeof(float),
cudaMemcpyDeviceToHost) );
cutilSafeCall( cudaMemcpy(predecessors, d_Pa,
num_nodes*num_start_nodes*sizeof(int),
cudaMemcpyDeviceToHost) );
// free device memory
cutilSafeCall( cudaFree(d_Pa) );
cutilSafeCall( cudaFree(d_start_nodes) );
cutilSafeCall( cudaFree(d_Va) );
cutilSafeCall( cudaFree(d_Ea) );
cutilSafeCall( cudaFree(d_Wa) );
cutilSafeCall( cudaFree(d_Ca) );
cutilSafeCall( cudaFree((void *)d_Ua) );
cutilSafeCall( cudaFree(d_Ma) );
}
|
ca7894b2788abbd04cb48dac67cbcd31988373e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "parameters.cuh"
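// d_tumble: give each colloid a new random orientation ra[i] -- a random vector wrapped by
// img() (presumably the periodic minimum-image map) and normalized to unit length.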
__global__ void d_tumble(point *ra, point *pos_colloid, point len, int no_of_colloid, hiprandState_t *state){
int i = blockDim.x*blockIdx.x + threadIdx.x + 1;
if(i <= no_of_colloid) {
ra[i] = img(pos_colloid[i] - ra[i].rand(&state[i])*len, len);
ra[i] = ra[i]/sqrt((ra[i]*ra[i]).sum());
}
}
void tumble() {
blk = dim3((no_of_colloid + thr.x - 1)/thr.x);
hipLaunchKernelGGL(( d_tumble), dim3(blk), dim3(thr), 0, 0, ra, pos_colloid, len, no_of_colloid, state);
}
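// run(): d_nbrc adds a propulsion kick v0*ra to each colloid and records the nearby fluid
// particles on its trailing side; d_velc then subtracts the matching momentum
// (mass_colloid*v0 shared over those particles) from the fluid, so total momentum is conserved.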
__global__ void d_nbrc(point *ra, point *vel_colloid, point *pos_fl, point *pos_colloid, point len,
int *no_neigh, int **nbr, int **neigh_fl, int *cnt, int no_of_colloid, double v0, double sigma) {
point vector;
int j = blockIdx.x*blockDim.x + threadIdx.x + 1;
int i = blockIdx.y*blockDim.y + threadIdx.y + 1;
if(i <= no_of_colloid) {
vel_colloid[i] += ra[i]*v0, cnt[i] = 0;
if(j <= no_neigh[i]) {
vector = img(pos_fl[neigh_fl[i][j]] - pos_colloid[i], len);
if((vector*vector).sum() <= power(sigma*0.5+0.5, 2) && (vector*vel_colloid[i]).sum() <= 0)
nbr[atomicAdd(&cnt[i], 1) + 1][i] = neigh_fl[i][j];
}
}
}
__global__ void d_velc(point *ra, point *vel_fl, int **nbr, int *cnt, int no_of_colloid, double mass_colloid, double mass_fl, double v0) {
point del; double temp;
int i = blockIdx.x*blockDim.x + threadIdx.x + 1;
if(i <= no_of_colloid) {
del = ra[i]*v0, temp = mass_colloid/(mass_fl*cnt[i]);
for(int j = 1; j <= cnt[i]; j++)
vel_fl[nbr[j][i]] -= del*temp;
}
}
void run() {
blk = dim3((10000 + thr.x -1)/thr.x, (no_of_colloid + thr.y - 1)/thr.y);
hipLaunchKernelGGL(( d_nbrc), dim3(blk), dim3(thr), 0, 0, ra, vel_colloid, pos_fl, pos_colloid, len, no_neigh, nbr, neigh_fl, cnt, no_of_colloid, v0, sigma);
blk = dim3((no_of_colloid + thr.x - 1)/thr.x);
hipLaunchKernelGGL(( d_velc), dim3(blk), dim3(thr), 0, 0, ra, vel_fl, nbr, cnt, no_of_colloid, mass_colloid, mass_fl, v0);
}
__global__ void helper_upd(int no_of_colloid, int *cnt, int *up_cnt, int *no_neigh, point **vel, point **up_vel,
int **neigh_fl, point *pos_fl, point *pos_colloid, point *vel_colloid, point *vel_fl, point len, double sigma) {
point vector;
int i = blockIdx.x*blockDim.x + threadIdx.x + 1;
int j = blockIdx.y*blockDim.y + threadIdx.y + 1;
if(i <= no_of_colloid) {
if(j <= no_neigh[i]) {
vector = img(pos_fl[neigh_fl[i][j]] - pos_colloid[i], len);
if((vector*vector).sum() <= pow((sigma*0.5 + 0.5), 2) && (vector*vel_colloid[i]).sum() <= 0.0)
vel[i][atomicAdd(&cnt[i], 1) + 1] = vel_fl[neigh_fl[i][j]];
if((vector*vector).sum() <= pow((sigma*0.5 + 0.1), 2) && (vector*vel_colloid[i]).sum() <= 0.0)
up_vel[i][atomicAdd(&up_cnt[i], 1) + 1] = vel_fl[neigh_fl[i][j]];
}
}
}
__global__ void calc_upd(int no_of_colloid, int *cnt, int *up_cnt, point **vel, point **up_vel, point *vel_colloid) {
int i = blockIdx.x*blockDim.x + threadIdx.x + 1;
if(i <= no_of_colloid) {
vel[i][0] = thrust::reduce(thrust::device, vel[i], vel[i] + cnt[i] + 1, point(0, 0, 0), add_point());
vel[i][0] = (cnt[i])? vel[i][0]/cnt[i] - vel_colloid[i]: vel[i][0];
up_vel[i][0] = thrust::reduce(thrust::device, up_vel[i], up_vel[i] + up_cnt[i] + 1, point(0, 0, 0), add_point());
up_vel[i][0] = (up_cnt[i])? up_vel[i][0]/up_cnt[i] - vel_colloid[i]: up_vel[i][0];
}
}
void updown_velocity() {
blk = dim3((no_of_colloid + thr.x - 1)/thr.x);
hipLaunchKernelGGL(( imemset), dim3(blk), dim3(thr), 0, 0, cnt, no_of_colloid);
hipLaunchKernelGGL(( imemset), dim3(blk), dim3(thr), 0, 0, up_cnt, no_of_colloid);
hipLaunchKernelGGL(( helper_upd), dim3(blk), dim3(thr), 0, 0, no_of_colloid, cnt, up_cnt, no_neigh, vel, up_vel, neigh_fl, pos_fl,
pos_colloid, vel_colloid, vel_fl, len, sigma);
hipLaunchKernelGGL(( calc_upd), dim3(blk), dim3(thr), 0, 0, no_of_colloid, cnt, up_cnt, vel, up_vel, vel_colloid);
}
| ca7894b2788abbd04cb48dac67cbcd31988373e5.cu | #include "parameters.cuh"
__global__ void d_tumble(point *ra, point *pos_colloid, point len, int no_of_colloid, curandState_t *state){
int i = blockDim.x*blockIdx.x + threadIdx.x + 1;
if(i <= no_of_colloid) {
ra[i] = img(pos_colloid[i] - ra[i].rand(&state[i])*len, len);
ra[i] = ra[i]/sqrt((ra[i]*ra[i]).sum());
}
}
void tumble() {
blk = dim3((no_of_colloid + thr.x - 1)/thr.x);
d_tumble<<<blk, thr>>>(ra, pos_colloid, len, no_of_colloid, state);
}
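// run(): d_nbrc adds a propulsion kick v0*ra to each colloid and records the nearby fluid
// particles on its trailing side; d_velc then subtracts the matching momentum
// (mass_colloid*v0 shared over those particles) from the fluid, so total momentum is conserved.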
__global__ void d_nbrc(point *ra, point *vel_colloid, point *pos_fl, point *pos_colloid, point len,
int *no_neigh, int **nbr, int **neigh_fl, int *cnt, int no_of_colloid, double v0, double sigma) {
point vector;
int j = blockIdx.x*blockDim.x + threadIdx.x + 1;
int i = blockIdx.y*blockDim.y + threadIdx.y + 1;
if(i <= no_of_colloid) {
vel_colloid[i] += ra[i]*v0, cnt[i] = 0;
if(j <= no_neigh[i]) {
vector = img(pos_fl[neigh_fl[i][j]] - pos_colloid[i], len);
if((vector*vector).sum() <= power(sigma*0.5+0.5, 2) && (vector*vel_colloid[i]).sum() <= 0)
nbr[atomicAdd(&cnt[i], 1) + 1][i] = neigh_fl[i][j];
}
}
}
__global__ void d_velc(point *ra, point *vel_fl, int **nbr, int *cnt, int no_of_colloid, double mass_colloid, double mass_fl, double v0) {
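// Remove the momentum the colloid gains from its propulsion kick (ra[i]*v0*mass_colloid),
// split evenly over its cnt[i] neighbouring fluid particles.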
point del; double temp;
int i = blockIdx.x*blockDim.x + threadIdx.x + 1;
if(i <= no_of_colloid) {
del = ra[i]*v0, temp = mass_colloid/(mass_fl*cnt[i]);
for(int j = 1; j <= cnt[i]; j++)
vel_fl[nbr[j][i]] -= del*temp;
}
}
void run() {
blk = dim3((10000 + thr.x -1)/thr.x, (no_of_colloid + thr.y - 1)/thr.y);
d_nbrc<<<blk, thr>>>(ra, vel_colloid, pos_fl, pos_colloid, len, no_neigh, nbr, neigh_fl, cnt, no_of_colloid, v0, sigma);
blk = dim3((no_of_colloid + thr.x - 1)/thr.x);
d_velc<<<blk, thr>>>(ra, vel_fl, nbr, cnt, no_of_colloid, mass_colloid, mass_fl, v0);
}
__global__ void helper_upd(int no_of_colloid, int *cnt, int *up_cnt, int *no_neigh, point **vel, point **up_vel,
int **neigh_fl, point *pos_fl, point *pos_colloid, point *vel_colloid, point *vel_fl, point len, double sigma) {
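// Gather fluid velocities around each colloid: vel[i] collects neighbours within radius sigma/2+0.5,
// up_vel[i] those within sigma/2+0.1, keeping only particles whose displacement from the colloid
// has a non-positive dot product with the colloid velocity.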
point vector;
int i = blockIdx.x*blockDim.x + threadIdx.x + 1;
int j = blockIdx.y*blockDim.y + threadIdx.y + 1;
if(i <= no_of_colloid) {
if(j <= no_neigh[i]) {
vector = img(pos_fl[neigh_fl[i][j]] - pos_colloid[i], len);
if((vector*vector).sum() <= pow((sigma*0.5 + 0.5), 2) && (vector*vel_colloid[i]).sum() <= 0.0)
vel[i][atomicAdd(&cnt[i], 1) + 1] = vel_fl[neigh_fl[i][j]];
if((vector*vector).sum() <= pow((sigma*0.5 + 0.1), 2) && (vector*vel_colloid[i]).sum() <= 0.0)
up_vel[i][atomicAdd(&up_cnt[i], 1) + 1] = vel_fl[neigh_fl[i][j]];
}
}
}
__global__ void calc_upd(int no_of_colloid, int *cnt, int *up_cnt, point **vel, point **up_vel, point *vel_colloid) {
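// Reduce each colloid's gathered velocities with thrust into element 0; when neighbours were found,
// convert the sum to a mean and subtract the colloid's own velocity.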
int i = blockIdx.x*blockDim.x + threadIdx.x + 1;
if(i <= no_of_colloid) {
vel[i][0] = thrust::reduce(thrust::device, vel[i], vel[i] + cnt[i] + 1, point(0, 0, 0), add_point());
vel[i][0] = (cnt[i])? vel[i][0]/cnt[i] - vel_colloid[i]: vel[i][0];
up_vel[i][0] = thrust::reduce(thrust::device, up_vel[i], up_vel[i] + up_cnt[i] + 1, point(0, 0, 0), add_point());
up_vel[i][0] = (up_cnt[i])? up_vel[i][0]/up_cnt[i] - vel_colloid[i]: up_vel[i][0];
}
}
void updown_velocity() {
blk = dim3((no_of_colloid + thr.x - 1)/thr.x);
imemset<<<blk, thr>>>(cnt, no_of_colloid);
imemset<<<blk, thr>>>(up_cnt, no_of_colloid);
helper_upd<<<blk, thr>>>(no_of_colloid, cnt, up_cnt, no_neigh, vel, up_vel, neigh_fl, pos_fl,
pos_colloid, vel_colloid, vel_fl, len, sigma);
calc_upd<<<blk, thr>>>(no_of_colloid, cnt, up_cnt, vel, up_vel, vel_colloid);
}
|
4607241d8764e2e8543aa908331b1a5d95a53eda.hip | // !!! This is a file automatically generated by hipify!!!
#include "CudaTimer.cuh"
#include <cudaDefs.h>
#include <stdexcept> // std::runtime_error thrown in start()/stop()
namespace nscommon {
CudaTimer::CudaTimer() : elapsed_{0}, running_{false} {
hipEventCreate(&startEvent_);
hipEventCreate(&stopEvent_);
}
CudaTimer::~CudaTimer() {
hipEventDestroy(startEvent_);
hipEventDestroy(stopEvent_);
}
void CudaTimer::start(hipStream_t stream) {
if (running_) {
throw std::runtime_error("Timer is already running");
}
running_ = true;
hipEventRecord(startEvent_, stream);
}
float CudaTimer::stop(hipStream_t stream) {
if (!running_) {
throw std::runtime_error("Timer is not running");
}
running_ = false;
hipEventRecord(stopEvent_, stream);
hipEventSynchronize(stopEvent_);
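// Elapsed time between the two recorded events, reported in milliseconds.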
hipEventElapsedTime(&elapsed_, startEvent_, stopEvent_);
return elapsed_;
}
}
| 4607241d8764e2e8543aa908331b1a5d95a53eda.cu | #include "CudaTimer.cuh"
#include <cudaDefs.h>
#include <stdexcept> // std::runtime_error thrown in start()/stop()
namespace nscommon {
CudaTimer::CudaTimer() : elapsed_{0}, running_{false} {
cudaEventCreate(&startEvent_);
cudaEventCreate(&stopEvent_);
}
CudaTimer::~CudaTimer() {
cudaEventDestroy(startEvent_);
cudaEventDestroy(stopEvent_);
}
void CudaTimer::start(cudaStream_t stream) {
if (running_) {
throw std::runtime_error("Timer is already running");
}
running_ = true;
cudaEventRecord(startEvent_, stream);
}
float CudaTimer::stop(cudaStream_t stream) {
if (!running_) {
throw std::runtime_error("Timer is not running");
}
running_ = false;
cudaEventRecord(stopEvent_, stream);
cudaEventSynchronize(stopEvent_);
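// Elapsed time between the two recorded events, reported in milliseconds.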
cudaEventElapsedTime(&elapsed_, startEvent_, stopEvent_);
return elapsed_;
}
}
|
8189dc68aae951d0ddb2dd3cd8757c5ff21ea86c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Prerequisites.cuh"
#include "Correlation.cuh"
#include "DeviceFunctions.cuh"
#include "IO.cuh"
#include "FFT.cuh"
#include "Helper.cuh"
#include <vector>
namespace gtom
{
////////////////////////////
//CUDA kernel declarations//
////////////////////////////
__global__ void LocalPeaksKernel(tfloat* d_input, float* d_output, int3 dims, int localextent, tfloat threshold);
template<int ndims, int rad> __global__ void SubpixelMaxKernel(tfloat* d_input, tfloat* d_output, int3 dims, float3 s);
////////////////////////////////////////////////
// Find local peaks above specified threshold //
////////////////////////////////////////////////
void d_LocalPeaks(tfloat* d_input, int3** h_peaks, int* h_peaksnum, int3 dims, int localextent, tfloat threshold, int batch)
{
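// Two passes: the kernel marks every voxel that exceeds the threshold and is not smaller than any
// voxel within radius localextent, then the mask is copied back and scanned on the host to build
// the coordinate list for each batch item.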
int TpB = tmin(128, NextMultipleOf(dims.x, 32));
dim3 grid = dim3(tmin((dims.x + TpB - 1) / TpB, 32768), dims.y, dims.z);
float* h_output;
hipHostMalloc((void**)&h_output, Elements(dims) * sizeof(float));
std::vector<int3> peaks;
for (int b = 0; b < batch; b++)
{
peaks.clear();
float* d_output = CudaMallocValueFilled(Elements(dims), 0.0f);
LocalPeaksKernel << <grid, (uint)TpB >> > (d_input + Elements(dims) * b, d_output, dims, localextent, threshold);
hipDeviceSynchronize();
//d_WriteMRC(d_output, dims, "d_localpeaks.mrc");
hipMemcpy(h_output, d_output, Elements(dims) * sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_output);
for (int z = 0; z < dims.z; z++)
for (int y = 0; y < dims.y; y++)
for (int x = 0; x < dims.x; x++)
if (h_output[(z * dims.y + y) * dims.x + x] > 0)
peaks.push_back(toInt3(x, y, z));
if (peaks.size() > 0)
{
h_peaks[b] = (int3*)malloc(peaks.size() * sizeof(int3));
memcpy(h_peaks[b], &peaks[0], peaks.size() * sizeof(int3));
}
h_peaksnum[b] = peaks.size();
}
hipHostFree(h_output);
}
void d_SubpixelMax(tfloat* d_input, tfloat* d_output, int3 dims, int subpixsteps)
{
int ndims = DimensionCount(dims);
float steplength = 1.0f / subpixsteps;
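// Evaluate a sinc-interpolated value at every shift of a subpixsteps-per-axis sub-pixel grid
// and keep, per voxel, the maximum over all shifts.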
int TpB = 128;
dim3 grid = dim3((dims.x - 15 + TpB - 1) / TpB, dims.y - 15, ndims > 2 ? dims.z - 15 : 1);
for (int sz = 0; sz < (ndims == 3 ? subpixsteps : 1); sz++)
for (int sy = 0; sy < subpixsteps; sy++)
for (int sx = 0; sx < subpixsteps; sx++)
{
float3 s = make_float3(sx * steplength - 0.5f + steplength / 2,
sy * steplength - 0.5f + steplength / 2,
sz * steplength - 0.5f + steplength / 2);
if (ndims < 3)
s.z = 0;
if (ndims == 2)
SubpixelMaxKernel<2, 6> << <grid, TpB >> > (d_input, d_output, dims, s);
else if (ndims == 3)
SubpixelMaxKernel<3, 6> << <grid, TpB >> > (d_input, d_output, dims, s);
else
throw;
hipDeviceSynchronize();
}
}
////////////////
//CUDA kernels//
////////////////
__global__ void LocalPeaksKernel(tfloat* d_input, float* d_output, int3 dims, int localextent, tfloat threshold)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= dims.x)
return;
int idy = blockIdx.y;
int idz = blockIdx.z;
tfloat value = d_input[(idz * dims.y + idy) * dims.x + idx];
if (value < threshold)
return;
int limx = tmin(dims.x - 1, idx + localextent);
int limy = tmin(dims.y - 1, idy + localextent);
int limz = tmin(dims.z - 1, idz + localextent);
int sqlocalextent = localextent * localextent;
int sqy, sqz;
int sqdist;
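// Scan the spherical neighbourhood of radius localextent and bail out as soon as any
// neighbouring voxel holds a larger value than this one.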
for (int z = tmax(0, idz - localextent); z <= limz; z++)
{
sqz = idz - z;
sqz *= sqz;
for (int y = tmax(0, idy - localextent); y <= limy; y++)
{
sqy = idy - y;
sqy *= sqy;
sqy += sqz;
for (int x = tmax(0, idx - localextent); x <= limx; x++)
{
sqdist = idx - x;
sqdist *= sqdist;
sqdist += sqy;
if (sqdist > sqlocalextent + 1e-5f || sqdist == 0)
continue;
if (value < d_input[(z * dims.y + y) * dims.x + x])
return;
}
}
}
d_output[(idz * dims.y + idy) * dims.x + idx] = 1.0f;
}
template<int ndims, int rad> __global__ void SubpixelMaxKernel(tfloat* d_input, tfloat* d_output, int3 dims, float3 s)
{
int cz = ndims == 3 ? blockIdx.z + rad : 0;
int cy = blockIdx.y + rad;
int cx = blockIdx.x * blockDim.x + threadIdx.x + rad;
if (cx >= dims.x - rad - 2 || cy >= dims.y - rad - 2)
return;
if (ndims == 3 && cz >= dims.z - rad - 2)
return;
tfloat sum = 0;
for (int z = (ndims == 3 ? -rad : 0); z <= (ndims == 3 ? rad : 0); z++)
{
float sincz = ndims == 3 ? sinc(s.z - z) : 1;
for (int y = -rad; y <= rad; y++)
{
float sincy = sinc(s.y - y);
for (int x = -rad; x <= rad; x++)
{
float sincx = sinc(s.x - x);
sum += d_input[((cz + z) * dims.y + cy + y) * dims.x + cx + x] * sincx * sincy * sincz;
}
}
}
d_output[(cz * dims.y + cy) * dims.x + cx] = tmax(d_output[(cz * dims.y + cy) * dims.x + cx], sum);
}
} | 8189dc68aae951d0ddb2dd3cd8757c5ff21ea86c.cu | #include "Prerequisites.cuh"
#include "Correlation.cuh"
#include "DeviceFunctions.cuh"
#include "IO.cuh"
#include "FFT.cuh"
#include "Helper.cuh"
#include <vector>
namespace gtom
{
////////////////////////////
//CUDA kernel declarations//
////////////////////////////
__global__ void LocalPeaksKernel(tfloat* d_input, float* d_output, int3 dims, int localextent, tfloat threshold);
template<int ndims, int rad> __global__ void SubpixelMaxKernel(tfloat* d_input, tfloat* d_output, int3 dims, float3 s);
////////////////////////////////////////////////
// Find local peaks above specified threshold //
////////////////////////////////////////////////
void d_LocalPeaks(tfloat* d_input, int3** h_peaks, int* h_peaksnum, int3 dims, int localextent, tfloat threshold, int batch)
{
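// Two passes: the kernel marks every voxel that exceeds the threshold and is not smaller than any
// voxel within radius localextent, then the mask is copied back and scanned on the host to build
// the coordinate list for each batch item.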
int TpB = tmin(128, NextMultipleOf(dims.x, 32));
dim3 grid = dim3(tmin((dims.x + TpB - 1) / TpB, 32768), dims.y, dims.z);
float* h_output;
cudaMallocHost((void**)&h_output, Elements(dims) * sizeof(float));
std::vector<int3> peaks;
for (int b = 0; b < batch; b++)
{
peaks.clear();
float* d_output = CudaMallocValueFilled(Elements(dims), 0.0f);
LocalPeaksKernel << <grid, (uint)TpB >> > (d_input + Elements(dims) * b, d_output, dims, localextent, threshold);
cudaDeviceSynchronize();
//d_WriteMRC(d_output, dims, "d_localpeaks.mrc");
cudaMemcpy(h_output, d_output, Elements(dims) * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_output);
for (int z = 0; z < dims.z; z++)
for (int y = 0; y < dims.y; y++)
for (int x = 0; x < dims.x; x++)
if (h_output[(z * dims.y + y) * dims.x + x] > 0)
peaks.push_back(toInt3(x, y, z));
if (peaks.size() > 0)
{
h_peaks[b] = (int3*)malloc(peaks.size() * sizeof(int3));
memcpy(h_peaks[b], &peaks[0], peaks.size() * sizeof(int3));
}
h_peaksnum[b] = peaks.size();
}
cudaFreeHost(h_output);
}
void d_SubpixelMax(tfloat* d_input, tfloat* d_output, int3 dims, int subpixsteps)
{
int ndims = DimensionCount(dims);
float steplength = 1.0f / subpixsteps;
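// Evaluate a sinc-interpolated value at every shift of a subpixsteps-per-axis sub-pixel grid
// and keep, per voxel, the maximum over all shifts.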
int TpB = 128;
dim3 grid = dim3((dims.x - 15 + TpB - 1) / TpB, dims.y - 15, ndims > 2 ? dims.z - 15 : 1);
for (int sz = 0; sz < (ndims == 3 ? subpixsteps : 1); sz++)
for (int sy = 0; sy < subpixsteps; sy++)
for (int sx = 0; sx < subpixsteps; sx++)
{
float3 s = make_float3(sx * steplength - 0.5f + steplength / 2,
sy * steplength - 0.5f + steplength / 2,
sz * steplength - 0.5f + steplength / 2);
if (ndims < 3)
s.z = 0;
if (ndims == 2)
SubpixelMaxKernel<2, 6> << <grid, TpB >> > (d_input, d_output, dims, s);
else if (ndims == 3)
SubpixelMaxKernel<3, 6> << <grid, TpB >> > (d_input, d_output, dims, s);
else
throw;
cudaDeviceSynchronize();
}
}
////////////////
//CUDA kernels//
////////////////
__global__ void LocalPeaksKernel(tfloat* d_input, float* d_output, int3 dims, int localextent, tfloat threshold)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= dims.x)
return;
int idy = blockIdx.y;
int idz = blockIdx.z;
tfloat value = d_input[(idz * dims.y + idy) * dims.x + idx];
if (value < threshold)
return;
int limx = tmin(dims.x - 1, idx + localextent);
int limy = tmin(dims.y - 1, idy + localextent);
int limz = tmin(dims.z - 1, idz + localextent);
int sqlocalextent = localextent * localextent;
int sqy, sqz;
int sqdist;
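// Scan the spherical neighbourhood of radius localextent and bail out as soon as any
// neighbouring voxel holds a larger value than this one.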
for (int z = tmax(0, idz - localextent); z <= limz; z++)
{
sqz = idz - z;
sqz *= sqz;
for (int y = tmax(0, idy - localextent); y <= limy; y++)
{
sqy = idy - y;
sqy *= sqy;
sqy += sqz;
for (int x = tmax(0, idx - localextent); x <= limx; x++)
{
sqdist = idx - x;
sqdist *= sqdist;
sqdist += sqy;
if (sqdist > sqlocalextent + 1e-5f || sqdist == 0)
continue;
if (value < d_input[(z * dims.y + y) * dims.x + x])
return;
}
}
}
d_output[(idz * dims.y + idy) * dims.x + idx] = 1.0f;
}
template<int ndims, int rad> __global__ void SubpixelMaxKernel(tfloat* d_input, tfloat* d_output, int3 dims, float3 s)
{
int cz = ndims == 3 ? blockIdx.z + rad : 0;
int cy = blockIdx.y + rad;
int cx = blockIdx.x * blockDim.x + threadIdx.x + rad;
if (cx >= dims.x - rad - 2 || cy >= dims.y - rad - 2)
return;
if (ndims == 3 && cz >= dims.z - rad - 2)
return;
tfloat sum = 0;
for (int z = (ndims == 3 ? -rad : 0); z <= (ndims == 3 ? rad : 0); z++)
{
float sincz = ndims == 3 ? sinc(s.z - z) : 1;
for (int y = -rad; y <= rad; y++)
{
float sincy = sinc(s.y - y);
for (int x = -rad; x <= rad; x++)
{
float sincx = sinc(s.x - x);
sum += d_input[((cz + z) * dims.y + cy + y) * dims.x + cx + x] * sincx * sincy * sincz;
}
}
}
d_output[(cz * dims.y + cy) * dims.x + cx] = tmax(d_output[(cz * dims.y + cy) * dims.x + cx], sum);
}
} |
19192c5ac94261690a1b231554dc5cece5ee6bd0.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include <math.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
#include "../include/repeat2.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define LINE_SIZE 128
#define SETS 64
#define ASSOC 6
#define SIMD_WIDTH 32
#define NUM_OF_THREADS 32
// Variables
int* h_A;
int* h_B;
int* h_C;
int* d_A;
int* d_B;
int* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line ){
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
int gcf(int a, int b)
{
if (a == 0) return b;
return gcf(b % a, a);
}
// Device code
const int page_size = 4; // Scale stride and arrays by page size.
__global__ void global_latency (unsigned int * my_array, int array_length, int iterations, unsigned * duration) {
unsigned sum_time = 0;
duration[0] = 0;
unsigned j=0;
unsigned LINESIZE= 1;
unsigned CACHESIZE= 4096;
unsigned LIMIT=0;
int m=0;
/*
// fill L1/L2 cache
for (int k=0; k<CACHESIZE; k+=LINESIZE){
m=k%array_length;
j+=my_array[m];
}
if (j>=array_length) j=0;
*/
int tid = blockDim.x * blockIdx.x + threadIdx.x;
j=tid;
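// Pointer-chase: each load fetches the index used by the next load, so successive accesses
// are serialized and expose the raw memory latency.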
for (int k = 0; k < iterations; k++) {
repeat1(j = my_array[j];)
// repeat1024(j=*(unsigned int **)j
}
//my_array[array_length] = (unsigned int)j;
//my_array[array_length+1] = (unsigned int) sum_time;
duration[0] = j;
}
void parametric_measure_global(int N, int iterations, int stride) {
int i;
int j=0;
unsigned int * h_a;
unsigned int * d_a;
unsigned * duration;
unsigned long long * latency;
unsigned long long latency_sum = 0;
// Don't die if too much memory was requested.
if (N > 650000000) { printf ("OOM.\n"); return; }
// allocate arrays on CPU
h_a = (unsigned int *)malloc(sizeof(unsigned int) * (N+2+NUM_OF_THREADS));
latency = (unsigned long long *)malloc(sizeof(unsigned long long));
// allocate arrays on GPU
hipMalloc ((void **) &d_a, sizeof(unsigned int) * (N+2+NUM_OF_THREADS));
hipMalloc ((void **) &duration, sizeof(unsigned long long));
// initialize array elements on CPU with pointers into d_a.
int step = gcf (stride, N); // Optimization: Initialize fewer elements.
for (i = 0; i < N; i += step) {
// Device pointers are 32-bit on GT200.
for (j=0; j<NUM_OF_THREADS; j++)
h_a[i+j] = ((i + j + stride) % N);
}
for (j=0; j<NUM_OF_THREADS; j++)
h_a[N+j] = j;
h_a[N+NUM_OF_THREADS] = 0;
hipDeviceSynchronize ();
// copy array elements from CPU to GPU
hipMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned int) * N, hipMemcpyHostToDevice);
hipDeviceSynchronize ();
// Launch a multiple of 10 iterations of the same kernel and take the average to eliminate interconnect (TPCs) effects
for (int l=0; l <1; l++) {
// launch kernel
dim3 Db = dim3(NUM_OF_THREADS);
dim3 Dg = dim3(1,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
hipLaunchKernelGGL(( global_latency) , dim3(Dg), dim3(Db), 0, 0, d_a,N, iterations, duration);
//global_latency <<<Dg, Db>>> ();
hipDeviceSynchronize ();
hipError_t error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error is %s\n", hipGetErrorString(error_id));
}
// copy results from GPU to CPU
hipDeviceSynchronize ();
//hipMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned int) * (N+2), hipMemcpyDeviceToHost);
hipMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long), hipMemcpyDeviceToHost);
hipDeviceSynchronize ();
latency_sum+=latency[0];
}
// free memory on GPU
hipFree(d_a);
hipFree(duration);
hipDeviceSynchronize ();
// free memory on CPU
free(h_a);
free(latency);
// return 0;
}
// Host code
int main() {
printf("Assuming page size is %d KB\n", page_size);
// we will measure latency of global memory
// One thread that accesses an array.
// loads are dependent on the previously loaded values
int N, iterations, stride;
// initialize upper bounds here
int stride_upper_bound;
printf("Global1: Global memory latency for 1 KB array and varying strides.\n");
printf(" stride (bytes), latency (clocks)\n");
N= 536870912;
iterations = 40;
stride_upper_bound = N;
stride= 2048;
//for (stride = 1; stride <= (stride_upper_bound) ; stride+=1) {
// printf (" %5d, ", stride*4);
parametric_measure_global(N, iterations, stride);
//}
return 0;
}
| 19192c5ac94261690a1b231554dc5cece5ee6bd0.cu | #include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include <math.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
#include "../include/repeat2.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define LINE_SIZE 128
#define SETS 64
#define ASSOC 6
#define SIMD_WIDTH 32
#define NUM_OF_THREADS 32
// Variables
int* h_A;
int* h_B;
int* h_C;
int* d_A;
int* d_B;
int* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line ){
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
int gcf(int a, int b)
{
if (a == 0) return b;
return gcf(b % a, a);
}
// Device code
const int page_size = 4; // Scale stride and arrays by page size.
__global__ void global_latency (unsigned int * my_array, int array_length, int iterations, unsigned * duration) {
unsigned sum_time = 0;
duration[0] = 0;
unsigned j=0;
unsigned LINESIZE= 1;
unsigned CACHESIZE= 4096;
unsigned LIMIT=0;
int m=0;
/*
// fill L1/L2 cache
for (int k=0; k<CACHESIZE; k+=LINESIZE){
m=k%array_length;
j+=my_array[m];
}
if (j>=array_length) j=0;
*/
int tid = blockDim.x * blockIdx.x + threadIdx.x;
j=tid;
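// Pointer-chase: each load fetches the index used by the next load, so successive accesses
// are serialized and expose the raw memory latency.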
for (int k = 0; k < iterations; k++) {
repeat1(j = my_array[j];)
// repeat1024(j=*(unsigned int **)j
}
//my_array[array_length] = (unsigned int)j;
//my_array[array_length+1] = (unsigned int) sum_time;
duration[0] = j;
}
void parametric_measure_global(int N, int iterations, int stride) {
int i;
int j=0;
unsigned int * h_a;
unsigned int * d_a;
unsigned * duration;
unsigned long long * latency;
unsigned long long latency_sum = 0;
// Don't die if too much memory was requested.
if (N > 650000000) { printf ("OOM.\n"); return; }
// allocate arrays on CPU
h_a = (unsigned int *)malloc(sizeof(unsigned int) * (N+2+NUM_OF_THREADS));
latency = (unsigned long long *)malloc(sizeof(unsigned long long));
// allocate arrays on GPU
cudaMalloc ((void **) &d_a, sizeof(unsigned int) * (N+2+NUM_OF_THREADS));
cudaMalloc ((void **) &duration, sizeof(unsigned long long));
// initialize array elements on CPU with pointers into d_a.
int step = gcf (stride, N); // Optimization: Initialize fewer elements.
for (i = 0; i < N; i += step) {
// Device pointers are 32-bit on GT200.
for (j=0; j<NUM_OF_THREADS; j++)
h_a[i+j] = ((i + j + stride) % N);
}
for (j=0; j<NUM_OF_THREADS; j++)
h_a[N+j] = j;
h_a[N+NUM_OF_THREADS] = 0;
cudaThreadSynchronize ();
// copy array elements from CPU to GPU
cudaMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned int) * N, cudaMemcpyHostToDevice);
cudaThreadSynchronize ();
// Launch a multiple of 10 iterations of the same kernel and take the average to eliminate interconnect (TPCs) effects
for (int l=0; l <1; l++) {
// launch kernel
dim3 Db = dim3(NUM_OF_THREADS);
dim3 Dg = dim3(1,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
global_latency <<<Dg, Db>>>(d_a,N, iterations, duration);
//global_latency <<<Dg, Db>>> ();
cudaThreadSynchronize ();
cudaError_t error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error is %s\n", cudaGetErrorString(error_id));
}
// copy results from GPU to CPU
cudaThreadSynchronize ();
//cudaMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned int) * (N+2), cudaMemcpyDeviceToHost);
cudaMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long), cudaMemcpyDeviceToHost);
cudaThreadSynchronize ();
latency_sum+=latency[0];
}
// free memory on GPU
cudaFree(d_a);
cudaFree(duration);
cudaThreadSynchronize ();
// free memory on CPU
free(h_a);
free(latency);
// return 0;
}
// Host code
int main() {
printf("Assuming page size is %d KB\n", page_size);
// we will measure latency of global memory
// One thread that accesses an array.
// loads are dependent on the previously loaded values
int N, iterations, stride;
// initialize upper bounds here
int stride_upper_bound;
printf("Global1: Global memory latency for 1 KB array and varying strides.\n");
printf(" stride (bytes), latency (clocks)\n");
N= 536870912;
iterations = 40;
stride_upper_bound = N;
stride= 2048;
//for (stride = 1; stride <= (stride_upper_bound) ; stride+=1) {
// printf (" %5d, ", stride*4);
parametric_measure_global(N, iterations, stride);
//}
return 0;
}
|
a0dfec6caec93f668891cd8dbb48c8cb04b604cc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "layer.h"
#include <string.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#ifdef GPU
__global__
void bilinear_resize_gpu(const unsigned char* const img,
real* const input3d,
const int height, const int width,
const int resized_height, const int resized_width,
const real img_scale_y, const real img_scale_x,
const int stride)
{
const real gs_mean[3] = { 102.9801f, 115.9465f, 122.7717f };
const int index = blockIdx.x * blockDim.x + threadIdx.x;
const int resized_area = resized_height * resized_width;
if (index < 3 * resized_area) {
const int c = index / resized_area;
const int i = (index / resized_width) % resized_height;
const int j = index % resized_width;
const real y = i / img_scale_y;
const int y0 = (int)y;
const int y1 = MIN(y0 + 1, height - 1);
const real ay = y - y0;
const real by = 1 - ay;
const real x = j / img_scale_x;
const int x0 = (int)x;
const int x1 = MIN(x0 + 1, width - 1);
const real ax = x - x0;
const real bx = 1 - ax;
real val = 0;
val += (ax > 0 && ay > 0) ? ax * ay * img[y1 * stride + x1 * 3 + c] : 0;
val += (ax > 0 && by > 0) ? ax * by * img[y0 * stride + x1 * 3 + c] : 0;
val += (bx > 0 && ay > 0) ? bx * ay * img[y1 * stride + x0 * 3 + c] : 0;
val += (bx > 0 && by > 0) ? bx * by * img[y0 * stride + x0 * 3 + c] : 0;
input3d[index] = val - gs_mean[c];
}
}
#else
void bilinear_resize_cpu(const unsigned char* const img,
real* const input3d,
const int height, const int width,
const int resized_height, const int resized_width,
const real img_scale_y, const real img_scale_x,
const int stride)
{
static const real gs_mean_blue = 102.9801f;
static const real gs_mean_green = 115.9465f;
static const real gs_mean_red = 122.7717f;
const int resized_area = resized_height * resized_width;
real* const p_inputB = input3d + 0 * resized_area;
real* const p_inputG = input3d + 1 * resized_area;
real* const p_inputR = input3d + 2 * resized_area;
for (int i = 0; i < resized_height; ++i) {
const real y = i / img_scale_y;
const int y0 = (int)y;
const int y1 = MIN(y0 + 1, height - 1);
const real ay = y - y0;
const real by = 1 - ay;
for (int j = 0; j < resized_width; ++j) {
const real x = j / img_scale_x;
const int x0 = (int)x;
const int x1 = MIN(x0 + 1, width - 1);
const real ax = x - x0;
const real bx = 1 - ax;
real B = 0, G = 0, R = 0;
if (ax > 0 && ay > 0) {
B += ax * ay * img[y1 * stride + x1 * 3 + 0];
G += ax * ay * img[y1 * stride + x1 * 3 + 1];
R += ax * ay * img[y1 * stride + x1 * 3 + 2];
}
if (ax > 0 && by > 0) {
B += ax * by * img[y0 * stride + x1 * 3 + 0];
G += ax * by * img[y0 * stride + x1 * 3 + 1];
R += ax * by * img[y0 * stride + x1 * 3 + 2];
}
if (bx > 0 && ay > 0) {
B += bx * ay * img[y1 * stride + x0 * 3 + 0];
G += bx * ay * img[y1 * stride + x0 * 3 + 1];
R += bx * ay * img[y1 * stride + x0 * 3 + 2];
}
if (bx > 0 && by > 0) {
B += bx * by * img[y0 * stride + x0 * 3 + 0];
G += bx * by * img[y0 * stride + x0 * 3 + 1];
R += bx * by * img[y0 * stride + x0 * 3 + 2];
}
p_inputB[i * resized_width + j] = B - gs_mean_blue;
p_inputG[i * resized_width + j] = G - gs_mean_green;
p_inputR[i * resized_width + j] = R - gs_mean_red;
/*
if (i == resized_height - 1) {
printf("%d %d: %d %d %d %d %f %f %f %f, %d %d %d %d %f %f %f, %f %f %f\n", i, j, y0, y1, x0, x1, ay, by, ax, bx,
img[y1 * stride + x1 * 3], img[y0 * stride + x1 * 3], img[y1 * stride + x0 * 3], img[y0 * stride + x0 * 3],
B, G, R, p_inputB[i * resized_width + j], p_inputG[i * resized_width + j], p_inputR[i * resized_width + j]);
}
*/
}
}
}
#endif
void img2input(const unsigned char* const img,
Tensor* const input3d,
Tensor* const img_info1d,
unsigned char* const temp_data,
const int height, const int width, const int stride)
{
static const real gs_max_size = 1000.0f;
static const real gs_base_size = 600.0f;
const int img_size_min = MIN(height, width);
const int img_size_max = MAX(height, width);
real img_scale = gs_base_size / img_size_min;
if (ROUND(img_scale * img_size_max) > gs_max_size) {
img_scale = gs_max_size / img_size_max;
}
const int gs_scale_base = 32;
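// Adjust the scale per axis so that the resized height and width come out as multiples of 32.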
const real img_scale_y
= (real)((int)(height * img_scale / gs_scale_base) * gs_scale_base)
/ height;
const real img_scale_x
= (real)((int)(width * img_scale / gs_scale_base) * gs_scale_base)
/ width;
const int resized_height = ROUND(height * img_scale_y);
const int resized_width = ROUND(width * img_scale_x);
const int n = img_info1d->num_items;
real* const p_img_info1d = img_info1d->data + n * 6;
p_img_info1d[0] = (real)resized_height;
p_img_info1d[1] = (real)resized_width;
p_img_info1d[2] = img_scale_y;
p_img_info1d[3] = img_scale_x;
p_img_info1d[4] = (real)height;
p_img_info1d[5] = (real)width;
#ifdef GPU
{
const int num_threads = 3 * resized_height * resized_width;
const int threads_per_block = 512;
const int num_blocks = DIV_THEN_CEIL(num_threads, threads_per_block);
hipMemcpyAsync(temp_data, img,
height * width * 3 * sizeof(unsigned char),
hipMemcpyHostToDevice);
hipLaunchKernelGGL(( bilinear_resize_gpu), dim3(num_blocks), dim3(threads_per_block), 0, 0,
temp_data, input3d->data + input3d->start[n],
height, width, resized_height, resized_width,
img_scale_y, img_scale_x, stride);
}
#else
{
bilinear_resize_cpu(
img, input3d->data + input3d->start[n],
height, width, resized_height, resized_width,
img_scale_y, img_scale_x, stride);
}
#endif
const int input_size = 3 * resized_height * resized_width;
//printf("image size = %d x %d x 3 = %d\n", resized_height, resized_width, input_size);
input3d->shape[n][0] = 3;
input3d->shape[n][1] = resized_height;
input3d->shape[n][2] = resized_width;
input3d->start[n + 1] = input3d->start[n] + input_size;
++input3d->num_items;
img_info1d->shape[n][0] = 6;
++img_info1d->num_items;
}
void load_image(const char* const filename,
Tensor* const input3d,
Tensor* const img_info1d,
real* const temp_data)
{
cv::Mat image = cv::imread(filename);
if (!image.data) {
printf("[ERROR] Cannot open image: %s\n", filename);
}
const int height = image.rows;
const int width = image.cols;
const int stride = (int)image.step.p[0];
/*
printf("Image %s: %d x %d, stride=%d\n", filename, height, width, stride);
char path[1024];
sprintf(path, "params/%s.txt", filename + 35);
FILE* fp = fopen(path, "w");
for (int i = 0; i < height; ++i)
for (int j = 0; j < width; ++j)
for (int k = 0; k < 3; ++k)
fprintf(fp, "%d %d %d %d\n", i, j, k, image.data[i * width * 3 + j * 3 + k]);
fclose(fp);
*/
img2input(image.data, input3d, img_info1d, (unsigned char*)temp_data,
height, width, stride);
}
| a0dfec6caec93f668891cd8dbb48c8cb04b604cc.cu | #include "layer.h"
#include <string.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#ifdef GPU
__global__
void bilinear_resize_gpu(const unsigned char* const img,
real* const input3d,
const int height, const int width,
const int resized_height, const int resized_width,
const real img_scale_y, const real img_scale_x,
const int stride)
{
const real gs_mean[3] = { 102.9801f, 115.9465f, 122.7717f };
const int index = blockIdx.x * blockDim.x + threadIdx.x;
const int resized_area = resized_height * resized_width;
if (index < 3 * resized_area) {
const int c = index / resized_area;
const int i = (index / resized_width) % resized_height;
const int j = index % resized_width;
const real y = i / img_scale_y;
const int y0 = (int)y;
const int y1 = MIN(y0 + 1, height - 1);
const real ay = y - y0;
const real by = 1 - ay;
const real x = j / img_scale_x;
const int x0 = (int)x;
const int x1 = MIN(x0 + 1, width - 1);
const real ax = x - x0;
const real bx = 1 - ax;
real val = 0;
val += (ax > 0 && ay > 0) ? ax * ay * img[y1 * stride + x1 * 3 + c] : 0;
val += (ax > 0 && by > 0) ? ax * by * img[y0 * stride + x1 * 3 + c] : 0;
val += (bx > 0 && ay > 0) ? bx * ay * img[y1 * stride + x0 * 3 + c] : 0;
val += (bx > 0 && by > 0) ? bx * by * img[y0 * stride + x0 * 3 + c] : 0;
input3d[index] = val - gs_mean[c];
}
}
#else
void bilinear_resize_cpu(const unsigned char* const img,
real* const input3d,
const int height, const int width,
const int resized_height, const int resized_width,
const real img_scale_y, const real img_scale_x,
const int stride)
{
static const real gs_mean_blue = 102.9801f;
static const real gs_mean_green = 115.9465f;
static const real gs_mean_red = 122.7717f;
const int resized_area = resized_height * resized_width;
real* const p_inputB = input3d + 0 * resized_area;
real* const p_inputG = input3d + 1 * resized_area;
real* const p_inputR = input3d + 2 * resized_area;
for (int i = 0; i < resized_height; ++i) {
const real y = i / img_scale_y;
const int y0 = (int)y;
const int y1 = MIN(y0 + 1, height - 1);
const real ay = y - y0;
const real by = 1 - ay;
for (int j = 0; j < resized_width; ++j) {
const real x = j / img_scale_x;
const int x0 = (int)x;
const int x1 = MIN(x0 + 1, width - 1);
const real ax = x - x0;
const real bx = 1 - ax;
real B = 0, G = 0, R = 0;
if (ax > 0 && ay > 0) {
B += ax * ay * img[y1 * stride + x1 * 3 + 0];
G += ax * ay * img[y1 * stride + x1 * 3 + 1];
R += ax * ay * img[y1 * stride + x1 * 3 + 2];
}
if (ax > 0 && by > 0) {
B += ax * by * img[y0 * stride + x1 * 3 + 0];
G += ax * by * img[y0 * stride + x1 * 3 + 1];
R += ax * by * img[y0 * stride + x1 * 3 + 2];
}
if (bx > 0 && ay > 0) {
B += bx * ay * img[y1 * stride + x0 * 3 + 0];
G += bx * ay * img[y1 * stride + x0 * 3 + 1];
R += bx * ay * img[y1 * stride + x0 * 3 + 2];
}
if (bx > 0 && by > 0) {
B += bx * by * img[y0 * stride + x0 * 3 + 0];
G += bx * by * img[y0 * stride + x0 * 3 + 1];
R += bx * by * img[y0 * stride + x0 * 3 + 2];
}
p_inputB[i * resized_width + j] = B - gs_mean_blue;
p_inputG[i * resized_width + j] = G - gs_mean_green;
p_inputR[i * resized_width + j] = R - gs_mean_red;
/*
if (i == resized_height - 1) {
printf("%d %d: %d %d %d %d %f %f %f %f, %d %d %d %d %f %f %f, %f %f %f\n", i, j, y0, y1, x0, x1, ay, by, ax, bx,
img[y1 * stride + x1 * 3], img[y0 * stride + x1 * 3], img[y1 * stride + x0 * 3], img[y0 * stride + x0 * 3],
B, G, R, p_inputB[i * resized_width + j], p_inputG[i * resized_width + j], p_inputR[i * resized_width + j]);
}
*/
}
}
}
#endif
void img2input(const unsigned char* const img,
Tensor* const input3d,
Tensor* const img_info1d,
unsigned char* const temp_data,
const int height, const int width, const int stride)
{
static const real gs_max_size = 1000.0f;
static const real gs_base_size = 600.0f;
const int img_size_min = MIN(height, width);
const int img_size_max = MAX(height, width);
real img_scale = gs_base_size / img_size_min;
if (ROUND(img_scale * img_size_max) > gs_max_size) {
img_scale = gs_max_size / img_size_max;
}
const int gs_scale_base = 32;
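// Adjust the scale per axis so that the resized height and width come out as multiples of 32.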
const real img_scale_y
= (real)((int)(height * img_scale / gs_scale_base) * gs_scale_base)
/ height;
const real img_scale_x
= (real)((int)(width * img_scale / gs_scale_base) * gs_scale_base)
/ width;
const int resized_height = ROUND(height * img_scale_y);
const int resized_width = ROUND(width * img_scale_x);
const int n = img_info1d->num_items;
real* const p_img_info1d = img_info1d->data + n * 6;
p_img_info1d[0] = (real)resized_height;
p_img_info1d[1] = (real)resized_width;
p_img_info1d[2] = img_scale_y;
p_img_info1d[3] = img_scale_x;
p_img_info1d[4] = (real)height;
p_img_info1d[5] = (real)width;
#ifdef GPU
{
const int num_threads = 3 * resized_height * resized_width;
const int threads_per_block = 512;
const int num_blocks = DIV_THEN_CEIL(num_threads, threads_per_block);
cudaMemcpyAsync(temp_data, img,
height * width * 3 * sizeof(unsigned char),
cudaMemcpyHostToDevice);
bilinear_resize_gpu<<<num_blocks, threads_per_block>>>(
temp_data, input3d->data + input3d->start[n],
height, width, resized_height, resized_width,
img_scale_y, img_scale_x, stride);
}
#else
{
bilinear_resize_cpu(
img, input3d->data + input3d->start[n],
height, width, resized_height, resized_width,
img_scale_y, img_scale_x, stride);
}
#endif
const int input_size = 3 * resized_height * resized_width;
//printf("image size = %d x %d x 3 = %d\n", resized_height, resized_width, input_size);
input3d->shape[n][0] = 3;
input3d->shape[n][1] = resized_height;
input3d->shape[n][2] = resized_width;
input3d->start[n + 1] = input3d->start[n] + input_size;
++input3d->num_items;
img_info1d->shape[n][0] = 6;
++img_info1d->num_items;
}
void load_image(const char* const filename,
Tensor* const input3d,
Tensor* const img_info1d,
real* const temp_data)
{
cv::Mat image = cv::imread(filename);
if (!image.data) {
printf("[ERROR] Cannot open image: %s\n", filename);
}
const int height = image.rows;
const int width = image.cols;
const int stride = (int)image.step.p[0];
/*
printf("Image %s: %d x %d, stride=%d\n", filename, height, width, stride);
char path[1024];
sprintf(path, "params/%s.txt", filename + 35);
FILE* fp = fopen(path, "w");
for (int i = 0; i < height; ++i)
for (int j = 0; j < width; ++j)
for (int k = 0; k < 3; ++k)
fprintf(fp, "%d %d %d %d\n", i, j, k, image.data[i * width * 3 + j * 3 + k]);
fclose(fp);
*/
img2input(image.data, input3d, img_info1d, (unsigned char*)temp_data,
height, width, stride);
}
|
d9af5255f3afb81635de406caa23f987cf907059.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/impl/IVFInterleaved.cuh>
#include <faiss/gpu/impl/scan/IVFInterleavedImpl.cuh>
namespace faiss {
namespace gpu {
constexpr uint32_t kMaxUInt32 = std::numeric_limits<uint32_t>::max();
// Second-pass kernel to further k-select the results from the first pass across
// IVF lists and produce the final results
template <int ThreadsPerBlock, int NumWarpQ, int NumThreadQ>
__global__ void ivfInterleavedScan2(
Tensor<float, 3, true> distanceIn,
Tensor<int, 3, true> indicesIn,
Tensor<Index::idx_t, 2, true> listIds,
int k,
void** listIndices,
IndicesOptions opt,
bool dir,
Tensor<float, 2, true> distanceOut,
Tensor<Index::idx_t, 2, true> indicesOut) {
int queryId = blockIdx.x;
constexpr int kNumWarps = ThreadsPerBlock / kWarpSize;
__shared__ float smemK[kNumWarps * NumWarpQ];
__shared__ uint32_t smemV[kNumWarps * NumWarpQ];
// To avoid creating excessive specializations, we combine direction
// kernels, selecting for the smallest element. If `dir` is true, we negate
// all values being selected (so that we are selecting the largest element).
BlockSelect<
float,
uint32_t,
false,
Comparator<float>,
NumWarpQ,
NumThreadQ,
ThreadsPerBlock>
heap(kFloatMax, kMaxUInt32, smemK, smemV, k);
// nprobe x k
int num = distanceIn.getSize(1) * distanceIn.getSize(2);
auto distanceBase = distanceIn[queryId].data();
int limit = utils::roundDown(num, kWarpSize);
// This will keep our negation factor
float adj = dir ? -1 : 1;
int i = threadIdx.x;
for (; i < limit; i += blockDim.x) {
// We represent the index as (probe id)(k)
// Right now, both are limited to a maximum of 2048, but we will
// dedicate each to the high and low words of a uint32_t
static_assert(GPU_MAX_SELECTION_K <= 65536, "");
uint32_t curProbe = i / k;
uint32_t curK = i % k;
uint32_t index = (curProbe << 16) | (curK & (uint32_t)0xffff);
Index::idx_t listId = listIds[queryId][curProbe];
if (listId != -1) {
// Adjust the value we are selecting based on the sorting order
heap.addThreadQ(distanceBase[i] * adj, index);
}
heap.checkThreadQ();
}
// Handle warp divergence separately
if (i < num) {
uint32_t curProbe = i / k;
uint32_t curK = i % k;
uint32_t index = (curProbe << 16) | (curK & (uint32_t)0xffff);
Index::idx_t listId = listIds[queryId][curProbe];
if (listId != -1) {
heap.addThreadQ(distanceBase[i] * adj, index);
}
}
// Merge all final results
heap.reduce();
for (int i = threadIdx.x; i < k; i += blockDim.x) {
// Re-adjust the value we are selecting based on the sorting order
distanceOut[queryId][i] = smemK[i] * adj;
auto packedIndex = smemV[i];
// We need to remap to the user-provided indices
Index::idx_t index = -1;
// We may not have at least k values to return; in this function, max
// uint32 is our sentinel value
if (packedIndex != kMaxUInt32) {
uint32_t curProbe = packedIndex >> 16;
uint32_t curK = packedIndex & 0xffff;
Index::idx_t listId = listIds[queryId][curProbe];
int listOffset = indicesIn[queryId][curProbe][curK];
if (opt == INDICES_32_BIT) {
index = (Index::idx_t)((int*)listIndices[listId])[listOffset];
} else if (opt == INDICES_64_BIT) {
index = ((Index::idx_t*)listIndices[listId])[listOffset];
} else {
index = (listId << 32 | (Index::idx_t)listOffset);
}
}
indicesOut[queryId][i] = index;
}
}
void runIVFInterleavedScan2(
Tensor<float, 3, true>& distanceIn,
Tensor<int, 3, true>& indicesIn,
Tensor<Index::idx_t, 2, true>& listIds,
int k,
DeviceVector<void*>& listIndices,
IndicesOptions indicesOptions,
bool dir,
Tensor<float, 2, true>& distanceOut,
Tensor<Index::idx_t, 2, true>& indicesOut,
hipStream_t stream) {
#define IVF_SCAN_2(THREADS, NUM_WARP_Q, NUM_THREAD_Q) \
hipLaunchKernelGGL(( ivfInterleavedScan2<THREADS, NUM_WARP_Q, NUM_THREAD_Q>) \
, dim3(distanceIn.getSize(0)), dim3(THREADS), 0, stream, \
distanceIn, \
indicesIn, \
listIds, \
k, \
listIndices.data(), \
indicesOptions, \
dir, \
distanceOut, \
indicesOut)
if (k == 1) {
IVF_SCAN_2(128, 1, 1);
} else if (k <= 32) {
IVF_SCAN_2(128, 32, 2);
} else if (k <= 64) {
IVF_SCAN_2(128, 64, 3);
} else if (k <= 128) {
IVF_SCAN_2(128, 128, 3);
} else if (k <= 256) {
IVF_SCAN_2(128, 256, 4);
} else if (k <= 512) {
IVF_SCAN_2(128, 512, 8);
} else if (k <= 1024) {
IVF_SCAN_2(128, 1024, 8);
}
#if GPU_MAX_SELECTION_K >= 2048
else if (k <= 2048) {
IVF_SCAN_2(64, 2048, 8);
}
#endif
}
void runIVFInterleavedScan(
Tensor<float, 2, true>& queries,
Tensor<Index::idx_t, 2, true>& listIds,
DeviceVector<void*>& listData,
DeviceVector<void*>& listIndices,
IndicesOptions indicesOptions,
DeviceVector<int>& listLengths,
int k,
faiss::MetricType metric,
bool useResidual,
Tensor<float, 3, true>& residualBase,
GpuScalarQuantizer* scalarQ,
// output
Tensor<float, 2, true>& outDistances,
// output
Tensor<Index::idx_t, 2, true>& outIndices,
GpuResources* res) {
// caught for exceptions at a higher level
FAISS_ASSERT(k <= GPU_MAX_SELECTION_K);
if (k == 1) {
IVF_INTERLEAVED_CALL(1);
} else if (k <= 32) {
IVF_INTERLEAVED_CALL(32);
} else if (k <= 64) {
IVF_INTERLEAVED_CALL(64);
} else if (k <= 128) {
IVF_INTERLEAVED_CALL(128);
} else if (k <= 256) {
IVF_INTERLEAVED_CALL(256);
} else if (k <= 512) {
IVF_INTERLEAVED_CALL(512);
} else if (k <= 1024) {
IVF_INTERLEAVED_CALL(1024);
}
#if GPU_MAX_SELECTION_K >= 2048
else if (k <= 2048) {
IVF_INTERLEAVED_CALL(2048);
}
#endif
}
} // namespace gpu
} // namespace faiss
| d9af5255f3afb81635de406caa23f987cf907059.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/impl/IVFInterleaved.cuh>
#include <faiss/gpu/impl/scan/IVFInterleavedImpl.cuh>
namespace faiss {
namespace gpu {
constexpr uint32_t kMaxUInt32 = std::numeric_limits<uint32_t>::max();
// Second-pass kernel to further k-select the results from the first pass across
// IVF lists and produce the final results
template <int ThreadsPerBlock, int NumWarpQ, int NumThreadQ>
__global__ void ivfInterleavedScan2(
Tensor<float, 3, true> distanceIn,
Tensor<int, 3, true> indicesIn,
Tensor<Index::idx_t, 2, true> listIds,
int k,
void** listIndices,
IndicesOptions opt,
bool dir,
Tensor<float, 2, true> distanceOut,
Tensor<Index::idx_t, 2, true> indicesOut) {
int queryId = blockIdx.x;
constexpr int kNumWarps = ThreadsPerBlock / kWarpSize;
__shared__ float smemK[kNumWarps * NumWarpQ];
__shared__ uint32_t smemV[kNumWarps * NumWarpQ];
// To avoid creating excessive specializations, we combine direction
// kernels, selecting for the smallest element. If `dir` is true, we negate
// all values being selected (so that we are selecting the largest element).
BlockSelect<
float,
uint32_t,
false,
Comparator<float>,
NumWarpQ,
NumThreadQ,
ThreadsPerBlock>
heap(kFloatMax, kMaxUInt32, smemK, smemV, k);
// nprobe x k
int num = distanceIn.getSize(1) * distanceIn.getSize(2);
auto distanceBase = distanceIn[queryId].data();
int limit = utils::roundDown(num, kWarpSize);
// This will keep our negation factor
float adj = dir ? -1 : 1;
int i = threadIdx.x;
for (; i < limit; i += blockDim.x) {
// We represent the index as (probe id)(k)
// Right now, both are limited to a maximum of 2048, but we will
// dedicate each to the high and low words of a uint32_t
static_assert(GPU_MAX_SELECTION_K <= 65536, "");
uint32_t curProbe = i / k;
uint32_t curK = i % k;
uint32_t index = (curProbe << 16) | (curK & (uint32_t)0xffff);
Index::idx_t listId = listIds[queryId][curProbe];
if (listId != -1) {
// Adjust the value we are selecting based on the sorting order
heap.addThreadQ(distanceBase[i] * adj, index);
}
heap.checkThreadQ();
}
// Handle warp divergence separately
if (i < num) {
uint32_t curProbe = i / k;
uint32_t curK = i % k;
uint32_t index = (curProbe << 16) | (curK & (uint32_t)0xffff);
Index::idx_t listId = listIds[queryId][curProbe];
if (listId != -1) {
heap.addThreadQ(distanceBase[i] * adj, index);
}
}
// Merge all final results
heap.reduce();
for (int i = threadIdx.x; i < k; i += blockDim.x) {
// Re-adjust the value we are selecting based on the sorting order
distanceOut[queryId][i] = smemK[i] * adj;
auto packedIndex = smemV[i];
// We need to remap to the user-provided indices
Index::idx_t index = -1;
// We may not have at least k values to return; in this function, max
// uint32 is our sentinel value
if (packedIndex != kMaxUInt32) {
uint32_t curProbe = packedIndex >> 16;
uint32_t curK = packedIndex & 0xffff;
Index::idx_t listId = listIds[queryId][curProbe];
int listOffset = indicesIn[queryId][curProbe][curK];
if (opt == INDICES_32_BIT) {
index = (Index::idx_t)((int*)listIndices[listId])[listOffset];
} else if (opt == INDICES_64_BIT) {
index = ((Index::idx_t*)listIndices[listId])[listOffset];
} else {
index = (listId << 32 | (Index::idx_t)listOffset);
}
}
indicesOut[queryId][i] = index;
}
}
void runIVFInterleavedScan2(
Tensor<float, 3, true>& distanceIn,
Tensor<int, 3, true>& indicesIn,
Tensor<Index::idx_t, 2, true>& listIds,
int k,
DeviceVector<void*>& listIndices,
IndicesOptions indicesOptions,
bool dir,
Tensor<float, 2, true>& distanceOut,
Tensor<Index::idx_t, 2, true>& indicesOut,
cudaStream_t stream) {
#define IVF_SCAN_2(THREADS, NUM_WARP_Q, NUM_THREAD_Q) \
ivfInterleavedScan2<THREADS, NUM_WARP_Q, NUM_THREAD_Q> \
<<<distanceIn.getSize(0), THREADS, 0, stream>>>( \
distanceIn, \
indicesIn, \
listIds, \
k, \
listIndices.data(), \
indicesOptions, \
dir, \
distanceOut, \
indicesOut)
if (k == 1) {
IVF_SCAN_2(128, 1, 1);
} else if (k <= 32) {
IVF_SCAN_2(128, 32, 2);
} else if (k <= 64) {
IVF_SCAN_2(128, 64, 3);
} else if (k <= 128) {
IVF_SCAN_2(128, 128, 3);
} else if (k <= 256) {
IVF_SCAN_2(128, 256, 4);
} else if (k <= 512) {
IVF_SCAN_2(128, 512, 8);
} else if (k <= 1024) {
IVF_SCAN_2(128, 1024, 8);
}
#if GPU_MAX_SELECTION_K >= 2048
else if (k <= 2048) {
IVF_SCAN_2(64, 2048, 8);
}
#endif
}
void runIVFInterleavedScan(
Tensor<float, 2, true>& queries,
Tensor<Index::idx_t, 2, true>& listIds,
DeviceVector<void*>& listData,
DeviceVector<void*>& listIndices,
IndicesOptions indicesOptions,
DeviceVector<int>& listLengths,
int k,
faiss::MetricType metric,
bool useResidual,
Tensor<float, 3, true>& residualBase,
GpuScalarQuantizer* scalarQ,
// output
Tensor<float, 2, true>& outDistances,
// output
Tensor<Index::idx_t, 2, true>& outIndices,
GpuResources* res) {
// caught for exceptions at a higher level
FAISS_ASSERT(k <= GPU_MAX_SELECTION_K);
if (k == 1) {
IVF_INTERLEAVED_CALL(1);
} else if (k <= 32) {
IVF_INTERLEAVED_CALL(32);
} else if (k <= 64) {
IVF_INTERLEAVED_CALL(64);
} else if (k <= 128) {
IVF_INTERLEAVED_CALL(128);
} else if (k <= 256) {
IVF_INTERLEAVED_CALL(256);
} else if (k <= 512) {
IVF_INTERLEAVED_CALL(512);
} else if (k <= 1024) {
IVF_INTERLEAVED_CALL(1024);
}
#if GPU_MAX_SELECTION_K >= 2048
else if (k <= 2048) {
IVF_INTERLEAVED_CALL(2048);
}
#endif
}
} // namespace gpu
} // namespace faiss
|
b76d71b43e35bdf2fce920a3b0946ede21fb2b3d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _TIMER_
#include "hip/hip_runtime_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
__global__ void __kernel_j3d27pt0__(float * __restrict__ input, int L, int M, int N, float * __var_2__){
int FORMA_BLOCKDIM_Z = (int)(blockDim.z)- (1-(-1));
int FORMA_BLOCKDIM_Y = (int)(blockDim.y)- (2-(-2));
int FORMA_BLOCKDIM_X = (int)(blockDim.x)- (2-(-2));
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Z-(-4))*(FORMA_BLOCKDIM_Y-(-4))*(FORMA_BLOCKDIM_X-(-4))));
float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Z-(-2))*(FORMA_BLOCKDIM_Y-(-2))*(FORMA_BLOCKDIM_X-(-2))));
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + 1;
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + 1;
int __iter_2__;
__iter_2__ = (int)(blockIdx.z)*(int)(FORMA_BLOCKDIM_Z) + 1;
{
int __iter_3__;
__iter_3__ = (FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)) + (int)(threadIdx.z*1);
for (;__iter_3__+0 <= (FORMA_MIN((FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-1),(L-2))+1),(L-2))+1); __iter_3__ += (int)(blockDim.z*1)) {
int __iter_4__;
__iter_4__ = (FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)) + (int)(threadIdx.y*1);
if (__iter_4__ <= (FORMA_MIN((FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-2))+1),(M-2))+1)) {
int __iter_5__;
__iter_5__ = (FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)) + (int)(threadIdx.x);
if (__iter_5__ <= (FORMA_MIN((FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-2))+1),(N-2))+1)) {
int __temp_0__;
__temp_0__ = (__iter_3__+0);
int __temp_1__;
__temp_1__ = (__iter_4__+0);
int __temp_2__;
__temp_2__ = (__iter_5__+0);
__tilevar_1__[(__iter_5__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_4__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_3__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))] = input[__temp_2__+(N-0)*(__temp_1__+(M-0)*(__temp_0__))];
}
}
}
}
__syncthreads();
{
int __iter_6__;
__iter_6__ = FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1) + (int)(threadIdx.z*1);
if (__iter_6__ <= FORMA_MIN((FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-1),(L-2))+1),(L-2))) {
int __iter_7__;
__iter_7__ = FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1) + (int)(threadIdx.y*1);
if (__iter_7__ <= FORMA_MIN((FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-2))+1),(M-2))) {
int __iter_8__;
__iter_8__ = FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1) + (int)(threadIdx.x);
if (__iter_8__ <= FORMA_MIN((FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-2))+1),(N-2))) {
float __temp_3__;
__temp_3__ = (0.500000 * __tilevar_1__[(__iter_8__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_4__;
__temp_4__ = (0.700000 * __tilevar_1__[(__iter_8__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_5__;
__temp_5__ = (__temp_3__ + __temp_4__);
float __temp_6__;
__temp_6__ = (0.900000 * __tilevar_1__[(__iter_8__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_7__;
__temp_7__ = (__temp_5__ + __temp_6__);
float __temp_8__;
__temp_8__ = (1.200000 * __tilevar_1__[(__iter_8__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_9__;
__temp_9__ = (__temp_7__ + __temp_8__);
float __temp_10__;
__temp_10__ = (1.500000 * __tilevar_1__[(__iter_8__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_11__;
__temp_11__ = (__temp_9__ + __temp_10__);
float __temp_12__;
__temp_12__ = (1.200000 * __tilevar_1__[(__iter_8__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_13__;
__temp_13__ = (__temp_11__ + __temp_12__);
float __temp_14__;
__temp_14__ = (0.900000 * __tilevar_1__[(__iter_8__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_15__;
__temp_15__ = (__temp_13__ + __temp_14__);
float __temp_16__;
__temp_16__ = (0.700000 * __tilevar_1__[(__iter_8__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_17__;
__temp_17__ = (__temp_15__ + __temp_16__);
float __temp_18__;
__temp_18__ = (0.500000 * __tilevar_1__[(__iter_8__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_19__;
__temp_19__ = (__temp_17__ + __temp_18__);
float __temp_20__;
__temp_20__ = (0.500000 * __tilevar_1__[(__iter_8__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_21__;
__temp_21__ = (__temp_19__ + __temp_20__);
float __temp_22__;
__temp_22__ = (0.700000 * __tilevar_1__[(__iter_8__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_23__;
__temp_23__ = (__temp_21__ + __temp_22__);
float __temp_24__;
__temp_24__ = (0.900000 * __tilevar_1__[(__iter_8__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_25__;
__temp_25__ = (__temp_23__ + __temp_24__);
float __temp_26__;
__temp_26__ = (1.200000 * __tilevar_1__[(__iter_8__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_27__;
__temp_27__ = (__temp_25__ + __temp_26__);
float __temp_28__;
__temp_28__ = (1.500000 * __tilevar_1__[(__iter_8__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_29__;
__temp_29__ = (__temp_27__ + __temp_28__);
float __temp_30__;
__temp_30__ = (1.200000 * __tilevar_1__[(__iter_8__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_31__;
__temp_31__ = (__temp_29__ + __temp_30__);
float __temp_32__;
__temp_32__ = (0.900000 * __tilevar_1__[(__iter_8__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_33__;
__temp_33__ = (__temp_31__ + __temp_32__);
float __temp_34__;
__temp_34__ = (0.700000 * __tilevar_1__[(__iter_8__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_35__;
__temp_35__ = (__temp_33__ + __temp_34__);
float __temp_36__;
__temp_36__ = (0.500000 * __tilevar_1__[(__iter_8__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_37__;
__temp_37__ = (__temp_35__ + __temp_36__);
float __temp_38__;
__temp_38__ = (0.500000 * __tilevar_1__[(__iter_8__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_39__;
__temp_39__ = (__temp_37__ + __temp_38__);
float __temp_40__;
__temp_40__ = (0.700000 * __tilevar_1__[(__iter_8__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_41__;
__temp_41__ = (__temp_39__ + __temp_40__);
float __temp_42__;
__temp_42__ = (0.900000 * __tilevar_1__[(__iter_8__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_43__;
__temp_43__ = (__temp_41__ + __temp_42__);
float __temp_44__;
__temp_44__ = (1.200000 * __tilevar_1__[(__iter_8__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_45__;
__temp_45__ = (__temp_43__ + __temp_44__);
float __temp_46__;
__temp_46__ = (1.500000 * __tilevar_1__[(__iter_8__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_47__;
__temp_47__ = (__temp_45__ + __temp_46__);
float __temp_48__;
__temp_48__ = (1.200000 * __tilevar_1__[(__iter_8__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_49__;
__temp_49__ = (__temp_47__ + __temp_48__);
float __temp_50__;
__temp_50__ = (0.900000 * __tilevar_1__[(__iter_8__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_51__;
__temp_51__ = (__temp_49__ + __temp_50__);
float __temp_52__;
__temp_52__ = (0.700000 * __tilevar_1__[(__iter_8__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_53__;
__temp_53__ = (__temp_51__ + __temp_52__);
float __temp_54__;
__temp_54__ = (0.500000 * __tilevar_1__[(__iter_8__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_55__;
__temp_55__ = (__temp_53__ + __temp_54__);
float __temp_56__;
__temp_56__ = (__temp_55__ / 159);
__tilevar_0__[(__iter_8__+0)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_7__+0)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_6__+0)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))] = __temp_56__;
}
}
}
}
__syncthreads();
{
int __iter_9__;
__iter_9__ = FORMA_MAX(__iter_2__,1) + (int)(threadIdx.z*1);
if (__iter_9__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-1),(L-2))) {
int __iter_10__;
__iter_10__ = FORMA_MAX(__iter_1__,1) + (int)(threadIdx.y*1);
if (__iter_10__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-2))) {
int __iter_11__;
__iter_11__ = FORMA_MAX(__iter_0__,1) + (int)(threadIdx.x);
if (__iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-2))) {
float __temp_57__;
__temp_57__ = (0.500000 * __tilevar_0__[(__iter_11__+0)+(-1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(-1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(-1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_58__;
__temp_58__ = (0.700000 * __tilevar_0__[(__iter_11__+0)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(-1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(-1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_59__;
__temp_59__ = (__temp_57__ + __temp_58__);
float __temp_60__;
__temp_60__ = (0.900000 * __tilevar_0__[(__iter_11__+0)+(1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(-1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(-1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_61__;
__temp_61__ = (__temp_59__ + __temp_60__);
float __temp_62__;
__temp_62__ = (1.200000 * __tilevar_0__[(__iter_11__+0)+(-1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(-1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_63__;
__temp_63__ = (__temp_61__ + __temp_62__);
float __temp_64__;
__temp_64__ = (1.500000 * __tilevar_0__[(__iter_11__+0)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(-1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_65__;
__temp_65__ = (__temp_63__ + __temp_64__);
float __temp_66__;
__temp_66__ = (1.200000 * __tilevar_0__[(__iter_11__+0)+(1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(-1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_67__;
__temp_67__ = (__temp_65__ + __temp_66__);
float __temp_68__;
__temp_68__ = (0.900000 * __tilevar_0__[(__iter_11__+0)+(-1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(-1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_69__;
__temp_69__ = (__temp_67__ + __temp_68__);
float __temp_70__;
__temp_70__ = (0.700000 * __tilevar_0__[(__iter_11__+0)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(-1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_71__;
__temp_71__ = (__temp_69__ + __temp_70__);
float __temp_72__;
__temp_72__ = (0.500000 * __tilevar_0__[(__iter_11__+0)+(1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(-1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_73__;
__temp_73__ = (__temp_71__ + __temp_72__);
float __temp_74__;
__temp_74__ = (0.500000 * __tilevar_0__[(__iter_11__+0)+(-1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(-1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_75__;
__temp_75__ = (__temp_73__ + __temp_74__);
float __temp_76__;
__temp_76__ = (0.700000 * __tilevar_0__[(__iter_11__+0)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(-1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_77__;
__temp_77__ = (__temp_75__ + __temp_76__);
float __temp_78__;
__temp_78__ = (0.900000 * __tilevar_0__[(__iter_11__+0)+(1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(-1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_79__;
__temp_79__ = (__temp_77__ + __temp_78__);
float __temp_80__;
__temp_80__ = (1.200000 * __tilevar_0__[(__iter_11__+0)+(-1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_81__;
__temp_81__ = (__temp_79__ + __temp_80__);
float __temp_82__;
__temp_82__ = (1.500000 * __tilevar_0__[(__iter_11__+0)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_83__;
__temp_83__ = (__temp_81__ + __temp_82__);
float __temp_84__;
__temp_84__ = (1.200000 * __tilevar_0__[(__iter_11__+0)+(1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_85__;
__temp_85__ = (__temp_83__ + __temp_84__);
float __temp_86__;
__temp_86__ = (0.900000 * __tilevar_0__[(__iter_11__+0)+(-1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_87__;
__temp_87__ = (__temp_85__ + __temp_86__);
float __temp_88__;
__temp_88__ = (0.700000 * __tilevar_0__[(__iter_11__+0)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_89__;
__temp_89__ = (__temp_87__ + __temp_88__);
float __temp_90__;
__temp_90__ = (0.500000 * __tilevar_0__[(__iter_11__+0)+(1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_91__;
__temp_91__ = (__temp_89__ + __temp_90__);
float __temp_92__;
__temp_92__ = (0.500000 * __tilevar_0__[(__iter_11__+0)+(-1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(-1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_93__;
__temp_93__ = (__temp_91__ + __temp_92__);
float __temp_94__;
__temp_94__ = (0.700000 * __tilevar_0__[(__iter_11__+0)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(-1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_95__;
__temp_95__ = (__temp_93__ + __temp_94__);
float __temp_96__;
__temp_96__ = (0.900000 * __tilevar_0__[(__iter_11__+0)+(1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(-1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_97__;
__temp_97__ = (__temp_95__ + __temp_96__);
float __temp_98__;
__temp_98__ = (1.200000 * __tilevar_0__[(__iter_11__+0)+(-1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_99__;
__temp_99__ = (__temp_97__ + __temp_98__);
float __temp_100__;
__temp_100__ = (1.500000 * __tilevar_0__[(__iter_11__+0)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_101__;
__temp_101__ = (__temp_99__ + __temp_100__);
float __temp_102__;
__temp_102__ = (1.200000 * __tilevar_0__[(__iter_11__+0)+(1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_103__;
__temp_103__ = (__temp_101__ + __temp_102__);
float __temp_104__;
__temp_104__ = (0.900000 * __tilevar_0__[(__iter_11__+0)+(-1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_105__;
__temp_105__ = (__temp_103__ + __temp_104__);
float __temp_106__;
__temp_106__ = (0.700000 * __tilevar_0__[(__iter_11__+0)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_107__;
__temp_107__ = (__temp_105__ + __temp_106__);
float __temp_108__;
__temp_108__ = (0.500000 * __tilevar_0__[(__iter_11__+0)+(1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_109__;
__temp_109__ = (__temp_107__ + __temp_108__);
float __temp_110__;
__temp_110__ = (__temp_109__ / 159);
__var_2__[(__iter_11__+0)+(N-0)*((__iter_10__+0)+(M-0)*((__iter_9__+0)))] = __temp_110__;
}
}
}
}
}
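/* Computes the dynamic shared-memory footprint (in bytes) that __kernel_j3d27pt0__ requests
   for a given block size: the sum of its two tile buffers (__tilevar_1__ and __tilevar_0__ in
   the kernel), each padded for the stencil halo. Used by the host code below to shrink the
   block until the tiles fit the device limit. */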
int __blockSizeToSMemSize___kernel_j3d27pt0__(dim3 blockDim){
int FORMA_BLOCKDIM_Z = (int)(blockDim.z)- (1-(-1));
int FORMA_BLOCKDIM_Y = (int)(blockDim.y)- (2-(-2));
int FORMA_BLOCKDIM_X = (int)(blockDim.x)- (2-(-2));
int SMemSize = 0;
SMemSize += sizeof(float)*(((FORMA_BLOCKDIM_Z-(-4))*(FORMA_BLOCKDIM_Y-(-4))*(FORMA_BLOCKDIM_X-(-4))));
SMemSize += sizeof(float)*(((FORMA_BLOCKDIM_Z-(-2))*(FORMA_BLOCKDIM_Y-(-2))*(FORMA_BLOCKDIM_X-(-2))));
return SMemSize;
}
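/* Second device kernel: same structure as __kernel_j3d27pt0__, but it reads the intermediate
   array __var_2__ produced by the first kernel. Each block stages a haloed tile in shared
   memory (__tilevar_3__), applies the weighted 27-point average (neighbours scaled by
   0.5/0.7/0.9/1.2/1.5 and the sum divided by 159) into __tilevar_2__, applies the same
   average once more, and writes the result to __var_1__. */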
__global__ void __kernel_j3d27pt1__(float * __restrict__ __var_2__, int L, int M, int N, float * __var_1__){
int FORMA_BLOCKDIM_Z = (int)(blockDim.z)- (1-(-1));
int FORMA_BLOCKDIM_Y = (int)(blockDim.y)- (2-(-2));
int FORMA_BLOCKDIM_X = (int)(blockDim.x)- (2-(-2));
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float* __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Z-(-4))*(FORMA_BLOCKDIM_Y-(-4))*(FORMA_BLOCKDIM_X-(-4))));
float* __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Z-(-2))*(FORMA_BLOCKDIM_Y-(-2))*(FORMA_BLOCKDIM_X-(-2))));
int __iter_12__;
__iter_12__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + 1;
int __iter_13__;
__iter_13__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + 1;
int __iter_14__;
__iter_14__ = (int)(blockIdx.z)*(int)(FORMA_BLOCKDIM_Z) + 1;
{
int __iter_15__;
__iter_15__ = (FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)) + (int)(threadIdx.z*1);
for (;__iter_15__+0 <= (FORMA_MIN((FORMA_MIN(((__iter_14__+FORMA_BLOCKDIM_Z)-1),(L-2))+1),(L-2))+1); __iter_15__ += (int)(blockDim.z*1)) {
int __iter_16__;
__iter_16__ = (FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)) + (int)(threadIdx.y*1);
if (__iter_16__ <= (FORMA_MIN((FORMA_MIN(((__iter_13__+FORMA_BLOCKDIM_Y)-1),(M-2))+1),(M-2))+1)) {
int __iter_17__;
__iter_17__ = (FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)) + (int)(threadIdx.x);
if (__iter_17__ <= (FORMA_MIN((FORMA_MIN(((__iter_12__+FORMA_BLOCKDIM_X)-1),(N-2))+1),(N-2))+1)) {
int __temp_111__;
__temp_111__ = (__iter_15__+0);
int __temp_112__;
__temp_112__ = (__iter_16__+0);
int __temp_113__;
__temp_113__ = (__iter_17__+0);
__tilevar_3__[(__iter_17__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_16__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_15__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))] = __var_2__[__temp_113__+(N-0)*(__temp_112__+(M-0)*(__temp_111__))];
}
}
}
}
__syncthreads();
{
int __iter_18__;
__iter_18__ = FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1) + (int)(threadIdx.z*1);
if (__iter_18__ <= FORMA_MIN((FORMA_MIN(((__iter_14__+FORMA_BLOCKDIM_Z)-1),(L-2))+1),(L-2))) {
int __iter_19__;
__iter_19__ = FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1) + (int)(threadIdx.y*1);
if (__iter_19__ <= FORMA_MIN((FORMA_MIN(((__iter_13__+FORMA_BLOCKDIM_Y)-1),(M-2))+1),(M-2))) {
int __iter_20__;
__iter_20__ = FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1) + (int)(threadIdx.x);
if (__iter_20__ <= FORMA_MIN((FORMA_MIN(((__iter_12__+FORMA_BLOCKDIM_X)-1),(N-2))+1),(N-2))) {
float __temp_114__;
__temp_114__ = (0.500000 * __tilevar_3__[(__iter_20__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_115__;
__temp_115__ = (0.700000 * __tilevar_3__[(__iter_20__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_116__;
__temp_116__ = (__temp_114__ + __temp_115__);
float __temp_117__;
__temp_117__ = (0.900000 * __tilevar_3__[(__iter_20__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_118__;
__temp_118__ = (__temp_116__ + __temp_117__);
float __temp_119__;
__temp_119__ = (1.200000 * __tilevar_3__[(__iter_20__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_120__;
__temp_120__ = (__temp_118__ + __temp_119__);
float __temp_121__;
__temp_121__ = (1.500000 * __tilevar_3__[(__iter_20__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_122__;
__temp_122__ = (__temp_120__ + __temp_121__);
float __temp_123__;
__temp_123__ = (1.200000 * __tilevar_3__[(__iter_20__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_124__;
__temp_124__ = (__temp_122__ + __temp_123__);
float __temp_125__;
__temp_125__ = (0.900000 * __tilevar_3__[(__iter_20__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_126__;
__temp_126__ = (__temp_124__ + __temp_125__);
float __temp_127__;
__temp_127__ = (0.700000 * __tilevar_3__[(__iter_20__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_128__;
__temp_128__ = (__temp_126__ + __temp_127__);
float __temp_129__;
__temp_129__ = (0.500000 * __tilevar_3__[(__iter_20__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_130__;
__temp_130__ = (__temp_128__ + __temp_129__);
float __temp_131__;
__temp_131__ = (0.500000 * __tilevar_3__[(__iter_20__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_132__;
__temp_132__ = (__temp_130__ + __temp_131__);
float __temp_133__;
__temp_133__ = (0.700000 * __tilevar_3__[(__iter_20__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_134__;
__temp_134__ = (__temp_132__ + __temp_133__);
float __temp_135__;
__temp_135__ = (0.900000 * __tilevar_3__[(__iter_20__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_136__;
__temp_136__ = (__temp_134__ + __temp_135__);
float __temp_137__;
__temp_137__ = (1.200000 * __tilevar_3__[(__iter_20__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_138__;
__temp_138__ = (__temp_136__ + __temp_137__);
float __temp_139__;
__temp_139__ = (1.500000 * __tilevar_3__[(__iter_20__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_140__;
__temp_140__ = (__temp_138__ + __temp_139__);
float __temp_141__;
__temp_141__ = (1.200000 * __tilevar_3__[(__iter_20__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_142__;
__temp_142__ = (__temp_140__ + __temp_141__);
float __temp_143__;
__temp_143__ = (0.900000 * __tilevar_3__[(__iter_20__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_144__;
__temp_144__ = (__temp_142__ + __temp_143__);
float __temp_145__;
__temp_145__ = (0.700000 * __tilevar_3__[(__iter_20__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_146__;
__temp_146__ = (__temp_144__ + __temp_145__);
float __temp_147__;
__temp_147__ = (0.500000 * __tilevar_3__[(__iter_20__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_148__;
__temp_148__ = (__temp_146__ + __temp_147__);
float __temp_149__;
__temp_149__ = (0.500000 * __tilevar_3__[(__iter_20__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_150__;
__temp_150__ = (__temp_148__ + __temp_149__);
float __temp_151__;
__temp_151__ = (0.700000 * __tilevar_3__[(__iter_20__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_152__;
__temp_152__ = (__temp_150__ + __temp_151__);
float __temp_153__;
__temp_153__ = (0.900000 * __tilevar_3__[(__iter_20__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_154__;
__temp_154__ = (__temp_152__ + __temp_153__);
float __temp_155__;
__temp_155__ = (1.200000 * __tilevar_3__[(__iter_20__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_156__;
__temp_156__ = (__temp_154__ + __temp_155__);
float __temp_157__;
__temp_157__ = (1.500000 * __tilevar_3__[(__iter_20__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_158__;
__temp_158__ = (__temp_156__ + __temp_157__);
float __temp_159__;
__temp_159__ = (1.200000 * __tilevar_3__[(__iter_20__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_160__;
__temp_160__ = (__temp_158__ + __temp_159__);
float __temp_161__;
__temp_161__ = (0.900000 * __tilevar_3__[(__iter_20__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_162__;
__temp_162__ = (__temp_160__ + __temp_161__);
float __temp_163__;
__temp_163__ = (0.700000 * __tilevar_3__[(__iter_20__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_164__;
__temp_164__ = (__temp_162__ + __temp_163__);
float __temp_165__;
__temp_165__ = (0.500000 * __tilevar_3__[(__iter_20__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_166__;
__temp_166__ = (__temp_164__ + __temp_165__);
float __temp_167__;
__temp_167__ = (__temp_166__ / 159);
__tilevar_2__[(__iter_20__+0)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_19__+0)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_18__+0)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))] = __temp_167__;
}
}
}
}
__syncthreads();
{
int __iter_21__;
__iter_21__ = FORMA_MAX(__iter_14__,1) + (int)(threadIdx.z*1);
if (__iter_21__ <= FORMA_MIN(((__iter_14__+FORMA_BLOCKDIM_Z)-1),(L-2))) {
int __iter_22__;
__iter_22__ = FORMA_MAX(__iter_13__,1) + (int)(threadIdx.y*1);
if (__iter_22__ <= FORMA_MIN(((__iter_13__+FORMA_BLOCKDIM_Y)-1),(M-2))) {
int __iter_23__;
__iter_23__ = FORMA_MAX(__iter_12__,1) + (int)(threadIdx.x);
if (__iter_23__ <= FORMA_MIN(((__iter_12__+FORMA_BLOCKDIM_X)-1),(N-2))) {
float __temp_168__;
__temp_168__ = (0.500000 * __tilevar_2__[(__iter_23__+0)+(-1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(-1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(-1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_169__;
__temp_169__ = (0.700000 * __tilevar_2__[(__iter_23__+0)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(-1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(-1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_170__;
__temp_170__ = (__temp_168__ + __temp_169__);
float __temp_171__;
__temp_171__ = (0.900000 * __tilevar_2__[(__iter_23__+0)+(1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(-1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(-1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_172__;
__temp_172__ = (__temp_170__ + __temp_171__);
float __temp_173__;
__temp_173__ = (1.200000 * __tilevar_2__[(__iter_23__+0)+(-1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(-1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_174__;
__temp_174__ = (__temp_172__ + __temp_173__);
float __temp_175__;
__temp_175__ = (1.500000 * __tilevar_2__[(__iter_23__+0)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(-1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_176__;
__temp_176__ = (__temp_174__ + __temp_175__);
float __temp_177__;
__temp_177__ = (1.200000 * __tilevar_2__[(__iter_23__+0)+(1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(-1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_178__;
__temp_178__ = (__temp_176__ + __temp_177__);
float __temp_179__;
__temp_179__ = (0.900000 * __tilevar_2__[(__iter_23__+0)+(-1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(-1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_180__;
__temp_180__ = (__temp_178__ + __temp_179__);
float __temp_181__;
__temp_181__ = (0.700000 * __tilevar_2__[(__iter_23__+0)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(-1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_182__;
__temp_182__ = (__temp_180__ + __temp_181__);
float __temp_183__;
__temp_183__ = (0.500000 * __tilevar_2__[(__iter_23__+0)+(1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(-1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_184__;
__temp_184__ = (__temp_182__ + __temp_183__);
float __temp_185__;
__temp_185__ = (0.500000 * __tilevar_2__[(__iter_23__+0)+(-1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(-1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_186__;
__temp_186__ = (__temp_184__ + __temp_185__);
float __temp_187__;
__temp_187__ = (0.700000 * __tilevar_2__[(__iter_23__+0)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(-1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_188__;
__temp_188__ = (__temp_186__ + __temp_187__);
float __temp_189__;
__temp_189__ = (0.900000 * __tilevar_2__[(__iter_23__+0)+(1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(-1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_190__;
__temp_190__ = (__temp_188__ + __temp_189__);
float __temp_191__;
__temp_191__ = (1.200000 * __tilevar_2__[(__iter_23__+0)+(-1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_192__;
__temp_192__ = (__temp_190__ + __temp_191__);
float __temp_193__;
__temp_193__ = (1.500000 * __tilevar_2__[(__iter_23__+0)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_194__;
__temp_194__ = (__temp_192__ + __temp_193__);
float __temp_195__;
__temp_195__ = (1.200000 * __tilevar_2__[(__iter_23__+0)+(1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_196__;
__temp_196__ = (__temp_194__ + __temp_195__);
float __temp_197__;
__temp_197__ = (0.900000 * __tilevar_2__[(__iter_23__+0)+(-1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_198__;
__temp_198__ = (__temp_196__ + __temp_197__);
float __temp_199__;
__temp_199__ = (0.700000 * __tilevar_2__[(__iter_23__+0)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_200__;
__temp_200__ = (__temp_198__ + __temp_199__);
float __temp_201__;
__temp_201__ = (0.500000 * __tilevar_2__[(__iter_23__+0)+(1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_202__;
__temp_202__ = (__temp_200__ + __temp_201__);
float __temp_203__;
__temp_203__ = (0.500000 * __tilevar_2__[(__iter_23__+0)+(-1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(-1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_204__;
__temp_204__ = (__temp_202__ + __temp_203__);
float __temp_205__;
__temp_205__ = (0.700000 * __tilevar_2__[(__iter_23__+0)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(-1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_206__;
__temp_206__ = (__temp_204__ + __temp_205__);
float __temp_207__;
__temp_207__ = (0.900000 * __tilevar_2__[(__iter_23__+0)+(1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(-1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_208__;
__temp_208__ = (__temp_206__ + __temp_207__);
float __temp_209__;
__temp_209__ = (1.200000 * __tilevar_2__[(__iter_23__+0)+(-1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_210__;
__temp_210__ = (__temp_208__ + __temp_209__);
float __temp_211__;
__temp_211__ = (1.500000 * __tilevar_2__[(__iter_23__+0)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_212__;
__temp_212__ = (__temp_210__ + __temp_211__);
float __temp_213__;
__temp_213__ = (1.200000 * __tilevar_2__[(__iter_23__+0)+(1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_214__;
__temp_214__ = (__temp_212__ + __temp_213__);
float __temp_215__;
__temp_215__ = (0.900000 * __tilevar_2__[(__iter_23__+0)+(-1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_216__;
__temp_216__ = (__temp_214__ + __temp_215__);
float __temp_217__;
__temp_217__ = (0.700000 * __tilevar_2__[(__iter_23__+0)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_218__;
__temp_218__ = (__temp_216__ + __temp_217__);
float __temp_219__;
__temp_219__ = (0.500000 * __tilevar_2__[(__iter_23__+0)+(1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_220__;
__temp_220__ = (__temp_218__ + __temp_219__);
float __temp_221__;
__temp_221__ = (__temp_220__ / 159);
__var_1__[(__iter_23__+0)+(N-0)*((__iter_22__+0)+(M-0)*((__iter_21__+0)))] = __temp_221__;
}
}
}
}
}
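/* Shared-memory size helper for __kernel_j3d27pt1__; same tile layout as the helper for
   __kernel_j3d27pt0__ above. */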
int __blockSizeToSMemSize___kernel_j3d27pt1__(dim3 blockDim){
int FORMA_BLOCKDIM_Z = (int)(blockDim.z)- (1-(-1));
int FORMA_BLOCKDIM_Y = (int)(blockDim.y)- (2-(-2));
int FORMA_BLOCKDIM_X = (int)(blockDim.x)- (2-(-2));
int SMemSize = 0;
SMemSize += sizeof(float)*(((FORMA_BLOCKDIM_Z-(-4))*(FORMA_BLOCKDIM_Y-(-4))*(FORMA_BLOCKDIM_X-(-4))));
SMemSize += sizeof(float)*(((FORMA_BLOCKDIM_Z-(-2))*(FORMA_BLOCKDIM_Y-(-2))*(FORMA_BLOCKDIM_X-(-2))));
return SMemSize;
}
/*Device code End */
/* Host Code Begin */
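/* Host entry point. Allocates the device buffers, copies h_input to the device unless it is
   already device memory, launches the two stencil kernels back to back
   (input -> __var_2__ -> __var_1__), and copies the final result into __var_0__. */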
extern "C" void j3d27pt(float * h_input, int L, int M, int N, float * __var_0__){
/* Host allocation Begin */
float * input;
hipMalloc(&input,sizeof(float)*((L-0)*(M-0)*(N-0)));
Check_CUDA_Error("Allocation Error!! : input\n");
hipPointerAttribute_t ptrAttrib_h_input;
hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice;
if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess)
if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice)
memcpy_kind_h_input = hipMemcpyDeviceToDevice;
hipGetLastError();
if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){
hipMemcpy(input,h_input,sizeof(float)*((L-0)*(M-0)*(N-0)), memcpy_kind_h_input);
}
float * __var_1__;
hipMalloc(&__var_1__,sizeof(float)*((L-0)*(M-0)*(N-0)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
float * __var_2__;
hipMalloc(&__var_2__,sizeof(float)*((L-0)*(M-0)*(N-0)));
Check_CUDA_Error("Allocation Error!! : __var_2__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
hipEvent_t _forma_timer_start_,_forma_timer_stop_;
hipEventCreate(&_forma_timer_start_);
hipEventCreate(&_forma_timer_stop_);
hipEventRecord(_forma_timer_start_,0);
#endif
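/* Launch configuration for the first kernel. The interior problem size is (N-2) x (M-2) x (L-2).
   The occupancy-based block-size computation is left commented out and the block is hard-coded
   to 16 x 8 x 8 threads; the halo cells (2 per side in x/y, 1 per side in z) are subtracted
   again further down when the grid is sized. */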
int __size_0___kernel_j3d27pt0__ = ((N-2) - 1 ) + 1;
int __size_1___kernel_j3d27pt0__ = ((M-2) - 1 ) + 1;
int __size_2___kernel_j3d27pt0__ = ((L-2) - 1 ) + 1;
int __max_occupancy_blocksize___kernel_j3d27pt0__;
int _max_occupancy_gridsize___kernel_j3d27pt0__;
hipOccupancyMaxPotentialBlockSize(&_max_occupancy_gridsize___kernel_j3d27pt0__,&__max_occupancy_blocksize___kernel_j3d27pt0__,(const void*)__kernel_j3d27pt0__,0,0);
int __max_occupancy_blocksize___kernel_j3d27pt0___0 = pow((float)__max_occupancy_blocksize___kernel_j3d27pt0__, (float)(1.0/(float)3));
__max_occupancy_blocksize___kernel_j3d27pt0___0 = FORMA_MAX(__max_occupancy_blocksize___kernel_j3d27pt0___0/32, 1)*32;
int __block_0___kernel_j3d27pt0__ = 16;//FORMA_MAX(FORMA_MIN(FORMA_MIN(__max_occupancy_blocksize___kernel_j3d27pt0___0,FORMA_MAX((__size_0___kernel_j3d27pt0__/1)/32,1)*32),FORMA_MAX_BLOCKDIM_0),5);
__max_occupancy_blocksize___kernel_j3d27pt0__ /= __block_0___kernel_j3d27pt0__;
int __max_occupancy_blocksize___kernel_j3d27pt0___1 = pow((float)__max_occupancy_blocksize___kernel_j3d27pt0__, (float)(1.0/(float)2));
int __block_1___kernel_j3d27pt0__ = 8;//FORMA_MAX(FORMA_MIN(FORMA_MIN(__max_occupancy_blocksize___kernel_j3d27pt0___1,__size_1___kernel_j3d27pt0__/1),FORMA_MAX_BLOCKDIM_1),5);
__max_occupancy_blocksize___kernel_j3d27pt0__ /= __block_1___kernel_j3d27pt0__;
int __max_occupancy_blocksize___kernel_j3d27pt0___2 = __max_occupancy_blocksize___kernel_j3d27pt0__;
int __block_2___kernel_j3d27pt0__ = 8;//FORMA_MAX(FORMA_MIN(FORMA_MIN(__max_occupancy_blocksize___kernel_j3d27pt0___2,__size_2___kernel_j3d27pt0__/1),FORMA_MAX_BLOCKDIM_2),3);
__max_occupancy_blocksize___kernel_j3d27pt0__ /= __block_2___kernel_j3d27pt0__;
dim3 __blockConfig___kernel_j3d27pt0__(__block_0___kernel_j3d27pt0__,__block_1___kernel_j3d27pt0__,__block_2___kernel_j3d27pt0__);
int __SMemSize___kernel_j3d27pt0__ = 0;
__SMemSize___kernel_j3d27pt0__ = __blockSizeToSMemSize___kernel_j3d27pt0__(__blockConfig___kernel_j3d27pt0__);
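/* If the two shared-memory tiles do not fit in the device's per-block limit, halve the block
   extents (z first, then y, then x) until they do, respecting the minimum extents required by
   the halos. */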
while( __SMemSize___kernel_j3d27pt0__ > __FORMA_MAX_SHARED_MEM__){
if( __blockConfig___kernel_j3d27pt0__.z/2 > 3)
__blockConfig___kernel_j3d27pt0__.z /= 2;
__SMemSize___kernel_j3d27pt0__ = __blockSizeToSMemSize___kernel_j3d27pt0__(__blockConfig___kernel_j3d27pt0__);
if( __SMemSize___kernel_j3d27pt0__ <= __FORMA_MAX_SHARED_MEM__)
break;
if( __blockConfig___kernel_j3d27pt0__.y/2 > 5)
__blockConfig___kernel_j3d27pt0__.y /= 2;
__SMemSize___kernel_j3d27pt0__ = __blockSizeToSMemSize___kernel_j3d27pt0__(__blockConfig___kernel_j3d27pt0__);
if( __SMemSize___kernel_j3d27pt0__ <= __FORMA_MAX_SHARED_MEM__)
break;
if( __blockConfig___kernel_j3d27pt0__.x/2 > FORMA_MIN(32,5))
__blockConfig___kernel_j3d27pt0__.x /= 2;
__SMemSize___kernel_j3d27pt0__ = __blockSizeToSMemSize___kernel_j3d27pt0__(__blockConfig___kernel_j3d27pt0__);
}
__block_0___kernel_j3d27pt0__ = __blockConfig___kernel_j3d27pt0__.x-(2-(-2));
__block_1___kernel_j3d27pt0__ = __blockConfig___kernel_j3d27pt0__.y-(2-(-2));
__block_2___kernel_j3d27pt0__ = __blockConfig___kernel_j3d27pt0__.z-(1-(-1));
int __grid_0___kernel_j3d27pt0__ = FORMA_CEIL(__size_0___kernel_j3d27pt0__,__block_0___kernel_j3d27pt0__*1);
int __grid_1___kernel_j3d27pt0__ = FORMA_CEIL(__size_1___kernel_j3d27pt0__,__block_1___kernel_j3d27pt0__*1);
int __grid_2___kernel_j3d27pt0__ = FORMA_CEIL(__size_2___kernel_j3d27pt0__,__block_2___kernel_j3d27pt0__*1);
dim3 __gridConfig___kernel_j3d27pt0__(__grid_0___kernel_j3d27pt0__,__grid_1___kernel_j3d27pt0__,__grid_2___kernel_j3d27pt0__);
hipLaunchKernelGGL(( __kernel_j3d27pt0__), dim3(__gridConfig___kernel_j3d27pt0__),dim3(__blockConfig___kernel_j3d27pt0__),__SMemSize___kernel_j3d27pt0__, 0, input, L, M, N,__var_2__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel_j3d27pt0__\n");
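/* The same block/grid selection and shared-memory check is repeated for the second kernel. */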
int __size_0___kernel_j3d27pt1__ = ((N-2) - 1 ) + 1;
int __size_1___kernel_j3d27pt1__ = ((M-2) - 1 ) + 1;
int __size_2___kernel_j3d27pt1__ = ((L-2) - 1 ) + 1;
int __max_occupancy_blocksize___kernel_j3d27pt1__;
int _max_occupancy_gridsize___kernel_j3d27pt1__;
hipOccupancyMaxPotentialBlockSize(&_max_occupancy_gridsize___kernel_j3d27pt1__,&__max_occupancy_blocksize___kernel_j3d27pt1__,(const void*)__kernel_j3d27pt1__,0,0);
int __max_occupancy_blocksize___kernel_j3d27pt1___0 = pow((double)__max_occupancy_blocksize___kernel_j3d27pt1__, (double)(1.0/(double)3));
__max_occupancy_blocksize___kernel_j3d27pt1___0 = FORMA_MAX(__max_occupancy_blocksize___kernel_j3d27pt1___0/32, 1)*32;
int __block_0___kernel_j3d27pt1__ = 16;//FORMA_MAX(FORMA_MIN(FORMA_MIN(__max_occupancy_blocksize___kernel_j3d27pt1___0,FORMA_MAX((__size_0___kernel_j3d27pt1__/1)/32,1)*32),FORMA_MAX_BLOCKDIM_0),5);
__max_occupancy_blocksize___kernel_j3d27pt1__ /= __block_0___kernel_j3d27pt1__;
int __max_occupancy_blocksize___kernel_j3d27pt1___1 = pow((double)__max_occupancy_blocksize___kernel_j3d27pt1__, (double)(1.0/(double)2));
int __block_1___kernel_j3d27pt1__ = 8;//FORMA_MAX(FORMA_MIN(FORMA_MIN(__max_occupancy_blocksize___kernel_j3d27pt1___1,__size_1___kernel_j3d27pt1__/1),FORMA_MAX_BLOCKDIM_1),5);
__max_occupancy_blocksize___kernel_j3d27pt1__ /= __block_1___kernel_j3d27pt1__;
int __max_occupancy_blocksize___kernel_j3d27pt1___2 = __max_occupancy_blocksize___kernel_j3d27pt1__;
int __block_2___kernel_j3d27pt1__ = 8;//FORMA_MAX(FORMA_MIN(FORMA_MIN(__max_occupancy_blocksize___kernel_j3d27pt1___2,__size_2___kernel_j3d27pt1__/1),FORMA_MAX_BLOCKDIM_2),3);
__max_occupancy_blocksize___kernel_j3d27pt1__ /= __block_2___kernel_j3d27pt1__;
dim3 __blockConfig___kernel_j3d27pt1__(__block_0___kernel_j3d27pt1__,__block_1___kernel_j3d27pt1__,__block_2___kernel_j3d27pt1__);
int __SMemSize___kernel_j3d27pt1__ = 0;
__SMemSize___kernel_j3d27pt1__ = __blockSizeToSMemSize___kernel_j3d27pt1__(__blockConfig___kernel_j3d27pt1__);
while( __SMemSize___kernel_j3d27pt1__ > __FORMA_MAX_SHARED_MEM__){
if( __blockConfig___kernel_j3d27pt1__.z/2 > 3)
__blockConfig___kernel_j3d27pt1__.z /= 2;
__SMemSize___kernel_j3d27pt1__ = __blockSizeToSMemSize___kernel_j3d27pt1__(__blockConfig___kernel_j3d27pt1__);
if( __SMemSize___kernel_j3d27pt1__ <= __FORMA_MAX_SHARED_MEM__)
break;
if( __blockConfig___kernel_j3d27pt1__.y/2 > 5)
__blockConfig___kernel_j3d27pt1__.y /= 2;
__SMemSize___kernel_j3d27pt1__ = __blockSizeToSMemSize___kernel_j3d27pt1__(__blockConfig___kernel_j3d27pt1__);
if( __SMemSize___kernel_j3d27pt1__ <= __FORMA_MAX_SHARED_MEM__)
break;
if( __blockConfig___kernel_j3d27pt1__.x/2 > FORMA_MIN(32,5))
__blockConfig___kernel_j3d27pt1__.x /= 2;
__SMemSize___kernel_j3d27pt1__ = __blockSizeToSMemSize___kernel_j3d27pt1__(__blockConfig___kernel_j3d27pt1__);
}
__block_0___kernel_j3d27pt1__ = __blockConfig___kernel_j3d27pt1__.x-(2-(-2));
__block_1___kernel_j3d27pt1__ = __blockConfig___kernel_j3d27pt1__.y-(2-(-2));
__block_2___kernel_j3d27pt1__ = __blockConfig___kernel_j3d27pt1__.z-(1-(-1));
int __grid_0___kernel_j3d27pt1__ = FORMA_CEIL(__size_0___kernel_j3d27pt1__,__block_0___kernel_j3d27pt1__*1);
int __grid_1___kernel_j3d27pt1__ = FORMA_CEIL(__size_1___kernel_j3d27pt1__,__block_1___kernel_j3d27pt1__*1);
int __grid_2___kernel_j3d27pt1__ = FORMA_CEIL(__size_2___kernel_j3d27pt1__,__block_2___kernel_j3d27pt1__*1);
dim3 __gridConfig___kernel_j3d27pt1__(__grid_0___kernel_j3d27pt1__,__grid_1___kernel_j3d27pt1__,__grid_2___kernel_j3d27pt1__);
hipLaunchKernelGGL(( __kernel_j3d27pt1__), dim3(__gridConfig___kernel_j3d27pt1__),dim3(__blockConfig___kernel_j3d27pt1__),__SMemSize___kernel_j3d27pt1__, 0, __var_2__, L, M, N,__var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel_j3d27pt1__\n");
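/* Copy the final result from __var_1__ into the caller's buffer, using a device-to-device copy
   if __var_0__ already points to device memory. */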
hipPointerAttribute_t ptrAttrib___var_0__;
hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
hipGetLastError();
hipMemcpy(__var_0__,__var_1__, sizeof(float)*((L-0)*(M-0)*(N-0)), memcpy_kind___var_0__);
#ifdef _TIMER_
hipEventRecord(_forma_timer_stop_,0);
hipEventSynchronize(_forma_timer_stop_);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
hipEventDestroy(_forma_timer_start_);
hipEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
hipFree(input);
hipFree(__var_1__);
hipFree(__var_2__);
}
/*Host Free End*/
| b76d71b43e35bdf2fce920a3b0946ede21fb2b3d.cu | #include "cuda.h"
#ifdef _TIMER_
#include "cuda_profiler_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
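/* Templated helper kernel and host wrapper that fill a device array with a constant value
   (emitted by the code generator; the j3d27pt driver in this file does not appear to call it). */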
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
__kernel_init__<<<init_grid,init_block>>>(d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
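/* First device kernel of the CUDA version (mirrored by the HIP version above): each block
   stages a haloed tile of `input` in dynamically allocated shared memory, applies the weighted
   27-point average twice (first into __tilevar_0__, then into the intermediate array __var_2__).
   The second kernel repeats the pattern on __var_2__ to produce the final result. */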
__global__ void __kernel_j3d27pt0__(float * __restrict__ input, int L, int M, int N, float * __var_2__){
int FORMA_BLOCKDIM_Z = (int)(blockDim.z)- (1-(-1));
int FORMA_BLOCKDIM_Y = (int)(blockDim.y)- (2-(-2));
int FORMA_BLOCKDIM_X = (int)(blockDim.x)- (2-(-2));
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Z-(-4))*(FORMA_BLOCKDIM_Y-(-4))*(FORMA_BLOCKDIM_X-(-4))));
float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Z-(-2))*(FORMA_BLOCKDIM_Y-(-2))*(FORMA_BLOCKDIM_X-(-2))));
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + 1;
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + 1;
int __iter_2__;
__iter_2__ = (int)(blockIdx.z)*(int)(FORMA_BLOCKDIM_Z) + 1;
{
int __iter_3__;
__iter_3__ = (FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)) + (int)(threadIdx.z*1);
for (;__iter_3__+0 <= (FORMA_MIN((FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-1),(L-2))+1),(L-2))+1); __iter_3__ += (int)(blockDim.z*1)) {
int __iter_4__;
__iter_4__ = (FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)) + (int)(threadIdx.y*1);
if (__iter_4__ <= (FORMA_MIN((FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-2))+1),(M-2))+1)) {
int __iter_5__;
__iter_5__ = (FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)) + (int)(threadIdx.x);
if (__iter_5__ <= (FORMA_MIN((FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-2))+1),(N-2))+1)) {
int __temp_0__;
__temp_0__ = (__iter_3__+0);
int __temp_1__;
__temp_1__ = (__iter_4__+0);
int __temp_2__;
__temp_2__ = (__iter_5__+0);
__tilevar_1__[(__iter_5__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_4__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_3__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))] = input[__temp_2__+(N-0)*(__temp_1__+(M-0)*(__temp_0__))];
}
}
}
}
__syncthreads();
{
int __iter_6__;
__iter_6__ = FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1) + (int)(threadIdx.z*1);
if (__iter_6__ <= FORMA_MIN((FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-1),(L-2))+1),(L-2))) {
int __iter_7__;
__iter_7__ = FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1) + (int)(threadIdx.y*1);
if (__iter_7__ <= FORMA_MIN((FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-2))+1),(M-2))) {
int __iter_8__;
__iter_8__ = FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1) + (int)(threadIdx.x);
if (__iter_8__ <= FORMA_MIN((FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-2))+1),(N-2))) {
float __temp_3__;
__temp_3__ = (0.500000 * __tilevar_1__[(__iter_8__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_4__;
__temp_4__ = (0.700000 * __tilevar_1__[(__iter_8__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_5__;
__temp_5__ = (__temp_3__ + __temp_4__);
float __temp_6__;
__temp_6__ = (0.900000 * __tilevar_1__[(__iter_8__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_7__;
__temp_7__ = (__temp_5__ + __temp_6__);
float __temp_8__;
__temp_8__ = (1.200000 * __tilevar_1__[(__iter_8__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_9__;
__temp_9__ = (__temp_7__ + __temp_8__);
float __temp_10__;
__temp_10__ = (1.500000 * __tilevar_1__[(__iter_8__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_11__;
__temp_11__ = (__temp_9__ + __temp_10__);
float __temp_12__;
__temp_12__ = (1.200000 * __tilevar_1__[(__iter_8__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_13__;
__temp_13__ = (__temp_11__ + __temp_12__);
float __temp_14__;
__temp_14__ = (0.900000 * __tilevar_1__[(__iter_8__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_15__;
__temp_15__ = (__temp_13__ + __temp_14__);
float __temp_16__;
__temp_16__ = (0.700000 * __tilevar_1__[(__iter_8__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_17__;
__temp_17__ = (__temp_15__ + __temp_16__);
float __temp_18__;
__temp_18__ = (0.500000 * __tilevar_1__[(__iter_8__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_19__;
__temp_19__ = (__temp_17__ + __temp_18__);
float __temp_20__;
__temp_20__ = (0.500000 * __tilevar_1__[(__iter_8__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_21__;
__temp_21__ = (__temp_19__ + __temp_20__);
float __temp_22__;
__temp_22__ = (0.700000 * __tilevar_1__[(__iter_8__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_23__;
__temp_23__ = (__temp_21__ + __temp_22__);
float __temp_24__;
__temp_24__ = (0.900000 * __tilevar_1__[(__iter_8__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_25__;
__temp_25__ = (__temp_23__ + __temp_24__);
float __temp_26__;
__temp_26__ = (1.200000 * __tilevar_1__[(__iter_8__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_27__;
__temp_27__ = (__temp_25__ + __temp_26__);
float __temp_28__;
__temp_28__ = (1.500000 * __tilevar_1__[(__iter_8__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_29__;
__temp_29__ = (__temp_27__ + __temp_28__);
float __temp_30__;
__temp_30__ = (1.200000 * __tilevar_1__[(__iter_8__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_31__;
__temp_31__ = (__temp_29__ + __temp_30__);
float __temp_32__;
__temp_32__ = (0.900000 * __tilevar_1__[(__iter_8__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_33__;
__temp_33__ = (__temp_31__ + __temp_32__);
float __temp_34__;
__temp_34__ = (0.700000 * __tilevar_1__[(__iter_8__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_35__;
__temp_35__ = (__temp_33__ + __temp_34__);
float __temp_36__;
__temp_36__ = (0.500000 * __tilevar_1__[(__iter_8__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_37__;
__temp_37__ = (__temp_35__ + __temp_36__);
float __temp_38__;
__temp_38__ = (0.500000 * __tilevar_1__[(__iter_8__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_39__;
__temp_39__ = (__temp_37__ + __temp_38__);
float __temp_40__;
__temp_40__ = (0.700000 * __tilevar_1__[(__iter_8__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_41__;
__temp_41__ = (__temp_39__ + __temp_40__);
float __temp_42__;
__temp_42__ = (0.900000 * __tilevar_1__[(__iter_8__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_43__;
__temp_43__ = (__temp_41__ + __temp_42__);
float __temp_44__;
__temp_44__ = (1.200000 * __tilevar_1__[(__iter_8__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_45__;
__temp_45__ = (__temp_43__ + __temp_44__);
float __temp_46__;
__temp_46__ = (1.500000 * __tilevar_1__[(__iter_8__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_47__;
__temp_47__ = (__temp_45__ + __temp_46__);
float __temp_48__;
__temp_48__ = (1.200000 * __tilevar_1__[(__iter_8__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_49__;
__temp_49__ = (__temp_47__ + __temp_48__);
float __temp_50__;
__temp_50__ = (0.900000 * __tilevar_1__[(__iter_8__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_51__;
__temp_51__ = (__temp_49__ + __temp_50__);
float __temp_52__;
__temp_52__ = (0.700000 * __tilevar_1__[(__iter_8__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_53__;
__temp_53__ = (__temp_51__ + __temp_52__);
float __temp_54__;
__temp_54__ = (0.500000 * __tilevar_1__[(__iter_8__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_0__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_7__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_1__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_6__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_2__,1)+(-1)),1)+(-1)))))]);
float __temp_55__;
__temp_55__ = (__temp_53__ + __temp_54__);
float __temp_56__;
__temp_56__ = (__temp_55__ / 159);
__tilevar_0__[(__iter_8__+0)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_7__+0)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_6__+0)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))] = __temp_56__;
}
}
}
}
__syncthreads();
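// Compute stage 2: apply the same stencil to __tilevar_0__ and write this kernel's output to __var_2__ in global memory.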
{
int __iter_9__;
__iter_9__ = FORMA_MAX(__iter_2__,1) + (int)(threadIdx.z*1);
if (__iter_9__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-1),(L-2))) {
int __iter_10__;
__iter_10__ = FORMA_MAX(__iter_1__,1) + (int)(threadIdx.y*1);
if (__iter_10__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-2))) {
int __iter_11__;
__iter_11__ = FORMA_MAX(__iter_0__,1) + (int)(threadIdx.x);
if (__iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-2))) {
float __temp_57__;
__temp_57__ = (0.500000 * __tilevar_0__[(__iter_11__+0)+(-1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(-1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(-1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_58__;
__temp_58__ = (0.700000 * __tilevar_0__[(__iter_11__+0)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(-1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(-1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_59__;
__temp_59__ = (__temp_57__ + __temp_58__);
float __temp_60__;
__temp_60__ = (0.900000 * __tilevar_0__[(__iter_11__+0)+(1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(-1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(-1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_61__;
__temp_61__ = (__temp_59__ + __temp_60__);
float __temp_62__;
__temp_62__ = (1.200000 * __tilevar_0__[(__iter_11__+0)+(-1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(-1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_63__;
__temp_63__ = (__temp_61__ + __temp_62__);
float __temp_64__;
__temp_64__ = (1.500000 * __tilevar_0__[(__iter_11__+0)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(-1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_65__;
__temp_65__ = (__temp_63__ + __temp_64__);
float __temp_66__;
__temp_66__ = (1.200000 * __tilevar_0__[(__iter_11__+0)+(1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(-1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_67__;
__temp_67__ = (__temp_65__ + __temp_66__);
float __temp_68__;
__temp_68__ = (0.900000 * __tilevar_0__[(__iter_11__+0)+(-1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(-1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_69__;
__temp_69__ = (__temp_67__ + __temp_68__);
float __temp_70__;
__temp_70__ = (0.700000 * __tilevar_0__[(__iter_11__+0)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(-1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_71__;
__temp_71__ = (__temp_69__ + __temp_70__);
float __temp_72__;
__temp_72__ = (0.500000 * __tilevar_0__[(__iter_11__+0)+(1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(-1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_73__;
__temp_73__ = (__temp_71__ + __temp_72__);
float __temp_74__;
__temp_74__ = (0.500000 * __tilevar_0__[(__iter_11__+0)+(-1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(-1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_75__;
__temp_75__ = (__temp_73__ + __temp_74__);
float __temp_76__;
__temp_76__ = (0.700000 * __tilevar_0__[(__iter_11__+0)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(-1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_77__;
__temp_77__ = (__temp_75__ + __temp_76__);
float __temp_78__;
__temp_78__ = (0.900000 * __tilevar_0__[(__iter_11__+0)+(1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(-1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_79__;
__temp_79__ = (__temp_77__ + __temp_78__);
float __temp_80__;
__temp_80__ = (1.200000 * __tilevar_0__[(__iter_11__+0)+(-1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_81__;
__temp_81__ = (__temp_79__ + __temp_80__);
float __temp_82__;
__temp_82__ = (1.500000 * __tilevar_0__[(__iter_11__+0)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_83__;
__temp_83__ = (__temp_81__ + __temp_82__);
float __temp_84__;
__temp_84__ = (1.200000 * __tilevar_0__[(__iter_11__+0)+(1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_85__;
__temp_85__ = (__temp_83__ + __temp_84__);
float __temp_86__;
__temp_86__ = (0.900000 * __tilevar_0__[(__iter_11__+0)+(-1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_87__;
__temp_87__ = (__temp_85__ + __temp_86__);
float __temp_88__;
__temp_88__ = (0.700000 * __tilevar_0__[(__iter_11__+0)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_89__;
__temp_89__ = (__temp_87__ + __temp_88__);
float __temp_90__;
__temp_90__ = (0.500000 * __tilevar_0__[(__iter_11__+0)+(1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_91__;
__temp_91__ = (__temp_89__ + __temp_90__);
float __temp_92__;
__temp_92__ = (0.500000 * __tilevar_0__[(__iter_11__+0)+(-1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(-1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_93__;
__temp_93__ = (__temp_91__ + __temp_92__);
float __temp_94__;
__temp_94__ = (0.700000 * __tilevar_0__[(__iter_11__+0)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(-1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_95__;
__temp_95__ = (__temp_93__ + __temp_94__);
float __temp_96__;
__temp_96__ = (0.900000 * __tilevar_0__[(__iter_11__+0)+(1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(-1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_97__;
__temp_97__ = (__temp_95__ + __temp_96__);
float __temp_98__;
__temp_98__ = (1.200000 * __tilevar_0__[(__iter_11__+0)+(-1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_99__;
__temp_99__ = (__temp_97__ + __temp_98__);
float __temp_100__;
__temp_100__ = (1.500000 * __tilevar_0__[(__iter_11__+0)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_101__;
__temp_101__ = (__temp_99__ + __temp_100__);
float __temp_102__;
__temp_102__ = (1.200000 * __tilevar_0__[(__iter_11__+0)+(1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_103__;
__temp_103__ = (__temp_101__ + __temp_102__);
float __temp_104__;
__temp_104__ = (0.900000 * __tilevar_0__[(__iter_11__+0)+(-1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_105__;
__temp_105__ = (__temp_103__ + __temp_104__);
float __temp_106__;
__temp_106__ = (0.700000 * __tilevar_0__[(__iter_11__+0)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_107__;
__temp_107__ = (__temp_105__ + __temp_106__);
float __temp_108__;
__temp_108__ = (0.500000 * __tilevar_0__[(__iter_11__+0)+(1)+(0-(FORMA_MAX(__iter_0__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_10__+0)+(1)+(0-(FORMA_MAX(__iter_1__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_9__+0)+(1)+(0-(FORMA_MAX(__iter_2__,1)+(-1)))))]);
float __temp_109__;
__temp_109__ = (__temp_107__ + __temp_108__);
float __temp_110__;
__temp_110__ = (__temp_109__ / 159);
__var_2__[(__iter_11__+0)+(N-0)*((__iter_10__+0)+(M-0)*((__iter_9__+0)))] = __temp_110__;
}
}
}
}
}
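// Returns the dynamic shared-memory size (in bytes) that __kernel_j3d27pt0__ needs for its two float tiles,
// given the launch block dimensions.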
int __blockSizeToSMemSize___kernel_j3d27pt0__(dim3 blockDim){
int FORMA_BLOCKDIM_Z = (int)(blockDim.z)- (1-(-1));
int FORMA_BLOCKDIM_Y = (int)(blockDim.y)- (2-(-2));
int FORMA_BLOCKDIM_X = (int)(blockDim.x)- (2-(-2));
int SMemSize = 0;
SMemSize += sizeof(float)*(((FORMA_BLOCKDIM_Z-(-4))*(FORMA_BLOCKDIM_Y-(-4))*(FORMA_BLOCKDIM_X-(-4))));
SMemSize += sizeof(float)*(((FORMA_BLOCKDIM_Z-(-2))*(FORMA_BLOCKDIM_Y-(-2))*(FORMA_BLOCKDIM_X-(-2))));
return SMemSize;
}
__global__ void __kernel_j3d27pt1__(float * __restrict__ __var_2__, int L, int M, int N, float * __var_1__){
int FORMA_BLOCKDIM_Z = (int)(blockDim.z)- (1-(-1));
int FORMA_BLOCKDIM_Y = (int)(blockDim.y)- (2-(-2));
int FORMA_BLOCKDIM_X = (int)(blockDim.x)- (2-(-2));
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float* __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Z-(-4))*(FORMA_BLOCKDIM_Y-(-4))*(FORMA_BLOCKDIM_X-(-4))));
float* __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Z-(-2))*(FORMA_BLOCKDIM_Y-(-2))*(FORMA_BLOCKDIM_X-(-2))));
int __iter_12__;
__iter_12__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + 1;
int __iter_13__;
__iter_13__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + 1;
int __iter_14__;
__iter_14__ = (int)(blockIdx.z)*(int)(FORMA_BLOCKDIM_Z) + 1;
{
int __iter_15__;
__iter_15__ = (FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)) + (int)(threadIdx.z*1);
for (;__iter_15__+0 <= (FORMA_MIN((FORMA_MIN(((__iter_14__+FORMA_BLOCKDIM_Z)-1),(L-2))+1),(L-2))+1); __iter_15__ += (int)(blockDim.z*1)) {
int __iter_16__;
__iter_16__ = (FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)) + (int)(threadIdx.y*1);
if (__iter_16__ <= (FORMA_MIN((FORMA_MIN(((__iter_13__+FORMA_BLOCKDIM_Y)-1),(M-2))+1),(M-2))+1)) {
int __iter_17__;
__iter_17__ = (FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)) + (int)(threadIdx.x);
if (__iter_17__ <= (FORMA_MIN((FORMA_MIN(((__iter_12__+FORMA_BLOCKDIM_X)-1),(N-2))+1),(N-2))+1)) {
int __temp_111__;
__temp_111__ = (__iter_15__+0);
int __temp_112__;
__temp_112__ = (__iter_16__+0);
int __temp_113__;
__temp_113__ = (__iter_17__+0);
__tilevar_3__[(__iter_17__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_16__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_15__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))] = __var_2__[__temp_113__+(N-0)*(__temp_112__+(M-0)*(__temp_111__))];
}
}
}
}
__syncthreads();
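// __kernel_j3d27pt1__ repeats the same two-stage tiled stencil: it reads __var_2__ into __tilevar_3__ (above),
// applies the stencil into __tilevar_2__, then applies it again and writes the final result to __var_1__.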
{
int __iter_18__;
__iter_18__ = FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1) + (int)(threadIdx.z*1);
if (__iter_18__ <= FORMA_MIN((FORMA_MIN(((__iter_14__+FORMA_BLOCKDIM_Z)-1),(L-2))+1),(L-2))) {
int __iter_19__;
__iter_19__ = FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1) + (int)(threadIdx.y*1);
if (__iter_19__ <= FORMA_MIN((FORMA_MIN(((__iter_13__+FORMA_BLOCKDIM_Y)-1),(M-2))+1),(M-2))) {
int __iter_20__;
__iter_20__ = FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1) + (int)(threadIdx.x);
if (__iter_20__ <= FORMA_MIN((FORMA_MIN(((__iter_12__+FORMA_BLOCKDIM_X)-1),(N-2))+1),(N-2))) {
float __temp_114__;
__temp_114__ = (0.500000 * __tilevar_3__[(__iter_20__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_115__;
__temp_115__ = (0.700000 * __tilevar_3__[(__iter_20__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_116__;
__temp_116__ = (__temp_114__ + __temp_115__);
float __temp_117__;
__temp_117__ = (0.900000 * __tilevar_3__[(__iter_20__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_118__;
__temp_118__ = (__temp_116__ + __temp_117__);
float __temp_119__;
__temp_119__ = (1.200000 * __tilevar_3__[(__iter_20__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_120__;
__temp_120__ = (__temp_118__ + __temp_119__);
float __temp_121__;
__temp_121__ = (1.500000 * __tilevar_3__[(__iter_20__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_122__;
__temp_122__ = (__temp_120__ + __temp_121__);
float __temp_123__;
__temp_123__ = (1.200000 * __tilevar_3__[(__iter_20__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_124__;
__temp_124__ = (__temp_122__ + __temp_123__);
float __temp_125__;
__temp_125__ = (0.900000 * __tilevar_3__[(__iter_20__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_126__;
__temp_126__ = (__temp_124__ + __temp_125__);
float __temp_127__;
__temp_127__ = (0.700000 * __tilevar_3__[(__iter_20__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_128__;
__temp_128__ = (__temp_126__ + __temp_127__);
float __temp_129__;
__temp_129__ = (0.500000 * __tilevar_3__[(__iter_20__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_130__;
__temp_130__ = (__temp_128__ + __temp_129__);
float __temp_131__;
__temp_131__ = (0.500000 * __tilevar_3__[(__iter_20__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_132__;
__temp_132__ = (__temp_130__ + __temp_131__);
float __temp_133__;
__temp_133__ = (0.700000 * __tilevar_3__[(__iter_20__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_134__;
__temp_134__ = (__temp_132__ + __temp_133__);
float __temp_135__;
__temp_135__ = (0.900000 * __tilevar_3__[(__iter_20__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_136__;
__temp_136__ = (__temp_134__ + __temp_135__);
float __temp_137__;
__temp_137__ = (1.200000 * __tilevar_3__[(__iter_20__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_138__;
__temp_138__ = (__temp_136__ + __temp_137__);
float __temp_139__;
__temp_139__ = (1.500000 * __tilevar_3__[(__iter_20__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_140__;
__temp_140__ = (__temp_138__ + __temp_139__);
float __temp_141__;
__temp_141__ = (1.200000 * __tilevar_3__[(__iter_20__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_142__;
__temp_142__ = (__temp_140__ + __temp_141__);
float __temp_143__;
__temp_143__ = (0.900000 * __tilevar_3__[(__iter_20__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_144__;
__temp_144__ = (__temp_142__ + __temp_143__);
float __temp_145__;
__temp_145__ = (0.700000 * __tilevar_3__[(__iter_20__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_146__;
__temp_146__ = (__temp_144__ + __temp_145__);
float __temp_147__;
__temp_147__ = (0.500000 * __tilevar_3__[(__iter_20__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_148__;
__temp_148__ = (__temp_146__ + __temp_147__);
float __temp_149__;
__temp_149__ = (0.500000 * __tilevar_3__[(__iter_20__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_150__;
__temp_150__ = (__temp_148__ + __temp_149__);
float __temp_151__;
__temp_151__ = (0.700000 * __tilevar_3__[(__iter_20__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_152__;
__temp_152__ = (__temp_150__ + __temp_151__);
float __temp_153__;
__temp_153__ = (0.900000 * __tilevar_3__[(__iter_20__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_154__;
__temp_154__ = (__temp_152__ + __temp_153__);
float __temp_155__;
__temp_155__ = (1.200000 * __tilevar_3__[(__iter_20__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_156__;
__temp_156__ = (__temp_154__ + __temp_155__);
float __temp_157__;
__temp_157__ = (1.500000 * __tilevar_3__[(__iter_20__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_158__;
__temp_158__ = (__temp_156__ + __temp_157__);
float __temp_159__;
__temp_159__ = (1.200000 * __tilevar_3__[(__iter_20__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_160__;
__temp_160__ = (__temp_158__ + __temp_159__);
float __temp_161__;
__temp_161__ = (0.900000 * __tilevar_3__[(__iter_20__+0)+(-1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_162__;
__temp_162__ = (__temp_160__ + __temp_161__);
float __temp_163__;
__temp_163__ = (0.700000 * __tilevar_3__[(__iter_20__+0)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_164__;
__temp_164__ = (__temp_162__ + __temp_163__);
float __temp_165__;
__temp_165__ = (0.500000 * __tilevar_3__[(__iter_20__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_12__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_X-(-4))*((__iter_19__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_13__,1)+(-1)),1)+(-1)))+(FORMA_BLOCKDIM_Y-(-4))*((__iter_18__+0)+(1)+(0-(FORMA_MAX((FORMA_MAX(__iter_14__,1)+(-1)),1)+(-1)))))]);
float __temp_166__;
__temp_166__ = (__temp_164__ + __temp_165__);
float __temp_167__;
__temp_167__ = (__temp_166__ / 159);
__tilevar_2__[(__iter_20__+0)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_19__+0)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_18__+0)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))] = __temp_167__;
}
}
}
}
__syncthreads();
{
int __iter_21__;
__iter_21__ = FORMA_MAX(__iter_14__,1) + (int)(threadIdx.z*1);
if (__iter_21__ <= FORMA_MIN(((__iter_14__+FORMA_BLOCKDIM_Z)-1),(L-2))) {
int __iter_22__;
__iter_22__ = FORMA_MAX(__iter_13__,1) + (int)(threadIdx.y*1);
if (__iter_22__ <= FORMA_MIN(((__iter_13__+FORMA_BLOCKDIM_Y)-1),(M-2))) {
int __iter_23__;
__iter_23__ = FORMA_MAX(__iter_12__,1) + (int)(threadIdx.x);
if (__iter_23__ <= FORMA_MIN(((__iter_12__+FORMA_BLOCKDIM_X)-1),(N-2))) {
float __temp_168__;
__temp_168__ = (0.500000 * __tilevar_2__[(__iter_23__+0)+(-1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(-1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(-1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_169__;
__temp_169__ = (0.700000 * __tilevar_2__[(__iter_23__+0)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(-1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(-1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_170__;
__temp_170__ = (__temp_168__ + __temp_169__);
float __temp_171__;
__temp_171__ = (0.900000 * __tilevar_2__[(__iter_23__+0)+(1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(-1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(-1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_172__;
__temp_172__ = (__temp_170__ + __temp_171__);
float __temp_173__;
__temp_173__ = (1.200000 * __tilevar_2__[(__iter_23__+0)+(-1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(-1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_174__;
__temp_174__ = (__temp_172__ + __temp_173__);
float __temp_175__;
__temp_175__ = (1.500000 * __tilevar_2__[(__iter_23__+0)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(-1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_176__;
__temp_176__ = (__temp_174__ + __temp_175__);
float __temp_177__;
__temp_177__ = (1.200000 * __tilevar_2__[(__iter_23__+0)+(1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(-1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_178__;
__temp_178__ = (__temp_176__ + __temp_177__);
float __temp_179__;
__temp_179__ = (0.900000 * __tilevar_2__[(__iter_23__+0)+(-1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(-1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_180__;
__temp_180__ = (__temp_178__ + __temp_179__);
float __temp_181__;
__temp_181__ = (0.700000 * __tilevar_2__[(__iter_23__+0)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(-1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_182__;
__temp_182__ = (__temp_180__ + __temp_181__);
float __temp_183__;
__temp_183__ = (0.500000 * __tilevar_2__[(__iter_23__+0)+(1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(-1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_184__;
__temp_184__ = (__temp_182__ + __temp_183__);
float __temp_185__;
__temp_185__ = (0.500000 * __tilevar_2__[(__iter_23__+0)+(-1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(-1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_186__;
__temp_186__ = (__temp_184__ + __temp_185__);
float __temp_187__;
__temp_187__ = (0.700000 * __tilevar_2__[(__iter_23__+0)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(-1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_188__;
__temp_188__ = (__temp_186__ + __temp_187__);
float __temp_189__;
__temp_189__ = (0.900000 * __tilevar_2__[(__iter_23__+0)+(1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(-1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_190__;
__temp_190__ = (__temp_188__ + __temp_189__);
float __temp_191__;
__temp_191__ = (1.200000 * __tilevar_2__[(__iter_23__+0)+(-1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_192__;
__temp_192__ = (__temp_190__ + __temp_191__);
float __temp_193__;
__temp_193__ = (1.500000 * __tilevar_2__[(__iter_23__+0)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_194__;
__temp_194__ = (__temp_192__ + __temp_193__);
float __temp_195__;
__temp_195__ = (1.200000 * __tilevar_2__[(__iter_23__+0)+(1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_196__;
__temp_196__ = (__temp_194__ + __temp_195__);
float __temp_197__;
__temp_197__ = (0.900000 * __tilevar_2__[(__iter_23__+0)+(-1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_198__;
__temp_198__ = (__temp_196__ + __temp_197__);
float __temp_199__;
__temp_199__ = (0.700000 * __tilevar_2__[(__iter_23__+0)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_200__;
__temp_200__ = (__temp_198__ + __temp_199__);
float __temp_201__;
__temp_201__ = (0.500000 * __tilevar_2__[(__iter_23__+0)+(1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_202__;
__temp_202__ = (__temp_200__ + __temp_201__);
float __temp_203__;
__temp_203__ = (0.500000 * __tilevar_2__[(__iter_23__+0)+(-1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(-1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_204__;
__temp_204__ = (__temp_202__ + __temp_203__);
float __temp_205__;
__temp_205__ = (0.700000 * __tilevar_2__[(__iter_23__+0)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(-1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_206__;
__temp_206__ = (__temp_204__ + __temp_205__);
float __temp_207__;
__temp_207__ = (0.900000 * __tilevar_2__[(__iter_23__+0)+(1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(-1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_208__;
__temp_208__ = (__temp_206__ + __temp_207__);
float __temp_209__;
__temp_209__ = (1.200000 * __tilevar_2__[(__iter_23__+0)+(-1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_210__;
__temp_210__ = (__temp_208__ + __temp_209__);
float __temp_211__;
__temp_211__ = (1.500000 * __tilevar_2__[(__iter_23__+0)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_212__;
__temp_212__ = (__temp_210__ + __temp_211__);
float __temp_213__;
__temp_213__ = (1.200000 * __tilevar_2__[(__iter_23__+0)+(1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_214__;
__temp_214__ = (__temp_212__ + __temp_213__);
float __temp_215__;
__temp_215__ = (0.900000 * __tilevar_2__[(__iter_23__+0)+(-1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_216__;
__temp_216__ = (__temp_214__ + __temp_215__);
float __temp_217__;
__temp_217__ = (0.700000 * __tilevar_2__[(__iter_23__+0)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_218__;
__temp_218__ = (__temp_216__ + __temp_217__);
float __temp_219__;
__temp_219__ = (0.500000 * __tilevar_2__[(__iter_23__+0)+(1)+(0-(FORMA_MAX(__iter_12__,1)+(-1)))+(FORMA_BLOCKDIM_X-(-2))*((__iter_22__+0)+(1)+(0-(FORMA_MAX(__iter_13__,1)+(-1)))+(FORMA_BLOCKDIM_Y-(-2))*((__iter_21__+0)+(1)+(0-(FORMA_MAX(__iter_14__,1)+(-1)))))]);
float __temp_220__;
__temp_220__ = (__temp_218__ + __temp_219__);
float __temp_221__;
__temp_221__ = (__temp_220__ / 159);
__var_1__[(__iter_23__+0)+(N-0)*((__iter_22__+0)+(M-0)*((__iter_21__+0)))] = __temp_221__;
}
}
}
}
}
int __blockSizeToSMemSize___kernel_j3d27pt1__(dim3 blockDim){
int FORMA_BLOCKDIM_Z = (int)(blockDim.z)- (1-(-1));
int FORMA_BLOCKDIM_Y = (int)(blockDim.y)- (2-(-2));
int FORMA_BLOCKDIM_X = (int)(blockDim.x)- (2-(-2));
int SMemSize = 0;
SMemSize += sizeof(float)*(((FORMA_BLOCKDIM_Z-(-4))*(FORMA_BLOCKDIM_Y-(-4))*(FORMA_BLOCKDIM_X-(-4))));
SMemSize += sizeof(float)*(((FORMA_BLOCKDIM_Z-(-2))*(FORMA_BLOCKDIM_Y-(-2))*(FORMA_BLOCKDIM_X-(-2))));
return SMemSize;
}
/*Device code End */
/* Host Code Begin */
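/* Host driver: allocates device buffers, copies the input grid, picks block sizes whose shared-memory
   tiles fit the device limit, launches the two stencil kernels (input -> __var_2__ -> __var_1__), and
   copies the final result back into __var_0__. */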
extern "C" void j3d27pt(float * h_input, int L, int M, int N, float * __var_0__){
/* Host allocation Begin */
float * input;
cudaMalloc(&input,sizeof(float)*((L-0)*(M-0)*(N-0)));
Check_CUDA_Error("Allocation Error!! : input\n");
cudaPointerAttributes ptrAttrib_h_input;
cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice;
if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess)
if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice)
memcpy_kind_h_input = cudaMemcpyDeviceToDevice;
cudaGetLastError();
if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){
cudaMemcpy(input,h_input,sizeof(float)*((L-0)*(M-0)*(N-0)), memcpy_kind_h_input);
}
float * __var_1__;
cudaMalloc(&__var_1__,sizeof(float)*((L-0)*(M-0)*(N-0)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
float * __var_2__;
cudaMalloc(&__var_2__,sizeof(float)*((L-0)*(M-0)*(N-0)));
Check_CUDA_Error("Allocation Error!! : __var_2__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
cudaEvent_t _forma_timer_start_,_forma_timer_stop_;
cudaEventCreate(&_forma_timer_start_);
cudaEventCreate(&_forma_timer_stop_);
cudaEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel_j3d27pt0__ = ((N-2) - 1 ) + 1;
int __size_1___kernel_j3d27pt0__ = ((M-2) - 1 ) + 1;
int __size_2___kernel_j3d27pt0__ = ((L-2) - 1 ) + 1;
int __max_occupancy_blocksize___kernel_j3d27pt0__;
int _max_occupancy_gridsize___kernel_j3d27pt0__;
cudaOccupancyMaxPotentialBlockSize(&_max_occupancy_gridsize___kernel_j3d27pt0__,&__max_occupancy_blocksize___kernel_j3d27pt0__,(const void*)__kernel_j3d27pt0__,0,0);
int __max_occupancy_blocksize___kernel_j3d27pt0___0 = pow((float)__max_occupancy_blocksize___kernel_j3d27pt0__, (float)(1.0/(float)3));
__max_occupancy_blocksize___kernel_j3d27pt0___0 = FORMA_MAX(__max_occupancy_blocksize___kernel_j3d27pt0___0/32, 1)*32;
int __block_0___kernel_j3d27pt0__ = 16;//FORMA_MAX(FORMA_MIN(FORMA_MIN(__max_occupancy_blocksize___kernel_j3d27pt0___0,FORMA_MAX((__size_0___kernel_j3d27pt0__/1)/32,1)*32),FORMA_MAX_BLOCKDIM_0),5);
__max_occupancy_blocksize___kernel_j3d27pt0__ /= __block_0___kernel_j3d27pt0__;
int __max_occupancy_blocksize___kernel_j3d27pt0___1 = pow((float)__max_occupancy_blocksize___kernel_j3d27pt0__, (float)(1.0/(float)2));
int __block_1___kernel_j3d27pt0__ = 8;//FORMA_MAX(FORMA_MIN(FORMA_MIN(__max_occupancy_blocksize___kernel_j3d27pt0___1,__size_1___kernel_j3d27pt0__/1),FORMA_MAX_BLOCKDIM_1),5);
__max_occupancy_blocksize___kernel_j3d27pt0__ /= __block_1___kernel_j3d27pt0__;
int __max_occupancy_blocksize___kernel_j3d27pt0___2 = __max_occupancy_blocksize___kernel_j3d27pt0__;
int __block_2___kernel_j3d27pt0__ = 8;//FORMA_MAX(FORMA_MIN(FORMA_MIN(__max_occupancy_blocksize___kernel_j3d27pt0___2,__size_2___kernel_j3d27pt0__/1),FORMA_MAX_BLOCKDIM_2),3);
__max_occupancy_blocksize___kernel_j3d27pt0__ /= __block_2___kernel_j3d27pt0__;
dim3 __blockConfig___kernel_j3d27pt0__(__block_0___kernel_j3d27pt0__,__block_1___kernel_j3d27pt0__,__block_2___kernel_j3d27pt0__);
int __SMemSize___kernel_j3d27pt0__ = 0;
__SMemSize___kernel_j3d27pt0__ = __blockSizeToSMemSize___kernel_j3d27pt0__(__blockConfig___kernel_j3d27pt0__);
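// If the default block size needs more shared memory than the device provides, halve the block
// dimensions (z, then y, then x) until the two tiles fit.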
while( __SMemSize___kernel_j3d27pt0__ > __FORMA_MAX_SHARED_MEM__){
if( __blockConfig___kernel_j3d27pt0__.z/2 > 3)
__blockConfig___kernel_j3d27pt0__.z /= 2;
__SMemSize___kernel_j3d27pt0__ = __blockSizeToSMemSize___kernel_j3d27pt0__(__blockConfig___kernel_j3d27pt0__);
if( __SMemSize___kernel_j3d27pt0__ <= __FORMA_MAX_SHARED_MEM__)
break;
if( __blockConfig___kernel_j3d27pt0__.y/2 > 5)
__blockConfig___kernel_j3d27pt0__.y /= 2;
__SMemSize___kernel_j3d27pt0__ = __blockSizeToSMemSize___kernel_j3d27pt0__(__blockConfig___kernel_j3d27pt0__);
if( __SMemSize___kernel_j3d27pt0__ <= __FORMA_MAX_SHARED_MEM__)
break;
if( __blockConfig___kernel_j3d27pt0__.x/2 > FORMA_MIN(32,5))
__blockConfig___kernel_j3d27pt0__.x /= 2;
__SMemSize___kernel_j3d27pt0__ = __blockSizeToSMemSize___kernel_j3d27pt0__(__blockConfig___kernel_j3d27pt0__);
}
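// The launch block includes halo threads; subtract the halo widths implied by the tile arithmetic
// (+/-2 in x and y, +/-1 in z) to get the interior tile size used for computing the grid dimensions.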
__block_0___kernel_j3d27pt0__ = __blockConfig___kernel_j3d27pt0__.x-(2-(-2));
__block_1___kernel_j3d27pt0__ = __blockConfig___kernel_j3d27pt0__.y-(2-(-2));
__block_2___kernel_j3d27pt0__ = __blockConfig___kernel_j3d27pt0__.z-(1-(-1));
int __grid_0___kernel_j3d27pt0__ = FORMA_CEIL(__size_0___kernel_j3d27pt0__,__block_0___kernel_j3d27pt0__*1);
int __grid_1___kernel_j3d27pt0__ = FORMA_CEIL(__size_1___kernel_j3d27pt0__,__block_1___kernel_j3d27pt0__*1);
int __grid_2___kernel_j3d27pt0__ = FORMA_CEIL(__size_2___kernel_j3d27pt0__,__block_2___kernel_j3d27pt0__*1);
dim3 __gridConfig___kernel_j3d27pt0__(__grid_0___kernel_j3d27pt0__,__grid_1___kernel_j3d27pt0__,__grid_2___kernel_j3d27pt0__);
__kernel_j3d27pt0__<<<__gridConfig___kernel_j3d27pt0__,__blockConfig___kernel_j3d27pt0__,__SMemSize___kernel_j3d27pt0__>>>(input, L, M, N,__var_2__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel_j3d27pt0__\n");
int __size_0___kernel_j3d27pt1__ = ((N-2) - 1 ) + 1;
int __size_1___kernel_j3d27pt1__ = ((M-2) - 1 ) + 1;
int __size_2___kernel_j3d27pt1__ = ((L-2) - 1 ) + 1;
int __max_occupancy_blocksize___kernel_j3d27pt1__;
int _max_occupancy_gridsize___kernel_j3d27pt1__;
cudaOccupancyMaxPotentialBlockSize(&_max_occupancy_gridsize___kernel_j3d27pt1__,&__max_occupancy_blocksize___kernel_j3d27pt1__,(const void*)__kernel_j3d27pt1__,0,0);
int __max_occupancy_blocksize___kernel_j3d27pt1___0 = pow((double)__max_occupancy_blocksize___kernel_j3d27pt1__, (double)(1.0/(double)3));
__max_occupancy_blocksize___kernel_j3d27pt1___0 = FORMA_MAX(__max_occupancy_blocksize___kernel_j3d27pt1___0/32, 1)*32;
int __block_0___kernel_j3d27pt1__ = 16;//FORMA_MAX(FORMA_MIN(FORMA_MIN(__max_occupancy_blocksize___kernel_j3d27pt1___0,FORMA_MAX((__size_0___kernel_j3d27pt1__/1)/32,1)*32),FORMA_MAX_BLOCKDIM_0),5);
__max_occupancy_blocksize___kernel_j3d27pt1__ /= __block_0___kernel_j3d27pt1__;
int __max_occupancy_blocksize___kernel_j3d27pt1___1 = pow((double)__max_occupancy_blocksize___kernel_j3d27pt1__, (double)(1.0/(double)2));
int __block_1___kernel_j3d27pt1__ = 8;//FORMA_MAX(FORMA_MIN(FORMA_MIN(__max_occupancy_blocksize___kernel_j3d27pt1___1,__size_1___kernel_j3d27pt1__/1),FORMA_MAX_BLOCKDIM_1),5);
__max_occupancy_blocksize___kernel_j3d27pt1__ /= __block_1___kernel_j3d27pt1__;
int __max_occupancy_blocksize___kernel_j3d27pt1___2 = __max_occupancy_blocksize___kernel_j3d27pt1__;
int __block_2___kernel_j3d27pt1__ = 8;//FORMA_MAX(FORMA_MIN(FORMA_MIN(__max_occupancy_blocksize___kernel_j3d27pt1___2,__size_2___kernel_j3d27pt1__/1),FORMA_MAX_BLOCKDIM_2),3);
__max_occupancy_blocksize___kernel_j3d27pt1__ /= __block_2___kernel_j3d27pt1__;
dim3 __blockConfig___kernel_j3d27pt1__(__block_0___kernel_j3d27pt1__,__block_1___kernel_j3d27pt1__,__block_2___kernel_j3d27pt1__);
int __SMemSize___kernel_j3d27pt1__ = 0;
__SMemSize___kernel_j3d27pt1__ = __blockSizeToSMemSize___kernel_j3d27pt1__(__blockConfig___kernel_j3d27pt1__);
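// Same shared-memory fitting strategy as for __kernel_j3d27pt0__.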
while( __SMemSize___kernel_j3d27pt1__ > __FORMA_MAX_SHARED_MEM__){
if( __blockConfig___kernel_j3d27pt1__.z/2 > 3)
__blockConfig___kernel_j3d27pt1__.z /= 2;
__SMemSize___kernel_j3d27pt1__ = __blockSizeToSMemSize___kernel_j3d27pt1__(__blockConfig___kernel_j3d27pt1__);
if( __SMemSize___kernel_j3d27pt1__ <= __FORMA_MAX_SHARED_MEM__)
break;
if( __blockConfig___kernel_j3d27pt1__.y/2 > 5)
__blockConfig___kernel_j3d27pt1__.y /= 2;
__SMemSize___kernel_j3d27pt1__ = __blockSizeToSMemSize___kernel_j3d27pt1__(__blockConfig___kernel_j3d27pt1__);
if( __SMemSize___kernel_j3d27pt1__ <= __FORMA_MAX_SHARED_MEM__)
break;
if( __blockConfig___kernel_j3d27pt1__.x/2 > FORMA_MIN(32,5))
__blockConfig___kernel_j3d27pt1__.x /= 2;
__SMemSize___kernel_j3d27pt1__ = __blockSizeToSMemSize___kernel_j3d27pt1__(__blockConfig___kernel_j3d27pt1__);
}
__block_0___kernel_j3d27pt1__ = __blockConfig___kernel_j3d27pt1__.x-(2-(-2));
__block_1___kernel_j3d27pt1__ = __blockConfig___kernel_j3d27pt1__.y-(2-(-2));
__block_2___kernel_j3d27pt1__ = __blockConfig___kernel_j3d27pt1__.z-(1-(-1));
int __grid_0___kernel_j3d27pt1__ = FORMA_CEIL(__size_0___kernel_j3d27pt1__,__block_0___kernel_j3d27pt1__*1);
int __grid_1___kernel_j3d27pt1__ = FORMA_CEIL(__size_1___kernel_j3d27pt1__,__block_1___kernel_j3d27pt1__*1);
int __grid_2___kernel_j3d27pt1__ = FORMA_CEIL(__size_2___kernel_j3d27pt1__,__block_2___kernel_j3d27pt1__*1);
dim3 __gridConfig___kernel_j3d27pt1__(__grid_0___kernel_j3d27pt1__,__grid_1___kernel_j3d27pt1__,__grid_2___kernel_j3d27pt1__);
__kernel_j3d27pt1__<<<__gridConfig___kernel_j3d27pt1__,__blockConfig___kernel_j3d27pt1__,__SMemSize___kernel_j3d27pt1__>>>(__var_2__, L, M, N,__var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel_j3d27pt1__\n");
cudaPointerAttributes ptrAttrib___var_0__;
cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost;
if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess)
if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice)
memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice;
cudaGetLastError();
cudaMemcpy(__var_0__,__var_1__, sizeof(float)*((L-0)*(M-0)*(N-0)), memcpy_kind___var_0__);
#ifdef _TIMER_
cudaEventRecord(_forma_timer_stop_,0);
cudaEventSynchronize(_forma_timer_stop_);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
cudaEventDestroy(_forma_timer_start_);
cudaEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
cudaFree(input);
cudaFree(__var_1__);
cudaFree(__var_2__);
}
/*Host Free End*/
|
df4e6a1b5cc469b18f06863673b06ef5437f624b.hip | // !!! This is a file automatically generated by hipify!!!
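// Creation routines for the ChainerX CUDA backend (Arange, Identity, Eye, Diagflat, Linspace and
// Device::Fill), with the CUDA runtime calls translated to HIP by hipify.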
#include "chainerx/cuda/cuda_device.h"
#include <algorithm>
#include <cstdint>
#include <mutex>
#include <hip/hip_runtime.h>
#include "chainerx/array.h"
#include "chainerx/cuda/cast.cuh"
#include "chainerx/cuda/cuda.h"
#include "chainerx/cuda/cuda_runtime.h"
#include "chainerx/cuda/cuda_set_device_scope.h"
#include "chainerx/cuda/elementwise.cuh"
#include "chainerx/cuda/op_regist.h"
#include "chainerx/device.h"
#include "chainerx/dtype.h"
#include "chainerx/indexable_array.h"
#include "chainerx/indexer.h"
#include "chainerx/macro.h"
#include "chainerx/routines/creation.h"
#include "chainerx/scalar.h"
#include "chainerx/shape.h"
namespace chainerx {
namespace cuda {
namespace {
template <typename T>
struct ArangeImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t i, CudaType& out) { out = start + step * static_cast<CudaType>(i); }
CudaType start;
CudaType step;
};
class CudaArangeOp : public ArangeOp {
public:
void Call(Scalar start, Scalar step, const Array& out) override {
Device& device = out.device();
CudaSetDeviceScope scope{device.index()};
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<T>(ArangeImpl<T>{static_cast<CudaType>(start), static_cast<CudaType>(step)}, out);
});
}
};
CHAINERX_REGISTER_OP_CUDA(ArangeOp, CudaArangeOp);
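// IdentityImpl writes 1 to every (n+1)-th element of the flattened n x n output, i.e. the main
// diagonal, and 0 everywhere else.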
template <typename T>
struct IdentityImpl {
using CudaType = cuda_internal::DataType<T>;
explicit IdentityImpl(int64_t n) : n_plus_one{n + 1} {}
__device__ void operator()(int64_t i, CudaType& out) { out = i % n_plus_one == 0 ? CudaType{1} : CudaType{0}; }
int64_t n_plus_one;
};
class CudaIdentityOp : public IdentityOp {
public:
void Call(const Array& out) override {
CHAINERX_ASSERT(out.ndim() == 2);
CHAINERX_ASSERT(out.shape()[0] == out.shape()[1]);
Device& device = out.device();
CudaSetDeviceScope scope{device.index()};
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<T>(IdentityImpl<T>{out.shape()[0]}, out);
});
}
};
CHAINERX_REGISTER_OP_CUDA(IdentityOp, CudaIdentityOp);
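// EyeImpl writes 1 along the k-th diagonal of the row-major output: start/stop/step parametrize the
// flat indices covered by that diagonal.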
template <typename T>
struct EyeImpl {
using CudaType = cuda_internal::DataType<T>;
EyeImpl(int64_t m, int64_t k) : start{k < 0 ? -k * m : k}, stop{m * (m - k)}, step{m + 1} {}
__device__ void operator()(int64_t i, CudaType& out) {
out = start <= i && i < stop && (i - start) % step == 0 ? CudaType{1} : CudaType{0};
}
int64_t start;
int64_t stop;
int64_t step;
};
class CudaEyeOp : public EyeOp {
public:
void Call(int64_t k, const Array& out) override {
Device& device = out.device();
CudaSetDeviceScope scope{device.index()};
VisitDtype(out.dtype(), [k, &out](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<T>(EyeImpl<T>{out.shape()[1], k}, out);
});
}
};
CHAINERX_REGISTER_OP_CUDA(EyeOp, CudaEyeOp);
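// Copies each element of a 1-D array onto the diagonal of a 2-D array starting at
// (mat_row_start, mat_col_start); used below by CudaDiagflatOp to place v on the k-th diagonal.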
template <typename T>
__global__ void SetVecInMat(
IndexableArray<const T, 1> vec_iarray,
IndexableArray<T, 2> mat_iarray,
Indexer<1> vec_indexer,
Indexer<2> mat_indexer,
int64_t mat_row_start,
int64_t mat_col_start) {
auto mat_it = mat_indexer.It(0);
for (auto vec_it = vec_indexer.It(blockIdx.x * blockDim.x + threadIdx.x, blockDim.x * gridDim.x); vec_it; ++vec_it) {
mat_it.index()[0] = mat_row_start + vec_it.raw_index();
mat_it.index()[1] = mat_col_start + vec_it.raw_index();
mat_iarray[mat_it] = vec_iarray[vec_it];
}
}
class CudaDiagflatOp : public DiagflatOp {
public:
void Call(const Array& v, int64_t k, const Array& out) override {
CHAINERX_ASSERT(v.ndim() == 1);
CHAINERX_ASSERT(out.ndim() == 2);
Device& device = v.device();
CudaSetDeviceScope scope{device.index()};
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
// Start indices for the 2-D array axes with applied offset k.
int64_t row_start{0};
int64_t col_start{0};
if (k >= 0) {
col_start += k;
} else {
row_start -= k;
}
// Initialize all elements to 0 first instead of conditionally filling in the diagonal.
device.Fill(out, T{0});
IndexableArray<const T, 1> v_iarray{v};
IndexableArray<T, 2> out_iarray{out};
Indexer<1> v_indexer{v.shape()};
Indexer<2> out_indexer{out.shape()};
// TODO(niboshi): Calculate kMaxBlockSize per device
std::lock_guard<std::mutex> lock{*cuda_internal::g_mutex};
static const int kMaxBlockSize = CudaOccupancyMaxPotentialBlockSize(&SetVecInMat<T>).block_size;
int64_t total_size = out_indexer.total_size();
int64_t grid_size = (total_size + kMaxBlockSize - 1) / kMaxBlockSize;
int64_t block_size = std::min<int64_t>(total_size, kMaxBlockSize);
hipLaunchKernelGGL(( SetVecInMat), dim3(grid_size), dim3(block_size), 0, 0, v_iarray, out_iarray, v_indexer, out_indexer, row_start, col_start);
});
}
};
CHAINERX_REGISTER_OP_CUDA(DiagflatOp, CudaDiagflatOp);
template <typename T>
struct LinspaceImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t i, CudaType& out) {
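// Each point is computed directly as a convex combination of start and stop,
// so rounding error does not accumulate across points; n == 1 yields start.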
double value = n == 1 ? start : (start * (n - 1 - i) + stop * i) / (n - 1);
out = cuda_numeric_cast<CudaType>(value);
}
int64_t n;
double start;
double stop;
};
class CudaLinspaceOp : public LinspaceOp {
public:
void Call(double start, double stop, const Array& out) override {
CHAINERX_ASSERT(out.ndim() == 1);
CHAINERX_ASSERT(out.shape()[0] > 0);
Device& device = out.device();
CudaSetDeviceScope scope{device.index()};
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
int64_t n = out.shape()[0];
Elementwise<T>(LinspaceImpl<T>{n, start, stop}, out);
});
}
};
CHAINERX_REGISTER_OP_CUDA(LinspaceOp, CudaLinspaceOp);
template <typename T>
struct FillImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType& out) { out = value; }
CudaType value;
};
} // namespace
void CudaDevice::Fill(const Array& out, Scalar value) {
CudaSetDeviceScope scope{index()};
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<T>(FillImpl<T>{static_cast<CudaType>(value)}, out);
});
}
} // namespace cuda
} // namespace chainerx
| df4e6a1b5cc469b18f06863673b06ef5437f624b.cu | #include "chainerx/cuda/cuda_device.h"
#include <algorithm>
#include <cstdint>
#include <mutex>
#include <cuda_runtime.h>
#include "chainerx/array.h"
#include "chainerx/cuda/cast.cuh"
#include "chainerx/cuda/cuda.h"
#include "chainerx/cuda/cuda_runtime.h"
#include "chainerx/cuda/cuda_set_device_scope.h"
#include "chainerx/cuda/elementwise.cuh"
#include "chainerx/cuda/op_regist.h"
#include "chainerx/device.h"
#include "chainerx/dtype.h"
#include "chainerx/indexable_array.h"
#include "chainerx/indexer.h"
#include "chainerx/macro.h"
#include "chainerx/routines/creation.h"
#include "chainerx/scalar.h"
#include "chainerx/shape.h"
namespace chainerx {
namespace cuda {
namespace {
template <typename T>
struct ArangeImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t i, CudaType& out) { out = start + step * static_cast<CudaType>(i); }
CudaType start;
CudaType step;
};
class CudaArangeOp : public ArangeOp {
public:
void Call(Scalar start, Scalar step, const Array& out) override {
Device& device = out.device();
CudaSetDeviceScope scope{device.index()};
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<T>(ArangeImpl<T>{static_cast<CudaType>(start), static_cast<CudaType>(step)}, out);
});
}
};
CHAINERX_REGISTER_OP_CUDA(ArangeOp, CudaArangeOp);
template <typename T>
struct IdentityImpl {
using CudaType = cuda_internal::DataType<T>;
explicit IdentityImpl(int64_t n) : n_plus_one{n + 1} {}
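// In a row-major n x n matrix the flattened diagonal indices are
// 0, n+1, 2*(n+1), ..., so index i is on the diagonal iff i % (n+1) == 0.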
__device__ void operator()(int64_t i, CudaType& out) { out = i % n_plus_one == 0 ? CudaType{1} : CudaType{0}; }
int64_t n_plus_one;
};
class CudaIdentityOp : public IdentityOp {
public:
void Call(const Array& out) override {
CHAINERX_ASSERT(out.ndim() == 2);
CHAINERX_ASSERT(out.shape()[0] == out.shape()[1]);
Device& device = out.device();
CudaSetDeviceScope scope{device.index()};
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<T>(IdentityImpl<T>{out.shape()[0]}, out);
});
}
};
CHAINERX_REGISTER_OP_CUDA(IdentityOp, CudaIdentityOp);
template <typename T>
struct EyeImpl {
using CudaType = cuda_internal::DataType<T>;
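// For a matrix with m columns, the k-th diagonal starts at flat index k
// (k >= 0) or -k*m (k < 0), and consecutive diagonal elements are m+1 apart.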
EyeImpl(int64_t m, int64_t k) : start{k < 0 ? -k * m : k}, stop{m * (m - k)}, step{m + 1} {}
__device__ void operator()(int64_t i, CudaType& out) {
out = start <= i && i < stop && (i - start) % step == 0 ? CudaType{1} : CudaType{0};
}
int64_t start;
int64_t stop;
int64_t step;
};
class CudaEyeOp : public EyeOp {
public:
void Call(int64_t k, const Array& out) override {
Device& device = out.device();
CudaSetDeviceScope scope{device.index()};
VisitDtype(out.dtype(), [k, &out](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<T>(EyeImpl<T>{out.shape()[1], k}, out);
});
}
};
CHAINERX_REGISTER_OP_CUDA(EyeOp, CudaEyeOp);
template <typename T>
__global__ void SetVecInMat(
IndexableArray<const T, 1> vec_iarray,
IndexableArray<T, 2> mat_iarray,
Indexer<1> vec_indexer,
Indexer<2> mat_indexer,
int64_t mat_row_start,
int64_t mat_col_start) {
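// Grid-stride loop: element j of the vector is written to matrix position
// (mat_row_start + j, mat_col_start + j), i.e. along one diagonal.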
auto mat_it = mat_indexer.It(0);
for (auto vec_it = vec_indexer.It(blockIdx.x * blockDim.x + threadIdx.x, blockDim.x * gridDim.x); vec_it; ++vec_it) {
mat_it.index()[0] = mat_row_start + vec_it.raw_index();
mat_it.index()[1] = mat_col_start + vec_it.raw_index();
mat_iarray[mat_it] = vec_iarray[vec_it];
}
}
class CudaDiagflatOp : public DiagflatOp {
public:
void Call(const Array& v, int64_t k, const Array& out) override {
CHAINERX_ASSERT(v.ndim() == 1);
CHAINERX_ASSERT(out.ndim() == 2);
Device& device = v.device();
CudaSetDeviceScope scope{device.index()};
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
// Start indices for the 2-D array axes with applied offset k.
int64_t row_start{0};
int64_t col_start{0};
if (k >= 0) {
col_start += k;
} else {
row_start -= k;
}
// Initialize all elements to 0 first instead of conditionally filling in the diagonal.
device.Fill(out, T{0});
IndexableArray<const T, 1> v_iarray{v};
IndexableArray<T, 2> out_iarray{out};
Indexer<1> v_indexer{v.shape()};
Indexer<2> out_indexer{out.shape()};
// TODO(niboshi): Calculate kMaxBlockSize per device
std::lock_guard<std::mutex> lock{*cuda_internal::g_mutex};
static const int kMaxBlockSize = CudaOccupancyMaxPotentialBlockSize(&SetVecInMat<T>).block_size;
int64_t total_size = out_indexer.total_size();
int64_t grid_size = (total_size + kMaxBlockSize - 1) / kMaxBlockSize;
int64_t block_size = std::min<int64_t>(total_size, kMaxBlockSize);
SetVecInMat<<<grid_size, block_size>>>(v_iarray, out_iarray, v_indexer, out_indexer, row_start, col_start);
});
}
};
CHAINERX_REGISTER_OP_CUDA(DiagflatOp, CudaDiagflatOp);
template <typename T>
struct LinspaceImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t i, CudaType& out) {
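// Each point is computed directly as a convex combination of start and stop,
// so rounding error does not accumulate across points; n == 1 yields start.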
double value = n == 1 ? start : (start * (n - 1 - i) + stop * i) / (n - 1);
out = cuda_numeric_cast<CudaType>(value);
}
int64_t n;
double start;
double stop;
};
class CudaLinspaceOp : public LinspaceOp {
public:
void Call(double start, double stop, const Array& out) override {
CHAINERX_ASSERT(out.ndim() == 1);
CHAINERX_ASSERT(out.shape()[0] > 0);
Device& device = out.device();
CudaSetDeviceScope scope{device.index()};
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
int64_t n = out.shape()[0];
Elementwise<T>(LinspaceImpl<T>{n, start, stop}, out);
});
}
};
CHAINERX_REGISTER_OP_CUDA(LinspaceOp, CudaLinspaceOp);
template <typename T>
struct FillImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType& out) { out = value; }
CudaType value;
};
} // namespace
void CudaDevice::Fill(const Array& out, Scalar value) {
CudaSetDeviceScope scope{index()};
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<T>(FillImpl<T>{static_cast<CudaType>(value)}, out);
});
}
} // namespace cuda
} // namespace chainerx
|
960fe0a33783e6a43280967e415ddacbb6f498a3.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "gpu_stencil37_hack2_cp_cols.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *dst = NULL;
hipMalloc(&dst, XSIZE*YSIZE);
double *shared_rows = NULL;
hipMalloc(&shared_rows, XSIZE*YSIZE);
double *shared_cols = NULL;
hipMalloc(&shared_cols, XSIZE*YSIZE);
double *shared_slices = NULL;
hipMalloc(&shared_slices, XSIZE*YSIZE);
int d_xpitch = 2;
int d_ypitch = 2;
int d_zpitch = 2;
int s_xpitch = 2;
int s_ypitch = 2;
int s_zpitch = 2;
int n_rows = 1;
int n_cols = 1;
int n_slices = 1;
int tile_x = 1;
int tile_y = 1;
int tile_z = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
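// Grow the problem size to the next multiples of the block dimensions so the
// grid decomposition computed below covers the whole domain.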
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((gpu_stencil37_hack2_cp_cols), dim3(gridBlock), dim3(threadBlock), 0, 0, dst,shared_rows,shared_cols,shared_slices,d_xpitch,d_ypitch,d_zpitch,s_xpitch,s_ypitch,s_zpitch,n_rows,n_cols,n_slices,tile_x,tile_y,tile_z);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((gpu_stencil37_hack2_cp_cols), dim3(gridBlock), dim3(threadBlock), 0, 0, dst,shared_rows,shared_cols,shared_slices,d_xpitch,d_ypitch,d_zpitch,s_xpitch,s_ypitch,s_zpitch,n_rows,n_cols,n_slices,tile_x,tile_y,tile_z);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((gpu_stencil37_hack2_cp_cols), dim3(gridBlock), dim3(threadBlock), 0, 0, dst,shared_rows,shared_cols,shared_slices,d_xpitch,d_ypitch,d_zpitch,s_xpitch,s_ypitch,s_zpitch,n_rows,n_cols,n_slices,tile_x,tile_y,tile_z);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 960fe0a33783e6a43280967e415ddacbb6f498a3.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "gpu_stencil37_hack2_cp_cols.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *dst = NULL;
cudaMalloc(&dst, XSIZE*YSIZE);
double *shared_rows = NULL;
cudaMalloc(&shared_rows, XSIZE*YSIZE);
double *shared_cols = NULL;
cudaMalloc(&shared_cols, XSIZE*YSIZE);
double *shared_slices = NULL;
cudaMalloc(&shared_slices, XSIZE*YSIZE);
int d_xpitch = 2;
int d_ypitch = 2;
int d_zpitch = 2;
int s_xpitch = 2;
int s_ypitch = 2;
int s_zpitch = 2;
int n_rows = 1;
int n_cols = 1;
int n_slices = 1;
int tile_x = 1;
int tile_y = 1;
int tile_z = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
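// Grow the problem size to the next multiples of the block dimensions so the
// grid decomposition computed below covers the whole domain.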
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
gpu_stencil37_hack2_cp_cols<<<gridBlock,threadBlock>>>(dst,shared_rows,shared_cols,shared_slices,d_xpitch,d_ypitch,d_zpitch,s_xpitch,s_ypitch,s_zpitch,n_rows,n_cols,n_slices,tile_x,tile_y,tile_z);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
gpu_stencil37_hack2_cp_cols<<<gridBlock,threadBlock>>>(dst,shared_rows,shared_cols,shared_slices,d_xpitch,d_ypitch,d_zpitch,s_xpitch,s_ypitch,s_zpitch,n_rows,n_cols,n_slices,tile_x,tile_y,tile_z);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
gpu_stencil37_hack2_cp_cols<<<gridBlock,threadBlock>>>(dst,shared_rows,shared_cols,shared_slices,d_xpitch,d_ypitch,d_zpitch,s_xpitch,s_ypitch,s_zpitch,n_rows,n_cols,n_slices,tile_x,tile_y,tile_z);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
1cab774c08c54472a73c3bc8f64e2d6703a1fffa.hip | // !!! This is a file automatically generated by hipify!!!
//======================================
// Integrated processing layer of a feed-forward neural network
// Connection and activation
// GPU implementation
//======================================
#include"stdafx.h"
#include"Reshape_DATA.hpp"
#include"Reshape_FUNC.hpp"
#include"Reshape_Base.h"
#include"Reshape_GPU.cuh"
#include"Reshape_LayerData_GPU.cuh"
using namespace Gravisbell;
using namespace Gravisbell::Layer::NeuralNetwork;
namespace Gravisbell {
namespace Layer {
namespace NeuralNetwork {
/** Constructor */
Reshape_GPU::Reshape_GPU(Gravisbell::GUID guid, Reshape_LayerData_GPU& i_layerData, const IODataStruct& i_inputDataStruct, Gravisbell::Common::ITemporaryMemoryManager& i_temporaryMemoryManager)
: Reshape_Base (guid, i_inputDataStruct, i_layerData.GetOutputDataStruct(&i_inputDataStruct, 1))
, layerData (i_layerData) /**< */
{
}
/** Destructor */
Reshape_GPU::~Reshape_GPU()
{
}
//================================
// Basic processing
//================================
/** Get the layer kind */
U32 Reshape_GPU::GetLayerKind()const
{
return Layer::ELayerKind::LAYER_KIND_GPU | GetLayerKindBase();
}
/** Initialization. Initializes each neuron value at random.
    @return 0 on success */
ErrorCode Reshape_GPU::Initialize(void)
{
return this->layerData.Initialize();
}
//===========================
// Layer data
//===========================
/** Get the layer data */
Reshape_LayerData_Base& Reshape_GPU::GetLayerData()
{
return this->layerData;
}
const Reshape_LayerData_Base& Reshape_GPU::GetLayerData()const
{
return this->layerData;
}
//================================
// Computation
//================================
/** Execute pre-processing for computation (training).
    @param batchSize    Size of the batch processed at once.
    Must be run exactly once after the NN is built and before any computation; it does not have to be run per data sample.
    If it fails, PreProcessLearnLoop and later steps cannot be executed. */
ErrorCode Reshape_GPU::PreProcessLearn()
{
ErrorCode errorCode = this->PreProcessCalculate();
if(errorCode != ErrorCode::ERROR_CODE_NONE)
return errorCode;
return ErrorCode::ERROR_CODE_NONE;
}
/** Execute pre-processing for computation (inference).
    @param batchSize    Size of the batch processed at once.
    Must be run exactly once after the NN is built and before any computation; it does not have to be run per data sample.
    If it fails, Calculate and later steps cannot be executed. */
ErrorCode Reshape_GPU::PreProcessCalculate()
{
return ErrorCode::ERROR_CODE_NONE;
}
/** Initialize the loop. Run before starting execution over a data set.
    If it fails, Calculate and later steps cannot be executed. */
ErrorCode Reshape_GPU::PreProcessLoop()
{
return ErrorCode::ERROR_CODE_NONE;
}
/** Execute the computation.
    @param lpInputBuffer    Input data buffer. Requires as many elements as the value returned by GetInputBufferCount.
    @return 0 on success */
ErrorCode Reshape_GPU::Calculate_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppOutputBuffer)
{
hipMemcpy(o_lppOutputBuffer, i_lppInputBuffer, sizeof(F32)*this->GetInputBufferCount()*this->GetBatchSize(), hipMemcpyDeviceToDevice);
return ErrorCode::ERROR_CODE_NONE;
}
//================================
// Training
//================================
/** Compute the input error. Use this to obtain the input error without training.
    The input and output signals refer to the values of the preceding Calculate call.
    @param o_lppDInputBuffer    Destination for the input error deltas. Requires [GetBatchSize()][GetInputBufferCount()] elements.
    @param i_lppDOutputBuffer    Output error deltas = input error deltas of the next layer. Requires [GetOutputDataCount()] arrays of [GetBatchSize()][GetOutputBufferCount()] elements.
    Uses the result of the preceding computation. */
ErrorCode Reshape_GPU::CalculateDInput_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppDInputBuffer, CONST_BATCH_BUFFER_POINTER i_lppOutputBuffer, CONST_BATCH_BUFFER_POINTER i_lppDOutputBuffer)
{
// Compute input error
if(o_lppDInputBuffer)
{
hipMemcpy(
o_lppDInputBuffer,
i_lppDOutputBuffer,
sizeof(F32)*this->GetInputBufferCount()*this->GetBatchSize(),
hipMemcpyDeviceToDevice);
}
return ErrorCode::ERROR_CODE_NONE;
}
/** Execute the training step.
    The input and output signals refer to the values of the preceding Calculate call.
    @param i_lppDOutputBuffer    Output error deltas = input error deltas of the next layer. Requires [GetBatchSize()][GetOutputBufferCount()] elements.
    Uses the result of the preceding computation. */
ErrorCode Reshape_GPU::Training_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppDInputBuffer, CONST_BATCH_BUFFER_POINTER i_lppOutputBuffer, CONST_BATCH_BUFFER_POINTER i_lppDOutputBuffer)
{
return this->CalculateDInput_device(i_lppInputBuffer, o_lppDInputBuffer, i_lppOutputBuffer, i_lppDOutputBuffer);
}
} // Gravisbell;
} // Layer;
} // NeuralNetwork;
| 1cab774c08c54472a73c3bc8f64e2d6703a1fffa.cu | //======================================
// Integrated processing layer of a feed-forward neural network
// Connection and activation
// GPU implementation
//======================================
#include"stdafx.h"
#include"Reshape_DATA.hpp"
#include"Reshape_FUNC.hpp"
#include"Reshape_Base.h"
#include"Reshape_GPU.cuh"
#include"Reshape_LayerData_GPU.cuh"
using namespace Gravisbell;
using namespace Gravisbell::Layer::NeuralNetwork;
namespace Gravisbell {
namespace Layer {
namespace NeuralNetwork {
/** Constructor */
Reshape_GPU::Reshape_GPU(Gravisbell::GUID guid, Reshape_LayerData_GPU& i_layerData, const IODataStruct& i_inputDataStruct, Gravisbell::Common::ITemporaryMemoryManager& i_temporaryMemoryManager)
: Reshape_Base (guid, i_inputDataStruct, i_layerData.GetOutputDataStruct(&i_inputDataStruct, 1))
, layerData (i_layerData) /**< レイヤーデータ */
{
}
/** Destructor */
Reshape_GPU::~Reshape_GPU()
{
}
//================================
// Basic processing
//================================
/** Get the layer kind */
U32 Reshape_GPU::GetLayerKind()const
{
return Layer::ELayerKind::LAYER_KIND_GPU | GetLayerKindBase();
}
/** Initialization. Initializes each neuron value at random.
    @return 0 on success */
ErrorCode Reshape_GPU::Initialize(void)
{
return this->layerData.Initialize();
}
//===========================
// Layer data
//===========================
/** Get the layer data */
Reshape_LayerData_Base& Reshape_GPU::GetLayerData()
{
return this->layerData;
}
const Reshape_LayerData_Base& Reshape_GPU::GetLayerData()const
{
return this->layerData;
}
//================================
// Computation
//================================
/** Execute pre-processing for computation (training).
    @param batchSize    Size of the batch processed at once.
    Must be run exactly once after the NN is built and before any computation; it does not have to be run per data sample.
    If it fails, PreProcessLearnLoop and later steps cannot be executed. */
ErrorCode Reshape_GPU::PreProcessLearn()
{
ErrorCode errorCode = this->PreProcessCalculate();
if(errorCode != ErrorCode::ERROR_CODE_NONE)
return errorCode;
return ErrorCode::ERROR_CODE_NONE;
}
/** Execute pre-processing for computation (inference).
    @param batchSize    Size of the batch processed at once.
    Must be run exactly once after the NN is built and before any computation; it does not have to be run per data sample.
    If it fails, Calculate and later steps cannot be executed. */
ErrorCode Reshape_GPU::PreProcessCalculate()
{
return ErrorCode::ERROR_CODE_NONE;
}
/** Initialize the loop. Run before starting execution over a data set.
    If it fails, Calculate and later steps cannot be executed. */
ErrorCode Reshape_GPU::PreProcessLoop()
{
return ErrorCode::ERROR_CODE_NONE;
}
/** Execute the computation.
    @param lpInputBuffer    Input data buffer. Requires as many elements as the value returned by GetInputBufferCount.
    @return 0 on success */
ErrorCode Reshape_GPU::Calculate_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppOutputBuffer)
{
cudaMemcpy(o_lppOutputBuffer, i_lppInputBuffer, sizeof(F32)*this->GetInputBufferCount()*this->GetBatchSize(), cudaMemcpyDeviceToDevice);
return ErrorCode::ERROR_CODE_NONE;
}
//================================
// Training
//================================
/** Compute the input error. Use this to obtain the input error without training.
    The input and output signals refer to the values of the preceding Calculate call.
    @param o_lppDInputBuffer    Destination for the input error deltas. Requires [GetBatchSize()][GetInputBufferCount()] elements.
    @param i_lppDOutputBuffer    Output error deltas = input error deltas of the next layer. Requires [GetOutputDataCount()] arrays of [GetBatchSize()][GetOutputBufferCount()] elements.
    Uses the result of the preceding computation. */
ErrorCode Reshape_GPU::CalculateDInput_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppDInputBuffer, CONST_BATCH_BUFFER_POINTER i_lppOutputBuffer, CONST_BATCH_BUFFER_POINTER i_lppDOutputBuffer)
{
// Compute input error
if(o_lppDInputBuffer)
{
cudaMemcpy(
o_lppDInputBuffer,
i_lppDOutputBuffer,
sizeof(F32)*this->GetInputBufferCount()*this->GetBatchSize(),
cudaMemcpyDeviceToDevice);
}
return ErrorCode::ERROR_CODE_NONE;
}
/** Execute the training step.
    The input and output signals refer to the values of the preceding Calculate call.
    @param i_lppDOutputBuffer    Output error deltas = input error deltas of the next layer. Requires [GetBatchSize()][GetOutputBufferCount()] elements.
    Uses the result of the preceding computation. */
ErrorCode Reshape_GPU::Training_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppDInputBuffer, CONST_BATCH_BUFFER_POINTER i_lppOutputBuffer, CONST_BATCH_BUFFER_POINTER i_lppDOutputBuffer)
{
return this->CalculateDInput_device(i_lppInputBuffer, o_lppDInputBuffer, i_lppOutputBuffer, i_lppDOutputBuffer);
}
} // Gravisbell;
} // Layer;
} // NeuralNetwork;
|
be14a1124f84a596c7512b00acf399a59f0310e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2019 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "optimizer.h"
#include "accessor.h"
#include "model.h"
#include "cuda_helper.h"
LegionRuntime::Logger::Category log_optimizer("optimizer");
__global__
void sgd_update(int count, float lr, float weight_decay,
float momentum, bool nesterov,
const float* WGrad, float* V, float* W)
{
// Reference https://pytorch.org/docs/stable/_modules/torch/optim/sgd.html#SGD
CUDA_KERNEL_LOOP(i, count)
{
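// gt is the effective gradient: the raw gradient plus L2 weight decay, optionally
// routed through the momentum buffer V (classical or Nesterov) before the update.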
float gt = WGrad[i] + weight_decay * W[i];
if (momentum > 0.0f) {
V[i] = V[i] * momentum + gt;
if (nesterov)
gt = gt + momentum * V[i];
else
gt = V[i];
}
W[i] -= lr * gt;
}
}
__host__
void SGDOptimizer::update_task(const Task* task,
const std::vector<PhysicalRegion>& regions,
Context ctx, Runtime* runtime)
{
const SGDOptimizer* op = (SGDOptimizer*) task->args;
if (op->momentum > 0.0f) {
assert(regions.size() == 3);
assert(task->regions.size() == 3);
} else {
assert(regions.size() == 2);
assert(task->regions.size() == 2);
}
Domain domain = runtime->get_index_space_domain(ctx,
task->regions[1].region.get_index_space());
const float *w_grad_ptr = NULL;
float *w_ptr = NULL, *v_ptr = NULL;
size_t size = 0, num_replicas = 0;
switch(domain.get_dim()) {
case 1:
{
TensorAccessorR<float, 1> accWGrad(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 1> accW(
regions[1], task->regions[1], FID_DATA, ctx, runtime,
true/*readOutput*/);
for (int i = 0; i < domain.get_dim()-1; i++) {
assert(accW.rect.lo[i] == accWGrad.rect.lo[i]);
assert(accW.rect.hi[i] == accWGrad.rect.hi[i]);
}
size = accW.rect.volume();
assert(accWGrad.rect.volume() % accW.rect.volume() == 0);
num_replicas = accWGrad.rect.volume() / accW.rect.volume();
w_grad_ptr = accWGrad.ptr;
w_ptr = accW.ptr;
if (op->momentum > 0.0f) {
TensorAccessorW<float, 1> accV(
regions[2], task->regions[2], FID_DATA, ctx, runtime,
true/*readOutput*/);
assert(accW.rect == accV.rect);
v_ptr = accV.ptr;
}
break;
}
case 2:
{
TensorAccessorR<float, 2> accWGrad(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 2> accW(
regions[1], task->regions[1], FID_DATA, ctx, runtime,
true/*readOutput*/);
for (int i = 0; i < domain.get_dim()-1; i++) {
assert(accW.rect.lo[i] == accWGrad.rect.lo[i]);
assert(accW.rect.hi[i] == accWGrad.rect.hi[i]);
}
size = accW.rect.volume();
assert(accWGrad.rect.volume() % accW.rect.volume() == 0);
num_replicas = accWGrad.rect.volume() / accW.rect.volume();
w_grad_ptr = accWGrad.ptr;
w_ptr = accW.ptr;
if (op->momentum > 0.0f) {
TensorAccessorW<float, 2> accV(
regions[2], task->regions[2], FID_DATA, ctx, runtime,
true/*readOutput*/);
assert(accW.rect == accV.rect);
v_ptr = accV.ptr;
}
break;
}
default:
{
// Unsupported dims
assert(false);
}
}
// Step 1: gather gradients in the first replica
for (int i = 1; i < num_replicas; i++) {
const float* src = w_grad_ptr + i * size;
hipLaunchKernelGGL(( apply_add_with_scale), dim3(GET_BLOCKS(size)), dim3(CUDA_NUM_THREADS), 0, 0,
(float*) w_grad_ptr, src, size, 1.0f);
}
// Step 2: SGD update
hipLaunchKernelGGL(( sgd_update), dim3(GET_BLOCKS(size)), dim3(CUDA_NUM_THREADS), 0, 0,
size, op->lr, op->weight_decay, op->momentum, op->nesterov,
w_grad_ptr, v_ptr, w_ptr);
checkCUDA(hipDeviceSynchronize());
}
| be14a1124f84a596c7512b00acf399a59f0310e5.cu | /* Copyright 2019 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "optimizer.h"
#include "accessor.h"
#include "model.h"
#include "cuda_helper.h"
LegionRuntime::Logger::Category log_optimizer("optimizer");
__global__
void sgd_update(int count, float lr, float weight_decay,
float momentum, bool nesterov,
const float* WGrad, float* V, float* W)
{
// Reference https://pytorch.org/docs/stable/_modules/torch/optim/sgd.html#SGD
CUDA_KERNEL_LOOP(i, count)
{
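// gt is the effective gradient: the raw gradient plus L2 weight decay, optionally
// routed through the momentum buffer V (classical or Nesterov) before the update.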
float gt = WGrad[i] + weight_decay * W[i];
if (momentum > 0.0f) {
V[i] = V[i] * momentum + gt;
if (nesterov)
gt = gt + momentum * V[i];
else
gt = V[i];
}
W[i] -= lr * gt;
}
}
__host__
void SGDOptimizer::update_task(const Task* task,
const std::vector<PhysicalRegion>& regions,
Context ctx, Runtime* runtime)
{
const SGDOptimizer* op = (SGDOptimizer*) task->args;
if (op->momentum > 0.0f) {
assert(regions.size() == 3);
assert(task->regions.size() == 3);
} else {
assert(regions.size() == 2);
assert(task->regions.size() == 2);
}
Domain domain = runtime->get_index_space_domain(ctx,
task->regions[1].region.get_index_space());
const float *w_grad_ptr = NULL;
float *w_ptr = NULL, *v_ptr = NULL;
size_t size = 0, num_replicas = 0;
switch(domain.get_dim()) {
case 1:
{
TensorAccessorR<float, 1> accWGrad(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 1> accW(
regions[1], task->regions[1], FID_DATA, ctx, runtime,
true/*readOutput*/);
for (int i = 0; i < domain.get_dim()-1; i++) {
assert(accW.rect.lo[i] == accWGrad.rect.lo[i]);
assert(accW.rect.hi[i] == accWGrad.rect.hi[i]);
}
size = accW.rect.volume();
assert(accWGrad.rect.volume() % accW.rect.volume() == 0);
num_replicas = accWGrad.rect.volume() / accW.rect.volume();
w_grad_ptr = accWGrad.ptr;
w_ptr = accW.ptr;
if (op->momentum > 0.0f) {
TensorAccessorW<float, 1> accV(
regions[2], task->regions[2], FID_DATA, ctx, runtime,
true/*readOutput*/);
assert(accW.rect == accV.rect);
v_ptr = accV.ptr;
}
break;
}
case 2:
{
TensorAccessorR<float, 2> accWGrad(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 2> accW(
regions[1], task->regions[1], FID_DATA, ctx, runtime,
true/*readOutput*/);
for (int i = 0; i < domain.get_dim()-1; i++) {
assert(accW.rect.lo[i] == accWGrad.rect.lo[i]);
assert(accW.rect.hi[i] == accWGrad.rect.hi[i]);
}
size = accW.rect.volume();
assert(accWGrad.rect.volume() % accW.rect.volume() == 0);
num_replicas = accWGrad.rect.volume() / accW.rect.volume();
w_grad_ptr = accWGrad.ptr;
w_ptr = accW.ptr;
if (op->momentum > 0.0f) {
TensorAccessorW<float, 2> accV(
regions[2], task->regions[2], FID_DATA, ctx, runtime,
true/*readOutput*/);
assert(accW.rect == accV.rect);
v_ptr = accV.ptr;
}
break;
}
default:
{
// Unsupported dims
assert(false);
}
}
// Step 1: gather gradients in the first replica
for (int i = 1; i < num_replicas; i++) {
const float* src = w_grad_ptr + i * size;
apply_add_with_scale<<<GET_BLOCKS(size), CUDA_NUM_THREADS>>>(
(float*) w_grad_ptr, src, size, 1.0f);
}
// Step 2: SGD update
sgd_update<<<GET_BLOCKS(size), CUDA_NUM_THREADS>>>(
size, op->lr, op->weight_decay, op->momentum, op->nesterov,
w_grad_ptr, v_ptr, w_ptr);
checkCUDA(cudaDeviceSynchronize());
}
|
58f2a255b381a5357fd313707c385e1283bac13b.hip | // !!! This is a file automatically generated by hipify!!!
/***
* /file
*
* Validation test for Laplacian matrix.
*
* solve - u" = f with Dirichlet boundary conditions
* Numerical solution is confronted with analytical one.
*
*/
#include<iostream>
#include<fstream>
#include<sstream>
#include<string>
#include<vector>
#include<mpi.h>
#define USE_PRECONDITIONER
#define USE_MODE_MATRIX
#define MPI_NODE_PER_EDGE 16
#define EXACT_SOLUTION_NO 5
#include"../analytical_solutions.hpp"
#include<sipg_sem_2d_multigpu.hpp>
#include<iomanip>
#include<CUDA_TIMER.hpp>
int main(int argc, char** argv)
{
MPI_Init(&argc, &argv);
int pid, nprocs;
MPI_Comm CartComm;
MPI_Comm_rank(MPI_COMM_WORLD, &pid);
MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
const double toll = 1e-14;
if(pid == 0)
{
std::cerr<<"EXACT_SOLUTION_NO "<<EXACT_SOLUTION_NO<<std::endl;
std::cerr<<"MPI_NODE_PER_EDGE "<<MPI_NODE_PER_EDGE<<std::endl;
#ifdef USE_MODE_MATRIX
std::cout<<"USE_MODE_MATRIX is ON"<<std::endl;
#endif
#ifdef USE_PRECONDITIONER
std::cout<<"USE_PRECONDITIONER is ON"<<std::endl;
#endif
std::cout<<"toll: "<<toll<<std::endl;
}
int dims[3] = {MPI_NODE_PER_EDGE, MPI_NODE_PER_EDGE, 1};
int period[3] = {0, 0, 0};
MPI_Cart_create(MPI_COMM_WORLD, 3, dims, period, false, &CartComm);
int coords[3] = {0, 0, 0};
MPI_Cart_get(CartComm, 3, dims, period, coords);
// int degree = 2;
for (int degree = 2; degree < 4; ++degree)
{
double L2_err_old(0), H1_err_old(0);
// int dim = 8;
for (int dim = 32; dim < 257; dim*=2)
{
CUDA_TIMER t;
using namespace test_func;
square_mesh_multigpu<double> sq_mesh( dim, MPI_NODE_PER_EDGE, coords[0], coords[1] );
if (pid == 0) t.start();
sipg_sem_2d_multigpu<double> p(CartComm, degree, sq_mesh, f, u_ex, dx_u_ex, dy_u_ex, toll);
if (pid == 0) t.stop();
if(pid == 0)
{
std::cerr<<MPI_NODE_PER_EDGE*dim<<"\t"<<degree<<"\t";
std::cerr<<std::setw(12)<<log(p.H1_err/H1_err_old)/log(2)<<"\t";
std::cerr<<std::setw(12)<<p.H1_err<<"\t";
std::cerr<<std::setw(12)<<log(p.L2_err/L2_err_old)/log(2)<<"\t";
std::cerr<<std::setw(12)<<p.L2_err<<"\t";
std::cerr<<t.elapsed_millisecs();
std::cerr<<"\t"<<p.iterations;
std::cerr<<std::endl;
L2_err_old = p.L2_err;
H1_err_old = p.H1_err;
}
sq_mesh.device_info.free();
}
if (pid == 0) std::cerr<<std::endl;
}
#if 0
hipError_t error = hipGetLastError();
std::string lastError = hipGetErrorString(error);
std::cout<<lastError<<std::endl;
#endif
MPI_Finalize();
return 0;
}
| 58f2a255b381a5357fd313707c385e1283bac13b.cu | /***
* /file
*
* Validation test for Laplacian matrix.
*
* solve - u" = f with Dirichlet boundary conditions
* Numerical solution is confronted with analytical one.
*
*/
#include<iostream>
#include<fstream>
#include<sstream>
#include<string>
#include<vector>
#include<mpi.h>
#define USE_PRECONDITIONER
#define USE_MODE_MATRIX
#define MPI_NODE_PER_EDGE 16
#define EXACT_SOLUTION_NO 5
#include"../analytical_solutions.hpp"
#include<sipg_sem_2d_multigpu.hpp>
#include<iomanip>
#include<CUDA_TIMER.hpp>
int main(int argc, char** argv)
{
MPI_Init(&argc, &argv);
int pid, nprocs;
MPI_Comm CartComm;
MPI_Comm_rank(MPI_COMM_WORLD, &pid);
MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
const double toll = 1e-14;
if(pid == 0)
{
std::cerr<<"EXACT_SOLUTION_NO "<<EXACT_SOLUTION_NO<<std::endl;
std::cerr<<"MPI_NODE_PER_EDGE "<<MPI_NODE_PER_EDGE<<std::endl;
#ifdef USE_MODE_MATRIX
std::cout<<"USE_MODE_MATRIX is ON"<<std::endl;
#endif
#ifdef USE_PRECONDITIONER
std::cout<<"USE_PRECONDITIONER is ON"<<std::endl;
#endif
std::cout<<"toll: "<<toll<<std::endl;
}
int dims[3] = {MPI_NODE_PER_EDGE, MPI_NODE_PER_EDGE, 1};
int period[3] = {0, 0, 0};
MPI_Cart_create(MPI_COMM_WORLD, 3, dims, period, false, &CartComm);
int coords[3] = {0, 0, 0};
MPI_Cart_get(CartComm, 3, dims, period, coords);
// int degree = 2;
for (int degree = 2; degree < 4; ++degree)
{
double L2_err_old(0), H1_err_old(0);
// int dim = 8;
for (int dim = 32; dim < 257; dim*=2)
{
CUDA_TIMER t;
using namespace test_func;
square_mesh_multigpu<double> sq_mesh( dim, MPI_NODE_PER_EDGE, coords[0], coords[1] );
if (pid == 0) t.start();
sipg_sem_2d_multigpu<double> p(CartComm, degree, sq_mesh, f, u_ex, dx_u_ex, dy_u_ex, toll);
if (pid == 0) t.stop();
if(pid == 0)
{
std::cerr<<MPI_NODE_PER_EDGE*dim<<"\t"<<degree<<"\t";
std::cerr<<std::setw(12)<<log(p.H1_err/H1_err_old)/log(2)<<"\t";
std::cerr<<std::setw(12)<<p.H1_err<<"\t";
std::cerr<<std::setw(12)<<log(p.L2_err/L2_err_old)/log(2)<<"\t";
std::cerr<<std::setw(12)<<p.L2_err<<"\t";
std::cerr<<t.elapsed_millisecs();
std::cerr<<"\t"<<p.iterations;
std::cerr<<std::endl;
L2_err_old = p.L2_err;
H1_err_old = p.H1_err;
}
sq_mesh.device_info.free();
}
if (pid == 0) std::cerr<<std::endl;
}
#if 0
cudaError_t error = cudaGetLastError();
std::string lastError = cudaGetErrorString(error);
std::cout<<lastError<<std::endl;
#endif
MPI_Finalize();
return 0;
}
|
820f54c7729a00760c79ea8eef7e8d47c7c977c3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "simulation_gpu.h"
void cuda_failure(std::string stmt, int ce) {
std::cerr << "CUDA Failure: [" << ce << "] in line: " << stmt << std::endl;
}
CudaPRNG::CudaPRNG(unsigned long long int seed, int size_)
: size(size_) {
CUDA_HANDLERR(hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT));
CUDA_HANDLERR(hiprandSetPseudoRandomGeneratorSeed(gen, seed));
CUDA_HANDLERR(hipMalloc((void **) &where, size_ * sizeof(float)));
}
void CudaPRNG::generate() {
CUDA_HANDLERR(hiprandGenerateUniform(gen, where, size));
}
Simple2DIsingParamsDevice::Simple2DIsingParamsDevice(const Simple2DIsingParams ¶ms) {
// Copy constructor from CPU version of params
spins = CudaDevice::copy_array_to_device<int>(
params.initial_spins.size(),
&(params.initial_spins[0])
);
n = (int) params.initial_spins.size();
beta = (float) calc_beta(params.temperature);
magnetic_moment = (float) params.magnetic_moment;
xlen = (int) params.xlen;
ylen = (int) params.ylen;
interaction = (float) params.interaction;
external_field = (float) params.external_field;
}
// These kernels are direct analogues of methods in the CPU simulation
__device__
float get_spin_energy_simple(int i,
const struct Simple2DIsingParamsDevice &dev) {
// TODO: generalise over dimensions
float res = 0;
float spin = dev.spins[i];
res += -dev.magnetic_moment *
dev.external_field * spin;
int xlen = dev.xlen;
int ylen = dev.ylen;
int x = i % xlen;
int y = i / xlen;
// Periodic boundaries: add the lattice extent before taking the modulus so that
// x == 0 or y == 0 does not produce a negative index (C++ % keeps the sign).
int left = (x - 1 + xlen) % xlen + y * xlen;
int right = (x + 1) % xlen + y * xlen;
int top = x + (y - 1 + ylen) % ylen * xlen;
int down = x + (y + 1) % ylen * xlen;
res += -dev.interaction * spin * dev.spins[left];
res += -dev.interaction * spin * dev.spins[right];
res += -dev.interaction * spin * dev.spins[top];
res += -dev.interaction * spin * dev.spins[down];
return res;
}
__device__
float get_spin_flip_prob(float energy, float beta) {
return exp(-beta * energy);
}
__device__
void
flip_one_spin_stochastically(struct Simple2DIsingParamsDevice &dev,
size_t i,
float *random_floats) {
// \Delta E = E_after_flip - E_now = -spin_energy_now - spin_energy_now
float delta_energy = -2 * get_spin_energy_simple(i, dev);
if (delta_energy < 0 ||
random_floats[i] < get_spin_flip_prob(delta_energy, dev.beta)) {
dev.spins[i] = -dev.spins[i];
}
}
__global__
void
flip_spins_stochastically(struct Simple2DIsingParamsDevice dev,
int offset,
float *random_floats) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int x = i % dev.xlen;
int y = i / dev.xlen;
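// Checkerboard (red-black) sweep: this pass only flips sites where
// (x + y + offset) is odd, so neighbouring spins are never updated concurrently.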
int is_odd_position = (x + y + offset) % 2;
if (i < dev.n && is_odd_position) {
flip_one_spin_stochastically(dev, i, random_floats);
}
}
void GPUSimple2DIsingModel::run(size_t max_steps) {
for (size_t i = 0; i < max_steps; i++) {
execute_simulation_step_on_gpu();
step_count++;
}
read_spins_from_gpu();
}
void GPUSimple2DIsingModel::execute_simulation_step_on_gpu() {
prng.generate();
int blocks = (dev.n + 255) / 256;
int THREADS_PER_BLOCK = 256;
hipLaunchKernelGGL(( flip_spins_stochastically), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, dev, 0,
prng.where);
hipLaunchKernelGGL(( flip_spins_stochastically), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, dev, 1,
prng.where);
}
void GPUSimple2DIsingModel::read_spins_from_gpu() {
CudaDevice::copy_array_from_device<int>(dev.n, dev.spins, &(spins[0]));
}
template<typename DevType, typename HostType>
DevType *CudaDevice::copy_array_to_device_with_conversion(
int size, const HostType *from) {
DevType *conv = (DevType *) malloc(sizeof(DevType) * size); // TODO errhandle
for (size_t i = 0; i < size; i++) {
conv[i] = (DevType) from[i];
}
DevType *dev_ptr = CudaDevice::copy_array_to_device(size, conv);
free(conv);
return dev_ptr;
}
template<typename DevType>
DevType *CudaDevice::copy_array_to_device(int size, const DevType *from) {
DevType *to;
CUDA_HANDLERR(hipMalloc(&to, size * sizeof(DevType)));
CUDA_HANDLERR(hipMemcpy(to, from, size * sizeof(DevType), hipMemcpyHostToDevice));
return to;
}
template float *
CudaDevice::copy_array_to_device<float>(int size, const float *from);
template<typename DevType, typename HostType>
void CudaDevice::copy_array_from_device_with_conversion(
int size, const DevType *from, HostType *to) {
DevType *conv = (DevType *) malloc(sizeof(DevType) * size);
CudaDevice::copy_array_from_device(size, from, conv);
if (conv != nullptr) {
for (size_t i = 0; i < size; i++) {
to[i] = (HostType) conv[i];
}
}
free(conv);
}
template<typename T>
void CudaDevice::copy_array_from_device(int size, const T *from, T *to) {
CUDA_HANDLERR(hipMemcpy(to, from, size * sizeof(T), hipMemcpyDeviceToHost));
}
template void
CudaDevice::copy_array_from_device<float>(int, const float *, float *);
void CudaDevice::free_array_from_device(void *arr) {
CUDA_HANDLERR(hipFree(arr));
}
| 820f54c7729a00760c79ea8eef7e8d47c7c977c3.cu | #include "simulation_gpu.h"
void cuda_failure(std::string stmt, int ce) {
std::cerr << "CUDA Failure: [" << ce << "] in line: " << stmt << std::endl;
}
CudaPRNG::CudaPRNG(unsigned long long int seed, int size_)
: size(size_) {
CUDA_HANDLERR(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT));
CUDA_HANDLERR(curandSetPseudoRandomGeneratorSeed(gen, seed));
CUDA_HANDLERR(cudaMalloc((void **) &where, size_ * sizeof(float)));
}
void CudaPRNG::generate() {
CUDA_HANDLERR(curandGenerateUniform(gen, where, size));
}
Simple2DIsingParamsDevice::Simple2DIsingParamsDevice(const Simple2DIsingParams ¶ms) {
// Copy constructor from CPU version of params
spins = CudaDevice::copy_array_to_device<int>(
params.initial_spins.size(),
&(params.initial_spins[0])
);
n = (int) params.initial_spins.size();
beta = (float) calc_beta(params.temperature);
magnetic_moment = (float) params.magnetic_moment;
xlen = (int) params.xlen;
ylen = (int) params.ylen;
interaction = (float) params.interaction;
external_field = (float) params.external_field;
}
// These kernels are direct analogues of methods in the CPU simulation
__device__
float get_spin_energy_simple(int i,
const struct Simple2DIsingParamsDevice &dev) {
// TODO: generalise over dimensions
float res = 0;
float spin = dev.spins[i];
res += -dev.magnetic_moment *
dev.external_field * spin;
int xlen = dev.xlen;
int ylen = dev.ylen;
int x = i % xlen;
int y = i / xlen;
// Periodic boundaries: add the lattice extent before taking the modulus so that
// x == 0 or y == 0 does not produce a negative index (C++ % keeps the sign).
int left = (x - 1 + xlen) % xlen + y * xlen;
int right = (x + 1) % xlen + y * xlen;
int top = x + (y - 1 + ylen) % ylen * xlen;
int down = x + (y + 1) % ylen * xlen;
res += -dev.interaction * spin * dev.spins[left];
res += -dev.interaction * spin * dev.spins[right];
res += -dev.interaction * spin * dev.spins[top];
res += -dev.interaction * spin * dev.spins[down];
return res;
}
__device__
float get_spin_flip_prob(float energy, float beta) {
return exp(-beta * energy);
}
__device__
void
flip_one_spin_stochastically(struct Simple2DIsingParamsDevice &dev,
size_t i,
float *random_floats) {
// \Delta E = E_after_flip - E_now = -spin_energy_now - spin_energy_now
float delta_energy = -2 * get_spin_energy_simple(i, dev);
if (delta_energy < 0 ||
random_floats[i] < get_spin_flip_prob(delta_energy, dev.beta)) {
dev.spins[i] = -dev.spins[i];
}
}
__global__
void
flip_spins_stochastically(struct Simple2DIsingParamsDevice dev,
int offset,
float *random_floats) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int x = i % dev.xlen;
int y = i / dev.xlen;
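// Checkerboard (red-black) sweep: this pass only flips sites where
// (x + y + offset) is odd, so neighbouring spins are never updated concurrently.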
int is_odd_position = (x + y + offset) % 2;
if (i < dev.n && is_odd_position) {
flip_one_spin_stochastically(dev, i, random_floats);
}
}
void GPUSimple2DIsingModel::run(size_t max_steps) {
for (size_t i = 0; i < max_steps; i++) {
execute_simulation_step_on_gpu();
step_count++;
}
read_spins_from_gpu();
}
void GPUSimple2DIsingModel::execute_simulation_step_on_gpu() {
prng.generate();
int blocks = (dev.n + 255) / 256;
int THREADS_PER_BLOCK = 256;
flip_spins_stochastically<<<blocks, THREADS_PER_BLOCK>>>(dev, 0,
prng.where);
flip_spins_stochastically<<<blocks, THREADS_PER_BLOCK>>>(dev, 1,
prng.where);
}
void GPUSimple2DIsingModel::read_spins_from_gpu() {
CudaDevice::copy_array_from_device<int>(dev.n, dev.spins, &(spins[0]));
}
template<typename DevType, typename HostType>
DevType *CudaDevice::copy_array_to_device_with_conversion(
int size, const HostType *from) {
DevType *conv = (DevType *) malloc(sizeof(DevType) * size); // TODO errhandle
for (size_t i = 0; i < size; i++) {
conv[i] = (DevType) from[i];
}
DevType *dev_ptr = CudaDevice::copy_array_to_device(size, conv);
free(conv);
return dev_ptr;
}
template<typename DevType>
DevType *CudaDevice::copy_array_to_device(int size, const DevType *from) {
DevType *to;
CUDA_HANDLERR(cudaMalloc(&to, size * sizeof(DevType)));
CUDA_HANDLERR(cudaMemcpy(to, from, size * sizeof(DevType), cudaMemcpyHostToDevice));
return to;
}
template float *
CudaDevice::copy_array_to_device<float>(int size, const float *from);
template<typename DevType, typename HostType>
void CudaDevice::copy_array_from_device_with_conversion(
int size, const DevType *from, HostType *to) {
DevType *conv = (DevType *) malloc(sizeof(DevType) * size);
CudaDevice::copy_array_from_device(size, from, conv);
if (conv != nullptr) {
for (size_t i = 0; i < size; i++) {
to[i] = (HostType) conv[i];
}
}
free(conv);
}
template<typename T>
void CudaDevice::copy_array_from_device(int size, const T *from, T *to) {
CUDA_HANDLERR(cudaMemcpy(to, from, size * sizeof(T), cudaMemcpyDeviceToHost));
}
template void
CudaDevice::copy_array_from_device<float>(int, const float *, float *);
void CudaDevice::free_array_from_device(void *arr) {
CUDA_HANDLERR(cudaFree(arr));
}
|
6572b3fa82c28d40d3f0f62394b6341e24a2bae2.hip | // !!! This is a file automatically generated by hipify!!!
/*
GFC code: A GPU-based compressor for arrays of double-precision
floating-point values.
Copyright (c) 2011-2020, Texas State University. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Texas State University nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL TEXAS STATE UNIVERSITY BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors: Molly A. O'Neil and Martin Burtscher
URL: The latest version of this code is available at
https://userweb.cs.txstate.edu/~burtscher/research/GFC/.
Publication: This work is described in detail in the following paper.
Molly A. O'Neil and Martin Burtscher. Floating-Point Data Compression at 75
Gb/s on a GPU. Proceedings of the Fourth Workshop on General Purpose Processing
Using GPUs, pp. 7:1-7:7. March 2011.
*/
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <chrono>
#include <hip/hip_runtime.h>
#include "kernels.h"
static void CheckTest(const char *msg)
{
hipError_t e;
hipDeviceSynchronize();
if (hipSuccess != (e = hipGetLastError())) {
fprintf(stderr, "%s: %d\n", msg, e);
fprintf(stderr, "%s\n", hipGetErrorString(e));
}
}
/************************************************************************************/
static void Compress(int blocks, int warpsperblock, int repeat, int dimensionality)
{
hipGetLastError(); // reset error value
// generate a test file with fixed values
FILE *fp = fopen("input.bin", "wb");
if (fp == NULL) {
fprintf(stderr, "Failed to open input file input.bin for write.\n");
}
for (int i = 0; i < MAX; i++) {
double t = i;
fwrite(&t, 8, 1, fp);
}
fclose(fp);
fp = fopen("input.bin", "rb");
if (fp == NULL) {
fprintf(stderr, "Failed to open input file input.bin for read.\n");
}
// allocate CPU buffers
ull *cbuf = (ull *)malloc(sizeof(ull) * MAX); // uncompressed data
if (cbuf == NULL) {
fprintf(stderr, "cannot allocate cbuf\n");
}
int doubles = fread(cbuf, 8, MAX, fp);
if (doubles != MAX) {
fprintf(stderr, "Error in reading input.bin. Exit\n");
if (cbuf != NULL) free(cbuf);
fclose(fp);
return ;
}
fclose(fp);
const int num_warps = blocks * warpsperblock;
char *dbuf = (char *)malloc(sizeof(char) * ((MAX+1)/2*17)); // compressed data
if (dbuf == NULL) {
fprintf(stderr, "cannot allocate dbuf\n");
}
int *cut = (int *)malloc(sizeof(int) * num_warps); // chunk boundaries
if (cut == NULL) {
fprintf(stderr, "cannot allocate cut\n");
}
int *off = (int *)malloc(sizeof(int) * num_warps); // offset table
if (off == NULL) {
fprintf(stderr, "cannot allocate off\n");
}
// calculate required padding for last chunk
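// (x + WARPSIZE - 1) & -WARPSIZE rounds x up to the next multiple of WARPSIZE,
// which VerifySystemParameters guarantees to be a power of two.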
int padding = ((doubles + WARPSIZE - 1) & -WARPSIZE) - doubles;
doubles += padding;
// determine chunk assignments per warp
int per = (doubles + num_warps - 1) / (num_warps);
if (per < WARPSIZE) per = WARPSIZE;
per = (per + WARPSIZE - 1) & -WARPSIZE;
int curr = 0, before = 0, d = 0;
for (int i = 0; i < num_warps; i++) {
curr += per;
cut[i] = min(curr, doubles);
if (cut[i] - before > 0) {
d = cut[i] - before;
}
before = cut[i];
}
// set the pad values to ensure correct prediction
if (d <= WARPSIZE) {
for (int i = doubles - padding; i < doubles; i++) {
cbuf[i] = 0;
}
} else {
for (int i = doubles - padding; i < doubles; i++) {
cbuf[i] = cbuf[(i & -WARPSIZE) - (dimensionality - i % dimensionality)];
}
}
// allocate GPU buffers
ull *cbufl; // uncompressed data
char *dbufl; // compressed data
int *cutl; // chunk boundaries
int *offl; // offset table
if (hipSuccess != hipMalloc((void **)&cbufl, sizeof(ull) * doubles))
fprintf(stderr, "could not allocate cbufd\n");
if (hipSuccess != hipMalloc((void **)&dbufl, sizeof(char) * ((doubles+1)/2*17)))
fprintf(stderr, "could not allocate dbufd\n");
if (hipSuccess != hipMalloc((void **)&cutl, sizeof(int) * num_warps))
fprintf(stderr, "could not allocate cutd\n");
if (hipSuccess != hipMalloc((void **)&offl, sizeof(int) * num_warps))
fprintf(stderr, "could not allocate offd\n");
// copy CPU buffer contents to GPU
if (hipSuccess != hipMemcpy(cbufl, cbuf, sizeof(ull) * doubles, hipMemcpyHostToDevice))
fprintf(stderr, "copying of cbuf to device failed\n");
if (hipSuccess != hipMemcpy(cutl, cut, sizeof(int) * num_warps, hipMemcpyHostToDevice))
fprintf(stderr, "copying of cut to device failed\n");
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++)
hipLaunchKernelGGL(( CompressionKernel), dim3(blocks), dim3(WARPSIZE*warpsperblock), 0, 0,
dimensionality, cbufl, dbufl, cutl, offl);
CheckTest("compression kernel launch failed"); // hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
fprintf(stderr, "Average compression kernel execution time %f (s)\n", (time * 1e-9f) / repeat);
// transfer offsets back to CPU
if(hipSuccess != hipMemcpy(off, offl, sizeof(int) * num_warps, hipMemcpyDeviceToHost))
fprintf(stderr, "copying of off from device failed\n");
// output header
fp = fopen("output.bin", "wb");
if (fp == NULL) {
fprintf(stderr, "Failed to open output file output.bin.\n");
}
int num;
int doublecnt = doubles-padding;
num = fwrite(&blocks, 1, 1, fp);
assert(1 == num);
num = fwrite(&warpsperblock, 1, 1, fp);
assert(1 == num);
num = fwrite(&dimensionality, 1, 1, fp);
assert(1 == num);
num = fwrite(&doublecnt, 4, 1, fp);
assert(1 == num);
// output offset table
for(int i = 0; i < num_warps; i++) {
int start = 0;
if(i > 0) start = cut[i-1];
off[i] -= ((start+1)/2*17);
num = fwrite(&off[i], 4, 1, fp); // chunk's compressed size in bytes
assert(1 == num);
}
// output compressed data by chunk
for(int i = 0; i < num_warps; i++) {
int offset, start = 0;
if(i > 0) start = cut[i-1];
offset = ((start+1)/2*17);
// transfer compressed data back to CPU by chunk
if (hipSuccess != hipMemcpy(dbuf + offset, dbufl + offset, sizeof(char) * off[i], hipMemcpyDeviceToHost))
fprintf(stderr, "copying of dbuf from device failed\n");
num = fwrite(&dbuf[offset], 1, off[i], fp);
assert(off[i] == num);
}
fclose(fp);
// compression ratio
fp = fopen("input.bin", "rb");
fseek (fp, 0, SEEK_END);
long input_size = ftell (fp);
fp = fopen("output.bin", "rb");
fseek (fp, 0, SEEK_END);
long output_size = ftell (fp);
fprintf(stderr, "Compression ratio = %lf\n", 1.0 * input_size / output_size);
free(cbuf);
free(dbuf);
free(cut);
free(off);
if (hipSuccess != hipFree(cbufl))
fprintf(stderr, "could not deallocate cbufd\n");
if (hipSuccess != hipFree(dbufl))
fprintf(stderr, "could not deallocate dbufd\n");
if (hipSuccess != hipFree(cutl))
fprintf(stderr, "could not deallocate cutd\n");
if (hipSuccess != hipFree(offl))
fprintf(stderr, "could not deallocate offd\n");
}
/************************************************************************************/
static void VerifySystemParameters()
{
assert(1 == sizeof(char));
assert(4 == sizeof(int));
assert(8 == sizeof(ull));
int val = 1;
assert(1 == *((char *)&val));
if ((WARPSIZE <= 0) || ((WARPSIZE & (WARPSIZE-1)) != 0)) {
fprintf(stderr, "Warp size must be greater than zero and a power of two\n");
exit(-1);
}
}
/************************************************************************************/
int main(int argc, char *argv[])
{
fprintf(stderr, "GPU FP Compressor v2.2\n");
fprintf(stderr, "Copyright 2011-2020 Texas State University\n");
VerifySystemParameters();
int blocks, warpsperblock, dimensionality;
int repeat;
hipFuncSetCacheConfig(CompressionKernel, hipFuncCachePreferL1);
if((4 == argc) || (5 == argc)) { /* compress */
blocks = atoi(argv[1]);
assert((0 < blocks) && (blocks < 256));
warpsperblock = atoi(argv[2]);
assert((0 < warpsperblock) && (warpsperblock < 256));
repeat = atoi(argv[3]);
if(4 == argc) {
dimensionality = 1;
} else {
dimensionality = atoi(argv[4]);
}
assert((0 < dimensionality) && (dimensionality <= WARPSIZE));
Compress(blocks, warpsperblock, repeat, dimensionality);
}
else {
fprintf(stderr, "usage:\n");
fprintf(stderr, "compress: %s <blocks> <warps/block> <repeat> <dimensionality>\n", argv[0]);
fprintf(stderr, "\ninput.bin is generated by the program and the compressed output file is output.bin.\n");
}
return 0;
}
| 6572b3fa82c28d40d3f0f62394b6341e24a2bae2.cu | /*
GFC code: A GPU-based compressor for arrays of double-precision
floating-point values.
Copyright (c) 2011-2020, Texas State University. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Texas State University nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL TEXAS STATE UNIVERSITY BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors: Molly A. O'Neil and Martin Burtscher
URL: The latest version of this code is available at
https://userweb.cs.txstate.edu/~burtscher/research/GFC/.
Publication: This work is described in detail in the following paper.
Molly A. O'Neil and Martin Burtscher. Floating-Point Data Compression at 75
Gb/s on a GPU. Proceedings of the Fourth Workshop on General Purpose Processing
Using GPUs, pp. 7:1-7:7. March 2011.
*/
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <chrono>
#include <cuda.h>
#include "kernels.h"
static void CheckTest(const char *msg)
{
cudaError_t e;
cudaDeviceSynchronize();
if (cudaSuccess != (e = cudaGetLastError())) {
fprintf(stderr, "%s: %d\n", msg, e);
fprintf(stderr, "%s\n", cudaGetErrorString(e));
}
}
/************************************************************************************/
static void Compress(int blocks, int warpsperblock, int repeat, int dimensionality)
{
cudaGetLastError(); // reset error value
// generate a test file with fixed values
FILE *fp = fopen("input.bin", "wb");
if (fp == NULL) {
fprintf(stderr, "Failed to open input file input.bin for write.\n");
}
for (int i = 0; i < MAX; i++) {
double t = i;
fwrite(&t, 8, 1, fp);
}
fclose(fp);
fp = fopen("input.bin", "rb");
if (fp == NULL) {
fprintf(stderr, "Failed to open input file input.bin for read.\n");
}
// allocate CPU buffers
ull *cbuf = (ull *)malloc(sizeof(ull) * MAX); // uncompressed data
if (cbuf == NULL) {
fprintf(stderr, "cannot allocate cbuf\n");
}
int doubles = fread(cbuf, 8, MAX, fp);
if (doubles != MAX) {
fprintf(stderr, "Error in reading input.bin. Exit\n");
if (cbuf != NULL) free(cbuf);
fclose(fp);
return ;
}
fclose(fp);
const int num_warps = blocks * warpsperblock;
char *dbuf = (char *)malloc(sizeof(char) * ((MAX+1)/2*17)); // compressed data
if (dbuf == NULL) {
fprintf(stderr, "cannot allocate dbuf\n");
}
int *cut = (int *)malloc(sizeof(int) * num_warps); // chunk boundaries
if (cut == NULL) {
fprintf(stderr, "cannot allocate cut\n");
}
int *off = (int *)malloc(sizeof(int) * num_warps); // offset table
if (off == NULL) {
fprintf(stderr, "cannot allocate off\n");
}
// calculate required padding for last chunk
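// (x + WARPSIZE - 1) & -WARPSIZE rounds x up to the next multiple of WARPSIZE,
// which VerifySystemParameters guarantees to be a power of two.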
int padding = ((doubles + WARPSIZE - 1) & -WARPSIZE) - doubles;
doubles += padding;
// determine chunk assignments per warp
int per = (doubles + num_warps - 1) / (num_warps);
if (per < WARPSIZE) per = WARPSIZE;
per = (per + WARPSIZE - 1) & -WARPSIZE;
int curr = 0, before = 0, d = 0;
for (int i = 0; i < num_warps; i++) {
curr += per;
cut[i] = min(curr, doubles);
if (cut[i] - before > 0) {
d = cut[i] - before;
}
before = cut[i];
}
// set the pad values to ensure correct prediction
if (d <= WARPSIZE) {
for (int i = doubles - padding; i < doubles; i++) {
cbuf[i] = 0;
}
} else {
for (int i = doubles - padding; i < doubles; i++) {
cbuf[i] = cbuf[(i & -WARPSIZE) - (dimensionality - i % dimensionality)];
}
}
// allocate GPU buffers
ull *cbufl; // uncompressed data
char *dbufl; // compressed data
int *cutl; // chunk boundaries
int *offl; // offset table
if (cudaSuccess != cudaMalloc((void **)&cbufl, sizeof(ull) * doubles))
fprintf(stderr, "could not allocate cbufd\n");
if (cudaSuccess != cudaMalloc((void **)&dbufl, sizeof(char) * ((doubles+1)/2*17)))
fprintf(stderr, "could not allocate dbufd\n");
if (cudaSuccess != cudaMalloc((void **)&cutl, sizeof(int) * num_warps))
fprintf(stderr, "could not allocate cutd\n");
if (cudaSuccess != cudaMalloc((void **)&offl, sizeof(int) * num_warps))
fprintf(stderr, "could not allocate offd\n");
// copy CPU buffer contents to GPU
if (cudaSuccess != cudaMemcpy(cbufl, cbuf, sizeof(ull) * doubles, cudaMemcpyHostToDevice))
fprintf(stderr, "copying of cbuf to device failed\n");
if (cudaSuccess != cudaMemcpy(cutl, cut, sizeof(int) * num_warps, cudaMemcpyHostToDevice))
fprintf(stderr, "copying of cut to device failed\n");
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++)
CompressionKernel<<<blocks, WARPSIZE*warpsperblock>>>(
dimensionality, cbufl, dbufl, cutl, offl);
CheckTest("compression kernel launch failed"); // cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
fprintf(stderr, "Average compression kernel execution time %f (s)\n", (time * 1e-9f) / repeat);
// transfer offsets back to CPU
if(cudaSuccess != cudaMemcpy(off, offl, sizeof(int) * num_warps, cudaMemcpyDeviceToHost))
fprintf(stderr, "copying of off from device failed\n");
// output header
fp = fopen("output.bin", "wb");
if (fp == NULL) {
fprintf(stderr, "Failed to open output file output.bin.\n");
}
int num;
int doublecnt = doubles-padding;
num = fwrite(&blocks, 1, 1, fp);
assert(1 == num);
num = fwrite(&warpsperblock, 1, 1, fp);
assert(1 == num);
num = fwrite(&dimensionality, 1, 1, fp);
assert(1 == num);
num = fwrite(&doublecnt, 4, 1, fp);
assert(1 == num);
// output offset table
for(int i = 0; i < num_warps; i++) {
int start = 0;
if(i > 0) start = cut[i-1];
off[i] -= ((start+1)/2*17);
num = fwrite(&off[i], 4, 1, fp); // chunk's compressed size in bytes
assert(1 == num);
}
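// Layout of output.bin as written above (summary comment added for clarity):
// byte 0: blocks, byte 1: warpsperblock, byte 2: dimensionality,
// bytes 3-6: uncompressed double count; then num_warps 4-byte entries holding each
// chunk's compressed size in bytes, followed by the compressed chunks back to back.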
// output compressed data by chunk
for(int i = 0; i < num_warps; i++) {
int offset, start = 0;
if(i > 0) start = cut[i-1];
offset = ((start+1)/2*17);
// transfer compressed data back to CPU by chunk
if (cudaSuccess != cudaMemcpy(dbuf + offset, dbufl + offset, sizeof(char) * off[i], cudaMemcpyDeviceToHost))
fprintf(stderr, "copying of dbuf from device failed\n");
num = fwrite(&dbuf[offset], 1, off[i], fp);
assert(off[i] == num);
}
fclose(fp);
// compression ratio
fp = fopen("input.bin", "rb");
fseek (fp, 0, SEEK_END);
long input_size = ftell (fp);
fp = fopen("output.bin", "rb");
fseek (fp, 0, SEEK_END);
long output_size = ftell (fp);
fprintf(stderr, "Compression ratio = %lf\n", 1.0 * input_size / output_size);
free(cbuf);
free(dbuf);
free(cut);
free(off);
if (cudaSuccess != cudaFree(cbufl))
fprintf(stderr, "could not deallocate cbufd\n");
if (cudaSuccess != cudaFree(dbufl))
fprintf(stderr, "could not deallocate dbufd\n");
if (cudaSuccess != cudaFree(cutl))
fprintf(stderr, "could not deallocate cutd\n");
if (cudaSuccess != cudaFree(offl))
fprintf(stderr, "could not deallocate offd\n");
}
/************************************************************************************/
static void VerifySystemParameters()
{
assert(1 == sizeof(char));
assert(4 == sizeof(int));
assert(8 == sizeof(ull));
int val = 1;
assert(1 == *((char *)&val));
if ((WARPSIZE <= 0) || ((WARPSIZE & (WARPSIZE-1)) != 0)) {
fprintf(stderr, "Warp size must be greater than zero and a power of two\n");
exit(-1);
}
}
/************************************************************************************/
int main(int argc, char *argv[])
{
fprintf(stderr, "GPU FP Compressor v2.2\n");
fprintf(stderr, "Copyright 2011-2020 Texas State University\n");
VerifySystemParameters();
int blocks, warpsperblock, dimensionality;
int repeat;
cudaFuncSetCacheConfig(CompressionKernel, cudaFuncCachePreferL1);
if((4 == argc) || (5 == argc)) { /* compress */
blocks = atoi(argv[1]);
assert((0 < blocks) && (blocks < 256));
warpsperblock = atoi(argv[2]);
assert((0 < warpsperblock) && (warpsperblock < 256));
repeat = atoi(argv[3]);
if(4 == argc) {
dimensionality = 1;
} else {
dimensionality = atoi(argv[4]);
}
assert((0 < dimensionality) && (dimensionality <= WARPSIZE));
Compress(blocks, warpsperblock, repeat, dimensionality);
}
else {
fprintf(stderr, "usage:\n");
fprintf(stderr, "compress: %s <blocks> <warps/block> <repeat> <dimensionality>\n", argv[0]);
fprintf(stderr, "\ninput.bin is generated by the program and the compressed output file is output.bin.\n");
}
return 0;
}
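/*
  Illustrative invocation (the binary name and the numbers are hypothetical, chosen
  only to show the argument order parsed in main above):
    ./gfc 28 18 100 1
  launches 28 blocks of 18 warps each, runs the compression kernel 100 times for
  timing, and treats the data as 1-dimensional; input.bin is generated by the
  program itself and the compressed stream is written to output.bin.
*/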
|
f480ab3fcf3b81209c05162fee58756ff3564d26.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by kindr on 2021/5/8.
//
#include "multiKernelConcurrent.cuh"
#include "../../common/utils.cuh"
#include <cstdio>
const int N = 1 << 25;
__global__
void math_kernel1(int n) {
double sum = 0;
for (int i = 0; i < n; i++) sum += tan(0.1) * tan(0.1);
printf("sum=%g\n", sum);
}
__global__
void math_kernel2(int n) {
double sum = 0;
for (int i = 0; i < n; i++) sum += tan(0.1) * tan(0.1);
printf("sum=%g\n", sum);
}
void multiKernelConcurrent() {
int n_stream = 4;
size_t nStreamBytes = n_stream * sizeof(hipStream_t);
auto *stream = static_cast<hipStream_t *>(malloc(nStreamBytes));
for (int i = 0; i < n_stream; i++) {
hipStreamCreate(&stream[i]);
}
CHECK(hipGetLastError());
for (int i = 0; i < n_stream; i++) {
hipLaunchKernelGGL(( math_kernel1), dim3(1), dim3(1), 0, stream[i], N);
hipLaunchKernelGGL(( math_kernel2), dim3(1), dim3(1), 0, stream[i], N);
}
for (int i = 0; i < n_stream; i++) {
hipLaunchKernelGGL(( math_kernel1), dim3(1), dim3(1), 0, 0, N);
hipLaunchKernelGGL(( math_kernel2), dim3(1), dim3(1), 0, 0, N);
}
hipDeviceSynchronize();
CHECK(hipGetLastError());
for (int i = 0; i < n_stream; i++) {
hipStreamDestroy(stream[i]);
}
free(stream);
}
| f480ab3fcf3b81209c05162fee58756ff3564d26.cu | //
// Created by kindr on 2021/5/8.
//
#include "multiKernelConcurrent.cuh"
#include "../../common/utils.cuh"
#include <cstdio>
const int N = 1 << 25;
__global__
void math_kernel1(int n) {
double sum = 0;
for (int i = 0; i < n; i++) sum += tan(0.1) * tan(0.1);
printf("sum=%g\n", sum);
}
__global__
void math_kernel2(int n) {
double sum = 0;
for (int i = 0; i < n; i++) sum += tan(0.1) * tan(0.1);
printf("sum=%g\n", sum);
}
void multiKernelConcurrent() {
int n_stream = 4;
size_t nStreamBytes = n_stream * sizeof(cudaStream_t);
auto *stream = static_cast<cudaStream_t *>(malloc(nStreamBytes));
for (int i = 0; i < n_stream; i++) {
cudaStreamCreate(&stream[i]);
}
CHECK(cudaGetLastError());
for (int i = 0; i < n_stream; i++) {
math_kernel1<<<1, 1, 0, stream[i]>>>(N);
math_kernel2<<<1, 1, 0, stream[i]>>>(N);
}
for (int i = 0; i < n_stream; i++) {
math_kernel1<<<1, 1>>>(N);
math_kernel2<<<1, 1>>>(N);
}
cudaDeviceSynchronize();
CHECK(cudaGetLastError());
for (int i = 0; i < n_stream; i++) {
cudaStreamDestroy(stream[i]);
}
free(stream);
}
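// Minimal host-side sketch (added for illustration, not part of the original pair):
// whether the per-stream launches above can actually overlap depends on the device's
// concurrent-kernel support. The helper name below is an assumption introduced here.
static bool deviceSupportsConcurrentKernels() {
    cudaDeviceProp prop{};
    // Query device 0; treat any query failure as "no concurrency" for this sketch.
    if (cudaGetDeviceProperties(&prop, 0) != cudaSuccess) return false;
    return prop.concurrentKernels != 0;
}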
|
a5de25a9324507b2c64c933bd89f301537089575.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2009 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation and
* any modifications thereto. Any use, reproduction, disclosure, or distribution
* of this software and related documentation without an express license
* agreement from NVIDIA Corporation is strictly prohibited.
*
*/
#ifndef _SCAN_BEST_KERNEL_H_
#define _SCAN_BEST_KERNEL_H_
#include <stdio.h> // needed for the device-side printf used in scan_best
#define NUM_BANKS 16
#define LOG_NUM_BANKS 4
// Define this to more rigorously avoid bank conflicts, even at the lower (root) levels of the tree
//#define ZERO_BANK_CONFLICTS
#ifdef ZERO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(index) (((index) >> LOG_NUM_BANKS) + ((index) >> (2 * LOG_NUM_BANKS)))
#else
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS)
#endif
#ifdef CHECK_BANK_CONFLICTS
#define TEMP(index) cutilBankChecker(temp, index)
#else
#define TEMP(index) temp[index]
#endif
///////////////////////////////////////////////////////////////////////////////
// Work-efficient compute implementation of scan, one thread per 2 elements
// Work-efficient: O(log(n)) steps, and O(n) adds.
// Also shared storage efficient: Uses n + n/NUM_BANKS shared memory -- no ping-ponging
// Also avoids most bank conflicts using single-element offsets every NUM_BANKS elements.
//
// In addition, If ZERO_BANK_CONFLICTS is defined, uses
// n + n/NUM_BANKS + n/(NUM_BANKS*NUM_BANKS)
// shared memory. If ZERO_BANK_CONFLICTS is defined, avoids ALL bank conflicts using
// single-element offsets every NUM_BANKS elements, plus additional single-element offsets
// after every NUM_BANKS^2 elements.
//
// Uses a balanced tree type algorithm. See Blelloch, 1990 "Prefix Sums
// and Their Applications", or Prins and Chatterjee PRAM course notes:
// https://www.cs.unc.edu/~prins/Classes/633/Handouts/pram.pdf
//
// This work-efficient version is based on the algorithm presented in Guy Blelloch's
// Excellent paper "Prefix sums and their applications".
// http://www.cs.cmu.edu/~blelloch/papers/Ble93.pdf
//
// Pro: Work Efficient, very few bank conflicts (or zero if ZERO_BANK_CONFLICTS is defined)
// Con: More instructions to compute bank-conflict-free shared memory addressing,
// and slightly more shared memory storage used.
//
// @param g_odata output data in global memory
// @param g_idata input data in global memory
// @param n input number of elements to scan from input data
extern __shared__ float temp[];
__global__ void scan_best(float *g_odata, float *g_idata, int n)
{
// Dynamically allocated shared memory for scan kernels
int thid = threadIdx.x;
int ai = thid;
int bi = thid + (n/2);
// compute spacing to avoid bank conflicts
int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
// Cache the computational window in shared memory
TEMP(ai + bankOffsetA) = g_idata[ai];
TEMP(bi + bankOffsetB) = g_idata[bi];
int offset = 1;
printf("The n in scan_best: %d \n", n);
// build the sum in place up the tree
for (int d = n/2; d > 0; d >>= 1)
{
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
TEMP(bi) += TEMP(ai);
}
offset *= 2;
}
// scan back down the tree
// clear the last element
if (thid == 0)
{
int index = n - 1;
index += CONFLICT_FREE_OFFSET(index);
TEMP(index) = 0;
}
// traverse down the tree building the scan in place
for (int d = 1; d < n; d *= 2)
{
offset /= 2;
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
float t = TEMP(ai);
TEMP(ai) = TEMP(bi);
TEMP(bi) += t;
}
}
__syncthreads();
// write results to global memory
g_odata[ai] = TEMP(ai + bankOffsetA);
g_odata[bi] = TEMP(bi + bankOffsetB);
}
#endif // #ifndef _SCAN_BEST_KERNEL_H_
| a5de25a9324507b2c64c933bd89f301537089575.cu | /*
* Copyright 1993-2009 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation and
* any modifications thereto. Any use, reproduction, disclosure, or distribution
* of this software and related documentation without an express license
* agreement from NVIDIA Corporation is strictly prohibited.
*
*/
#ifndef _SCAN_BEST_KERNEL_H_
#define _SCAN_BEST_KERNEL_H_
#include <stdio.h> // needed for the device-side printf used in scan_best
#define NUM_BANKS 16
#define LOG_NUM_BANKS 4
// Define this to more rigorously avoid bank conflicts, even at the lower (root) levels of the tree
//#define ZERO_BANK_CONFLICTS
#ifdef ZERO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(index) (((index) >> LOG_NUM_BANKS) + ((index) >> (2 * LOG_NUM_BANKS)))
#else
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS)
#endif
#ifdef CHECK_BANK_CONFLICTS
#define TEMP(index) cutilBankChecker(temp, index)
#else
#define TEMP(index) temp[index]
#endif
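// Worked example (comment added for clarity): with NUM_BANKS == 16 and
// LOG_NUM_BANKS == 4, CONFLICT_FREE_OFFSET(16) == 1 and CONFLICT_FREE_OFFSET(33) == 2,
// so logical shared-memory indices 16 and 33 land at padded locations 17 and 35,
// spreading accesses that would otherwise map to the same bank.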
///////////////////////////////////////////////////////////////////////////////
// Work-efficient compute implementation of scan, one thread per 2 elements
// Work-efficient: O(log(n)) steps, and O(n) adds.
// Also shared storage efficient: Uses n + n/NUM_BANKS shared memory -- no ping-ponging
// Also avoids most bank conflicts using single-element offsets every NUM_BANKS elements.
//
// In addition, If ZERO_BANK_CONFLICTS is defined, uses
// n + n/NUM_BANKS + n/(NUM_BANKS*NUM_BANKS)
// shared memory. If ZERO_BANK_CONFLICTS is defined, avoids ALL bank conflicts using
// single-element offsets every NUM_BANKS elements, plus additional single-element offsets
// after every NUM_BANKS^2 elements.
//
// Uses a balanced tree type algorithm. See Blelloch, 1990 "Prefix Sums
// and Their Applications", or Prins and Chatterjee PRAM course notes:
// https://www.cs.unc.edu/~prins/Classes/633/Handouts/pram.pdf
//
// This work-efficient version is based on the algorithm presented in Guy Blelloch's
// Excellent paper "Prefix sums and their applications".
// http://www.cs.cmu.edu/~blelloch/papers/Ble93.pdf
//
// Pro: Work Efficient, very few bank conflicts (or zero if ZERO_BANK_CONFLICTS is defined)
// Con: More instructions to compute bank-conflict-free shared memory addressing,
// and slightly more shared memory storage used.
//
// @param g_odata output data in global memory
// @param g_idata input data in global memory
// @param n input number of elements to scan from input data
extern __shared__ float temp[];
__global__ void scan_best(float *g_odata, float *g_idata, int n)
{
// Dynamically allocated shared memory for scan kernels
int thid = threadIdx.x;
int ai = thid;
int bi = thid + (n/2);
// compute spacing to avoid bank conflicts
int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
// Cache the computational window in shared memory
TEMP(ai + bankOffsetA) = g_idata[ai];
TEMP(bi + bankOffsetB) = g_idata[bi];
int offset = 1;
printf("The n in scan_best: %d \n", n);
// build the sum in place up the tree
for (int d = n/2; d > 0; d >>= 1)
{
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
TEMP(bi) += TEMP(ai);
}
offset *= 2;
}
// scan back down the tree
// clear the last element
if (thid == 0)
{
int index = n - 1;
index += CONFLICT_FREE_OFFSET(index);
TEMP(index) = 0;
}
// traverse down the tree building the scan in place
for (int d = 1; d < n; d *= 2)
{
offset /= 2;
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
float t = TEMP(ai);
TEMP(ai) = TEMP(bi);
TEMP(bi) += t;
}
}
__syncthreads();
// write results to global memory
g_odata[ai] = TEMP(ai + bankOffsetA);
g_odata[bi] = TEMP(bi + bankOffsetB);
}
#endif // #ifndef _SCAN_BEST_KERNEL_H_
|