Dataset columns (string length, min to max):
  hip_filename   5 to 84 characters
  hip_content    79 to 9.69M characters
  cuda_filename  4 to 83 characters
  cuda_content   19 to 9.69M characters
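Each row below pairs a hipify-generated HIP source file with the CUDA file it was translated from. As a minimal sketch of how a four-column dataset of this shape could be consumed, the Python snippet below iterates over the pairs and writes each file to disk; it assumes the data is published as a Hugging Face dataset, and the identifier "example-org/hip-cuda-pairs" is a placeholder, not the actual dataset name.

# Minimal sketch: walk the HIP/CUDA pair columns and write each file to disk.
# Assumes a Hugging Face dataset layout; "example-org/hip-cuda-pairs" is a placeholder identifier.
from pathlib import Path
from datasets import load_dataset

ds = load_dataset("example-org/hip-cuda-pairs", split="train")  # hypothetical dataset name

out_dir = Path("pairs")
for row in ds:
    for name_col, content_col in (("hip_filename", "hip_content"),
                                  ("cuda_filename", "cuda_content")):
        path = out_dir / row[name_col]
        path.parent.mkdir(parents=True, exist_ok=True)  # filenames may contain subdirectories
        path.write_text(row[content_col])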
259fc161a130138d5916a77bebed097d99584ffe.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "GameLogic.h" void GameLogic(int64_t xIn, int64_t yIn) { processorType = CPU_PROCESSOR; x = xIn; y = yIn; cellSwitch = 0; int64_t totalBlockCount = x*y; cells = (uint8_t*)malloc(sizeof (uint8_t)*(totalBlockCount)); outputCell = (uint8_t*)malloc(sizeof (uint8_t)*(totalBlockCount)); srand(time(NULL)); while(totalBlockCount-- != 0) { if(rand()%2) { cells[totalBlockCount] = 1; } else { cells[totalBlockCount] = 0; } outputCell[totalBlockCount]= 0; } cudaSetup(); } void desructor_GameLogic() { printf("\n\nEXIT\n\n"); hipFree(d_data_in); hipFree(d_data_in_x); hipFree(d_data_in_y); hipFree(d_data_out); free(cells); free(outputCell); } uint8_t* getGameOfLifeState() { return cells; } void processWithCPU(){processorType = CPU_PROCESSOR;} void processWithGPUBasic(){processorType = GPU_BASIC_PROCESSOR;} void processWithGPUOpt(){processorType = GPU_OPTIMIZED_PROCESSOR;} void step() { if(processorType == CPU_PROCESSOR) { cpuImplementation(); } else { GPUImplementation(); } } void cpuImplementation() { uint64_t yId =0, xId = 0; while(yId < y) { xId = 0; while(xId < x) { outputCell[xId+(yId*y)] = 0; uint8_t localCellCount = surrondingCellCount(cells, xId, yId, x, y); switch(localCellCount) { case 2: if(cells[xId+(yId*y)]) { outputCell[xId+(yId*y)] = 1; } break; case 3: outputCell[xId+(yId*y)] = 1; break; } ++xId; } ++yId; } uint8_t * switcher = cells; cells = outputCell; outputCell = switcher; } __host__ __device__ uint8_t surrondingCellCount(uint8_t *cellsLocal, int64_t xCell, int64_t yCell, int64_t x, int64_t y) { //Parse left -> right, top -> bottom uint8_t cellCount = 0; xCell= xCellMinus(1,xCell, x); yCell = yCellMinus(1,yCell, y); uint8_t searchIndex = 0; while(searchIndex < 9) { if(searchIndex == 4) { xCell = xCellPlus(1,xCell, x); ++searchIndex; continue; } if(searchIndex == 3 || searchIndex == 6) { xCell =xCellMinus(3,xCell, x); yCell = yCellPlus(1,yCell, y); } //find cell int64_t cell = xCell + (y * yCell); if(cellsLocal[cell]) { cellCount++; } xCell = xCellPlus(1,xCell, x); ++searchIndex; } return cellCount; } __device__ uint8_t surrondingCellCountOptimized(uint8_t *cellsLocal, int64_t xCell, int64_t x, int64_t y) { //Parse left -> right, top -> bottom uint8_t cellCount = 0; int64_t yCell = 1; xCell= xCellMinus(1,xCell, x); yCell = yCellMinus(1,yCell, y); uint8_t searchIndex = 0; while(searchIndex < 9) { if(searchIndex == 4) { xCell = xCellPlus(1,xCell, x); ++searchIndex; continue; } if(searchIndex == 3 || searchIndex == 6) { xCell =xCellMinus(3,xCell, x); yCell = yCellPlus(1,yCell, y); } //find cell int64_t cell = xCell + (y * yCell); if(cellsLocal[cell]) { cellCount++; } if(cellCount > 3) { return cellCount; } xCell = xCellPlus(1,xCell, x); ++searchIndex; } return cellCount; } __host__ __device__ int64_t xCellPlus(uint8_t add, int64_t value, int64_t x) { if(add == 0 && add < x) { return -1; } if(value+add >= x) { value=(value+add) - x ; } else { value+=add; } return value; } __host__ __device__ int64_t xCellMinus(uint8_t minus, int64_t value, int64_t x) { if(minus == 0 && minus < x) { return -1; } if(value-minus < 0) { value = x + (value-minus); } else { value -= minus; } return value; } __host__ __device__ int64_t yCellPlus(uint8_t add, int64_t value, int64_t y) { if(add == 0 && add < y) { return -1; } if(value+add >= y) { value=(value+add) - y; } else { value+=add; } return value; } __host__ __device__ int64_t yCellMinus(uint8_t minus, int64_t value, int64_t y) { if(minus == 0 && minus < y) { 
return -1; } if(value-minus < 0) { value = y +(value-minus); } else { value -= minus; } return value; } void cudaSetup() { printf("Get GPU properties\n"); int64_t totalBlockCount = x*y; blockSize = 1024; gridSize = ((totalBlockCount + (blockSize-1))/blockSize); optBlockSize = x; optGridSize =(((x*y) + (optBlockSize-1))/optBlockSize); if((x >= 1024) || (y >= 1024)) { printf("\n\nUnable to use optimized GPU version with x & y larger than 1023\n\n"); } int devID; hipDeviceProp_t props; hipGetDevice(&devID); hipGetDeviceProperties(&props, devID); printf("Allocate memory on GPU\n"); hipMalloc((void**) &d_data_in, (sizeof(uint8_t) * totalBlockCount)); hipMalloc((void**) &d_data_in_x, (sizeof(int64_t))); hipMalloc((void**) &d_data_in_y, (sizeof(int64_t))); hipMalloc((void**) &d_data_out, (sizeof(uint8_t) * totalBlockCount)); } void GPUImplementation() { //Copy data to GPU int64_t totalBlockCount = x*y; hipMemcpy(d_data_in, cells, (sizeof(uint8_t) * totalBlockCount), hipMemcpyHostToDevice); hipMemcpy(d_data_in_x, &x, sizeof(int64_t), hipMemcpyHostToDevice); hipMemcpy(d_data_in_y, &y, sizeof(int64_t), hipMemcpyHostToDevice); //Executing GPU calculation if(processorType == GPU_OPTIMIZED_PROCESSOR && (x < 1024) && (y < 1024)) { hipLaunchKernelGGL(( gpuGameOfLifeOptimized), dim3(optGridSize), dim3(optBlockSize), sizeof(uint8_t)*(x * 3), 0, d_data_in, d_data_in_x, d_data_in_y, d_data_out); } else { hipLaunchKernelGGL(( gpuGameOfLifeNaive), dim3(gridSize), dim3(blockSize), 0, 0, d_data_in, d_data_in_x, d_data_in_y, d_data_out); } hipDeviceSynchronize(); //Complete GPU calculation //Reading GPU data out hipMemcpy(outputCell, d_data_out, (sizeof(uint8_t) * totalBlockCount), hipMemcpyDeviceToHost); uint8_t * switcher = cells; cells = outputCell; outputCell = switcher; } /** * This optimization does not work... 
Also this has to be for grid sizes less than 1024 x and y access * The naieve call count is * read global memory calls: 9 * write global memory calls: 2 * read shared memory calls: 0 * write shared memory calls: 0 * * The optimized call count is * read global memory calls: 3 * write global memory calls: 1 * read shared memory calls: 12 * write shared memory calls: 3 * */ __global__ void gpuGameOfLifeOptimized(uint8_t* cellsGlobal, int64_t* dataX, int64_t* dataY, uint8_t *outputCellLocal) { extern __shared__ uint8_t sData[]; uint64_t tId = threadIdx.x + (blockIdx.x * blockDim.x); uint64_t x = *dataX, y = *dataY; uint64_t totalBlockCount = x*y; uint64_t yId = tId/y; uint64_t xId = tId - (yId*x); //Pass data to shared if(tId <= totalBlockCount) { uint8_t rowIndex = 0; uint64_t yVal = yCellMinus(1,yId,y); while (rowIndex < 3) { sData[(rowIndex * x) + xId] =cellsGlobal[((yVal*x)) + xId]; yVal = yCellPlus(1,yVal, y); ++rowIndex; } } syncthreads(); if(tId <= totalBlockCount) { uint8_t localCellCount = surrondingCellCountOptimized(sData, xId, x, y); uint64_t result = 0; switch(localCellCount) { case 2: if(sData[x + xId]) { result = 1; } break; case 3: result = 1; break; } outputCellLocal[xId+(yId*x)] = result; } } __global__ void gpuGameOfLifeNaive(uint8_t* cellsLocal, int64_t* dataX, int64_t* dataY, uint8_t *outputCellLocal) { uint64_t tId = threadIdx.x + (blockIdx.x * blockDim.x); uint64_t x = *dataX, y = *dataY; uint64_t totalBlockCount = x*y; if(tId <= totalBlockCount) { uint64_t yId = tId/y; uint64_t xId = tId - (yId*x); outputCellLocal[xId+(yId*x)] = 0; uint8_t localCellCount = surrondingCellCount(cellsLocal, xId, yId, x, y); switch(localCellCount) { case 2: if(cellsLocal[xId+(yId*x)]) { outputCellLocal[xId+(yId*x)] = 1; } break; case 3: outputCellLocal[xId+(yId*x)] = 1; break; } } } //Improved Naieve //__global__ void gpuGameOfLifeNaive(uint8_t* cellsLocal, int64_t* dataX, int64_t* dataY, uint8_t *outputCellLocal) //{ // int64_t x = *dataX, y = *dataY; // int64_t xId = 0; // int64_t yIndex = threadIdx.x + (blockIdx.x * blockDim.x); // int64_t totalBlockCount = x*y; // if(yIndex <= totalBlockCount) // { // xId = 0; // while(xId < x) // { // outputCellLocal[xId+(yIndex*y)] = 0; // uint8_t localCellCount = surrondingCellCount(cellsLocal, xId, yIndex, x, y); // switch(localCellCount) // { // case 2: // if(cellsLocal[xId+(yIndex*y)]) // { // outputCellLocal[xId+(yIndex*y)] = 1; // } // break; // case 3: // outputCellLocal[xId+(yIndex*y)] = 1; // break; // } // // ++xId; // } // } //} //Origonal //__global__ void gpuGameOfLifeNaive(uint8_t* cellsLocal, int64_t* dataX, int64_t* dataY, uint8_t *outputCellLocal) //{ // int64_t x = *dataX, y = *dataY; // int64_t yId =0, xId = 0; // int64_t ysId = threadIdx.x + (blockIdx.x * blockDim.x); // if(ysId == 1) // { // while(yId < y) // { // xId = 0; // while(xId < x) // { // outputCellLocal[xId+(yId*y)] = 0; // uint8_t localCellCount = surrondingCellCount(cellsLocal, xId, yId, x, y); // switch(localCellCount) // { // case 2: // if(cellsLocal[xId+(yId*y)]) // { // outputCellLocal[xId+(yId*y)] = 1; // } // break; // case 3: // outputCellLocal[xId+(yId*y)] = 1; // break; // } // // ++xId; // } // ++yId; // } // } //}
259fc161a130138d5916a77bebed097d99584ffe.cu
#include "GameLogic.h" void GameLogic(int64_t xIn, int64_t yIn) { processorType = CPU_PROCESSOR; x = xIn; y = yIn; cellSwitch = 0; int64_t totalBlockCount = x*y; cells = (uint8_t*)malloc(sizeof (uint8_t)*(totalBlockCount)); outputCell = (uint8_t*)malloc(sizeof (uint8_t)*(totalBlockCount)); srand(time(NULL)); while(totalBlockCount-- != 0) { if(rand()%2) { cells[totalBlockCount] = 1; } else { cells[totalBlockCount] = 0; } outputCell[totalBlockCount]= 0; } cudaSetup(); } void desructor_GameLogic() { printf("\n\nEXIT\n\n"); cudaFree(d_data_in); cudaFree(d_data_in_x); cudaFree(d_data_in_y); cudaFree(d_data_out); free(cells); free(outputCell); } uint8_t* getGameOfLifeState() { return cells; } void processWithCPU(){processorType = CPU_PROCESSOR;} void processWithGPUBasic(){processorType = GPU_BASIC_PROCESSOR;} void processWithGPUOpt(){processorType = GPU_OPTIMIZED_PROCESSOR;} void step() { if(processorType == CPU_PROCESSOR) { cpuImplementation(); } else { GPUImplementation(); } } void cpuImplementation() { uint64_t yId =0, xId = 0; while(yId < y) { xId = 0; while(xId < x) { outputCell[xId+(yId*y)] = 0; uint8_t localCellCount = surrondingCellCount(cells, xId, yId, x, y); switch(localCellCount) { case 2: if(cells[xId+(yId*y)]) { outputCell[xId+(yId*y)] = 1; } break; case 3: outputCell[xId+(yId*y)] = 1; break; } ++xId; } ++yId; } uint8_t * switcher = cells; cells = outputCell; outputCell = switcher; } __host__ __device__ uint8_t surrondingCellCount(uint8_t *cellsLocal, int64_t xCell, int64_t yCell, int64_t x, int64_t y) { //Parse left -> right, top -> bottom uint8_t cellCount = 0; xCell= xCellMinus(1,xCell, x); yCell = yCellMinus(1,yCell, y); uint8_t searchIndex = 0; while(searchIndex < 9) { if(searchIndex == 4) { xCell = xCellPlus(1,xCell, x); ++searchIndex; continue; } if(searchIndex == 3 || searchIndex == 6) { xCell =xCellMinus(3,xCell, x); yCell = yCellPlus(1,yCell, y); } //find cell int64_t cell = xCell + (y * yCell); if(cellsLocal[cell]) { cellCount++; } xCell = xCellPlus(1,xCell, x); ++searchIndex; } return cellCount; } __device__ uint8_t surrondingCellCountOptimized(uint8_t *cellsLocal, int64_t xCell, int64_t x, int64_t y) { //Parse left -> right, top -> bottom uint8_t cellCount = 0; int64_t yCell = 1; xCell= xCellMinus(1,xCell, x); yCell = yCellMinus(1,yCell, y); uint8_t searchIndex = 0; while(searchIndex < 9) { if(searchIndex == 4) { xCell = xCellPlus(1,xCell, x); ++searchIndex; continue; } if(searchIndex == 3 || searchIndex == 6) { xCell =xCellMinus(3,xCell, x); yCell = yCellPlus(1,yCell, y); } //find cell int64_t cell = xCell + (y * yCell); if(cellsLocal[cell]) { cellCount++; } if(cellCount > 3) { return cellCount; } xCell = xCellPlus(1,xCell, x); ++searchIndex; } return cellCount; } __host__ __device__ int64_t xCellPlus(uint8_t add, int64_t value, int64_t x) { if(add == 0 && add < x) { return -1; } if(value+add >= x) { value=(value+add) - x ; } else { value+=add; } return value; } __host__ __device__ int64_t xCellMinus(uint8_t minus, int64_t value, int64_t x) { if(minus == 0 && minus < x) { return -1; } if(value-minus < 0) { value = x + (value-minus); } else { value -= minus; } return value; } __host__ __device__ int64_t yCellPlus(uint8_t add, int64_t value, int64_t y) { if(add == 0 && add < y) { return -1; } if(value+add >= y) { value=(value+add) - y; } else { value+=add; } return value; } __host__ __device__ int64_t yCellMinus(uint8_t minus, int64_t value, int64_t y) { if(minus == 0 && minus < y) { return -1; } if(value-minus < 0) { value = y +(value-minus); } else { value -= minus; } 
return value; } void cudaSetup() { printf("Get GPU properties\n"); int64_t totalBlockCount = x*y; blockSize = 1024; gridSize = ((totalBlockCount + (blockSize-1))/blockSize); optBlockSize = x; optGridSize =(((x*y) + (optBlockSize-1))/optBlockSize); if((x >= 1024) || (y >= 1024)) { printf("\n\nUnable to use optimized GPU version with x & y larger than 1023\n\n"); } int devID; cudaDeviceProp props; cudaGetDevice(&devID); cudaGetDeviceProperties(&props, devID); printf("Allocate memory on GPU\n"); cudaMalloc((void**) &d_data_in, (sizeof(uint8_t) * totalBlockCount)); cudaMalloc((void**) &d_data_in_x, (sizeof(int64_t))); cudaMalloc((void**) &d_data_in_y, (sizeof(int64_t))); cudaMalloc((void**) &d_data_out, (sizeof(uint8_t) * totalBlockCount)); } void GPUImplementation() { //Copy data to GPU int64_t totalBlockCount = x*y; cudaMemcpy(d_data_in, cells, (sizeof(uint8_t) * totalBlockCount), cudaMemcpyHostToDevice); cudaMemcpy(d_data_in_x, &x, sizeof(int64_t), cudaMemcpyHostToDevice); cudaMemcpy(d_data_in_y, &y, sizeof(int64_t), cudaMemcpyHostToDevice); //Executing GPU calculation if(processorType == GPU_OPTIMIZED_PROCESSOR && (x < 1024) && (y < 1024)) { gpuGameOfLifeOptimized<<<optGridSize, optBlockSize, sizeof(uint8_t)*(x * 3)>>>(d_data_in, d_data_in_x, d_data_in_y, d_data_out); } else { gpuGameOfLifeNaive<<<gridSize, blockSize>>>(d_data_in, d_data_in_x, d_data_in_y, d_data_out); } cudaThreadSynchronize(); //Complete GPU calculation //Reading GPU data out cudaMemcpy(outputCell, d_data_out, (sizeof(uint8_t) * totalBlockCount), cudaMemcpyDeviceToHost); uint8_t * switcher = cells; cells = outputCell; outputCell = switcher; } /** * This optimization does not work... Also this has to be for grid sizes less than 1024 x and y access * The naieve call count is * read global memory calls: 9 * write global memory calls: 2 * read shared memory calls: 0 * write shared memory calls: 0 * * The optimized call count is * read global memory calls: 3 * write global memory calls: 1 * read shared memory calls: 12 * write shared memory calls: 3 * */ __global__ void gpuGameOfLifeOptimized(uint8_t* cellsGlobal, int64_t* dataX, int64_t* dataY, uint8_t *outputCellLocal) { extern __shared__ uint8_t sData[]; uint64_t tId = threadIdx.x + (blockIdx.x * blockDim.x); uint64_t x = *dataX, y = *dataY; uint64_t totalBlockCount = x*y; uint64_t yId = tId/y; uint64_t xId = tId - (yId*x); //Pass data to shared if(tId <= totalBlockCount) { uint8_t rowIndex = 0; uint64_t yVal = yCellMinus(1,yId,y); while (rowIndex < 3) { sData[(rowIndex * x) + xId] =cellsGlobal[((yVal*x)) + xId]; yVal = yCellPlus(1,yVal, y); ++rowIndex; } } syncthreads(); if(tId <= totalBlockCount) { uint8_t localCellCount = surrondingCellCountOptimized(sData, xId, x, y); uint64_t result = 0; switch(localCellCount) { case 2: if(sData[x + xId]) { result = 1; } break; case 3: result = 1; break; } outputCellLocal[xId+(yId*x)] = result; } } __global__ void gpuGameOfLifeNaive(uint8_t* cellsLocal, int64_t* dataX, int64_t* dataY, uint8_t *outputCellLocal) { uint64_t tId = threadIdx.x + (blockIdx.x * blockDim.x); uint64_t x = *dataX, y = *dataY; uint64_t totalBlockCount = x*y; if(tId <= totalBlockCount) { uint64_t yId = tId/y; uint64_t xId = tId - (yId*x); outputCellLocal[xId+(yId*x)] = 0; uint8_t localCellCount = surrondingCellCount(cellsLocal, xId, yId, x, y); switch(localCellCount) { case 2: if(cellsLocal[xId+(yId*x)]) { outputCellLocal[xId+(yId*x)] = 1; } break; case 3: outputCellLocal[xId+(yId*x)] = 1; break; } } } //Improved Naieve //__global__ void 
gpuGameOfLifeNaive(uint8_t* cellsLocal, int64_t* dataX, int64_t* dataY, uint8_t *outputCellLocal) //{ // int64_t x = *dataX, y = *dataY; // int64_t xId = 0; // int64_t yIndex = threadIdx.x + (blockIdx.x * blockDim.x); // int64_t totalBlockCount = x*y; // if(yIndex <= totalBlockCount) // { // xId = 0; // while(xId < x) // { // outputCellLocal[xId+(yIndex*y)] = 0; // uint8_t localCellCount = surrondingCellCount(cellsLocal, xId, yIndex, x, y); // switch(localCellCount) // { // case 2: // if(cellsLocal[xId+(yIndex*y)]) // { // outputCellLocal[xId+(yIndex*y)] = 1; // } // break; // case 3: // outputCellLocal[xId+(yIndex*y)] = 1; // break; // } // // ++xId; // } // } //} //Origonal //__global__ void gpuGameOfLifeNaive(uint8_t* cellsLocal, int64_t* dataX, int64_t* dataY, uint8_t *outputCellLocal) //{ // int64_t x = *dataX, y = *dataY; // int64_t yId =0, xId = 0; // int64_t ysId = threadIdx.x + (blockIdx.x * blockDim.x); // if(ysId == 1) // { // while(yId < y) // { // xId = 0; // while(xId < x) // { // outputCellLocal[xId+(yId*y)] = 0; // uint8_t localCellCount = surrondingCellCount(cellsLocal, xId, yId, x, y); // switch(localCellCount) // { // case 2: // if(cellsLocal[xId+(yId*y)]) // { // outputCellLocal[xId+(yId*y)] = 1; // } // break; // case 3: // outputCellLocal[xId+(yId*y)] = 1; // break; // } // // ++xId; // } // ++yId; // } // } //}
25fc995793ee91d608bc2ef3ed466af38d3ccc96.hip
// !!! This is a file automatically generated by hipify!!!
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#define EIGEN_USE_GPU
#include "betainc_impl.cuh"
#include <math.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include "unsupported/Eigen/CXX11/Tensor"

template <typename T>
void CalBetainc(const size_t size, T *input_a, T *input_b, T *input_x, T *output,
                const uint32_t &device_id, hipStream_t cuda_stream) {
  int num = static_cast<int>(size);
  T *agpu = input_a, *bgpu = input_b, *xgpu = input_x;
  int gpudevice = device_id;
  Eigen::GpuStreamDevice stream(&cuda_stream, gpudevice);
  Eigen::GpuDevice gpu_device(&stream);
  Eigen::TensorMap<Eigen::Tensor<T, 1>> Eigen_a(agpu, num);
  Eigen::TensorMap<Eigen::Tensor<T, 1>> Eigen_b(bgpu, num);
  Eigen::TensorMap<Eigen::Tensor<T, 1>> Eigen_x(xgpu, num);
  Eigen::TensorMap<Eigen::Tensor<T, 1>> Eigen_z(output, num);
  Eigen_z.device(gpu_device) = Eigen::betainc(Eigen_a, Eigen_b, Eigen_x);
  return;
}

template CUDA_LIB_EXPORT void CalBetainc<float>(const size_t size, float *input_a, float *input_b, float *input_x,
                                                float *output, const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalBetainc<double>(const size_t size, double *input_a, double *input_b, double *input_x,
                                                 double *output, const uint32_t &device_id, hipStream_t cuda_stream);
25fc995793ee91d608bc2ef3ed466af38d3ccc96.cu
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#define EIGEN_USE_GPU
#include "betainc_impl.cuh"
#include <math.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include "unsupported/Eigen/CXX11/Tensor"

template <typename T>
void CalBetainc(const size_t size, T *input_a, T *input_b, T *input_x, T *output,
                const uint32_t &device_id, cudaStream_t cuda_stream) {
  int num = static_cast<int>(size);
  T *agpu = input_a, *bgpu = input_b, *xgpu = input_x;
  int gpudevice = device_id;
  Eigen::GpuStreamDevice stream(&cuda_stream, gpudevice);
  Eigen::GpuDevice gpu_device(&stream);
  Eigen::TensorMap<Eigen::Tensor<T, 1>> Eigen_a(agpu, num);
  Eigen::TensorMap<Eigen::Tensor<T, 1>> Eigen_b(bgpu, num);
  Eigen::TensorMap<Eigen::Tensor<T, 1>> Eigen_x(xgpu, num);
  Eigen::TensorMap<Eigen::Tensor<T, 1>> Eigen_z(output, num);
  Eigen_z.device(gpu_device) = Eigen::betainc(Eigen_a, Eigen_b, Eigen_x);
  return;
}

template CUDA_LIB_EXPORT void CalBetainc<float>(const size_t size, float *input_a, float *input_b, float *input_x,
                                                float *output, const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalBetainc<double>(const size_t size, double *input_a, double *input_b, double *input_x,
                                                 double *output, const uint32_t &device_id, cudaStream_t cuda_stream);
0f58a1fd0b6f15fa5d8bf43d3aeaa25ad85f2a3f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Lab 5, image filters with CUDA. // Compile with a command-line similar to Lab 4: // nvcc filter.cu -c -arch=sm_30 -o filter.o // g++ filter.o milli.c readppm.c -lGL -lm -lcuda -lcudart -L/usr/local/cuda/lib -lglut -o filter // or (multicore lab) // nvcc filter.cu -c -arch=sm_20 -o filter.o // g++ filter.o milli.c readppm.c -lGL -lm -lcuda -L/usr/local/cuda/lib64 -lcudart -lglut -o filter // 2017-11-27: Early pre-release, dubbed "beta". // 2017-12-03: First official version! Brand new lab 5 based on the old lab 6. // Better variable names, better prepared for some lab tasks. More changes may come // but I call this version 1.0b2. // 2017-12-04: Two fixes: Added command-lines (above), fixed a bug in computeImages // that allocated too much memory. b3 // 2017-12-04: More fixes: Tightened up the kernel with edge clamping. // Less code, nicer result (no borders). Cleaned up some messed up X and Y. b4 #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #ifdef __APPLE__ #include <GLUT/glut.h> #include <OpenGL/gl.h> #else #include <GL/glut.h> #endif #include "readppm.h" #include "milli.h" // Use these for setting shared memory size. #define maxKernelSizeX 10 #define maxKernelSizeY 10 #define BLOCKSIZE 32 __global__ void filter(unsigned char *image, unsigned char *out, const unsigned int imagesizex, const unsigned int imagesizey, const int kernelsizex, const int kernelsizey) { int localx = threadIdx.x; int localy = threadIdx.y; int dy, dx; unsigned int sumx, sumy, sumz; __shared__ unsigned char local_mem[BLOCKSIZE*3][BLOCKSIZE]; int startx = (BLOCKSIZE-2*kernelsizex)*blockIdx.x; //block start index int starty = (BLOCKSIZE-2*kernelsizey)*blockIdx.y; int realx = localx - kernelsizex + startx; //real image idx that local idx corresponding to int realy = localy - kernelsizey + starty; int xx = min(max(realx, 0), imagesizex-1); //clamping int yy = min(max(realy, 0), imagesizey-1); local_mem[3*localx+0][localy] = image[((yy)*imagesizex+(xx))*3+0]; local_mem[3*localx+1][localy] = image[((yy)*imagesizex+(xx))*3+1]; local_mem[3*localx+2][localy] = image[((yy)*imagesizex+(xx))*3+2]; __syncthreads(); int divby = (2*kernelsizex+1)*(2*kernelsizey+1); // Works for box filters only! 
int x,y; // Filter kernel (simple box filter) sumx=0;sumy=0;sumz=0; for(dy=-kernelsizey;dy<=kernelsizey;dy++) { for(dx=-kernelsizex;dx<=kernelsizex;dx++) { x = min(max(localx+dx, 0), BLOCKSIZE-1); y = min(max(localy+dy, 0), BLOCKSIZE-1); sumx += local_mem[x*3+0][y]; sumy += local_mem[x*3+1][y]; sumz += local_mem[x*3+2][y]; } } if (realx >= 0 && realx <= imagesizex - 1){ if (realy >= 0 && realy <= imagesizey - 1){ out[((yy)*imagesizex+(xx))*3+0] = sumx/divby; out[((yy)*imagesizex+(xx))*3+1] = sumy/divby; out[((yy)*imagesizex+(xx))*3+2] = sumz/divby; /*out[((yy)*imagesizex+(xx))*3+0] =local_mem[3*localx+0][localy]; out[((yy)*imagesizex+(xx))*3+1] =local_mem[3*localx+1][localy]; out[((yy)*imagesizex+(xx))*3+2] =local_mem[3*localx+2][localy];*/ } } } // Global variables for image data unsigned char *image, *pixels, *dev_bitmap, *dev_input; unsigned int imagesizey, imagesizex; // Image size //////////////////////////////////////////////////////////////////////////////// // main computation function //////////////////////////////////////////////////////////////////////////////// void computeImages(int kernelsizex, int kernelsizey) { double t; if (kernelsizex > maxKernelSizeX || kernelsizey > maxKernelSizeY) { printf("Kernel size out of bounds!\n"); return; } pixels = (unsigned char *) malloc(imagesizex*imagesizey*3); hipMalloc( (void**)&dev_input, imagesizex*imagesizey*3); hipMemcpy( dev_input, image, imagesizey*imagesizex*3, hipMemcpyHostToDevice ); hipMalloc( (void**)&dev_bitmap, imagesizex*imagesizey*3); printf("GRIDSIZE:%f\n", ceil(float(imagesizex)/(32-2*kernelsizex))); dim3 grid(ceil(float(imagesizex)/(BLOCKSIZE-2*kernelsizex)),ceil(float(imagesizey)/(BLOCKSIZE-2*kernelsizey))); dim3 block(BLOCKSIZE,BLOCKSIZE); ResetMilli(); hipLaunchKernelGGL(( filter), dim3(grid),dim3(block), 0, 0, dev_input, dev_bitmap, imagesizex, imagesizey, kernelsizex, kernelsizey); // change to blocksize = 32*32 hipDeviceSynchronize(); t = GetSeconds(); printf("COST %lf seconds\n", t); // Check for errors! hipError_t err = hipGetLastError(); if (err != hipSuccess) printf("Error: %s\n", hipGetErrorString(err)); hipMemcpy( pixels, dev_bitmap, imagesizey*imagesizex*3, hipMemcpyDeviceToHost ); hipFree( dev_bitmap ); hipFree( dev_input ); } // Display images void Draw() { // Dump the whole picture onto the screen. glClearColor( 0.0, 0.0, 0.0, 1.0 ); glClear( GL_COLOR_BUFFER_BIT ); if (imagesizey >= imagesizex) { // Not wide - probably square. Original left, result right. glRasterPos2f(-1, -1); glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, image ); glRasterPos2i(0, -1); glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, pixels); } else { // Wide image! Original on top, result below. 
glRasterPos2f(-1, -1); glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, pixels ); glRasterPos2i(-1, 0); glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, image ); } glFlush(); } // Main program, inits int main( int argc, char** argv) { glutInit(&argc, argv); glutInitDisplayMode( GLUT_SINGLE | GLUT_RGBA ); if (argc > 1) image = readppm(argv[1], (int *)&imagesizex, (int *)&imagesizey); else image = readppm((char *)"maskros512.ppm", (int *)&imagesizex, (int *)&imagesizey); if (imagesizey >= imagesizex) glutInitWindowSize( imagesizex*2, imagesizey ); else glutInitWindowSize( imagesizex, imagesizey*2 ); glutCreateWindow("Lab 5"); glutDisplayFunc(Draw); ResetMilli(); computeImages(7, 7); // You can save the result to a file like this: writeppm("out.ppm", imagesizey, imagesizex, pixels); glutMainLoop(); return 0; }
0f58a1fd0b6f15fa5d8bf43d3aeaa25ad85f2a3f.cu
// Lab 5, image filters with CUDA. // Compile with a command-line similar to Lab 4: // nvcc filter.cu -c -arch=sm_30 -o filter.o // g++ filter.o milli.c readppm.c -lGL -lm -lcuda -lcudart -L/usr/local/cuda/lib -lglut -o filter // or (multicore lab) // nvcc filter.cu -c -arch=sm_20 -o filter.o // g++ filter.o milli.c readppm.c -lGL -lm -lcuda -L/usr/local/cuda/lib64 -lcudart -lglut -o filter // 2017-11-27: Early pre-release, dubbed "beta". // 2017-12-03: First official version! Brand new lab 5 based on the old lab 6. // Better variable names, better prepared for some lab tasks. More changes may come // but I call this version 1.0b2. // 2017-12-04: Two fixes: Added command-lines (above), fixed a bug in computeImages // that allocated too much memory. b3 // 2017-12-04: More fixes: Tightened up the kernel with edge clamping. // Less code, nicer result (no borders). Cleaned up some messed up X and Y. b4 #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #ifdef __APPLE__ #include <GLUT/glut.h> #include <OpenGL/gl.h> #else #include <GL/glut.h> #endif #include "readppm.h" #include "milli.h" // Use these for setting shared memory size. #define maxKernelSizeX 10 #define maxKernelSizeY 10 #define BLOCKSIZE 32 __global__ void filter(unsigned char *image, unsigned char *out, const unsigned int imagesizex, const unsigned int imagesizey, const int kernelsizex, const int kernelsizey) { int localx = threadIdx.x; int localy = threadIdx.y; int dy, dx; unsigned int sumx, sumy, sumz; __shared__ unsigned char local_mem[BLOCKSIZE*3][BLOCKSIZE]; int startx = (BLOCKSIZE-2*kernelsizex)*blockIdx.x; //block start index int starty = (BLOCKSIZE-2*kernelsizey)*blockIdx.y; int realx = localx - kernelsizex + startx; //real image idx that local idx corresponding to int realy = localy - kernelsizey + starty; int xx = min(max(realx, 0), imagesizex-1); //clamping int yy = min(max(realy, 0), imagesizey-1); local_mem[3*localx+0][localy] = image[((yy)*imagesizex+(xx))*3+0]; local_mem[3*localx+1][localy] = image[((yy)*imagesizex+(xx))*3+1]; local_mem[3*localx+2][localy] = image[((yy)*imagesizex+(xx))*3+2]; __syncthreads(); int divby = (2*kernelsizex+1)*(2*kernelsizey+1); // Works for box filters only! 
int x,y; // Filter kernel (simple box filter) sumx=0;sumy=0;sumz=0; for(dy=-kernelsizey;dy<=kernelsizey;dy++) { for(dx=-kernelsizex;dx<=kernelsizex;dx++) { x = min(max(localx+dx, 0), BLOCKSIZE-1); y = min(max(localy+dy, 0), BLOCKSIZE-1); sumx += local_mem[x*3+0][y]; sumy += local_mem[x*3+1][y]; sumz += local_mem[x*3+2][y]; } } if (realx >= 0 && realx <= imagesizex - 1){ if (realy >= 0 && realy <= imagesizey - 1){ out[((yy)*imagesizex+(xx))*3+0] = sumx/divby; out[((yy)*imagesizex+(xx))*3+1] = sumy/divby; out[((yy)*imagesizex+(xx))*3+2] = sumz/divby; /*out[((yy)*imagesizex+(xx))*3+0] =local_mem[3*localx+0][localy]; out[((yy)*imagesizex+(xx))*3+1] =local_mem[3*localx+1][localy]; out[((yy)*imagesizex+(xx))*3+2] =local_mem[3*localx+2][localy];*/ } } } // Global variables for image data unsigned char *image, *pixels, *dev_bitmap, *dev_input; unsigned int imagesizey, imagesizex; // Image size //////////////////////////////////////////////////////////////////////////////// // main computation function //////////////////////////////////////////////////////////////////////////////// void computeImages(int kernelsizex, int kernelsizey) { double t; if (kernelsizex > maxKernelSizeX || kernelsizey > maxKernelSizeY) { printf("Kernel size out of bounds!\n"); return; } pixels = (unsigned char *) malloc(imagesizex*imagesizey*3); cudaMalloc( (void**)&dev_input, imagesizex*imagesizey*3); cudaMemcpy( dev_input, image, imagesizey*imagesizex*3, cudaMemcpyHostToDevice ); cudaMalloc( (void**)&dev_bitmap, imagesizex*imagesizey*3); printf("GRIDSIZE:%f\n", ceil(float(imagesizex)/(32-2*kernelsizex))); dim3 grid(ceil(float(imagesizex)/(BLOCKSIZE-2*kernelsizex)),ceil(float(imagesizey)/(BLOCKSIZE-2*kernelsizey))); dim3 block(BLOCKSIZE,BLOCKSIZE); ResetMilli(); filter<<<grid,block>>>(dev_input, dev_bitmap, imagesizex, imagesizey, kernelsizex, kernelsizey); // change to blocksize = 32*32 cudaThreadSynchronize(); t = GetSeconds(); printf("COST %lf seconds\n", t); // Check for errors! cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(err)); cudaMemcpy( pixels, dev_bitmap, imagesizey*imagesizex*3, cudaMemcpyDeviceToHost ); cudaFree( dev_bitmap ); cudaFree( dev_input ); } // Display images void Draw() { // Dump the whole picture onto the screen. glClearColor( 0.0, 0.0, 0.0, 1.0 ); glClear( GL_COLOR_BUFFER_BIT ); if (imagesizey >= imagesizex) { // Not wide - probably square. Original left, result right. glRasterPos2f(-1, -1); glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, image ); glRasterPos2i(0, -1); glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, pixels); } else { // Wide image! Original on top, result below. glRasterPos2f(-1, -1); glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, pixels ); glRasterPos2i(-1, 0); glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, image ); } glFlush(); } // Main program, inits int main( int argc, char** argv) { glutInit(&argc, argv); glutInitDisplayMode( GLUT_SINGLE | GLUT_RGBA ); if (argc > 1) image = readppm(argv[1], (int *)&imagesizex, (int *)&imagesizey); else image = readppm((char *)"maskros512.ppm", (int *)&imagesizex, (int *)&imagesizey); if (imagesizey >= imagesizex) glutInitWindowSize( imagesizex*2, imagesizey ); else glutInitWindowSize( imagesizex, imagesizey*2 ); glutCreateWindow("Lab 5"); glutDisplayFunc(Draw); ResetMilli(); computeImages(7, 7); // You can save the result to a file like this: writeppm("out.ppm", imagesizey, imagesizex, pixels); glutMainLoop(); return 0; }
05637d1d269fea0dfd61bc049bc3e1e5ff8ed640.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" using namespace std; template < typename F > struct vArray { F* _; size_t n; vArray( F* _, size_t n ) : _( _ ) , n( n ) { } __host__ __device__ F& operator[]( size_t I ) const { return _[ I ]; } }; template < typename F > struct Array : vArray< F > { ~ Array() { hipFree( vArray< F >::_ ); } static F* Malloc( size_t N ) { F* _; hipMallocManaged( &_, N * sizeof( F ) ); return _; } Array( size_t n ) : vArray< F >( Malloc( n ), n ) { } }; template < typename F > struct vMatrix { F* _; size_t h; size_t w; size_t v; vMatrix( F* _, size_t h, size_t w, size_t v ) : _( _ ) , h( h ) , w( w ) , v( v ) { } __host__ __device__ F& operator()( size_t Y, size_t X ) const { return _[ Y * v + X ]; } vArray< F > operator[]( size_t I ) const { return vArray< F >( _ + I * v , w ); } }; #include <iostream> template < typename F > ostream& operator <<( ostream& S, const vMatrix< F >& P ) { for ( size_t y = 0; y < P.h; y++ ) { for ( size_t x = 0; x < P.w; x++ ) S << " " << P( y, x ); S << endl; } return S; } template < typename F > struct Matrix : vMatrix< F > { ~ Matrix() { hipFree( vMatrix< F >::_ ); } static F* Malloc( size_t N ) { F* _; hipMallocManaged( &_, N * sizeof( F ) ); return _; } Matrix( size_t h, size_t w ) : vMatrix< F >( Malloc( h * w ), h, w, w ) { } Matrix( const vMatrix< F >& _ ) : vMatrix< F >( Malloc( _.h * _.w ), _.h, _.w, _.w ) { for ( size_t y = 0; y < _.h; y++ ) for ( size_t x = 0; x < _.w; x++ ) (*this)( y, x ) = _( y, x ); } Matrix( const Matrix< F >& _ ) : Matrix< F >( (vMatrix< F >)_ ) { } }; #define UNITS( p, q ) ( ( p + q - 1 ) / q ) #define B_S 256 inline dim3 grid1D( size_t N ) { return dim3( UNITS( N, B_S ) ); } inline dim3 thread1D() { return dim3( B_S ); } #define B_S_H 32 #define B_S_W 32 inline dim3 grid2D( size_t H, size_t W ) { return dim3( UNITS( W, B_S_W ), UNITS( H, B_S_H ) ); } inline dim3 thread2D() { return dim3( B_S_W, B_S_H ); } #include <vector> template < typename F, int Y, int X > Matrix< F > MakeMatrix( initializer_list< F > args ) { vector< F > argsV = args; Matrix< F > _( Y, X ); for ( size_t y = 0; y < Y; y++ ) { for ( size_t x = 0; x < X; x++ ) { _( y, x ) = argsV[ y * X + x ]; } } return _; } //////////////////////////////////////////////////////////////////////////////// 3.2.4 template < typename F > __global__ void SIGMOID( vMatrix< F > V, vMatrix< F > P ) { auto y = (size_t)blockIdx.y * blockDim.y + threadIdx.y; if ( y >= V.h ) return; auto x = (size_t)blockIdx.x * blockDim.x + threadIdx.x; if ( x >= V.w ) return; V( y, x ) = 1 / ( 1 + exp( - P( y, x ) ) ); } template < typename F > Matrix< F > sigmoid( const vMatrix< F >& P ) { Matrix< F > v( P.h, P.w ); hipLaunchKernelGGL(( SIGMOID), dim3(grid2D( v.h, v.w )), dim3(thread2D()) , 0, 0, v, P ); hipDeviceSynchronize(); return v; } template < typename F > void _3_2_4() { cout << "3.2.4 sigmoid" << endl; cout << sigmoid( MakeMatrix< F, 2, 5 >( { -1, -0.5, 0, 0.5, 1, -1, -0.5, 0, 0.5, 1 } ) ); } //////////////////////////////////////////////////////////////////////////////// 3.2.7 template < typename F > __global__ void RELU( vMatrix< F > V, vMatrix< F > P ) { auto y = (size_t)blockIdx.y * blockDim.y + threadIdx.y; if ( y >= V.h ) return; auto x = (size_t)blockIdx.x * blockDim.x + threadIdx.x; if ( x >= V.w ) return; V( y, x ) = max( F( 0 ), P( y, x ) ); } template < typename F > Matrix< F > ReLU( const vMatrix< F >& P ) { Matrix< F > v( P.h, P.w ); hipLaunchKernelGGL(( RELU), dim3(grid2D( v.h, v.w )), dim3(thread2D()) , 
0, 0, v, P ); hipDeviceSynchronize(); return v; } template < typename F > void _3_2_7() { cout << "3.2.7 ReLU" << endl; cout << ReLU( MakeMatrix< F, 2, 5 >( { -1, -0.5, 0, 0.5, 1, -1, -0.5, 0, 0.5, 1 } ) ); } //////////////////////////////////////////////////////////////////////////////// 3.3.2 template < typename F > __global__ void DOT( vMatrix< F > V, vMatrix< F > L, vMatrix< F > R, size_t WH ) { auto y = (size_t)blockIdx.y * blockDim.y + threadIdx.y; if ( y >= V.h ) return; auto x = (size_t)blockIdx.x * blockDim.x + threadIdx.x; if ( x >= V.w ) return; auto lp = L._ + y * L.v; auto rp = R._ + x; F w = 0; for ( size_t _ = 0; _ < WH; _++ ) w += lp[ _ ] * rp[ _ * R.v ]; V( y, x ) = w; } template < typename F > Matrix< F > operator *( const vMatrix< F >& L, const vMatrix< F >& R ) { Matrix< F > v( L.h, R.w ); hipLaunchKernelGGL(( DOT), dim3(grid2D( v.h, v.w )), dim3(thread2D()) , 0, 0, v, L, R, L.w ); hipDeviceSynchronize(); return v; } template < typename F > void _3_3_2() { cout << "3.3.2 dot operation" << endl; auto l = MakeMatrix< F, 2, 3 >( { 1, 2, 3, 4, 5, 6 } ); auto r = MakeMatrix< F, 3, 2 >( { 1, 2, 3, 4, 5, 6 } ); cout << l * r; } //////////////////////////////////////////////////////////////////////////////// 3.4.3 #include <map> template < typename F > map< string, Matrix< F > > init_network_3_4_3() { map< string, Matrix< F > > _; _.emplace( "W1", MakeMatrix< F, 2, 3 >( { 0.1, 0.3, 0.5, 0.2, 0.4, 0.6 } ) ); _.emplace( "b1", MakeMatrix< F, 1, 3 >( { 0.1, 0.2, 0.3 } ) ); _.emplace( "W2", MakeMatrix< F, 3, 2 >( { 0.1, 0.4, 0.2, 0.5, 0.3, 0.6 } ) ); _.emplace( "b2", MakeMatrix< F, 1, 2 >( { 0.1, 0.2 } ) ); _.emplace( "W3", MakeMatrix< F, 2, 2 >( { 0.1, 0.3, 0.2, 0.4 } ) ); _.emplace( "b3", MakeMatrix< F, 1, 2 >( { 0.1, 0.2 } ) ); return _; } template < typename F > Matrix< F > identify_function( const Matrix< F >& _ ) { return _; } template < typename F > __global__ void ADD( vMatrix< F > V, vMatrix< F > L, vMatrix< F > R ) { auto y = (size_t)blockIdx.y * blockDim.y + threadIdx.y; if ( y >= V.h ) return; auto x = (size_t)blockIdx.x * blockDim.x + threadIdx.x; if ( x >= V.w ) return; V( y, x ) = L( y, x ) + R( y, x ); } template < typename F > Matrix< F > operator +( const vMatrix< F >& L, const vMatrix< F >& R ) { Matrix< F > v( L.h, R.w ); hipLaunchKernelGGL(( ADD), dim3(grid2D( v.h, v.w )), dim3(thread2D()) , 0, 0, v, L, R ); hipDeviceSynchronize(); return v; } template < typename F > Matrix< F > forward( map< string, Matrix< F > >& network, const vMatrix< F >& x ) { auto W1 = network.at( "W1" ); auto W2 = network.at( "W2" ); auto W3 = network.at( "W3" ); auto b1 = network.at( "b1" ); auto b2 = network.at( "b2" ); auto b3 = network.at( "b3" ); auto a1 = x * W1 + b1; auto z1 = sigmoid( a1 ); auto a2 = z1 * W2 + b2; auto z2 = sigmoid( a2 ); auto a3 = z2 * W3 + b3; auto y = identify_function( a3 ); return y; } template < typename F > void _3_4_3() { cout << "3.4.3 neural" << endl; auto network = init_network_3_4_3< F >(); auto x = MakeMatrix< F, 1, 2 >( { 1.0, 0.5 } ); auto y = forward( network, x ); cout << y; } //////////////////////////////////////////////////////////////////////////////// 3.5.1 template < typename F > __global__ void EXP( vMatrix< F > V, vMatrix< F > P ) { auto y = (size_t)blockIdx.y * blockDim.y + threadIdx.y; if ( y >= V.h ) return; auto x = (size_t)blockIdx.x * blockDim.x + threadIdx.x; if ( x >= V.w ) return; V( y, x ) = exp( P( y, x ) ); } template < typename F > Matrix< F > exp( const vMatrix< F >& P ) { Matrix< F > v( P.h, P.w ); hipLaunchKernelGGL(( 
EXP), dim3(grid2D( v.h, v.w )), dim3(thread2D()) , 0, 0, v, P ); hipDeviceSynchronize(); return v; } template < typename F > F sum( const vMatrix< F >& P ) { F _ = 0; for ( size_t y = 0; y < P.h; y++ ) for ( size_t x = 0; x < P.w; x++ ) _ += P( y, x ); return _; } template < typename F > __global__ void DIV_INP( vMatrix< F > V, F P ) { auto y = (size_t)blockIdx.y * blockDim.y + threadIdx.y; if ( y >= V.h ) return; auto x = (size_t)blockIdx.x * blockDim.x + threadIdx.x; if ( x >= V.w ) return; V( y, x ) /= P; } template < typename F > void operator /=( const vMatrix< F >& L, F R ) { hipLaunchKernelGGL(( DIV_INP), dim3(grid2D( L.h, L.w )), dim3(thread2D()) , 0, 0, L, R ); hipDeviceSynchronize(); } template < typename F > Matrix< F > softmax_primitive( const vMatrix< F >& p ) { auto v = exp( p ); v /= sum( v ); return v; } template < typename F > void _3_5_1() { cout << "3.5.1 softmax_primitive" << endl; cout << softmax_primitive( MakeMatrix< F, 1, 3 >( { 0.3, 2.9, 4.0 } ) ); } //////////////////////////////////////////////////////////////////////////////// 3.5.2 template < typename F > F max( const vMatrix< F >& P ) { F _ = P( 0, 0 ); for ( size_t y = 0; y < P.h; y++ ) for ( size_t x = 0; x < P.w; x++ ) if ( P( y, x ) > _ ) _ = P( y, x ); return _; } template < typename F > __global__ void SUB_C( vMatrix< F > V, vMatrix< F > L, F R ) { auto y = (size_t)blockIdx.y * blockDim.y + threadIdx.y; if ( y >= V.h ) return; auto x = (size_t)blockIdx.x * blockDim.x + threadIdx.x; if ( x >= V.w ) return; V( y, x ) = L( y, x ) - R; } template < typename F > Matrix< F > operator -( const vMatrix< F >& L, F R ) { Matrix< F > v( L.h, L.w ); hipLaunchKernelGGL(( SUB_C), dim3(grid2D( v.h, v.w )), dim3(thread2D()) , 0, 0, v, L, R ); hipDeviceSynchronize(); return v; } template < typename F > Matrix< F > softmax( const vMatrix< F >& p ) { auto v = exp( p - max( p ) ); v /= sum( v ); return v; } template < typename F > void _3_5_2() { cout << "3.5.2 softmax" << endl; cout << softmax( MakeMatrix< F, 1, 3 >( { 1010, 1000, 990 } ) ); } //////////////////////////////////////////////////////////////////////////////// 3.5.3 template < typename F > void _3_5_3() { cout << "3.5.3 sum( softmax )" << endl; cout << sum( softmax( MakeMatrix< F, 1, 3 >( { 0.3, 2.9, 4.0 } ) ) ) << endl; } //////////////////////////////////////////////////////////////////////////////// 3.6.1 #include <fstream> template < typename F > map< string, Matrix< F > > get_data() { map< string, Matrix< F > > v; { ifstream ifs( "../train-images.idx3-ubyte" ); if ( ! ifs.is_open() ) throw "../train-images.idx3-ubyte"; ifs.ignore( 16 ); Matrix< F > w( 60000, 28 * 28 ); for ( size_t _ = 0; _ < w.h * w.w; _++ ) w._[ _ ] = ( (unsigned char)ifs.get() ) / 255.0; v.emplace( "x_train", w ); } { ifstream ifs( "../train-labels.idx1-ubyte" ); if ( ! ifs.is_open() ) throw "../train-labels.idx1-ubyte"; ifs.ignore( 8 ); Matrix< F > w( 1, 60000 ); for ( size_t _ = 0; _ < w.h * w.w; _++ ) w._[ _ ] = ifs.get(); v.emplace( "t_train", w ); } { ifstream ifs( "../t10k-images.idx3-ubyte" ); if ( ! ifs.is_open() ) throw "../t10k-images.idx3-ubyte"; ifs.ignore( 16 ); Matrix< F > w( 10000, 28 * 28 ); for ( size_t _ = 0; _ < w.h * w.w; _++ ) w._[ _ ] = ( (unsigned char)ifs.get() ) / 255.0; v.emplace( "x_test", w ); } { ifstream ifs( "../t10k-labels.idx1-ubyte" ); if ( ! 
ifs.is_open() ) throw "../t10k-labels.idx1-ubyte"; ifs.ignore( 8 ); Matrix< F > w( 1, 10000 ); for ( size_t _ = 0; _ < w.h * w.w; _++ ) w._[ _ ] = ifs.get(); v.emplace( "t_test", w ); } return v; } map< string, Matrix< double > > init_network() { map< string, Matrix< double > > v; ifstream ifs( "../sample_weight.bin" ); if ( ! ifs.is_open() ) throw "../sample_weight.bin"; { Matrix< double > w( 784, 50 ); ifs.read( (char*)w._, w.h * w.w * sizeof( double ) ); v.emplace( "W1", w ); } { Matrix< double > w( 50, 100 ); ifs.read( (char*)w._, w.h * w.w * sizeof( double ) ); v.emplace( "W2", w ); } { Matrix< double > w( 100, 10 ); ifs.read( (char*)w._, w.h * w.w * sizeof( double ) ); v.emplace( "W3", w ); } { Matrix< double > w( 1, 50 ); ifs.read( (char*)w._, w.h * w.w * sizeof( double ) ); v.emplace( "b1", w ); } { Matrix< double > w( 1, 100 ); ifs.read( (char*)w._, w.h * w.w * sizeof( double ) ); v.emplace( "b2", w ); } { Matrix< double > w( 1, 10 ); ifs.read( (char*)w._, w.h * w.w * sizeof( double ) ); v.emplace( "b3", w ); } return v; } template < typename F > __global__ void ADD( vMatrix< F > V, vMatrix< F > L, vArray< F > R ) { auto y = (size_t)blockIdx.y * blockDim.y + threadIdx.y; if ( y >= V.h ) return; auto x = (size_t)blockIdx.x * blockDim.x + threadIdx.x; if ( x >= V.w ) return; V( y, x ) = L( y, x ) + R[ x ]; } template < typename F > Matrix< F > operator +( const vMatrix< F >& L, const vArray< F >& R ) { Matrix< F > v( L.h, L.w ); hipLaunchKernelGGL(( ADD), dim3(grid2D( v.h, v.w )), dim3(thread2D()) , 0, 0, v, L, R ); hipDeviceSynchronize(); return v; } template < typename F > Matrix< F > predict( map< string, Matrix< F > >& network, const vMatrix< F >& x ) { Matrix< F >& W1 = network.at( "W1" ); Matrix< F >& W2 = network.at( "W2" ); Matrix< F >& W3 = network.at( "W3" ); auto b1 = network.at( "b1" )[ 0 ]; auto b2 = network.at( "b2" )[ 0 ]; auto b3 = network.at( "b3" )[ 0 ]; auto a1 = x * W1 + b1; auto z1 = sigmoid( a1 ); auto a2 = z1 * W2 + b2; auto z2 = sigmoid( a2 ); auto a3 = z2 * W3 + b3; auto y = softmax( a3 ); return y; } template < typename F > F argmax( const vArray< F >& P ) { size_t _ = 0; for ( size_t i = 1; i < P.n; i++ ) if ( P[ i ] > P[ _ ] ) _ = i; return F( _ ); } template < typename F > Array< F > argmax( const vMatrix< F >& P ) { Array< F > _( P.h ); for ( size_t y = 0; y < P.h; y++ ) _[ y ] = argmax( P[ y ] ); return _; } template < typename F > vArray< F > Part( const vArray< F >& _, size_t O, size_t N ) { return vArray< F >( _._ + O , N ); } template < typename F > vMatrix< F > Part( const vMatrix< F >& _, size_t Y, size_t X, size_t H, size_t W ) { return vMatrix< F >( _._ + Y * _.v + X , H , W , _.v ); } void _3_6_1() { cout << "3.6.1 MNIST" << endl; auto w = get_data< double >(); auto x_test = w.at( "x_test" ); auto t_test = w.at( "t_test" )[ 0 ]; auto network = init_network(); auto accuracy_cnt = 0; for ( size_t i = 0; i < x_test.h; i++ ) { auto y = predict( network, Part( x_test, i, 0, 1, x_test.w ) ); auto p = argmax( y[ 0 ] ); if ( p == t_test[ i ] ) accuracy_cnt++; } cout << "accuracy_cnt: " << ( ( double)accuracy_cnt / (double)x_test.h ) << endl; } //////////////////////////////////////////////////////////////////////////////// 3.6.3 template < typename F > size_t CountEquals( const vArray< F >& L, const vArray< F >& R ) { size_t _ = 0; for ( size_t i = 0; i < L.n; i++ ) if ( L[ i ] == R[ i ] ) _++; return _; } void _3_6_3() { cout << "3.6.3 MNIST BATCH" << endl; auto w = get_data< double >(); auto x_test = w.at( "x_test" ); auto t_test = w.at( "t_test" )[ 
0 ]; auto network = init_network(); auto accuracy_cnt = 0; for ( size_t i = 0; i < x_test.h; i += 100 ) { auto y = predict( network, Part( x_test, i, 0, 100, x_test.w ) ); auto p = argmax( y ); accuracy_cnt += CountEquals( p, Part( t_test, i, 100 ) ); } cout << "accuracy_cnt: " << ( ( double)accuracy_cnt / (double)x_test.h ) << endl; } //////////////////////////////////////////////////////////////////////////////// Main template < typename F > void Main() { _3_2_4< F >(); _3_2_7< F >(); _3_3_2< F >(); _3_4_3< F >(); _3_5_1< F >(); _3_5_2< F >(); _3_5_3< F >(); _3_6_1(); _3_6_3(); } int main( int argc, char* argv[] ) { Main< double >(); }
05637d1d269fea0dfd61bc049bc3e1e5ff8ed640.cu
using namespace std; template < typename F > struct vArray { F* _; size_t n; vArray( F* _, size_t n ) : _( _ ) , n( n ) { } __host__ __device__ F& operator[]( size_t I ) const { return _[ I ]; } }; template < typename F > struct Array : vArray< F > { ~ Array() { cudaFree( vArray< F >::_ ); } static F* Malloc( size_t N ) { F* _; cudaMallocManaged( &_, N * sizeof( F ) ); return _; } Array( size_t n ) : vArray< F >( Malloc( n ), n ) { } }; template < typename F > struct vMatrix { F* _; size_t h; size_t w; size_t v; vMatrix( F* _, size_t h, size_t w, size_t v ) : _( _ ) , h( h ) , w( w ) , v( v ) { } __host__ __device__ F& operator()( size_t Y, size_t X ) const { return _[ Y * v + X ]; } vArray< F > operator[]( size_t I ) const { return vArray< F >( _ + I * v , w ); } }; #include <iostream> template < typename F > ostream& operator <<( ostream& S, const vMatrix< F >& P ) { for ( size_t y = 0; y < P.h; y++ ) { for ( size_t x = 0; x < P.w; x++ ) S << " " << P( y, x ); S << endl; } return S; } template < typename F > struct Matrix : vMatrix< F > { ~ Matrix() { cudaFree( vMatrix< F >::_ ); } static F* Malloc( size_t N ) { F* _; cudaMallocManaged( &_, N * sizeof( F ) ); return _; } Matrix( size_t h, size_t w ) : vMatrix< F >( Malloc( h * w ), h, w, w ) { } Matrix( const vMatrix< F >& _ ) : vMatrix< F >( Malloc( _.h * _.w ), _.h, _.w, _.w ) { for ( size_t y = 0; y < _.h; y++ ) for ( size_t x = 0; x < _.w; x++ ) (*this)( y, x ) = _( y, x ); } Matrix( const Matrix< F >& _ ) : Matrix< F >( (vMatrix< F >)_ ) { } }; #define UNITS( p, q ) ( ( p + q - 1 ) / q ) #define B_S 256 inline dim3 grid1D( size_t N ) { return dim3( UNITS( N, B_S ) ); } inline dim3 thread1D() { return dim3( B_S ); } #define B_S_H 32 #define B_S_W 32 inline dim3 grid2D( size_t H, size_t W ) { return dim3( UNITS( W, B_S_W ), UNITS( H, B_S_H ) ); } inline dim3 thread2D() { return dim3( B_S_W, B_S_H ); } #include <vector> template < typename F, int Y, int X > Matrix< F > MakeMatrix( initializer_list< F > args ) { vector< F > argsV = args; Matrix< F > _( Y, X ); for ( size_t y = 0; y < Y; y++ ) { for ( size_t x = 0; x < X; x++ ) { _( y, x ) = argsV[ y * X + x ]; } } return _; } //////////////////////////////////////////////////////////////////////////////// 3.2.4 template < typename F > __global__ void SIGMOID( vMatrix< F > V, vMatrix< F > P ) { auto y = (size_t)blockIdx.y * blockDim.y + threadIdx.y; if ( y >= V.h ) return; auto x = (size_t)blockIdx.x * blockDim.x + threadIdx.x; if ( x >= V.w ) return; V( y, x ) = 1 / ( 1 + exp( - P( y, x ) ) ); } template < typename F > Matrix< F > sigmoid( const vMatrix< F >& P ) { Matrix< F > v( P.h, P.w ); SIGMOID<<< grid2D( v.h, v.w ), thread2D() >>>( v, P ); cudaDeviceSynchronize(); return v; } template < typename F > void _3_2_4() { cout << "3.2.4 sigmoid" << endl; cout << sigmoid( MakeMatrix< F, 2, 5 >( { -1, -0.5, 0, 0.5, 1, -1, -0.5, 0, 0.5, 1 } ) ); } //////////////////////////////////////////////////////////////////////////////// 3.2.7 template < typename F > __global__ void RELU( vMatrix< F > V, vMatrix< F > P ) { auto y = (size_t)blockIdx.y * blockDim.y + threadIdx.y; if ( y >= V.h ) return; auto x = (size_t)blockIdx.x * blockDim.x + threadIdx.x; if ( x >= V.w ) return; V( y, x ) = max( F( 0 ), P( y, x ) ); } template < typename F > Matrix< F > ReLU( const vMatrix< F >& P ) { Matrix< F > v( P.h, P.w ); RELU<<< grid2D( v.h, v.w ), thread2D() >>>( v, P ); cudaDeviceSynchronize(); return v; } template < typename F > void _3_2_7() { cout << "3.2.7 ReLU" << endl; cout << ReLU( MakeMatrix< F, 2, 5 
>( { -1, -0.5, 0, 0.5, 1, -1, -0.5, 0, 0.5, 1 } ) ); } //////////////////////////////////////////////////////////////////////////////// 3.3.2 template < typename F > __global__ void DOT( vMatrix< F > V, vMatrix< F > L, vMatrix< F > R, size_t WH ) { auto y = (size_t)blockIdx.y * blockDim.y + threadIdx.y; if ( y >= V.h ) return; auto x = (size_t)blockIdx.x * blockDim.x + threadIdx.x; if ( x >= V.w ) return; auto lp = L._ + y * L.v; auto rp = R._ + x; F w = 0; for ( size_t _ = 0; _ < WH; _++ ) w += lp[ _ ] * rp[ _ * R.v ]; V( y, x ) = w; } template < typename F > Matrix< F > operator *( const vMatrix< F >& L, const vMatrix< F >& R ) { Matrix< F > v( L.h, R.w ); DOT<<< grid2D( v.h, v.w ), thread2D() >>>( v, L, R, L.w ); cudaDeviceSynchronize(); return v; } template < typename F > void _3_3_2() { cout << "3.3.2 dot operation" << endl; auto l = MakeMatrix< F, 2, 3 >( { 1, 2, 3, 4, 5, 6 } ); auto r = MakeMatrix< F, 3, 2 >( { 1, 2, 3, 4, 5, 6 } ); cout << l * r; } //////////////////////////////////////////////////////////////////////////////// 3.4.3 #include <map> template < typename F > map< string, Matrix< F > > init_network_3_4_3() { map< string, Matrix< F > > _; _.emplace( "W1", MakeMatrix< F, 2, 3 >( { 0.1, 0.3, 0.5, 0.2, 0.4, 0.6 } ) ); _.emplace( "b1", MakeMatrix< F, 1, 3 >( { 0.1, 0.2, 0.3 } ) ); _.emplace( "W2", MakeMatrix< F, 3, 2 >( { 0.1, 0.4, 0.2, 0.5, 0.3, 0.6 } ) ); _.emplace( "b2", MakeMatrix< F, 1, 2 >( { 0.1, 0.2 } ) ); _.emplace( "W3", MakeMatrix< F, 2, 2 >( { 0.1, 0.3, 0.2, 0.4 } ) ); _.emplace( "b3", MakeMatrix< F, 1, 2 >( { 0.1, 0.2 } ) ); return _; } template < typename F > Matrix< F > identify_function( const Matrix< F >& _ ) { return _; } template < typename F > __global__ void ADD( vMatrix< F > V, vMatrix< F > L, vMatrix< F > R ) { auto y = (size_t)blockIdx.y * blockDim.y + threadIdx.y; if ( y >= V.h ) return; auto x = (size_t)blockIdx.x * blockDim.x + threadIdx.x; if ( x >= V.w ) return; V( y, x ) = L( y, x ) + R( y, x ); } template < typename F > Matrix< F > operator +( const vMatrix< F >& L, const vMatrix< F >& R ) { Matrix< F > v( L.h, R.w ); ADD<<< grid2D( v.h, v.w ), thread2D() >>>( v, L, R ); cudaDeviceSynchronize(); return v; } template < typename F > Matrix< F > forward( map< string, Matrix< F > >& network, const vMatrix< F >& x ) { auto W1 = network.at( "W1" ); auto W2 = network.at( "W2" ); auto W3 = network.at( "W3" ); auto b1 = network.at( "b1" ); auto b2 = network.at( "b2" ); auto b3 = network.at( "b3" ); auto a1 = x * W1 + b1; auto z1 = sigmoid( a1 ); auto a2 = z1 * W2 + b2; auto z2 = sigmoid( a2 ); auto a3 = z2 * W3 + b3; auto y = identify_function( a3 ); return y; } template < typename F > void _3_4_3() { cout << "3.4.3 neural" << endl; auto network = init_network_3_4_3< F >(); auto x = MakeMatrix< F, 1, 2 >( { 1.0, 0.5 } ); auto y = forward( network, x ); cout << y; } //////////////////////////////////////////////////////////////////////////////// 3.5.1 template < typename F > __global__ void EXP( vMatrix< F > V, vMatrix< F > P ) { auto y = (size_t)blockIdx.y * blockDim.y + threadIdx.y; if ( y >= V.h ) return; auto x = (size_t)blockIdx.x * blockDim.x + threadIdx.x; if ( x >= V.w ) return; V( y, x ) = exp( P( y, x ) ); } template < typename F > Matrix< F > exp( const vMatrix< F >& P ) { Matrix< F > v( P.h, P.w ); EXP<<< grid2D( v.h, v.w ), thread2D() >>>( v, P ); cudaDeviceSynchronize(); return v; } template < typename F > F sum( const vMatrix< F >& P ) { F _ = 0; for ( size_t y = 0; y < P.h; y++ ) for ( size_t x = 0; x < P.w; x++ ) _ += P( y, x ); return 
_; } template < typename F > __global__ void DIV_INP( vMatrix< F > V, F P ) { auto y = (size_t)blockIdx.y * blockDim.y + threadIdx.y; if ( y >= V.h ) return; auto x = (size_t)blockIdx.x * blockDim.x + threadIdx.x; if ( x >= V.w ) return; V( y, x ) /= P; } template < typename F > void operator /=( const vMatrix< F >& L, F R ) { DIV_INP<<< grid2D( L.h, L.w ), thread2D() >>>( L, R ); cudaDeviceSynchronize(); } template < typename F > Matrix< F > softmax_primitive( const vMatrix< F >& p ) { auto v = exp( p ); v /= sum( v ); return v; } template < typename F > void _3_5_1() { cout << "3.5.1 softmax_primitive" << endl; cout << softmax_primitive( MakeMatrix< F, 1, 3 >( { 0.3, 2.9, 4.0 } ) ); } //////////////////////////////////////////////////////////////////////////////// 3.5.2 template < typename F > F max( const vMatrix< F >& P ) { F _ = P( 0, 0 ); for ( size_t y = 0; y < P.h; y++ ) for ( size_t x = 0; x < P.w; x++ ) if ( P( y, x ) > _ ) _ = P( y, x ); return _; } template < typename F > __global__ void SUB_C( vMatrix< F > V, vMatrix< F > L, F R ) { auto y = (size_t)blockIdx.y * blockDim.y + threadIdx.y; if ( y >= V.h ) return; auto x = (size_t)blockIdx.x * blockDim.x + threadIdx.x; if ( x >= V.w ) return; V( y, x ) = L( y, x ) - R; } template < typename F > Matrix< F > operator -( const vMatrix< F >& L, F R ) { Matrix< F > v( L.h, L.w ); SUB_C<<< grid2D( v.h, v.w ), thread2D() >>>( v, L, R ); cudaDeviceSynchronize(); return v; } template < typename F > Matrix< F > softmax( const vMatrix< F >& p ) { auto v = exp( p - max( p ) ); v /= sum( v ); return v; } template < typename F > void _3_5_2() { cout << "3.5.2 softmax" << endl; cout << softmax( MakeMatrix< F, 1, 3 >( { 1010, 1000, 990 } ) ); } //////////////////////////////////////////////////////////////////////////////// 3.5.3 template < typename F > void _3_5_3() { cout << "3.5.3 sum( softmax )" << endl; cout << sum( softmax( MakeMatrix< F, 1, 3 >( { 0.3, 2.9, 4.0 } ) ) ) << endl; } //////////////////////////////////////////////////////////////////////////////// 3.6.1 #include <fstream> template < typename F > map< string, Matrix< F > > get_data() { map< string, Matrix< F > > v; { ifstream ifs( "../train-images.idx3-ubyte" ); if ( ! ifs.is_open() ) throw "../train-images.idx3-ubyte"; ifs.ignore( 16 ); Matrix< F > w( 60000, 28 * 28 ); for ( size_t _ = 0; _ < w.h * w.w; _++ ) w._[ _ ] = ( (unsigned char)ifs.get() ) / 255.0; v.emplace( "x_train", w ); } { ifstream ifs( "../train-labels.idx1-ubyte" ); if ( ! ifs.is_open() ) throw "../train-labels.idx1-ubyte"; ifs.ignore( 8 ); Matrix< F > w( 1, 60000 ); for ( size_t _ = 0; _ < w.h * w.w; _++ ) w._[ _ ] = ifs.get(); v.emplace( "t_train", w ); } { ifstream ifs( "../t10k-images.idx3-ubyte" ); if ( ! ifs.is_open() ) throw "../t10k-images.idx3-ubyte"; ifs.ignore( 16 ); Matrix< F > w( 10000, 28 * 28 ); for ( size_t _ = 0; _ < w.h * w.w; _++ ) w._[ _ ] = ( (unsigned char)ifs.get() ) / 255.0; v.emplace( "x_test", w ); } { ifstream ifs( "../t10k-labels.idx1-ubyte" ); if ( ! ifs.is_open() ) throw "../t10k-labels.idx1-ubyte"; ifs.ignore( 8 ); Matrix< F > w( 1, 10000 ); for ( size_t _ = 0; _ < w.h * w.w; _++ ) w._[ _ ] = ifs.get(); v.emplace( "t_test", w ); } return v; } map< string, Matrix< double > > init_network() { map< string, Matrix< double > > v; ifstream ifs( "../sample_weight.bin" ); if ( ! 
ifs.is_open() ) throw "../sample_weight.bin"; { Matrix< double > w( 784, 50 ); ifs.read( (char*)w._, w.h * w.w * sizeof( double ) ); v.emplace( "W1", w ); } { Matrix< double > w( 50, 100 ); ifs.read( (char*)w._, w.h * w.w * sizeof( double ) ); v.emplace( "W2", w ); } { Matrix< double > w( 100, 10 ); ifs.read( (char*)w._, w.h * w.w * sizeof( double ) ); v.emplace( "W3", w ); } { Matrix< double > w( 1, 50 ); ifs.read( (char*)w._, w.h * w.w * sizeof( double ) ); v.emplace( "b1", w ); } { Matrix< double > w( 1, 100 ); ifs.read( (char*)w._, w.h * w.w * sizeof( double ) ); v.emplace( "b2", w ); } { Matrix< double > w( 1, 10 ); ifs.read( (char*)w._, w.h * w.w * sizeof( double ) ); v.emplace( "b3", w ); } return v; } template < typename F > __global__ void ADD( vMatrix< F > V, vMatrix< F > L, vArray< F > R ) { auto y = (size_t)blockIdx.y * blockDim.y + threadIdx.y; if ( y >= V.h ) return; auto x = (size_t)blockIdx.x * blockDim.x + threadIdx.x; if ( x >= V.w ) return; V( y, x ) = L( y, x ) + R[ x ]; } template < typename F > Matrix< F > operator +( const vMatrix< F >& L, const vArray< F >& R ) { Matrix< F > v( L.h, L.w ); ADD<<< grid2D( v.h, v.w ), thread2D() >>>( v, L, R ); cudaDeviceSynchronize(); return v; } template < typename F > Matrix< F > predict( map< string, Matrix< F > >& network, const vMatrix< F >& x ) { Matrix< F >& W1 = network.at( "W1" ); Matrix< F >& W2 = network.at( "W2" ); Matrix< F >& W3 = network.at( "W3" ); auto b1 = network.at( "b1" )[ 0 ]; auto b2 = network.at( "b2" )[ 0 ]; auto b3 = network.at( "b3" )[ 0 ]; auto a1 = x * W1 + b1; auto z1 = sigmoid( a1 ); auto a2 = z1 * W2 + b2; auto z2 = sigmoid( a2 ); auto a3 = z2 * W3 + b3; auto y = softmax( a3 ); return y; } template < typename F > F argmax( const vArray< F >& P ) { size_t _ = 0; for ( size_t i = 1; i < P.n; i++ ) if ( P[ i ] > P[ _ ] ) _ = i; return F( _ ); } template < typename F > Array< F > argmax( const vMatrix< F >& P ) { Array< F > _( P.h ); for ( size_t y = 0; y < P.h; y++ ) _[ y ] = argmax( P[ y ] ); return _; } template < typename F > vArray< F > Part( const vArray< F >& _, size_t O, size_t N ) { return vArray< F >( _._ + O , N ); } template < typename F > vMatrix< F > Part( const vMatrix< F >& _, size_t Y, size_t X, size_t H, size_t W ) { return vMatrix< F >( _._ + Y * _.v + X , H , W , _.v ); } void _3_6_1() { cout << "3.6.1 MNIST" << endl; auto w = get_data< double >(); auto x_test = w.at( "x_test" ); auto t_test = w.at( "t_test" )[ 0 ]; auto network = init_network(); auto accuracy_cnt = 0; for ( size_t i = 0; i < x_test.h; i++ ) { auto y = predict( network, Part( x_test, i, 0, 1, x_test.w ) ); auto p = argmax( y[ 0 ] ); if ( p == t_test[ i ] ) accuracy_cnt++; } cout << "accuracy_cnt: " << ( ( double)accuracy_cnt / (double)x_test.h ) << endl; } //////////////////////////////////////////////////////////////////////////////// 3.6.3 template < typename F > size_t CountEquals( const vArray< F >& L, const vArray< F >& R ) { size_t _ = 0; for ( size_t i = 0; i < L.n; i++ ) if ( L[ i ] == R[ i ] ) _++; return _; } void _3_6_3() { cout << "3.6.3 MNIST BATCH" << endl; auto w = get_data< double >(); auto x_test = w.at( "x_test" ); auto t_test = w.at( "t_test" )[ 0 ]; auto network = init_network(); auto accuracy_cnt = 0; for ( size_t i = 0; i < x_test.h; i += 100 ) { auto y = predict( network, Part( x_test, i, 0, 100, x_test.w ) ); auto p = argmax( y ); accuracy_cnt += CountEquals( p, Part( t_test, i, 100 ) ); } cout << "accuracy_cnt: " << ( ( double)accuracy_cnt / (double)x_test.h ) << endl; } 
//////////////////////////////////////////////////////////////////////////////// Main template < typename F > void Main() { _3_2_4< F >(); _3_2_7< F >(); _3_3_2< F >(); _3_4_3< F >(); _3_5_1< F >(); _3_5_2< F >(); _3_5_3< F >(); _3_6_1(); _3_6_3(); } int main( int argc, char* argv[] ) { Main< double >(); }
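The kernels in the file above (DOT, ADD, EXP, SUB_C, DIV_INP) are all launched as <<< grid2D( h, w ), thread2D() >>>, but those two helpers are defined earlier in the file and are not part of this excerpt. A minimal sketch of what such helpers presumably look like, assuming a fixed 16x16 thread block and one thread per matrix element (the _sketch names are hypothetical, not the file's actual identifiers):

#include <cuda_runtime.h>

// Hypothetical reconstruction: the kernels index columns with blockIdx.x/threadIdx.x
// and rows with blockIdx.y/threadIdx.y, and bounds-check against V.w / V.h, so the
// grid only has to be large enough to cover an h-by-w matrix.
inline dim3 thread2D_sketch() { return dim3( 16, 16 ); }
inline dim3 grid2D_sketch( size_t h, size_t w )
{
    return dim3( (unsigned int)( ( w + 15 ) / 16 ),
                 (unsigned int)( ( h + 15 ) / 16 ) );
}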
65b889a6b433ba0867ab630d0cb29f2c86242d51.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <time.h> #include <hip/hip_runtime.h> #include "gpu_err_check.h" // Print device properties void printDevProp(hipDeviceProp_t devProp) { printf("Name: %s\n\n", devProp.name); printf("Major revision number: %d\n", devProp.major); printf("Minor revision number: %d\n", devProp.minor); printf("Clock rate: %d kHz\n\n", devProp.clockRate); printf("Total global memory: %lu bytes\n", devProp.totalGlobalMem); printf("Total shared memory per block: %lu bytes\n", devProp.sharedMemPerBlock); printf("Total shared memory per SM: %lu bytes\n", devProp.sharedMemPerMultiprocessor); printf("Total registers per block: %d\n", devProp.regsPerBlock); printf("Total registers per SM: %d\n", devProp.regsPerMultiprocessor); printf("Maximum memory pitch: %lu bytes\n", devProp.memPitch); printf("\t(Maximum pitch in bytes\nallowed by memory copies)\n"); printf("Total constant memory: %lu bytes\n", devProp.totalConstMem); printf("L2 Cache Size: %d bytes\n", devProp.l2CacheSize); printf("Supports caching locals in L1: %s\n", (devProp.localL1CacheSupported ? "Yes" : "No")); printf("Supports caching globals in L1: %s\n", (devProp.globalL1CacheSupported ? "Yes" : "No")); printf("Memory Bus Width: %d bits\n", devProp.memoryBusWidth); printf("Memory Clock Rate: %d kHz\n\n", devProp.memoryClockRate); printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount); printf("Warp size: %d threads\n", devProp.warpSize); printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock); for (int i = 0; i < 3; ++i) { printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]); } for (int i = 0; i < 3; ++i) { printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]); } printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No")); printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No")); return; } void get_device_info(int dev_number){ hipDeviceProp_t d_info; cudaSafeCall( hipGetDeviceProperties(&d_info, dev_number) ); printDevProp(d_info); } int main(){ // Get device info get_device_info(0); return 0; }
65b889a6b433ba0867ab630d0cb29f2c86242d51.cu
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <time.h> #include <cuda.h> #include "gpu_err_check.h" // Print device properties void printDevProp(cudaDeviceProp devProp) { printf("Name: %s\n\n", devProp.name); printf("Major revision number: %d\n", devProp.major); printf("Minor revision number: %d\n", devProp.minor); printf("Clock rate: %d kHz\n\n", devProp.clockRate); printf("Total global memory: %lu bytes\n", devProp.totalGlobalMem); printf("Total shared memory per block: %lu bytes\n", devProp.sharedMemPerBlock); printf("Total shared memory per SM: %lu bytes\n", devProp.sharedMemPerMultiprocessor); printf("Total registers per block: %d\n", devProp.regsPerBlock); printf("Total registers per SM: %d\n", devProp.regsPerMultiprocessor); printf("Maximum memory pitch: %lu bytes\n", devProp.memPitch); printf("\t(Maximum pitch in bytes\nallowed by memory copies)\n"); printf("Total constant memory: %lu bytes\n", devProp.totalConstMem); printf("L2 Cache Size: %d bytes\n", devProp.l2CacheSize); printf("Supports caching locals in L1: %s\n", (devProp.localL1CacheSupported ? "Yes" : "No")); printf("Supports caching globals in L1: %s\n", (devProp.globalL1CacheSupported ? "Yes" : "No")); printf("Memory Bus Width: %d bits\n", devProp.memoryBusWidth); printf("Memory Clock Rate: %d kHz\n\n", devProp.memoryClockRate); printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount); printf("Warp size: %d threads\n", devProp.warpSize); printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock); for (int i = 0; i < 3; ++i) { printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]); } for (int i = 0; i < 3; ++i) { printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]); } printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No")); printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No")); return; } void get_device_info(int dev_number){ cudaDeviceProp d_info; cudaSafeCall( cudaGetDeviceProperties(&d_info, dev_number) ); printDevProp(d_info); } int main(){ // Get device info get_device_info(0); return 0; }
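Both listings above query device 0 only. A small companion example, using only standard CUDA runtime calls, that enumerates every visible device and checks the API result before printing (a sketch, independent of gpu_err_check.h):

#include <stdio.h>
#include <cuda_runtime.h>

int main( void )
{
    int count = 0;
    cudaError_t err = cudaGetDeviceCount( &count );
    if ( err != cudaSuccess ) {
        fprintf( stderr, "cudaGetDeviceCount failed: %s\n", cudaGetErrorString( err ) );
        return 1;
    }
    for ( int d = 0; d < count; ++d ) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties( &prop, d );
        printf( "Device %d: %s (SM %d.%d, %d multiprocessors)\n",
                d, prop.name, prop.major, prop.minor, prop.multiProcessorCount );
    }
    return 0;
}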
49987bac55bd94b744023586fb3e128586cc7f2a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Axpy Layer * * Created on: May 1, 2017 * Author: hujie */ #include "caffe/layers/axpy_layer.hpp" namespace caffe { template <typename Dtype> __global__ void AxpyForward(const int count, const int spatial_dim, const Dtype* scale_data, const Dtype* x_data, const Dtype* y_data, Dtype* out_data) { CUDA_KERNEL_LOOP(index, count) { out_data[index] = scale_data[index / spatial_dim] * x_data[index] + y_data[index]; } } template <typename Dtype> void AxpyLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* scale_data = bottom[0]->gpu_data(); const Dtype* x_data = bottom[1]->gpu_data(); const Dtype* y_data = bottom[2]->gpu_data(); Dtype* out_data = top[0]->mutable_gpu_data(); const int count = bottom[1]->count(); hipLaunchKernelGGL(( AxpyForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom[1]->count(2), scale_data, x_data, y_data, out_data); } template <typename Dtype> __global__ void AxpyBackwardScale(const int outer_num, const int spatial_dim, const Dtype* x_data, const Dtype* top_diff, Dtype* scale_diff) { __shared__ Dtype buffer[CAFFE_CUDA_NUM_THREADS]; unsigned int tid = threadIdx.x; buffer[tid] = 0; __syncthreads(); for (int j = tid; j < spatial_dim; j += blockDim.x) { int offset = blockIdx.x * spatial_dim + j; buffer[tid] += top_diff[offset] * x_data[offset]; } __syncthreads(); for (int i = blockDim.x / 2; i > 0; i >>= 1) { if (tid < i) { buffer[threadIdx.x] += buffer[threadIdx.x + i]; } __syncthreads(); } if (tid == 0) { scale_diff[blockIdx.x] = buffer[0]; } } template <typename Dtype> __global__ void AxpyBackwardX(const int count, const int spatial_dim, const Dtype* scale_data, const Dtype* top_diff, Dtype* out) { CUDA_KERNEL_LOOP(index, count) { out[index] = scale_data[index / spatial_dim] * top_diff[index]; } } template <typename Dtype> void AxpyLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const int count = top[0]->count(); const Dtype* top_diff = top[0]->gpu_diff(); if (propagate_down[0]) { int outer_num = bottom[1]->count(0, 2); hipLaunchKernelGGL(( AxpyBackwardScale<Dtype>), dim3(outer_num), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num, bottom[1]->count(2), bottom[1]->gpu_data(), top_diff, bottom[0]->mutable_gpu_diff()); } if (propagate_down[1]) { hipLaunchKernelGGL(( AxpyBackwardX<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top[0]->count(2), bottom[0]->gpu_data(), top_diff, bottom[1]->mutable_gpu_diff()); } if (propagate_down[2]) { caffe_copy(count, top_diff, bottom[2]->mutable_gpu_diff()); } CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(AxpyLayer); } // namespace caffe
49987bac55bd94b744023586fb3e128586cc7f2a.cu
/* * Axpy Layer * * Created on: May 1, 2017 * Author: hujie */ #include "caffe/layers/axpy_layer.hpp" namespace caffe { template <typename Dtype> __global__ void AxpyForward(const int count, const int spatial_dim, const Dtype* scale_data, const Dtype* x_data, const Dtype* y_data, Dtype* out_data) { CUDA_KERNEL_LOOP(index, count) { out_data[index] = scale_data[index / spatial_dim] * x_data[index] + y_data[index]; } } template <typename Dtype> void AxpyLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* scale_data = bottom[0]->gpu_data(); const Dtype* x_data = bottom[1]->gpu_data(); const Dtype* y_data = bottom[2]->gpu_data(); Dtype* out_data = top[0]->mutable_gpu_data(); const int count = bottom[1]->count(); AxpyForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom[1]->count(2), scale_data, x_data, y_data, out_data); } template <typename Dtype> __global__ void AxpyBackwardScale(const int outer_num, const int spatial_dim, const Dtype* x_data, const Dtype* top_diff, Dtype* scale_diff) { __shared__ Dtype buffer[CAFFE_CUDA_NUM_THREADS]; unsigned int tid = threadIdx.x; buffer[tid] = 0; __syncthreads(); for (int j = tid; j < spatial_dim; j += blockDim.x) { int offset = blockIdx.x * spatial_dim + j; buffer[tid] += top_diff[offset] * x_data[offset]; } __syncthreads(); for (int i = blockDim.x / 2; i > 0; i >>= 1) { if (tid < i) { buffer[threadIdx.x] += buffer[threadIdx.x + i]; } __syncthreads(); } if (tid == 0) { scale_diff[blockIdx.x] = buffer[0]; } } template <typename Dtype> __global__ void AxpyBackwardX(const int count, const int spatial_dim, const Dtype* scale_data, const Dtype* top_diff, Dtype* out) { CUDA_KERNEL_LOOP(index, count) { out[index] = scale_data[index / spatial_dim] * top_diff[index]; } } template <typename Dtype> void AxpyLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const int count = top[0]->count(); const Dtype* top_diff = top[0]->gpu_diff(); if (propagate_down[0]) { int outer_num = bottom[1]->count(0, 2); AxpyBackwardScale<Dtype><<<outer_num, CAFFE_CUDA_NUM_THREADS>>>( outer_num, bottom[1]->count(2), bottom[1]->gpu_data(), top_diff, bottom[0]->mutable_gpu_diff()); } if (propagate_down[1]) { AxpyBackwardX<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top[0]->count(2), bottom[0]->gpu_data(), top_diff, bottom[1]->mutable_gpu_diff()); } if (propagate_down[2]) { caffe_copy(count, top_diff, bottom[2]->mutable_gpu_diff()); } CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(AxpyLayer); } // namespace caffe
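AxpyForward and AxpyBackwardX iterate with Caffe's CUDA_KERNEL_LOOP macro, which expands to a grid-stride loop. For readers outside Caffe, here is a self-contained kernel with the equivalent loop spelled out; the per-element formula matches AxpyForward, and the kernel name is made up for illustration:

__global__ void axpy_forward_standalone(int count, int spatial_dim,
                                        const float* scale, const float* x,
                                        const float* y, float* out)
{
    // Grid-stride loop: each thread handles indices tid, tid + stride, tid + 2*stride, ...
    for (int i = blockIdx.x * blockDim.x + threadIdx.x;
         i < count;
         i += blockDim.x * gridDim.x) {
        out[i] = scale[i / spatial_dim] * x[i] + y[i];
    }
}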
dcdc2fcad89a3cce02f23b4595179c19873e52db.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "DecodeValues.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *superposition = NULL; hipMalloc(&superposition, XSIZE*YSIZE); int symbolSize = XSIZE*YSIZE; float *output = NULL; hipMalloc(&output, XSIZE*YSIZE); float *reliability = NULL; hipMalloc(&reliability, XSIZE*YSIZE); int numOfValues = 1; int squaredMode = 1; float *dirX = NULL; hipMalloc(&dirX, XSIZE*YSIZE); float *dirY = NULL; hipMalloc(&dirY, XSIZE*YSIZE); float *negDirX = NULL; hipMalloc(&negDirX, XSIZE*YSIZE); float *negDirY = NULL; hipMalloc(&negDirY, XSIZE*YSIZE); float *originX = NULL; hipMalloc(&originX, XSIZE*YSIZE); float *originY = NULL; hipMalloc(&originY, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( DecodeValues), dim3(gridBlock),dim3(threadBlock), 0, 0, superposition,symbolSize,output,reliability,numOfValues,squaredMode,dirX,dirY,negDirX,negDirY,originX,originY); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( DecodeValues), dim3(gridBlock),dim3(threadBlock), 0, 0, superposition,symbolSize,output,reliability,numOfValues,squaredMode,dirX,dirY,negDirX,negDirY,originX,originY); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( DecodeValues), dim3(gridBlock),dim3(threadBlock), 0, 0, superposition,symbolSize,output,reliability,numOfValues,squaredMode,dirX,dirY,negDirX,negDirY,originX,originY); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
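One caveat about this harness (and its CUDA twin below): the steady_clock region launches the kernel 1000 times without synchronizing, so it largely measures launch/queueing overhead rather than kernel runtime. A hedged sketch of event-based timing that waits for the GPU work to finish before reading the clock (dummy_kernel stands in for DecodeValues, whose source is not included in this dump):

#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy_kernel() { }   // placeholder for the benchmarked kernel

int main()
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    for (int i = 0; i < 1000; ++i) dummy_kernel<<<1, 256>>>();
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);          // wait until all launched work has completed
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    std::printf("1000 launches took %.3f ms (kernel execution included)\n", ms);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}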
dcdc2fcad89a3cce02f23b4595179c19873e52db.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "DecodeValues.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *superposition = NULL; cudaMalloc(&superposition, XSIZE*YSIZE); int symbolSize = XSIZE*YSIZE; float *output = NULL; cudaMalloc(&output, XSIZE*YSIZE); float *reliability = NULL; cudaMalloc(&reliability, XSIZE*YSIZE); int numOfValues = 1; int squaredMode = 1; float *dirX = NULL; cudaMalloc(&dirX, XSIZE*YSIZE); float *dirY = NULL; cudaMalloc(&dirY, XSIZE*YSIZE); float *negDirX = NULL; cudaMalloc(&negDirX, XSIZE*YSIZE); float *negDirY = NULL; cudaMalloc(&negDirY, XSIZE*YSIZE); float *originX = NULL; cudaMalloc(&originX, XSIZE*YSIZE); float *originY = NULL; cudaMalloc(&originY, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); DecodeValues<<<gridBlock,threadBlock>>>(superposition,symbolSize,output,reliability,numOfValues,squaredMode,dirX,dirY,negDirX,negDirY,originX,originY); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { DecodeValues<<<gridBlock,threadBlock>>>(superposition,symbolSize,output,reliability,numOfValues,squaredMode,dirX,dirY,negDirX,negDirY,originX,originY); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { DecodeValues<<<gridBlock,threadBlock>>>(superposition,symbolSize,output,reliability,numOfValues,squaredMode,dirX,dirY,negDirX,negDirY,originX,originY); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
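Note also that every buffer in this benchmark is allocated as cudaMalloc(&ptr, XSIZE*YSIZE), i.e. XSIZE*YSIZE bytes for arrays that are indexed as floats. A hedged sketch of the sizing and error-check pattern that would give XSIZE*YSIZE float elements (alloc_floats is a hypothetical helper, not part of the harness):

#include <cstdio>
#include <cuda_runtime.h>

float* alloc_floats(size_t count)
{
    float* p = nullptr;
    // Byte size must be element count times sizeof(float).
    cudaError_t err = cudaMalloc(&p, count * sizeof(float));
    if (err != cudaSuccess) {
        std::fprintf(stderr, "cudaMalloc of %zu floats failed: %s\n",
                     count, cudaGetErrorString(err));
        return nullptr;
    }
    return p;
}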
9a5162b47f8fe33b6ae83ce85fe124c349bdc671.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <math.h> //----------------------------------------------------------------------------- // GpuConstantsPackage: a struct to hold many constants (including pointers // to allocated memory on the device) that can be // uploaded all at once. Placing this in the "constants // cache" is a convenient and performant way of handling // constant information on the GPU. //----------------------------------------------------------------------------- struct GpuConstantsPackage { int nparticle; int* partType; float* partX; float* partY; float* partZ; float* partQ; float* Etot; }; typedef struct GpuConstantsPackage cribSheet; // This device constant is available to all functions in this CUDA unit __device__ __constant__ cribSheet cSh; //----------------------------------------------------------------------------- // GpuMirroredInt: a struct holding mirrored int data on both the CPU and the // GPU. Functions below will operate on this struct // (because this isn't a workshop on C++) //----------------------------------------------------------------------------- struct GpuMirroredInt { int len; // Length of the array (again, this is not a C++ course) int IsPinned; // "Pinned" memory is best for Host <= => GPU transfers. // In fact, if non-pinned memory is transferred to the // GPU from the host, a temporary allocation of pinned // memory will be created and then destroyed. Pinned // memory is not host-pageable, but the only performance // implication is that creating lots of pinned memory // may make it harder for the host OS to manage large // memory jobs. int* HostData; // Pointer to allocated memory on the host int* DevcData; // Pointer to allocated memory on the GPU. Note that the // host can know what the address of memory on the GPU // is, but it cannot simply de-reference that pointer // in host code. }; typedef struct GpuMirroredInt gpuInt; //----------------------------------------------------------------------------- // GpuMirroredInt: a struct holding mirrored fp32 data on both the CPU and the // GPU. Functions below will operate on this struct // (because this isn't a workshop on C++) //----------------------------------------------------------------------------- struct GpuMirroredFloat { int len; // Length of the array (again, this is not a C++ course) int IsPinned; // "Pinned" memory is best for Host <= => GPU transfers. // In fact, if non-pinned memory is transferred to the // GPU from the host, a temporary allocation of pinned // memory will be created and then destroyed. Pinned // memory is not host-pageable, but the only performance // implication is that creating lots of pinned memory // may make it harder for the host OS to manage large // memory jobs. float* HostData; // Pointer to allocated memory on the host float* DevcData; // Pointer to allocated memory on the GPU. Note that the // host can know what the address of memory on the GPU // is, but it cannot simply de-reference that pointer // in host code. }; typedef struct GpuMirroredFloat gpuFloat; //----------------------------------------------------------------------------- // ParticleSimulator: run a rudimentary simulation of particles //----------------------------------------------------------------------------- __global__ void ParticleSimulator() { // Loop over all particles and compute the electrostatic potential. 
// Each thread will accumulate its own portion of the potential, // then pool the results at the end. int tidx = threadIdx.x; float qq = 0.0; while (tidx < cSh.nparticle) { // Still the naive way, to show how slow it is int i; for (i = 0; i < tidx; i++) { float dx = cSh.partX[tidx] - cSh.partX[i]; float dy = cSh.partY[tidx] - cSh.partY[i]; float dz = cSh.partZ[tidx] - cSh.partZ[i]; float r = sqrt(dx*dx + dy*dy + dz*dz); qq += cSh.partQ[tidx] * cSh.partQ[i] / r; } // Increment counter tidx += blockDim.x; } // Accumulate energy atomicAdd(&cSh.Etot[0], qq); } //----------------------------------------------------------------------------- // CreateGpuInt: constructor function for allocating memory in a gpuInt // instance. // // Arguments: // len: the length of array to allocate // pin: flag to have the memory pinned (non-pageable on the host side // for optimal transfer speed to the device) //----------------------------------------------------------------------------- gpuInt CreateGpuInt(int len, int pin) { gpuInt G; G.len = len; G.IsPinned = pin; // Now that the official length is recorded, upgrade the real length // to the next convenient multiple of 128, so as to always allocate // GPU memory in 512-byte blocks. This is for alignment purposes, // and keeping host to device transfers in line. len = ((len + 127) / 128) * 128; if (pin == 1) { hipHostMalloc((void **)&G.HostData, len * sizeof(int), hipHostMallocMapped); } else { G.HostData = (int*)malloc(len * sizeof(int)); } hipMalloc((void **)&G.DevcData, len * sizeof(int)); memset(G.HostData, 0, len * sizeof(int)); hipMemset((void *)G.DevcData, 0, len * sizeof(int)); return G; } //----------------------------------------------------------------------------- // DestroyGpuInt: destructor function for freeing memory in a gpuInt // instance. //----------------------------------------------------------------------------- void DestroyGpuInt(gpuInt *G) { if (G->IsPinned == 1) { hipHostFree(G->HostData); } else { free(G->HostData); } hipFree(G->DevcData); } //----------------------------------------------------------------------------- // UploadGpuInt: upload an integer array from the host to the device. //----------------------------------------------------------------------------- void UploadGpuInt(gpuInt *G) { hipMemcpy(G->DevcData, G->HostData, G->len * sizeof(int), hipMemcpyHostToDevice); } //----------------------------------------------------------------------------- // DownloadGpuInt: download an integer array from the host to the device. //----------------------------------------------------------------------------- void DownloadGpuInt(gpuInt *G) { hipMemcpy(G->HostData, G->DevcData, G->len * sizeof(int), hipMemcpyHostToDevice); } //----------------------------------------------------------------------------- // CreateGpuFloat: constructor function for allocating memory in a gpuFloat // instance. // // Arguments: // len: the length of array to allocate // pin: flag to have the memory pinned (non-pageable on the host side // for optimal transfer speed ot the device) //----------------------------------------------------------------------------- gpuFloat CreateGpuFloat(int len, int pin) { gpuFloat G; G.len = len; G.IsPinned = pin; // Now that the official length is recorded, upgrade the real length // to the next convenient multiple of 128, so as to always allocate // GPU memory in 512-byte blocks. This is for alignment purposes, // and keeping host to device transfers in line. 
len = ((len + 127) / 128) * 128; if (pin == 1) { hipHostMalloc((void **)&G.HostData, len * sizeof(float), hipHostMallocMapped); } else { G.HostData = (float*)malloc(len * sizeof(float)); } hipMalloc((void **)&G.DevcData, len * sizeof(float)); memset(G.HostData, 0, len * sizeof(float)); hipMemset((void *)G.DevcData, 0, len * sizeof(float)); return G; } //----------------------------------------------------------------------------- // DestroyGpuFloat: destructor function for freeing memory in a gpuFloat // instance. //----------------------------------------------------------------------------- void DestroyGpuFloat(gpuFloat *G) { if (G->IsPinned == 1) { hipHostFree(G->HostData); } else { free(G->HostData); } hipFree(G->DevcData); } //----------------------------------------------------------------------------- // UploadGpuFloat: upload an float array from the host to the device. //----------------------------------------------------------------------------- void UploadGpuFloat(gpuFloat *G) { hipMemcpy(G->DevcData, G->HostData, G->len * sizeof(float), hipMemcpyHostToDevice); } //----------------------------------------------------------------------------- // DownloadGpuFloat: download an float array from the host to the device. //----------------------------------------------------------------------------- void DownloadGpuFloat(gpuFloat *G) { hipMemcpy(G->HostData, G->DevcData, G->len * sizeof(float), hipMemcpyHostToDevice); } //----------------------------------------------------------------------------- // main //----------------------------------------------------------------------------- int main() { int i, np; gpuInt particleTypes; gpuFloat particleXcoord, particleYcoord, particleZcoord, particleCharge; gpuFloat etot; // Create a small array of particles and populate it particleTypes = CreateGpuInt(100000, 1); particleXcoord = CreateGpuFloat(100000, 1); particleYcoord = CreateGpuFloat(100000, 1); particleZcoord = CreateGpuFloat(100000, 1); particleCharge = CreateGpuFloat(100000, 1); // Allocate and initialize the total energy // accumulator on the host and on the device. etot = CreateGpuFloat(1, 1); etot.HostData[0] = 0.0; UploadGpuFloat(&etot); // Initialize random number generator. srand() SEEDS the generator, // thereafter each call to rand() will return a different number. // This is a reeally bad generator (much better methods with longer // periods before they start looping back over the same sequence are // available). srand(62052); // Place many, many particles np = 97913; for (i = 0; i < np; i++) { // Integer truncation would happen anyway, I'm just making it explicit particleTypes.HostData[i] = (int)(8 * rand()); // Create some random coordinates (double-to-float conversion // is happening here. On the GPU this can have performance // impact, so keep an eye on the data types at all times! particleXcoord.HostData[i] = 200.0 * (double)rand() / (double)RAND_MAX; particleYcoord.HostData[i] = 200.0 * (double)rand() / (double)RAND_MAX; particleZcoord.HostData[i] = 200.0 * (double)rand() / (double)RAND_MAX; particleCharge.HostData[i] = 0.5 - (double)rand() / (double)RAND_MAX; } // Stage critical constants--see cribSheet struct instance cSh above. 
cribSheet cnstage; cnstage.nparticle = np; cnstage.partX = particleXcoord.DevcData; cnstage.partY = particleYcoord.DevcData; cnstage.partZ = particleZcoord.DevcData; cnstage.partQ = particleCharge.DevcData; cnstage.Etot = etot.DevcData; // Upload all data to the device UploadGpuInt(&particleTypes); UploadGpuFloat(&particleXcoord); UploadGpuFloat(&particleYcoord); UploadGpuFloat(&particleZcoord); UploadGpuFloat(&particleCharge); // Upload the constants to the constants cache hipMemcpyToSymbol(cSh, &cnstage, sizeof(cribSheet)); // Launch the kernel (upgraded thread count) hipLaunchKernelGGL(( ParticleSimulator), dim3(1), dim3(1024), 0, 0, ); // Download the total energy DownloadGpuFloat(&etot); printf("Total energy = %10.4f\n", etot.HostData[0]); // Device synchronization hipDeviceSynchronize(); return 0; }
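ParticleSimulator above has every thread call atomicAdd on the single Etot accumulator. Since the launch uses one block of 1024 threads, the usual alternative is a shared-memory reduction with one atomicAdd per block; a hedged sketch of that pattern (same idea, not the file's code, and it assumes blockDim.x is a power of two such as the 1024 used here):

__global__ void reduce_energy_sketch(const float* per_thread_energy, int n, float* Etot)
{
    __shared__ float partial[1024];
    int tid = threadIdx.x;
    int idx = blockIdx.x * blockDim.x + tid;
    partial[tid] = (idx < n) ? per_thread_energy[idx] : 0.0f;
    __syncthreads();
    // Tree reduction within the block.
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (tid < stride) partial[tid] += partial[tid + stride];
        __syncthreads();
    }
    if (tid == 0) atomicAdd(Etot, partial[0]);   // one atomic per block instead of one per thread
}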
9a5162b47f8fe33b6ae83ce85fe124c349bdc671.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <math.h> //----------------------------------------------------------------------------- // GpuConstantsPackage: a struct to hold many constants (including pointers // to allocated memory on the device) that can be // uploaded all at once. Placing this in the "constants // cache" is a convenient and performant way of handling // constant information on the GPU. //----------------------------------------------------------------------------- struct GpuConstantsPackage { int nparticle; int* partType; float* partX; float* partY; float* partZ; float* partQ; float* Etot; }; typedef struct GpuConstantsPackage cribSheet; // This device constant is available to all functions in this CUDA unit __device__ __constant__ cribSheet cSh; //----------------------------------------------------------------------------- // GpuMirroredInt: a struct holding mirrored int data on both the CPU and the // GPU. Functions below will operate on this struct // (because this isn't a workshop on C++) //----------------------------------------------------------------------------- struct GpuMirroredInt { int len; // Length of the array (again, this is not a C++ course) int IsPinned; // "Pinned" memory is best for Host <= => GPU transfers. // In fact, if non-pinned memory is transferred to the // GPU from the host, a temporary allocation of pinned // memory will be created and then destroyed. Pinned // memory is not host-pageable, but the only performance // implication is that creating lots of pinned memory // may make it harder for the host OS to manage large // memory jobs. int* HostData; // Pointer to allocated memory on the host int* DevcData; // Pointer to allocated memory on the GPU. Note that the // host can know what the address of memory on the GPU // is, but it cannot simply de-reference that pointer // in host code. }; typedef struct GpuMirroredInt gpuInt; //----------------------------------------------------------------------------- // GpuMirroredInt: a struct holding mirrored fp32 data on both the CPU and the // GPU. Functions below will operate on this struct // (because this isn't a workshop on C++) //----------------------------------------------------------------------------- struct GpuMirroredFloat { int len; // Length of the array (again, this is not a C++ course) int IsPinned; // "Pinned" memory is best for Host <= => GPU transfers. // In fact, if non-pinned memory is transferred to the // GPU from the host, a temporary allocation of pinned // memory will be created and then destroyed. Pinned // memory is not host-pageable, but the only performance // implication is that creating lots of pinned memory // may make it harder for the host OS to manage large // memory jobs. float* HostData; // Pointer to allocated memory on the host float* DevcData; // Pointer to allocated memory on the GPU. Note that the // host can know what the address of memory on the GPU // is, but it cannot simply de-reference that pointer // in host code. }; typedef struct GpuMirroredFloat gpuFloat; //----------------------------------------------------------------------------- // ParticleSimulator: run a rudimentary simulation of particles //----------------------------------------------------------------------------- __global__ void ParticleSimulator() { // Loop over all particles and compute the electrostatic potential. // Each thread will accumulate its own portion of the potential, // then pool the results at the end. 
int tidx = threadIdx.x; float qq = 0.0; while (tidx < cSh.nparticle) { // Still the naive way, to show how slow it is int i; for (i = 0; i < tidx; i++) { float dx = cSh.partX[tidx] - cSh.partX[i]; float dy = cSh.partY[tidx] - cSh.partY[i]; float dz = cSh.partZ[tidx] - cSh.partZ[i]; float r = sqrt(dx*dx + dy*dy + dz*dz); qq += cSh.partQ[tidx] * cSh.partQ[i] / r; } // Increment counter tidx += blockDim.x; } // Accumulate energy atomicAdd(&cSh.Etot[0], qq); } //----------------------------------------------------------------------------- // CreateGpuInt: constructor function for allocating memory in a gpuInt // instance. // // Arguments: // len: the length of array to allocate // pin: flag to have the memory pinned (non-pageable on the host side // for optimal transfer speed to the device) //----------------------------------------------------------------------------- gpuInt CreateGpuInt(int len, int pin) { gpuInt G; G.len = len; G.IsPinned = pin; // Now that the official length is recorded, upgrade the real length // to the next convenient multiple of 128, so as to always allocate // GPU memory in 512-byte blocks. This is for alignment purposes, // and keeping host to device transfers in line. len = ((len + 127) / 128) * 128; if (pin == 1) { cudaHostAlloc((void **)&G.HostData, len * sizeof(int), cudaHostAllocMapped); } else { G.HostData = (int*)malloc(len * sizeof(int)); } cudaMalloc((void **)&G.DevcData, len * sizeof(int)); memset(G.HostData, 0, len * sizeof(int)); cudaMemset((void *)G.DevcData, 0, len * sizeof(int)); return G; } //----------------------------------------------------------------------------- // DestroyGpuInt: destructor function for freeing memory in a gpuInt // instance. //----------------------------------------------------------------------------- void DestroyGpuInt(gpuInt *G) { if (G->IsPinned == 1) { cudaFreeHost(G->HostData); } else { free(G->HostData); } cudaFree(G->DevcData); } //----------------------------------------------------------------------------- // UploadGpuInt: upload an integer array from the host to the device. //----------------------------------------------------------------------------- void UploadGpuInt(gpuInt *G) { cudaMemcpy(G->DevcData, G->HostData, G->len * sizeof(int), cudaMemcpyHostToDevice); } //----------------------------------------------------------------------------- // DownloadGpuInt: download an integer array from the host to the device. //----------------------------------------------------------------------------- void DownloadGpuInt(gpuInt *G) { cudaMemcpy(G->HostData, G->DevcData, G->len * sizeof(int), cudaMemcpyHostToDevice); } //----------------------------------------------------------------------------- // CreateGpuFloat: constructor function for allocating memory in a gpuFloat // instance. // // Arguments: // len: the length of array to allocate // pin: flag to have the memory pinned (non-pageable on the host side // for optimal transfer speed ot the device) //----------------------------------------------------------------------------- gpuFloat CreateGpuFloat(int len, int pin) { gpuFloat G; G.len = len; G.IsPinned = pin; // Now that the official length is recorded, upgrade the real length // to the next convenient multiple of 128, so as to always allocate // GPU memory in 512-byte blocks. This is for alignment purposes, // and keeping host to device transfers in line. 
len = ((len + 127) / 128) * 128; if (pin == 1) { cudaHostAlloc((void **)&G.HostData, len * sizeof(float), cudaHostAllocMapped); } else { G.HostData = (float*)malloc(len * sizeof(float)); } cudaMalloc((void **)&G.DevcData, len * sizeof(float)); memset(G.HostData, 0, len * sizeof(float)); cudaMemset((void *)G.DevcData, 0, len * sizeof(float)); return G; } //----------------------------------------------------------------------------- // DestroyGpuFloat: destructor function for freeing memory in a gpuFloat // instance. //----------------------------------------------------------------------------- void DestroyGpuFloat(gpuFloat *G) { if (G->IsPinned == 1) { cudaFreeHost(G->HostData); } else { free(G->HostData); } cudaFree(G->DevcData); } //----------------------------------------------------------------------------- // UploadGpuFloat: upload an float array from the host to the device. //----------------------------------------------------------------------------- void UploadGpuFloat(gpuFloat *G) { cudaMemcpy(G->DevcData, G->HostData, G->len * sizeof(float), cudaMemcpyHostToDevice); } //----------------------------------------------------------------------------- // DownloadGpuFloat: download an float array from the host to the device. //----------------------------------------------------------------------------- void DownloadGpuFloat(gpuFloat *G) { cudaMemcpy(G->HostData, G->DevcData, G->len * sizeof(float), cudaMemcpyHostToDevice); } //----------------------------------------------------------------------------- // main //----------------------------------------------------------------------------- int main() { int i, np; gpuInt particleTypes; gpuFloat particleXcoord, particleYcoord, particleZcoord, particleCharge; gpuFloat etot; // Create a small array of particles and populate it particleTypes = CreateGpuInt(100000, 1); particleXcoord = CreateGpuFloat(100000, 1); particleYcoord = CreateGpuFloat(100000, 1); particleZcoord = CreateGpuFloat(100000, 1); particleCharge = CreateGpuFloat(100000, 1); // Allocate and initialize the total energy // accumulator on the host and on the device. etot = CreateGpuFloat(1, 1); etot.HostData[0] = 0.0; UploadGpuFloat(&etot); // Initialize random number generator. srand() SEEDS the generator, // thereafter each call to rand() will return a different number. // This is a reeally bad generator (much better methods with longer // periods before they start looping back over the same sequence are // available). srand(62052); // Place many, many particles np = 97913; for (i = 0; i < np; i++) { // Integer truncation would happen anyway, I'm just making it explicit particleTypes.HostData[i] = (int)(8 * rand()); // Create some random coordinates (double-to-float conversion // is happening here. On the GPU this can have performance // impact, so keep an eye on the data types at all times! particleXcoord.HostData[i] = 200.0 * (double)rand() / (double)RAND_MAX; particleYcoord.HostData[i] = 200.0 * (double)rand() / (double)RAND_MAX; particleZcoord.HostData[i] = 200.0 * (double)rand() / (double)RAND_MAX; particleCharge.HostData[i] = 0.5 - (double)rand() / (double)RAND_MAX; } // Stage critical constants--see cribSheet struct instance cSh above. 
cribSheet cnstage; cnstage.nparticle = np; cnstage.partX = particleXcoord.DevcData; cnstage.partY = particleYcoord.DevcData; cnstage.partZ = particleZcoord.DevcData; cnstage.partQ = particleCharge.DevcData; cnstage.Etot = etot.DevcData; // Upload all data to the device UploadGpuInt(&particleTypes); UploadGpuFloat(&particleXcoord); UploadGpuFloat(&particleYcoord); UploadGpuFloat(&particleZcoord); UploadGpuFloat(&particleCharge); // Upload the constants to the constants cache cudaMemcpyToSymbol(cSh, &cnstage, sizeof(cribSheet)); // Launch the kernel (upgraded thread count) ParticleSimulator<<<1, 1024>>>(); // Download the total energy DownloadGpuFloat(&etot); printf("Total energy = %10.4f\n", etot.HostData[0]); // Device synchronization cudaDeviceSynchronize(); return 0; }
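One genuine bug worth flagging in both versions of this file: DownloadGpuInt and DownloadGpuFloat copy DevcData into HostData but still pass hipMemcpyHostToDevice / cudaMemcpyHostToDevice (and their comments say "from the host to the device"), so the energy printed in main is not guaranteed to be the value computed on the GPU. A corrected sketch of the float download helper; the _view struct and the Fixed name are illustrative and mirror only the fields the helper touches:

#include <cuda_runtime.h>

struct gpuFloatView { int len; float* HostData; float* DevcData; };

// Download means device -> host, so the copy kind must be cudaMemcpyDeviceToHost.
void DownloadGpuFloatFixed(gpuFloatView* G)
{
    cudaMemcpy(G->HostData, G->DevcData,
               (size_t)G->len * sizeof(float), cudaMemcpyDeviceToHost);
}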
3fcc7a63f93843da164b52b587871d4ff6f9d895.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "setup_curand_states.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; hiprandState_t *state_d = NULL; hipMalloc(&state_d, XSIZE*YSIZE); unsigned long t = 1; int THREADS = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( setup_curand_states), dim3(gridBlock),dim3(threadBlock), 0, 0, state_d,t,THREADS); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( setup_curand_states), dim3(gridBlock),dim3(threadBlock), 0, 0, state_d,t,THREADS); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( setup_curand_states), dim3(gridBlock),dim3(threadBlock), 0, 0, state_d,t,THREADS); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
3fcc7a63f93843da164b52b587871d4ff6f9d895.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "setup_curand_states.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; curandState *state_d = NULL; cudaMalloc(&state_d, XSIZE*YSIZE); unsigned long t = 1; int THREADS = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); setup_curand_states<<<gridBlock,threadBlock>>>(state_d,t,THREADS); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { setup_curand_states<<<gridBlock,threadBlock>>>(state_d,t,THREADS); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { setup_curand_states<<<gridBlock,threadBlock>>>(state_d,t,THREADS); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
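The kernel being benchmarked here is pulled in from setup_curand_states.cu, which is not part of this dump. For context, a typical cuRAND state initializer of this shape looks roughly like the sketch below; this is an assumption about the kernel's purpose, not its actual source:

#include <curand_kernel.h>

__global__ void setup_curand_states_sketch(curandState* state, unsigned long seed, int THREADS)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < THREADS) {
        // One independent RNG stream per thread: same seed, distinct subsequence.
        curand_init(seed, id, 0, &state[id]);
    }
}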
49abb91b9977a793eae1e6a14599ee011a81e69b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common_cuda_funcs.cuh" #include "common_hip_defs.cuh" enum class ScaleType { SINGLE_SCALE, PER_WEIGHT_CHANNEL, PER_ACTIVATION_CHANNEL }; ScaleType get_scale_type(const at::Tensor& input, const at::Tensor& input_low, const at::Tensor& input_range) { TORCH_CHECK(input_low.dim() == input_range.dim(), "input_low and input_range have different dimensionality"); uint64_t scale_dim = input_range.dim(); for (int i = 0; i < scale_dim; i++) { TORCH_CHECK(input_low.size(i) == input_range.size(i), "input_low and input_range have different dimension sizes"); } uint64_t scale_count = input_range.numel(); if (scale_dim > 0) { // For (NxCxHxW) input/output tensors, it is assumed that input_range is // either (1) for single-scale quantization, or (Nx1x1x1) for // per-channel scale weights quantization, or (1xCx1x1) for per-channel // activation quantization if (input_range.size(0) > 1) { TORCH_CHECK(input_range.size(0) == input.size(0), "Scale count and weights input channel count is different"); TORCH_CHECK(input_range.size(0) == scale_count, "Scale shape is not flat"); return ScaleType::PER_WEIGHT_CHANNEL; } else if (scale_dim >= 2 and input_range.size(1) > 1) { TORCH_CHECK(input_range.size(1) == input.size(1), "Scale count and activations channel count is different"); TORCH_CHECK(input_range.size(1) == scale_count, "Scale shape is not flat"); return ScaleType::PER_ACTIVATION_CHANNEL; } // For (1x1x1x1) input/output tensors, it is assumed that input_range // should be PER_WEIGHT_CHANNEL if (scale_count == 1) return ScaleType::PER_WEIGHT_CHANNEL; } return ScaleType::SINGLE_SCALE; } namespace { template <typename scalar_t> __device__ void fakeQuantize( scalar_t* __restrict__ output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ input_low, const scalar_t* __restrict__ input_range, const scalar_t levels ) { scalar_t s = (levels - 1) / (*input_range); (*output) = round((min(max((*input), (*input_low)), (*input_low) + (*input_range)) - (*input_low)) * s) / s + (*input_low); } template <typename scalar_t> __global__ void q_cuda_forward_kernel( scalar_t* __restrict__ output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ input_low, const scalar_t* __restrict__ input_range, const scalar_t levels, const uint64_t size, const uint64_t contiguous_elements_per_scale, const uint64_t scale_count) { const uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) { // "Scales" are derived from input_low/input_range uint64_t scale_idx = static_cast<uint64_t>(idx / contiguous_elements_per_scale) % scale_count; fakeQuantize<scalar_t>((output + idx), (input + idx), input_low + scale_idx, input_range + scale_idx, levels); } } template <typename scalar_t> __device__ void calcGrad( scalar_t* __restrict__ val_grad_input, scalar_t* __restrict__ val_grad_input_low, scalar_t* __restrict__ val_grad_input_range, const scalar_t* __restrict__ grad_output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ output, const scalar_t range_low, const scalar_t range_high, const scalar_t reverted_range, const scalar_t val_low_grad) { *val_grad_input_range = 0; *val_grad_input_low = 0; *val_grad_input = 0; if ((*input) < range_low) { (*val_grad_input_range) = val_low_grad * (*grad_output); (*val_grad_input_low) = (*grad_output); } else if ((*input) > range_high) { (*val_grad_input_range) = (*grad_output); (*val_grad_input_low) = (*grad_output); } else { (*val_grad_input_range) = 
(*grad_output) * (((*output) - (*input)) * reverted_range); (*val_grad_input) = (*grad_output); } } template <typename scalar_t> __global__ void q_single_scale_cuda_backward_kernel( scalar_t* __restrict__ grad_input, scalar_t* __restrict__ grad_input_low, scalar_t* __restrict__ grad_input_range, scalar_t* __restrict__ dev_tmp_range, scalar_t* __restrict__ dev_tmp_low, int32_t* __restrict__ dev_last_block_counter_range, int32_t* __restrict__ dev_last_block_counter_low, const scalar_t* __restrict__ grad_output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ input_low, const scalar_t* __restrict__ input_range, const scalar_t levels, const scalar_t level_low, const scalar_t level_high, const size_t size) { const uint16_t tidx = threadIdx.x; const uint32_t bidx = blockIdx.x; const uint64_t gtidx = bidx * CUDA_MAX_NUM_THREADS_PER_BLOCK + tidx; const uint64_t grid_size = CUDA_MAX_NUM_THREADS_PER_BLOCK * gridDim.x; scalar_t sum_range = 0, sum_low = 0; scalar_t output, val_grad_input_range, val_grad_input_low; scalar_t alpha = level_low / level_high; scalar_t range_low = (*input_low); scalar_t range_high = (*input_low) + (*input_range); scalar_t reverted_range = 1 / (*input_range); for (size_t i = gtidx; i < size; i += grid_size) { fakeQuantize<scalar_t>(&output, (input + i), input_low, input_range, levels); calcGrad<scalar_t>((grad_input + i), &val_grad_input_low, &val_grad_input_range, (grad_output + i), (input + i), &output, range_low, range_high, reverted_range, alpha); sum_range += val_grad_input_range; sum_low += val_grad_input_low; } __shared__ scalar_t sh_grad_range[CUDA_MAX_NUM_THREADS_PER_BLOCK]; __shared__ scalar_t sh_grad_low[CUDA_MAX_NUM_THREADS_PER_BLOCK]; reduce_with_shared_memory<scalar_t>(sh_grad_range, sum_range, tidx, bidx, dev_tmp_range, dev_last_block_counter_range, grad_input_range, gridDim.x); reduce_with_shared_memory<scalar_t>(sh_grad_low, sum_low, tidx, bidx, dev_tmp_low, dev_last_block_counter_low, grad_input_low, gridDim.x); } template <typename scalar_t> __global__ void q_scale_per_weight_channel_cuda_backward_kernel( scalar_t* __restrict__ grad_input, scalar_t* __restrict__ grad_input_low, scalar_t* __restrict__ grad_input_range, scalar_t* __restrict__ dev_tmp_range, scalar_t* __restrict__ dev_tmp_low, int32_t* __restrict__ dev_last_block_counter_range, int32_t* __restrict__ dev_last_block_counter_low, const scalar_t* __restrict__ grad_output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ input_low, const scalar_t* __restrict__ input_range, const scalar_t levels, const scalar_t level_low, const scalar_t level_high, const size_t elements_per_scale) { const uint16_t tidx = threadIdx.x; const uint32_t scale_idx = blockIdx.x; const uint32_t per_scale_block_idx = blockIdx.y; const uint64_t per_scale_tidx = per_scale_block_idx * CUDA_MAX_NUM_THREADS_PER_BLOCK + tidx; const uint32_t total_blocks_per_scale = gridDim.y; const uint64_t total_threads_per_scale = total_blocks_per_scale * CUDA_MAX_NUM_THREADS_PER_BLOCK; // Applying scale data offsets input_low += scale_idx; input_range += scale_idx; dev_tmp_low += scale_idx * total_blocks_per_scale; dev_tmp_range += scale_idx * total_blocks_per_scale; dev_last_block_counter_low += scale_idx; dev_last_block_counter_range += scale_idx; grad_input_low += scale_idx; grad_input_range += scale_idx; const size_t offset_for_scaled_quantized_elements = scale_idx * elements_per_scale; input += offset_for_scaled_quantized_elements; grad_input += offset_for_scaled_quantized_elements; grad_output += 
offset_for_scaled_quantized_elements; scalar_t per_thread_grad_sum_range = 0, per_thread_grad_sum_low = 0; scalar_t output, val_grad_input_range, val_grad_input_low; scalar_t alpha = level_low / level_high; scalar_t range_low = (*input_low); scalar_t range_high = (*input_low) + (*input_range); scalar_t reverted_range = 1 / (*input_range); for (size_t i = per_scale_tidx; i < elements_per_scale; i += total_threads_per_scale) { fakeQuantize<scalar_t>(&output, (input + i), input_low, input_range, levels); calcGrad<scalar_t>((grad_input + i), &val_grad_input_low, &val_grad_input_range, (grad_output + i), (input + i), &output, range_low, range_high, reverted_range, alpha); per_thread_grad_sum_range += val_grad_input_range; per_thread_grad_sum_low += val_grad_input_low; } __shared__ scalar_t sh_grad_range[CUDA_MAX_NUM_THREADS_PER_BLOCK]; __shared__ scalar_t sh_grad_low[CUDA_MAX_NUM_THREADS_PER_BLOCK]; reduce_with_shared_memory<scalar_t>(sh_grad_range, per_thread_grad_sum_range, tidx, per_scale_block_idx, dev_tmp_range, dev_last_block_counter_range, grad_input_range, total_blocks_per_scale); reduce_with_shared_memory<scalar_t>(sh_grad_low, per_thread_grad_sum_low, tidx, per_scale_block_idx, dev_tmp_low, dev_last_block_counter_low, grad_input_low, total_blocks_per_scale); } template <typename scalar_t> __global__ void q_scale_per_activation_channel_cuda_backward_kernel( scalar_t* __restrict__ grad_input, scalar_t* __restrict__ grad_input_low, scalar_t* __restrict__ grad_input_range, scalar_t* __restrict__ dev_tmp_range, scalar_t* __restrict__ dev_tmp_low, int32_t* __restrict__ dev_last_block_counter_range, int32_t* __restrict__ dev_last_block_counter_low, const scalar_t* __restrict__ grad_output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ input_low, const scalar_t* __restrict__ input_range, const scalar_t levels, const scalar_t level_low, const scalar_t level_high, const int64_t total_elements_per_scale, const int64_t contiguous_elements_per_scale, const int64_t scale_count, const int64_t leading_channel_offset) { const uint16_t tidx = threadIdx.x; const uint32_t scale_idx = blockIdx.x; const uint32_t per_scale_block_idx = blockIdx.y; const uint64_t per_scale_tidx = per_scale_block_idx * CUDA_MAX_NUM_THREADS_PER_BLOCK + tidx; const uint32_t total_blocks_per_scale = gridDim.y; const uint64_t total_threads_per_scale = total_blocks_per_scale * CUDA_MAX_NUM_THREADS_PER_BLOCK; // Applying scale data offsets input_low += scale_idx; input_range += scale_idx; dev_tmp_low += scale_idx * total_blocks_per_scale; dev_tmp_range += scale_idx * total_blocks_per_scale; dev_last_block_counter_low += scale_idx; dev_last_block_counter_range += scale_idx; grad_input_low += scale_idx; grad_input_range += scale_idx; scalar_t per_thread_grad_sum_range = 0, per_thread_grad_sum_low = 0; scalar_t output, val_grad_input_range, val_grad_input_low; scalar_t alpha = level_low / level_high; scalar_t range_low = (*input_low); scalar_t range_high = (*input_low) + (*input_range); scalar_t reverted_range = 1 / (*input_range); // The blocks of values belonging to one and the same scale here are interleaved with a period // equal to contiguous_elements_per_scale. Will apply an offset to the beginning of the first // block of values belonging to the current scale of the thread block, and then, in the for loop, map // a contiguously changing loop iteration index into a value-block-skipping offset calculation pattern. 
const size_t initial_offset = scale_idx * contiguous_elements_per_scale; input += initial_offset; grad_input += initial_offset; grad_output += initial_offset; for (uint64_t i = per_scale_tidx; i < total_elements_per_scale; i += total_threads_per_scale) { size_t additional_offset = (i / contiguous_elements_per_scale) * leading_channel_offset + (i % contiguous_elements_per_scale); fakeQuantize<scalar_t>(&output, (input + additional_offset), input_low, input_range, levels); calcGrad<scalar_t>((grad_input + additional_offset), &val_grad_input_low, &val_grad_input_range, (grad_output + additional_offset), (input + additional_offset), &output, range_low, range_high, reverted_range, alpha); per_thread_grad_sum_range += val_grad_input_range; per_thread_grad_sum_low += val_grad_input_low; } __shared__ scalar_t sh_grad_range[CUDA_MAX_NUM_THREADS_PER_BLOCK]; __shared__ scalar_t sh_grad_low[CUDA_MAX_NUM_THREADS_PER_BLOCK]; reduce_with_shared_memory<scalar_t>(sh_grad_range, per_thread_grad_sum_range, tidx, per_scale_block_idx, dev_tmp_range, dev_last_block_counter_range, grad_input_range, total_blocks_per_scale); reduce_with_shared_memory<scalar_t>(sh_grad_low, per_thread_grad_sum_low, tidx, per_scale_block_idx, dev_tmp_low, dev_last_block_counter_low, grad_input_low, total_blocks_per_scale); } } at::Tensor q_cuda_forward( at::Tensor input, at::Tensor input_low, at::Tensor input_range, int levels) { at::DeviceGuard guard(input.device()); const auto quantized_elements_count = input.numel(); ScaleType scale_type = get_scale_type(input, input_low, input_range); uint64_t contiguous_elements_per_scale = 0; uint64_t scale_count = input_range.numel(); switch (scale_type) { case ScaleType::PER_ACTIVATION_CHANNEL: // Scale count should be equal to 1-st input tensor dimension contiguous_elements_per_scale = quantized_elements_count / (input.size(0) * scale_count); break; case ScaleType::PER_WEIGHT_CHANNEL: // Scale count should be equal to 0-th input tensor dimension contiguous_elements_per_scale = quantized_elements_count / scale_count; break; default: contiguous_elements_per_scale = quantized_elements_count; break; } auto output = at::empty_like(input); PROFILE(AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "q_cuda_forward", ([&] { hipLaunchKernelGGL(( q_cuda_forward_kernel<scalar_t>), dim3(GET_BLOCKS(quantized_elements_count)), dim3(CUDA_MAX_NUM_THREADS_PER_BLOCK), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), input_low.data_ptr<scalar_t>(), input_range.data_ptr<scalar_t>(), levels, quantized_elements_count, contiguous_elements_per_scale, scale_count); }));) return output; } std::vector<at::Tensor> q_single_scale_cuda_backward(at::Tensor grad_output, at::Tensor input, at::Tensor input_low, at::Tensor input_range, int levels, int level_low, int level_high) { at::DeviceGuard guard(input.device()); const auto size = input.numel(); auto grad_input = at::empty_like(grad_output); auto grad_input_range = at::empty({1}, grad_output.options()); auto grad_input_low = at::empty({1}, grad_output.options()); auto grid_size = ::min(GET_BLOCKS(size), CUDA_BLOCKS_PER_GRID_FOR_UNIFORM_ELTWISE); auto dev_tmp_range = at::empty({grid_size}, grad_output.options()); auto dev_tmp_low = at::empty({grid_size}, grad_output.options()); auto dev_last_block_counter_range = at::zeros({1}, at::device(grad_output.options().device()).dtype(at::kInt)); auto dev_last_block_counter_low = at::zeros({1}, at::device(grad_output.options().device()).dtype(at::kInt)); 
PROFILE(AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "q_single_scale_cuda_backward", ([&] { hipLaunchKernelGGL(( q_single_scale_cuda_backward_kernel<scalar_t>), dim3(grid_size), dim3(CUDA_MAX_NUM_THREADS_PER_BLOCK), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_input.data_ptr<scalar_t>(), grad_input_low.data_ptr<scalar_t>(), grad_input_range.data_ptr<scalar_t>(), dev_tmp_range.data_ptr<scalar_t>(), dev_tmp_low.data_ptr<scalar_t>(), dev_last_block_counter_range.data_ptr<int32_t>(), dev_last_block_counter_low.data_ptr<int32_t>(), grad_output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), input_low.data_ptr<scalar_t>(), input_range.data_ptr<scalar_t>(), levels, level_low, level_high, size); }));) return {grad_input, grad_input_low, grad_input_range}; } std::vector<at::Tensor> q_scale_per_weight_channel_cuda_backward(at::Tensor grad_output, at::Tensor input, at::Tensor input_low, at::Tensor input_range, int levels, int level_low, int level_high) { at::DeviceGuard guard(input.device()); const auto scale_count = input_range.size(0); const auto elements_per_scale = input.numel() / scale_count; auto grad_input = at::empty_like(grad_output); auto grad_input_low = at::empty(input_range.sizes(), grad_output.options()); auto grad_input_range = at::empty(input_range.sizes(), grad_output.options()); dim3 grid_size = get_2d_grid_size_for_per_channel(scale_count); auto dev_tmp_range = at::zeros({grid_size.x, grid_size.y}, grad_output.options()); auto dev_tmp_low = at::zeros({grid_size.x, grid_size.y}, grad_output.options()); auto dev_last_block_counter_range = at::zeros({grid_size.x, 1}, at::device(grad_output.options().device()).dtype(at::kInt)); auto dev_last_block_counter_low = at::zeros({grid_size.x, 1}, at::device(grad_output.options().device()).dtype(at::kInt)); PROFILE(AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "q_single_scale_cuda_backward", ([&] { hipLaunchKernelGGL(( q_scale_per_weight_channel_cuda_backward_kernel<scalar_t>), dim3(grid_size), dim3(CUDA_MAX_NUM_THREADS_PER_BLOCK), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_input.data_ptr<scalar_t>(), grad_input_low.data_ptr<scalar_t>(), grad_input_range.data_ptr<scalar_t>(), dev_tmp_range.data_ptr<scalar_t>(), dev_tmp_low.data_ptr<scalar_t>(), dev_last_block_counter_range.data_ptr<int32_t>(), dev_last_block_counter_low.data_ptr<int32_t>(), grad_output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), input_low.data_ptr<scalar_t>(), input_range.data_ptr<scalar_t>(), levels, level_low, level_high, elements_per_scale); })); ) return {grad_input, grad_input_low, grad_input_range}; } std::vector<at::Tensor> q_scale_per_activation_channel_cuda_backward(at::Tensor grad_output, at::Tensor input, at::Tensor input_low, at::Tensor input_range, int levels, int level_low, int level_high) { at::DeviceGuard guard(input.device()); const auto scale_count = input_range.size(1); const auto total_elements_per_scale = input.numel() / scale_count; const auto contiguous_elements_per_scale = input.numel() / (scale_count * input.size(0)); const auto leading_channel_offset = input.numel() / input.size(0); auto grad_input = at::empty_like(grad_output); auto grad_input_low = at::empty(input_range.sizes(), grad_output.options()); auto grad_input_range = at::empty(input_range.sizes(), grad_output.options()); dim3 grid_size = get_2d_grid_size_for_per_channel(scale_count); auto dev_tmp_range = at::zeros({grid_size.x, grid_size.y}, grad_output.options()); auto dev_tmp_low = at::zeros({grid_size.x, grid_size.y}, 
grad_output.options()); auto dev_last_block_counter_range = at::zeros({grid_size.x, 1}, at::device(grad_output.options().device()).dtype(at::kInt)); auto dev_last_block_counter_low = at::zeros({grid_size.x, 1}, at::device(grad_output.options().device()).dtype(at::kInt)); PROFILE( AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "q_scale_per_activation_channel_cuda_backward", ([&] { hipLaunchKernelGGL(( q_scale_per_activation_channel_cuda_backward_kernel<scalar_t>), dim3(grid_size), dim3(CUDA_MAX_NUM_THREADS_PER_BLOCK), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_input.data_ptr<scalar_t>(), grad_input_low.data_ptr<scalar_t>(), grad_input_range.data_ptr<scalar_t>(), dev_tmp_range.data_ptr<scalar_t>(), dev_tmp_low.data_ptr<scalar_t>(), dev_last_block_counter_range.data_ptr<int32_t>(), dev_last_block_counter_low.data_ptr<int32_t>(), grad_output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), input_low.data_ptr<scalar_t>(), input_range.data_ptr<scalar_t>(), levels, level_low, level_high, total_elements_per_scale, contiguous_elements_per_scale, scale_count, leading_channel_offset); })); ) return {grad_input, grad_input_low, grad_input_range}; } std::vector<at::Tensor> q_cuda_backward( at::Tensor grad_output, at::Tensor input, at::Tensor input_low, at::Tensor input_range, int levels, int level_low, int level_high) { at::DeviceGuard guard(input.device()); ScaleType scale_type = get_scale_type(input, input_low, input_range); switch (scale_type) { case ScaleType::PER_ACTIVATION_CHANNEL: return q_scale_per_activation_channel_cuda_backward( grad_output, input, input_low, input_range, levels, level_low, level_high); case ScaleType::PER_WEIGHT_CHANNEL: return q_scale_per_weight_channel_cuda_backward( grad_output, input, input_low, input_range, levels, level_low, level_high); case ScaleType::SINGLE_SCALE: default: return q_single_scale_cuda_backward( grad_output, input, input_low, input_range, levels, level_low, level_high); }; }
49abb91b9977a793eae1e6a14599ee011a81e69b.cu
#include "common_cuda_funcs.cuh" #include "common_cuda_defs.cuh" enum class ScaleType { SINGLE_SCALE, PER_WEIGHT_CHANNEL, PER_ACTIVATION_CHANNEL }; ScaleType get_scale_type(const at::Tensor& input, const at::Tensor& input_low, const at::Tensor& input_range) { TORCH_CHECK(input_low.dim() == input_range.dim(), "input_low and input_range have different dimensionality"); uint64_t scale_dim = input_range.dim(); for (int i = 0; i < scale_dim; i++) { TORCH_CHECK(input_low.size(i) == input_range.size(i), "input_low and input_range have different dimension sizes"); } uint64_t scale_count = input_range.numel(); if (scale_dim > 0) { // For (NxCxHxW) input/output tensors, it is assumed that input_range is // either (1) for single-scale quantization, or (Nx1x1x1) for // per-channel scale weights quantization, or (1xCx1x1) for per-channel // activation quantization if (input_range.size(0) > 1) { TORCH_CHECK(input_range.size(0) == input.size(0), "Scale count and weights input channel count is different"); TORCH_CHECK(input_range.size(0) == scale_count, "Scale shape is not flat"); return ScaleType::PER_WEIGHT_CHANNEL; } else if (scale_dim >= 2 and input_range.size(1) > 1) { TORCH_CHECK(input_range.size(1) == input.size(1), "Scale count and activations channel count is different"); TORCH_CHECK(input_range.size(1) == scale_count, "Scale shape is not flat"); return ScaleType::PER_ACTIVATION_CHANNEL; } // For (1x1x1x1) input/output tensors, it is assumed that input_range // should be PER_WEIGHT_CHANNEL if (scale_count == 1) return ScaleType::PER_WEIGHT_CHANNEL; } return ScaleType::SINGLE_SCALE; } namespace { template <typename scalar_t> __device__ void fakeQuantize( scalar_t* __restrict__ output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ input_low, const scalar_t* __restrict__ input_range, const scalar_t levels ) { scalar_t s = (levels - 1) / (*input_range); (*output) = round((min(max((*input), (*input_low)), (*input_low) + (*input_range)) - (*input_low)) * s) / s + (*input_low); } template <typename scalar_t> __global__ void q_cuda_forward_kernel( scalar_t* __restrict__ output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ input_low, const scalar_t* __restrict__ input_range, const scalar_t levels, const uint64_t size, const uint64_t contiguous_elements_per_scale, const uint64_t scale_count) { const uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) { // "Scales" are derived from input_low/input_range uint64_t scale_idx = static_cast<uint64_t>(idx / contiguous_elements_per_scale) % scale_count; fakeQuantize<scalar_t>((output + idx), (input + idx), input_low + scale_idx, input_range + scale_idx, levels); } } template <typename scalar_t> __device__ void calcGrad( scalar_t* __restrict__ val_grad_input, scalar_t* __restrict__ val_grad_input_low, scalar_t* __restrict__ val_grad_input_range, const scalar_t* __restrict__ grad_output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ output, const scalar_t range_low, const scalar_t range_high, const scalar_t reverted_range, const scalar_t val_low_grad) { *val_grad_input_range = 0; *val_grad_input_low = 0; *val_grad_input = 0; if ((*input) < range_low) { (*val_grad_input_range) = val_low_grad * (*grad_output); (*val_grad_input_low) = (*grad_output); } else if ((*input) > range_high) { (*val_grad_input_range) = (*grad_output); (*val_grad_input_low) = (*grad_output); } else { (*val_grad_input_range) = (*grad_output) * (((*output) - (*input)) * reverted_range); (*val_grad_input) = (*grad_output); 
} } template <typename scalar_t> __global__ void q_single_scale_cuda_backward_kernel( scalar_t* __restrict__ grad_input, scalar_t* __restrict__ grad_input_low, scalar_t* __restrict__ grad_input_range, scalar_t* __restrict__ dev_tmp_range, scalar_t* __restrict__ dev_tmp_low, int32_t* __restrict__ dev_last_block_counter_range, int32_t* __restrict__ dev_last_block_counter_low, const scalar_t* __restrict__ grad_output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ input_low, const scalar_t* __restrict__ input_range, const scalar_t levels, const scalar_t level_low, const scalar_t level_high, const size_t size) { const uint16_t tidx = threadIdx.x; const uint32_t bidx = blockIdx.x; const uint64_t gtidx = bidx * CUDA_MAX_NUM_THREADS_PER_BLOCK + tidx; const uint64_t grid_size = CUDA_MAX_NUM_THREADS_PER_BLOCK * gridDim.x; scalar_t sum_range = 0, sum_low = 0; scalar_t output, val_grad_input_range, val_grad_input_low; scalar_t alpha = level_low / level_high; scalar_t range_low = (*input_low); scalar_t range_high = (*input_low) + (*input_range); scalar_t reverted_range = 1 / (*input_range); for (size_t i = gtidx; i < size; i += grid_size) { fakeQuantize<scalar_t>(&output, (input + i), input_low, input_range, levels); calcGrad<scalar_t>((grad_input + i), &val_grad_input_low, &val_grad_input_range, (grad_output + i), (input + i), &output, range_low, range_high, reverted_range, alpha); sum_range += val_grad_input_range; sum_low += val_grad_input_low; } __shared__ scalar_t sh_grad_range[CUDA_MAX_NUM_THREADS_PER_BLOCK]; __shared__ scalar_t sh_grad_low[CUDA_MAX_NUM_THREADS_PER_BLOCK]; reduce_with_shared_memory<scalar_t>(sh_grad_range, sum_range, tidx, bidx, dev_tmp_range, dev_last_block_counter_range, grad_input_range, gridDim.x); reduce_with_shared_memory<scalar_t>(sh_grad_low, sum_low, tidx, bidx, dev_tmp_low, dev_last_block_counter_low, grad_input_low, gridDim.x); } template <typename scalar_t> __global__ void q_scale_per_weight_channel_cuda_backward_kernel( scalar_t* __restrict__ grad_input, scalar_t* __restrict__ grad_input_low, scalar_t* __restrict__ grad_input_range, scalar_t* __restrict__ dev_tmp_range, scalar_t* __restrict__ dev_tmp_low, int32_t* __restrict__ dev_last_block_counter_range, int32_t* __restrict__ dev_last_block_counter_low, const scalar_t* __restrict__ grad_output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ input_low, const scalar_t* __restrict__ input_range, const scalar_t levels, const scalar_t level_low, const scalar_t level_high, const size_t elements_per_scale) { const uint16_t tidx = threadIdx.x; const uint32_t scale_idx = blockIdx.x; const uint32_t per_scale_block_idx = blockIdx.y; const uint64_t per_scale_tidx = per_scale_block_idx * CUDA_MAX_NUM_THREADS_PER_BLOCK + tidx; const uint32_t total_blocks_per_scale = gridDim.y; const uint64_t total_threads_per_scale = total_blocks_per_scale * CUDA_MAX_NUM_THREADS_PER_BLOCK; // Applying scale data offsets input_low += scale_idx; input_range += scale_idx; dev_tmp_low += scale_idx * total_blocks_per_scale; dev_tmp_range += scale_idx * total_blocks_per_scale; dev_last_block_counter_low += scale_idx; dev_last_block_counter_range += scale_idx; grad_input_low += scale_idx; grad_input_range += scale_idx; const size_t offset_for_scaled_quantized_elements = scale_idx * elements_per_scale; input += offset_for_scaled_quantized_elements; grad_input += offset_for_scaled_quantized_elements; grad_output += offset_for_scaled_quantized_elements; scalar_t per_thread_grad_sum_range = 0, per_thread_grad_sum_low = 
0; scalar_t output, val_grad_input_range, val_grad_input_low; scalar_t alpha = level_low / level_high; scalar_t range_low = (*input_low); scalar_t range_high = (*input_low) + (*input_range); scalar_t reverted_range = 1 / (*input_range); for (size_t i = per_scale_tidx; i < elements_per_scale; i += total_threads_per_scale) { fakeQuantize<scalar_t>(&output, (input + i), input_low, input_range, levels); calcGrad<scalar_t>((grad_input + i), &val_grad_input_low, &val_grad_input_range, (grad_output + i), (input + i), &output, range_low, range_high, reverted_range, alpha); per_thread_grad_sum_range += val_grad_input_range; per_thread_grad_sum_low += val_grad_input_low; } __shared__ scalar_t sh_grad_range[CUDA_MAX_NUM_THREADS_PER_BLOCK]; __shared__ scalar_t sh_grad_low[CUDA_MAX_NUM_THREADS_PER_BLOCK]; reduce_with_shared_memory<scalar_t>(sh_grad_range, per_thread_grad_sum_range, tidx, per_scale_block_idx, dev_tmp_range, dev_last_block_counter_range, grad_input_range, total_blocks_per_scale); reduce_with_shared_memory<scalar_t>(sh_grad_low, per_thread_grad_sum_low, tidx, per_scale_block_idx, dev_tmp_low, dev_last_block_counter_low, grad_input_low, total_blocks_per_scale); } template <typename scalar_t> __global__ void q_scale_per_activation_channel_cuda_backward_kernel( scalar_t* __restrict__ grad_input, scalar_t* __restrict__ grad_input_low, scalar_t* __restrict__ grad_input_range, scalar_t* __restrict__ dev_tmp_range, scalar_t* __restrict__ dev_tmp_low, int32_t* __restrict__ dev_last_block_counter_range, int32_t* __restrict__ dev_last_block_counter_low, const scalar_t* __restrict__ grad_output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ input_low, const scalar_t* __restrict__ input_range, const scalar_t levels, const scalar_t level_low, const scalar_t level_high, const int64_t total_elements_per_scale, const int64_t contiguous_elements_per_scale, const int64_t scale_count, const int64_t leading_channel_offset) { const uint16_t tidx = threadIdx.x; const uint32_t scale_idx = blockIdx.x; const uint32_t per_scale_block_idx = blockIdx.y; const uint64_t per_scale_tidx = per_scale_block_idx * CUDA_MAX_NUM_THREADS_PER_BLOCK + tidx; const uint32_t total_blocks_per_scale = gridDim.y; const uint64_t total_threads_per_scale = total_blocks_per_scale * CUDA_MAX_NUM_THREADS_PER_BLOCK; // Applying scale data offsets input_low += scale_idx; input_range += scale_idx; dev_tmp_low += scale_idx * total_blocks_per_scale; dev_tmp_range += scale_idx * total_blocks_per_scale; dev_last_block_counter_low += scale_idx; dev_last_block_counter_range += scale_idx; grad_input_low += scale_idx; grad_input_range += scale_idx; scalar_t per_thread_grad_sum_range = 0, per_thread_grad_sum_low = 0; scalar_t output, val_grad_input_range, val_grad_input_low; scalar_t alpha = level_low / level_high; scalar_t range_low = (*input_low); scalar_t range_high = (*input_low) + (*input_range); scalar_t reverted_range = 1 / (*input_range); // The blocks of values belonging to one and the same scale here are interleaved with a period // equal to contiguous_elements_per_scale. Will apply an offset to the beginning of the first // block of values belonging to the current scale of the thread block, and then, in the for loop, map // a contiguously changing loop iteration index into a value-block-skipping offset calculation pattern. 
const size_t initial_offset = scale_idx * contiguous_elements_per_scale; input += initial_offset; grad_input += initial_offset; grad_output += initial_offset; for (uint64_t i = per_scale_tidx; i < total_elements_per_scale; i += total_threads_per_scale) { size_t additional_offset = (i / contiguous_elements_per_scale) * leading_channel_offset + (i % contiguous_elements_per_scale); fakeQuantize<scalar_t>(&output, (input + additional_offset), input_low, input_range, levels); calcGrad<scalar_t>((grad_input + additional_offset), &val_grad_input_low, &val_grad_input_range, (grad_output + additional_offset), (input + additional_offset), &output, range_low, range_high, reverted_range, alpha); per_thread_grad_sum_range += val_grad_input_range; per_thread_grad_sum_low += val_grad_input_low; } __shared__ scalar_t sh_grad_range[CUDA_MAX_NUM_THREADS_PER_BLOCK]; __shared__ scalar_t sh_grad_low[CUDA_MAX_NUM_THREADS_PER_BLOCK]; reduce_with_shared_memory<scalar_t>(sh_grad_range, per_thread_grad_sum_range, tidx, per_scale_block_idx, dev_tmp_range, dev_last_block_counter_range, grad_input_range, total_blocks_per_scale); reduce_with_shared_memory<scalar_t>(sh_grad_low, per_thread_grad_sum_low, tidx, per_scale_block_idx, dev_tmp_low, dev_last_block_counter_low, grad_input_low, total_blocks_per_scale); } } at::Tensor q_cuda_forward( at::Tensor input, at::Tensor input_low, at::Tensor input_range, int levels) { at::DeviceGuard guard(input.device()); const auto quantized_elements_count = input.numel(); ScaleType scale_type = get_scale_type(input, input_low, input_range); uint64_t contiguous_elements_per_scale = 0; uint64_t scale_count = input_range.numel(); switch (scale_type) { case ScaleType::PER_ACTIVATION_CHANNEL: // Scale count should be equal to 1-st input tensor dimension contiguous_elements_per_scale = quantized_elements_count / (input.size(0) * scale_count); break; case ScaleType::PER_WEIGHT_CHANNEL: // Scale count should be equal to 0-th input tensor dimension contiguous_elements_per_scale = quantized_elements_count / scale_count; break; default: contiguous_elements_per_scale = quantized_elements_count; break; } auto output = at::empty_like(input); PROFILE(AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "q_cuda_forward", ([&] { q_cuda_forward_kernel<scalar_t><<<GET_BLOCKS(quantized_elements_count), CUDA_MAX_NUM_THREADS_PER_BLOCK, 0, at::cuda::getCurrentCUDAStream()>>>( output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), input_low.data_ptr<scalar_t>(), input_range.data_ptr<scalar_t>(), levels, quantized_elements_count, contiguous_elements_per_scale, scale_count); }));) return output; } std::vector<at::Tensor> q_single_scale_cuda_backward(at::Tensor grad_output, at::Tensor input, at::Tensor input_low, at::Tensor input_range, int levels, int level_low, int level_high) { at::DeviceGuard guard(input.device()); const auto size = input.numel(); auto grad_input = at::empty_like(grad_output); auto grad_input_range = at::empty({1}, grad_output.options()); auto grad_input_low = at::empty({1}, grad_output.options()); auto grid_size = std::min(GET_BLOCKS(size), CUDA_BLOCKS_PER_GRID_FOR_UNIFORM_ELTWISE); auto dev_tmp_range = at::empty({grid_size}, grad_output.options()); auto dev_tmp_low = at::empty({grid_size}, grad_output.options()); auto dev_last_block_counter_range = at::zeros({1}, at::device(grad_output.options().device()).dtype(at::kInt)); auto dev_last_block_counter_low = at::zeros({1}, at::device(grad_output.options().device()).dtype(at::kInt)); 
PROFILE(AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "q_single_scale_cuda_backward", ([&] { q_single_scale_cuda_backward_kernel<scalar_t><<<grid_size, CUDA_MAX_NUM_THREADS_PER_BLOCK, 0, at::cuda::getCurrentCUDAStream()>>>( grad_input.data_ptr<scalar_t>(), grad_input_low.data_ptr<scalar_t>(), grad_input_range.data_ptr<scalar_t>(), dev_tmp_range.data_ptr<scalar_t>(), dev_tmp_low.data_ptr<scalar_t>(), dev_last_block_counter_range.data_ptr<int32_t>(), dev_last_block_counter_low.data_ptr<int32_t>(), grad_output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), input_low.data_ptr<scalar_t>(), input_range.data_ptr<scalar_t>(), levels, level_low, level_high, size); }));) return {grad_input, grad_input_low, grad_input_range}; } std::vector<at::Tensor> q_scale_per_weight_channel_cuda_backward(at::Tensor grad_output, at::Tensor input, at::Tensor input_low, at::Tensor input_range, int levels, int level_low, int level_high) { at::DeviceGuard guard(input.device()); const auto scale_count = input_range.size(0); const auto elements_per_scale = input.numel() / scale_count; auto grad_input = at::empty_like(grad_output); auto grad_input_low = at::empty(input_range.sizes(), grad_output.options()); auto grad_input_range = at::empty(input_range.sizes(), grad_output.options()); dim3 grid_size = get_2d_grid_size_for_per_channel(scale_count); auto dev_tmp_range = at::zeros({grid_size.x, grid_size.y}, grad_output.options()); auto dev_tmp_low = at::zeros({grid_size.x, grid_size.y}, grad_output.options()); auto dev_last_block_counter_range = at::zeros({grid_size.x, 1}, at::device(grad_output.options().device()).dtype(at::kInt)); auto dev_last_block_counter_low = at::zeros({grid_size.x, 1}, at::device(grad_output.options().device()).dtype(at::kInt)); PROFILE(AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "q_single_scale_cuda_backward", ([&] { q_scale_per_weight_channel_cuda_backward_kernel<scalar_t><<<grid_size, CUDA_MAX_NUM_THREADS_PER_BLOCK, 0, at::cuda::getCurrentCUDAStream()>>>( grad_input.data_ptr<scalar_t>(), grad_input_low.data_ptr<scalar_t>(), grad_input_range.data_ptr<scalar_t>(), dev_tmp_range.data_ptr<scalar_t>(), dev_tmp_low.data_ptr<scalar_t>(), dev_last_block_counter_range.data_ptr<int32_t>(), dev_last_block_counter_low.data_ptr<int32_t>(), grad_output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), input_low.data_ptr<scalar_t>(), input_range.data_ptr<scalar_t>(), levels, level_low, level_high, elements_per_scale); })); ) return {grad_input, grad_input_low, grad_input_range}; } std::vector<at::Tensor> q_scale_per_activation_channel_cuda_backward(at::Tensor grad_output, at::Tensor input, at::Tensor input_low, at::Tensor input_range, int levels, int level_low, int level_high) { at::DeviceGuard guard(input.device()); const auto scale_count = input_range.size(1); const auto total_elements_per_scale = input.numel() / scale_count; const auto contiguous_elements_per_scale = input.numel() / (scale_count * input.size(0)); const auto leading_channel_offset = input.numel() / input.size(0); auto grad_input = at::empty_like(grad_output); auto grad_input_low = at::empty(input_range.sizes(), grad_output.options()); auto grad_input_range = at::empty(input_range.sizes(), grad_output.options()); dim3 grid_size = get_2d_grid_size_for_per_channel(scale_count); auto dev_tmp_range = at::zeros({grid_size.x, grid_size.y}, grad_output.options()); auto dev_tmp_low = at::zeros({grid_size.x, grid_size.y}, grad_output.options()); auto dev_last_block_counter_range = at::zeros({grid_size.x, 1}, 
at::device(grad_output.options().device()).dtype(at::kInt)); auto dev_last_block_counter_low = at::zeros({grid_size.x, 1}, at::device(grad_output.options().device()).dtype(at::kInt)); PROFILE( AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "q_scale_per_activation_channel_cuda_backward", ([&] { q_scale_per_activation_channel_cuda_backward_kernel<scalar_t><<<grid_size, CUDA_MAX_NUM_THREADS_PER_BLOCK, 0, at::cuda::getCurrentCUDAStream()>>>( grad_input.data_ptr<scalar_t>(), grad_input_low.data_ptr<scalar_t>(), grad_input_range.data_ptr<scalar_t>(), dev_tmp_range.data_ptr<scalar_t>(), dev_tmp_low.data_ptr<scalar_t>(), dev_last_block_counter_range.data_ptr<int32_t>(), dev_last_block_counter_low.data_ptr<int32_t>(), grad_output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), input_low.data_ptr<scalar_t>(), input_range.data_ptr<scalar_t>(), levels, level_low, level_high, total_elements_per_scale, contiguous_elements_per_scale, scale_count, leading_channel_offset); })); ) return {grad_input, grad_input_low, grad_input_range}; } std::vector<at::Tensor> q_cuda_backward( at::Tensor grad_output, at::Tensor input, at::Tensor input_low, at::Tensor input_range, int levels, int level_low, int level_high) { at::DeviceGuard guard(input.device()); ScaleType scale_type = get_scale_type(input, input_low, input_range); switch (scale_type) { case ScaleType::PER_ACTIVATION_CHANNEL: return q_scale_per_activation_channel_cuda_backward( grad_output, input, input_low, input_range, levels, level_low, level_high); case ScaleType::PER_WEIGHT_CHANNEL: return q_scale_per_weight_channel_cuda_backward( grad_output, input, input_low, input_range, levels, level_low, level_high); case ScaleType::SINGLE_SCALE: default: return q_single_scale_cuda_backward( grad_output, input, input_low, input_range, levels, level_low, level_high); }; }
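Both copies of the quantization file above implement the same fake-quantization math: the forward kernels clamp each input into [input_low, input_low + input_range] and snap it to one of `levels` points, and the backward kernels route the output gradient into per-scale sums for input_low and input_range through a shared-memory block reduction. The snippet below is a minimal host-side reference of that formula and its three gradient regions, intended only for sanity-checking kernel outputs on small tensors; it is not part of either source file and every name in it is illustrative.

// Hypothetical host-side mirror of fakeQuantize() / calcGrad() above (not original code).
#include <algorithm>
#include <cmath>
#include <cstdio>

struct FqGrads { float d_input, d_low, d_range; };

inline float fake_quantize_ref(float x, float low, float range, float levels) {
    float s = (levels - 1.0f) / range;                        // quantization scale
    float clamped = std::min(std::max(x, low), low + range);  // clamp into [low, low + range]
    return std::round((clamped - low) * s) / s + low;
}

// Mirrors calcGrad(): alpha = level_low / level_high, out = fake_quantize_ref(x, ...).
inline FqGrads fake_quantize_grad_ref(float g, float x, float out,
                                      float low, float range, float alpha) {
    if (x < low)         return {0.0f, g, alpha * g};   // below the quantization range
    if (x > low + range) return {0.0f, g, g};           // above the quantization range
    return {g, 0.0f, g * (out - x) / range};            // inside the range
}

int main() {
    float low = -1.0f, range = 2.0f, levels = 256.0f;            // example values
    float x = 0.3f, out = fake_quantize_ref(x, low, range, levels);
    FqGrads grads = fake_quantize_grad_ref(1.0f, x, out, low, range, -128.0f / 127.0f);
    std::printf("out=%f dL/dx=%f dL/dlow=%f dL/drange=%f\n",
                out, grads.d_input, grads.d_low, grads.d_range);
    return 0;
}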
861bb860e6e06cfdbd2ef60742000f77bd3ebbb5.hip
// !!! This is a file automatically generated by hipify!!! /** * @file SigmoidLayer2_device.cu * @date 2017-02-07 * @author moonhoen lee * @brief * @details */ #include "hip/hip_runtime.h" #include "Sigmoid2Layer.h" #include "Network.h" #include "SysLog.h" #include "StdOutLog.h" #include "ColdLog.h" #include "Perf.h" #include "PropMgmt.h" #include "MemoryMgmt.h" #define SIGMOID2LAYER_LOG 1 using namespace std; /////////////////////////////////////////////////////////////////////////////////////////// // GPU Kernels template <typename Dtype> __global__ void Forward(const Dtype *input, int size, Dtype *output) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= size) return; output[idx] = 1.0 / (1.0 + exp((-1.0) * input[idx])); } template <typename Dtype> __global__ void Backward(const Dtype *outputGrad, const Dtype *output, int size, Dtype *inputGrad) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= size) return; inputGrad[idx] = outputGrad[idx] * output[idx] * (1.0 - output[idx]); } template <typename Dtype> Sigmoid2Layer<Dtype>::Sigmoid2Layer() : Layer<Dtype>() { this->type = Layer<Dtype>::Sigmoid2; } template <typename Dtype> void Sigmoid2Layer<Dtype>::feedforward() { const Dtype* inputData = this->_inputData[0]->device_data(); Dtype* outputData = this->_outputData[0]->mutable_device_data(); int size = this->_inputData[0]->getCountByAxis(0); hipLaunchKernelGGL(( Forward), dim3(SOOOA_GET_BLOCKS(size)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0, inputData, size, outputData); } template <typename Dtype> void Sigmoid2Layer<Dtype>::backpropagation() { const Dtype* outputGrads = this->_outputData[0]->device_grad(); const Dtype* output = this->_outputData[0]->device_data(); Dtype* inputGrads = this->_inputData[0]->mutable_device_grad(); int size = this->_inputData[0]->getCountByAxis(0); hipLaunchKernelGGL(( Backward), dim3(SOOOA_GET_BLOCKS(size)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0, outputGrads, output, size, inputGrads); } template <typename Dtype> void Sigmoid2Layer<Dtype>::reshape() { if (!Layer<Dtype>::_adjustInputShape()) { const uint32_t count = Util::vecCountByAxis(this->_inputShape[0], 1); const uint32_t inputDataCount = this->_inputData[0]->getCountByAxis(1); assert(count == inputDataCount); } if (!Layer<Dtype>::_isInputShapeChanged(0)) return; const vector<uint32_t>& inputShape = this->_inputData[0]->getShape(); // XXX: FC // TODO: Conv Layer uint32_t batches = inputShape[0]; uint32_t channels = inputShape[1]; uint32_t rows = inputShape[2]; uint32_t cols = inputShape[3]; this->_inputShape[0] = {batches, channels, rows, cols}; this->_outputData[0]->reshape({batches, channels, rows, cols}); STDOUT_COND_LOG(SIGMOID2LAYER_LOG, "<%s> layer' input-0 has reshaped as: %dx%dx%dx%d\n", SLPROP_BASE(name).c_str(), batches, channels, rows, cols); STDOUT_COND_LOG(SIGMOID2LAYER_LOG, "<%s> layer' output-0 has reshaped as: %dx%dx%dx%d\n", SLPROP_BASE(name).c_str(), batches, channels, rows, cols); } /**************************************************************************** * layer callback functions ****************************************************************************/ template<typename Dtype> void* Sigmoid2Layer<Dtype>::initLayer() { Sigmoid2Layer* layer = NULL; SNEW(layer, Sigmoid2Layer<Dtype>); SASSUME0(layer != NULL); return (void*)layer; } template<typename Dtype> void Sigmoid2Layer<Dtype>::destroyLayer(void* instancePtr) { Sigmoid2Layer<Dtype>* layer = (Sigmoid2Layer<Dtype>*)instancePtr; SDELETE(layer); } template<typename Dtype> void 
Sigmoid2Layer<Dtype>::setInOutTensor(void* instancePtr, void* tensorPtr, bool isInput, int index) { SASSERT0(index == 0); Sigmoid2Layer<Dtype>* layer = (Sigmoid2Layer<Dtype>*)instancePtr; if (isInput) { SASSERT0(layer->_inputData.size() == 0); layer->_inputData.push_back((Data<Dtype>*)tensorPtr); } else { SASSERT0(layer->_outputData.size() == 0); layer->_outputData.push_back((Data<Dtype>*)tensorPtr); } } template<typename Dtype> bool Sigmoid2Layer<Dtype>::allocLayerTensors(void* instancePtr) { Sigmoid2Layer<Dtype>* layer = (Sigmoid2Layer<Dtype>*)instancePtr; layer->reshape(); return true; } template<typename Dtype> void Sigmoid2Layer<Dtype>::forwardTensor(void* instancePtr, int miniBatchIdx) { Sigmoid2Layer<Dtype>* layer = (Sigmoid2Layer<Dtype>*)instancePtr; layer->feedforward(); } template<typename Dtype> void Sigmoid2Layer<Dtype>::backwardTensor(void* instancePtr) { Sigmoid2Layer<Dtype>* layer = (Sigmoid2Layer<Dtype>*)instancePtr; layer->backpropagation(); } template<typename Dtype> void Sigmoid2Layer<Dtype>::learnTensor(void* instancePtr) { SASSERT0(false); } template<typename Dtype> bool Sigmoid2Layer<Dtype>::checkShape(vector<TensorShape> inputShape, vector<TensorShape> &outputShape) { if (inputShape.size() != 1) return false; TensorShape outputShape1 = inputShape[0]; outputShape.push_back(outputShape1); return true; } template<typename Dtype> uint64_t Sigmoid2Layer<Dtype>::calcGPUSize(vector<TensorShape> inputShape) { return 0UL; } template class Sigmoid2Layer<float>;
861bb860e6e06cfdbd2ef60742000f77bd3ebbb5.cu
/** * @file SigmoidLayer2_device.cu * @date 2017-02-07 * @author moonhoen lee * @brief * @details */ #include "cuda_runtime.h" #include "Sigmoid2Layer.h" #include "Network.h" #include "SysLog.h" #include "StdOutLog.h" #include "ColdLog.h" #include "Perf.h" #include "PropMgmt.h" #include "MemoryMgmt.h" #define SIGMOID2LAYER_LOG 1 using namespace std; /////////////////////////////////////////////////////////////////////////////////////////// // GPU Kernels template <typename Dtype> __global__ void Forward(const Dtype *input, int size, Dtype *output) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= size) return; output[idx] = 1.0 / (1.0 + exp((-1.0) * input[idx])); } template <typename Dtype> __global__ void Backward(const Dtype *outputGrad, const Dtype *output, int size, Dtype *inputGrad) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= size) return; inputGrad[idx] = outputGrad[idx] * output[idx] * (1.0 - output[idx]); } template <typename Dtype> Sigmoid2Layer<Dtype>::Sigmoid2Layer() : Layer<Dtype>() { this->type = Layer<Dtype>::Sigmoid2; } template <typename Dtype> void Sigmoid2Layer<Dtype>::feedforward() { const Dtype* inputData = this->_inputData[0]->device_data(); Dtype* outputData = this->_outputData[0]->mutable_device_data(); int size = this->_inputData[0]->getCountByAxis(0); Forward<<<SOOOA_GET_BLOCKS(size), SOOOA_CUDA_NUM_THREADS>>>( inputData, size, outputData); } template <typename Dtype> void Sigmoid2Layer<Dtype>::backpropagation() { const Dtype* outputGrads = this->_outputData[0]->device_grad(); const Dtype* output = this->_outputData[0]->device_data(); Dtype* inputGrads = this->_inputData[0]->mutable_device_grad(); int size = this->_inputData[0]->getCountByAxis(0); Backward<<<SOOOA_GET_BLOCKS(size), SOOOA_CUDA_NUM_THREADS>>>( outputGrads, output, size, inputGrads); } template <typename Dtype> void Sigmoid2Layer<Dtype>::reshape() { if (!Layer<Dtype>::_adjustInputShape()) { const uint32_t count = Util::vecCountByAxis(this->_inputShape[0], 1); const uint32_t inputDataCount = this->_inputData[0]->getCountByAxis(1); assert(count == inputDataCount); } if (!Layer<Dtype>::_isInputShapeChanged(0)) return; const vector<uint32_t>& inputShape = this->_inputData[0]->getShape(); // XXX: 현재 FC에 대해서만 생각하였음 // TODO: Conv Layer에 대한 구현 필요 uint32_t batches = inputShape[0]; uint32_t channels = inputShape[1]; uint32_t rows = inputShape[2]; uint32_t cols = inputShape[3]; this->_inputShape[0] = {batches, channels, rows, cols}; this->_outputData[0]->reshape({batches, channels, rows, cols}); STDOUT_COND_LOG(SIGMOID2LAYER_LOG, "<%s> layer' input-0 has reshaped as: %dx%dx%dx%d\n", SLPROP_BASE(name).c_str(), batches, channels, rows, cols); STDOUT_COND_LOG(SIGMOID2LAYER_LOG, "<%s> layer' output-0 has reshaped as: %dx%dx%dx%d\n", SLPROP_BASE(name).c_str(), batches, channels, rows, cols); } /**************************************************************************** * layer callback functions ****************************************************************************/ template<typename Dtype> void* Sigmoid2Layer<Dtype>::initLayer() { Sigmoid2Layer* layer = NULL; SNEW(layer, Sigmoid2Layer<Dtype>); SASSUME0(layer != NULL); return (void*)layer; } template<typename Dtype> void Sigmoid2Layer<Dtype>::destroyLayer(void* instancePtr) { Sigmoid2Layer<Dtype>* layer = (Sigmoid2Layer<Dtype>*)instancePtr; SDELETE(layer); } template<typename Dtype> void Sigmoid2Layer<Dtype>::setInOutTensor(void* instancePtr, void* tensorPtr, bool isInput, int index) { SASSERT0(index == 0); Sigmoid2Layer<Dtype>* 
layer = (Sigmoid2Layer<Dtype>*)instancePtr; if (isInput) { SASSERT0(layer->_inputData.size() == 0); layer->_inputData.push_back((Data<Dtype>*)tensorPtr); } else { SASSERT0(layer->_outputData.size() == 0); layer->_outputData.push_back((Data<Dtype>*)tensorPtr); } } template<typename Dtype> bool Sigmoid2Layer<Dtype>::allocLayerTensors(void* instancePtr) { Sigmoid2Layer<Dtype>* layer = (Sigmoid2Layer<Dtype>*)instancePtr; layer->reshape(); return true; } template<typename Dtype> void Sigmoid2Layer<Dtype>::forwardTensor(void* instancePtr, int miniBatchIdx) { Sigmoid2Layer<Dtype>* layer = (Sigmoid2Layer<Dtype>*)instancePtr; layer->feedforward(); } template<typename Dtype> void Sigmoid2Layer<Dtype>::backwardTensor(void* instancePtr) { Sigmoid2Layer<Dtype>* layer = (Sigmoid2Layer<Dtype>*)instancePtr; layer->backpropagation(); } template<typename Dtype> void Sigmoid2Layer<Dtype>::learnTensor(void* instancePtr) { SASSERT0(false); } template<typename Dtype> bool Sigmoid2Layer<Dtype>::checkShape(vector<TensorShape> inputShape, vector<TensorShape> &outputShape) { if (inputShape.size() != 1) return false; TensorShape outputShape1 = inputShape[0]; outputShape.push_back(outputShape1); return true; } template<typename Dtype> uint64_t Sigmoid2Layer<Dtype>::calcGPUSize(vector<TensorShape> inputShape) { return 0UL; } template class Sigmoid2Layer<float>;
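The Forward/Backward kernels in the Sigmoid2Layer pair above compute sigmoid(x) = 1 / (1 + exp(-x)) and back-propagate with the identity d sigmoid/dx = sigmoid(x) * (1 - sigmoid(x)). The short host program below is an illustrative finite-difference check of that identity; it is not part of the layer sources.

// Hypothetical host-side check of the math used by Forward() and Backward() above.
#include <cmath>
#include <cstdio>

int main() {
    const double xs[] = {-2.0, 0.0, 3.0};
    for (double x : xs) {
        double s  = 1.0 / (1.0 + std::exp(-x));       // sigmoid, as in Forward()
        double h  = 1e-6;
        double sn = 1.0 / (1.0 + std::exp(-(x + h)));
        double numeric  = (sn - s) / h;               // finite-difference derivative
        double analytic = s * (1.0 - s);              // identity used by Backward()
        std::printf("x=%+.1f  numeric=%.6f  analytic=%.6f\n", x, numeric, analytic);
    }
    return 0;
}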
d1a9a56fd1f5bdcd8fb939fce67e20729c1b45eb.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

#define NUM_THREADS 32

__global__ void manhattan_kernel(const float * vg_a, size_t pitch_a, size_t n_a,
                                 const float * vg_b, size_t pitch_b, size_t n_b,
                                 size_t k, float * d, size_t pitch_d, float p)
{
    size_t x = blockIdx.x;
    size_t y = blockIdx.y;

    // Compute only if this (x, y) element is within bounds
    if(x < n_a && y < n_b) {
        __shared__ float temp[NUM_THREADS];
        temp[threadIdx.x] = 0.0;
        for(size_t offset = threadIdx.x; offset < k; offset += blockDim.x) {
            float t = abs(vg_a[x * pitch_a + offset] - vg_b[y * pitch_b + offset]);
            temp[threadIdx.x] += t;
        }

        // Sync with other threads
        __syncthreads();

        // Reduce
        for(size_t stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
            if(threadIdx.x < stride) {
                temp[threadIdx.x] += temp[threadIdx.x + stride];
            }
            __syncthreads();
        }

        // Write to global memory
        if(threadIdx.x == 0) {
            d[y * pitch_d + x] = temp[0];
        }
    }
}
d1a9a56fd1f5bdcd8fb939fce67e20729c1b45eb.cu
#include "includes.h" #define NUM_THREADS 32 __global__ void manhattan_kernel(const float * vg_a, size_t pitch_a, size_t n_a, const float * vg_b, size_t pitch_b, size_t n_b, size_t k, float * d, size_t pitch_d, float p) { size_t x = blockIdx.x; size_t y = blockIdx.y; // If all element is to be computed if(x < n_a && y < n_b) { __shared__ float temp[NUM_THREADS]; temp[threadIdx.x] = 0.0; for(size_t offset = threadIdx.x; offset < k; offset += blockDim.x) { float t = abs(vg_a[x * pitch_a + offset] - vg_b[y * pitch_b + offset]); temp[threadIdx.x] += t; } // Sync with other threads __syncthreads(); // Reduce for(size_t stride = blockDim.x >> 1; stride > 0; stride >>= 1) { if(threadIdx.x < stride) { temp[threadIdx.x] += temp[threadIdx.x + stride]; } __syncthreads(); } // Write to global memory if(threadIdx.x == 0) { d[y * pitch_d + x] = temp[0]; } } }
34076e0263b3539e43d6919ee02dcc39a180a599.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <data_simulator.hpp> #include <diagnose.hpp> #include <random> #include <utils.cuh> namespace HugeCTR { template <> void UniformGenerator::fill<float>(float* ptr, size_t num_elements, float a, float b, size_t sm_count, const hiprandGenerator_t& generator, const hipStream_t& stream) { if (a >= b) { CK_THROW_(Error_t::WrongInput, "a must be smaller than b"); } CK_CURAND_THROW_(hiprandGenerateUniform(generator, ptr, num_elements)); auto op = [a, b] __device__(float val) { return val * (b - a) + a; }; hipLaunchKernelGGL(( transform_array), dim3(sm_count * 2), dim3(1024), 0, stream, ptr, ptr, num_elements, op); } template <> void UniformGenerator::fill<float>(Tensor2<float>& tensor, float a, float b, size_t sm_count, const hiprandGenerator_t& generator, const hipStream_t& stream) { UniformGenerator::fill<float>(tensor.get_ptr(), tensor.get_num_elements(), a, b, sm_count, generator, stream); } template <> void HostUniformGenerator::fill<float>(Tensor2<float>& tensor, float a, float b, const hiprandGenerator_t& generator) { if (a >= b) { CK_THROW_(Error_t::WrongInput, "a must be smaller than b"); } CK_CURAND_THROW_(hiprandGenerateUniform(generator, tensor.get_ptr(), tensor.get_num_elements() % 2 != 0 ? tensor.get_num_elements() + 1 : tensor.get_num_elements())); float* p = tensor.get_ptr(); for (size_t i = 0; i < tensor.get_num_elements(); i++) { p[i] = p[i] * (b - a) + a; } } template <> void NormalGenerator::fill<float>(Tensor2<float>& tensor, float mean, float stddev, size_t sm_count, const hiprandGenerator_t& generator, const hipStream_t& stream) { CK_CURAND_THROW_( hiprandGenerateNormal(generator, tensor.get_ptr(), tensor.get_num_elements(), mean, stddev)); } template <> void HostNormalGenerator::fill<float>(Tensor2<float>& tensor, float mean, float stddev, const hiprandGenerator_t& gen) { CK_CURAND_THROW_(hiprandGenerateNormal(gen, tensor.get_ptr(), tensor.get_num_elements() % 2 != 0 ? tensor.get_num_elements() + 1 : tensor.get_num_elements(), mean, stddev)); } void ConstantDataSimulator::fill(Tensor2<float>& tensor, const hiprandGenerator_t& gen) { float* p = tensor.get_ptr(); for (size_t i = 0; i < tensor.get_num_elements(); i++) { p[i] = value_; } } void UniformDataSimulator::fill(Tensor2<float>& tensor, const hiprandGenerator_t& gen) { HostUniformGenerator::fill(tensor, min_, max_, gen); } void GaussianDataSimulator::fill(Tensor2<float>& tensor, const hiprandGenerator_t& gen) { HostNormalGenerator::fill(tensor, mu_, sigma_, gen); } } // namespace HugeCTR
34076e0263b3539e43d6919ee02dcc39a180a599.cu
/* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <data_simulator.hpp> #include <diagnose.hpp> #include <random> #include <utils.cuh> namespace HugeCTR { template <> void UniformGenerator::fill<float>(float* ptr, size_t num_elements, float a, float b, size_t sm_count, const curandGenerator_t& generator, const cudaStream_t& stream) { if (a >= b) { CK_THROW_(Error_t::WrongInput, "a must be smaller than b"); } CK_CURAND_THROW_(curandGenerateUniform(generator, ptr, num_elements)); auto op = [a, b] __device__(float val) { return val * (b - a) + a; }; transform_array<<<sm_count * 2, 1024, 0, stream>>>(ptr, ptr, num_elements, op); } template <> void UniformGenerator::fill<float>(Tensor2<float>& tensor, float a, float b, size_t sm_count, const curandGenerator_t& generator, const cudaStream_t& stream) { UniformGenerator::fill<float>(tensor.get_ptr(), tensor.get_num_elements(), a, b, sm_count, generator, stream); } template <> void HostUniformGenerator::fill<float>(Tensor2<float>& tensor, float a, float b, const curandGenerator_t& generator) { if (a >= b) { CK_THROW_(Error_t::WrongInput, "a must be smaller than b"); } CK_CURAND_THROW_(curandGenerateUniform(generator, tensor.get_ptr(), tensor.get_num_elements() % 2 != 0 ? tensor.get_num_elements() + 1 : tensor.get_num_elements())); float* p = tensor.get_ptr(); for (size_t i = 0; i < tensor.get_num_elements(); i++) { p[i] = p[i] * (b - a) + a; } } template <> void NormalGenerator::fill<float>(Tensor2<float>& tensor, float mean, float stddev, size_t sm_count, const curandGenerator_t& generator, const cudaStream_t& stream) { CK_CURAND_THROW_( curandGenerateNormal(generator, tensor.get_ptr(), tensor.get_num_elements(), mean, stddev)); } template <> void HostNormalGenerator::fill<float>(Tensor2<float>& tensor, float mean, float stddev, const curandGenerator_t& gen) { CK_CURAND_THROW_(curandGenerateNormal(gen, tensor.get_ptr(), tensor.get_num_elements() % 2 != 0 ? tensor.get_num_elements() + 1 : tensor.get_num_elements(), mean, stddev)); } void ConstantDataSimulator::fill(Tensor2<float>& tensor, const curandGenerator_t& gen) { float* p = tensor.get_ptr(); for (size_t i = 0; i < tensor.get_num_elements(); i++) { p[i] = value_; } } void UniformDataSimulator::fill(Tensor2<float>& tensor, const curandGenerator_t& gen) { HostUniformGenerator::fill(tensor, min_, max_, gen); } void GaussianDataSimulator::fill(Tensor2<float>& tensor, const curandGenerator_t& gen) { HostNormalGenerator::fill(tensor, mu_, sigma_, gen); } } // namespace HugeCTR
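The generators above wrap cuRAND: device fills call curandGenerateUniform/curandGenerateNormal and then apply an affine transform, and the host variants pad odd element counts to even, which matches cuRAND's requirement that normal generation produce an even number of values. The fragment below is a minimal standalone sketch of the uniform-fill pattern without HugeCTR's Tensor2 and CK_* wrappers; the launch configuration and names are illustrative, and it links against -lcurand.

// Hypothetical standalone version of the "uniform in [a, b)" fill pattern above.
#include <cuda_runtime.h>
#include <curand.h>

__global__ void scale_shift(float* p, size_t n, float a, float b) {
    size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) p[i] = p[i] * (b - a) + a;   // map U(0, 1] samples into the target interval
}

void fill_uniform(float* d_ptr, size_t n, float a, float b, cudaStream_t stream) {
    curandGenerator_t gen;
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
    curandSetStream(gen, stream);
    curandGenerateUniform(gen, d_ptr, n);                    // raw U(0, 1] samples
    scale_shift<<<(unsigned)((n + 255) / 256), 256, 0, stream>>>(d_ptr, n, a, b);
    curandDestroyGenerator(gen);
}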
3242474f446e66f17f139368d873f2a2f240183f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <sys/time.h> // Matrices are stored in row-major order: // M(row, col) = *(M.elements + row * M.width + col) typedef struct { int width; int height; double* elements; } Matrix; typedef struct gv_t { int N; //N*N matrix int loop; int block_size; // Thread block size }* GV; // Forward declaration of the matrix multiplication kernel __global__ void MatMulKernel(const Matrix, const Matrix, Matrix); // Matrix multiplication - Host code // Matrix dimensions are assumed to be multiples of BLOCK_SIZE double MatMul(GV gv, const Matrix A, const Matrix B, Matrix C) { float milliseconds = 0; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // Load A and B to device memory Matrix d_A; d_A.width = A.width; d_A.height = A.height; size_t size = A.width * A.height * sizeof(double); hipMalloc(&d_A.elements, size); hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice); Matrix d_B; d_B.width = B.width; d_B.height = B.height; size = B.width * B.height * sizeof(double); hipMalloc(&d_B.elements, size); hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice); // Allocate C in device memory Matrix d_C; d_C.width = C.width; d_C.height = C.height; size = C.width * C.height * sizeof(double); hipMalloc(&d_C.elements, size); // Invoke kernel dim3 dimBlock(gv->block_size, gv->block_size); dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y); hipEventRecord(start); hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C); hipEventRecord(stop); // Read C from device memory hipMemcpy(C.elements, d_C.elements, size, hipMemcpyDeviceToHost); hipEventSynchronize(stop); // Free device memory hipFree(d_A.elements); hipFree(d_B.elements); hipFree(d_C.elements); hipEventElapsedTime(&milliseconds, start, stop); // printf("Execution time=%f ms, Gflops=%f\n", milliseconds, 2.0*pow(gv->N,3)/milliseconds/1e6); // fflush(stdout); return milliseconds; } // Matrix multiplication kernel called by MatMul() __global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) { // Each thread computes one element of C // by accumulating results into Cvalue double Cvalue = 0; int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; for (int e = 0; e < A.width; ++e) Cvalue += A.elements[row * A.width + e] * B.elements[e * B.width + col]; C.elements[row * C.width + col] = Cvalue; } void init(double* A, int n){ for(int i=0;i<n*n;i++) A[i]=(rand()%10000+1)/1000.0; } /*init every element to 0*/ void init_0(double* C, int n){ for(int i=0;i<n*n;i++) C[i]=0; } /*init every element to 1*/ void init_1(double* C, int n){ for(int i=0;i<n*n;i++) C[i]=1; } /*verify the calculation whether equal to verify matrix*/ void verification(GV gv, double *C, int n){ int i; for(i=0;i<n*n;i++){ if(C[i] != gv->N){ printf("i=%d, C[i]=%f Get Wrong result!\n",i,C[i]); fflush(stdout); } } } void check_malloc(void * pointer){ if (pointer == NULL) { perror("Malloc error!\n"); fprintf (stderr, "at %s, line %d.\n", __FILE__, __LINE__); exit(1); } } int main(int argc, char** argv) { Matrix A, B, C; double elapsed_time; time_t t; int i; GV gv; // double *B; // double a_p[] = {1,5,9,13,2,6,10,14,3,7,11,15,4,8,12,16}; // double b_p[] = {16,12,8,4,15,11,7,3,14,10,6,2,13,9,5,1}; if(argc != 4) { fprintf(stderr, "Usage: %s real_matrix_size, thread block_size\n", argv[0]); exit(1); } gv = (GV) malloc(sizeof(*gv)); gv->N = atoi(argv[1]); 
gv->block_size = atoi(argv[2]); gv->loop = atoi(argv[3]); printf("N=%d,block_size=%d,loop=%d\n", gv->N,gv->block_size,gv->loop); /* Intializes random number generator */ srand((unsigned) time(&t)); /*Initialise matrix A, B and verify matrix*/ //printf("Initialise A B matrix\n"); // printf("-----------------------------\n"); // fflush(stdout); A.width = gv->N; A.height = gv->N; A.elements = (double *)malloc(sizeof(double)*(gv->N*gv->N)); check_malloc(A.elements); B.width = gv->N; B.height = gv->N; B.elements = (double *)malloc(sizeof(double)*(gv->N*gv->N)); check_malloc(B.elements); C.width = gv->N; C.height = gv->N; C.elements = (double *)malloc(sizeof(double)*(gv->N*gv->N)); check_malloc(C.elements); // verify 1 init_1(A.elements,gv->N); init_1(B.elements,gv->N); init_0(C.elements,gv->N); MatMul(gv,A,B,C); verification(gv,C.elements,gv->N); printf("Pass VERIFY 1\n"); fflush(stdout); // start calculation init(A.elements,gv->N); init(B.elements,gv->N); elapsed_time=0; for (i = 0; i < gv->loop; ++i){ init_0(C.elements,gv->N); elapsed_time += MatMul(gv,A,B,C); } elapsed_time = elapsed_time/gv->loop; printf("CALCULATION: Naive GPU average elapsed time: %f seconds, Gflops=%f\n", elapsed_time/1e3, 2.0*pow(gv->N,3)/elapsed_time/1e6); free(A.elements); free(B.elements); free(C.elements); free(gv); return 0; }
3242474f446e66f17f139368d873f2a2f240183f.cu
#include <stdio.h> #include <stdlib.h> #include <sys/time.h> // Matrices are stored in row-major order: // M(row, col) = *(M.elements + row * M.width + col) typedef struct { int width; int height; double* elements; } Matrix; typedef struct gv_t { int N; //N*N matrix int loop; int block_size; // Thread block size }* GV; // Forward declaration of the matrix multiplication kernel __global__ void MatMulKernel(const Matrix, const Matrix, Matrix); // Matrix multiplication - Host code // Matrix dimensions are assumed to be multiples of BLOCK_SIZE double MatMul(GV gv, const Matrix A, const Matrix B, Matrix C) { float milliseconds = 0; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // Load A and B to device memory Matrix d_A; d_A.width = A.width; d_A.height = A.height; size_t size = A.width * A.height * sizeof(double); cudaMalloc(&d_A.elements, size); cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice); Matrix d_B; d_B.width = B.width; d_B.height = B.height; size = B.width * B.height * sizeof(double); cudaMalloc(&d_B.elements, size); cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice); // Allocate C in device memory Matrix d_C; d_C.width = C.width; d_C.height = C.height; size = C.width * C.height * sizeof(double); cudaMalloc(&d_C.elements, size); // Invoke kernel dim3 dimBlock(gv->block_size, gv->block_size); dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y); cudaEventRecord(start); MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C); cudaEventRecord(stop); // Read C from device memory cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost); cudaEventSynchronize(stop); // Free device memory cudaFree(d_A.elements); cudaFree(d_B.elements); cudaFree(d_C.elements); cudaEventElapsedTime(&milliseconds, start, stop); // printf("Execution time=%f ms, Gflops=%f\n", milliseconds, 2.0*pow(gv->N,3)/milliseconds/1e6); // fflush(stdout); return milliseconds; } // Matrix multiplication kernel called by MatMul() __global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) { // Each thread computes one element of C // by accumulating results into Cvalue double Cvalue = 0; int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; for (int e = 0; e < A.width; ++e) Cvalue += A.elements[row * A.width + e] * B.elements[e * B.width + col]; C.elements[row * C.width + col] = Cvalue; } void init(double* A, int n){ for(int i=0;i<n*n;i++) A[i]=(rand()%10000+1)/1000.0; } /*init every element to 0*/ void init_0(double* C, int n){ for(int i=0;i<n*n;i++) C[i]=0; } /*init every element to 1*/ void init_1(double* C, int n){ for(int i=0;i<n*n;i++) C[i]=1; } /*verify the calculation whether equal to verify matrix*/ void verification(GV gv, double *C, int n){ int i; for(i=0;i<n*n;i++){ if(C[i] != gv->N){ printf("i=%d, C[i]=%f Get Wrong result!\n",i,C[i]); fflush(stdout); } } } void check_malloc(void * pointer){ if (pointer == NULL) { perror("Malloc error!\n"); fprintf (stderr, "at %s, line %d.\n", __FILE__, __LINE__); exit(1); } } int main(int argc, char** argv) { Matrix A, B, C; double elapsed_time; time_t t; int i; GV gv; // double *B; // double a_p[] = {1,5,9,13,2,6,10,14,3,7,11,15,4,8,12,16}; // double b_p[] = {16,12,8,4,15,11,7,3,14,10,6,2,13,9,5,1}; if(argc != 4) { fprintf(stderr, "Usage: %s real_matrix_size, thread block_size\n", argv[0]); exit(1); } gv = (GV) malloc(sizeof(*gv)); gv->N = atoi(argv[1]); gv->block_size = atoi(argv[2]); gv->loop = atoi(argv[3]); printf("N=%d,block_size=%d,loop=%d\n", 
gv->N,gv->block_size,gv->loop); /* Intializes random number generator */ srand((unsigned) time(&t)); /*Initialise matrix A, B and verify matrix*/ //printf("Initialise A B matrix\n"); // printf("-----------------------------\n"); // fflush(stdout); A.width = gv->N; A.height = gv->N; A.elements = (double *)malloc(sizeof(double)*(gv->N*gv->N)); check_malloc(A.elements); B.width = gv->N; B.height = gv->N; B.elements = (double *)malloc(sizeof(double)*(gv->N*gv->N)); check_malloc(B.elements); C.width = gv->N; C.height = gv->N; C.elements = (double *)malloc(sizeof(double)*(gv->N*gv->N)); check_malloc(C.elements); // verify 1 init_1(A.elements,gv->N); init_1(B.elements,gv->N); init_0(C.elements,gv->N); MatMul(gv,A,B,C); verification(gv,C.elements,gv->N); printf("Pass VERIFY 1\n"); fflush(stdout); // start calculation init(A.elements,gv->N); init(B.elements,gv->N); elapsed_time=0; for (i = 0; i < gv->loop; ++i){ init_0(C.elements,gv->N); elapsed_time += MatMul(gv,A,B,C); } elapsed_time = elapsed_time/gv->loop; printf("CALCULATION: Naive GPU average elapsed time: %f seconds, Gflops=%f\n", elapsed_time/1e3, 2.0*pow(gv->N,3)/elapsed_time/1e6); free(A.elements); free(B.elements); free(C.elements); free(gv); return 0; }
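The MatMulKernel above is the naive version that the program's own output labels "Naive GPU": every thread re-reads a full row of A and column of B from global memory. For contrast, the kernel below is a hypothetical shared-memory tiled variant of the same row-major multiply; like the original it assumes the matrix dimension is a multiple of the tile size, and it is not part of the benchmark file.

// Hypothetical tiled variant of MatMulKernel, shown only for comparison.
#define TILE 16

__global__ void MatMulTiledKernel(const double* A, const double* B, double* C, int n) {
    __shared__ double As[TILE][TILE];
    __shared__ double Bs[TILE][TILE];

    int row = blockIdx.y * TILE + threadIdx.y;
    int col = blockIdx.x * TILE + threadIdx.x;
    double acc = 0.0;

    for (int t = 0; t < n / TILE; ++t) {
        As[threadIdx.y][threadIdx.x] = A[row * n + t * TILE + threadIdx.x];
        Bs[threadIdx.y][threadIdx.x] = B[(t * TILE + threadIdx.y) * n + col];
        __syncthreads();                       // tile fully loaded into shared memory
        for (int e = 0; e < TILE; ++e)
            acc += As[threadIdx.y][e] * Bs[e][threadIdx.x];
        __syncthreads();                       // all threads done reading this tile
    }
    C[row * n + col] = acc;
}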
85892878237863b0704a1710104ce04d3e48bac1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @generated from sparse/blas/zmergecgs.cu, normal z -> s, Wed Jan 2 14:18:53 2019 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 #define PRECISION_s // These routines merge multiple kernels from scgs into one. /* -------------------------------------------------------------------------- */ __global__ void magma_scgs_1_kernel( int num_rows, int num_cols, float beta, float *r, float *q, float *u, float *p ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ float tmp; tmp = r[ i+j*num_rows ] + beta * q[ i+j*num_rows ]; p[ i+j*num_rows ] = tmp + beta * q[ i+j*num_rows ] + beta * beta * p[ i+j*num_rows ]; u[ i+j*num_rows ] = tmp; } } } /** Purpose ------- Mergels multiple operations into one kernel: u = r + beta q p = u + beta*(q + beta*p) Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] beta float scalar @param[in] r magmaFloat_ptr vector @param[in] q magmaFloat_ptr vector @param[in,out] u magmaFloat_ptr vector @param[in,out] p magmaFloat_ptr vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_sgegpuk ********************************************************************/ extern "C" magma_int_t magma_scgs_1( magma_int_t num_rows, magma_int_t num_cols, float beta, magmaFloat_ptr r, magmaFloat_ptr q, magmaFloat_ptr u, magmaFloat_ptr p, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_scgs_1_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, beta, r, q, u, p ); return MAGMA_SUCCESS; } __global__ void magma_scgs_2_kernel( int num_rows, int num_cols, float *r, float *u, float *p ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ float tmp; tmp = r[ i+j*num_rows ]; u[ i+j*num_rows ] = tmp; p[ i+j*num_rows ] = tmp; } } } /** Purpose ------- Mergels multiple operations into one kernel: u = r p = r Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] r magmaFloat_ptr vector @param[in,out] u magmaFloat_ptr vector @param[in,out] p magmaFloat_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_sgegpuk ********************************************************************/ extern "C" magma_int_t magma_scgs_2( magma_int_t num_rows, magma_int_t num_cols, magmaFloat_ptr r, magmaFloat_ptr u, magmaFloat_ptr p, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_scgs_2_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, r, u, p); return MAGMA_SUCCESS; } __global__ void magma_scgs_3_kernel( int num_rows, int num_cols, float alpha, float *v_hat, float *u, float *q, float *t ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ float uloc, tmp; uloc = u[ i+j*num_rows ]; tmp = uloc - alpha * v_hat[ i+j*num_rows ]; t[ i+j*num_rows ] = tmp + uloc; q[ i+j*num_rows ] = tmp; } } } /** Purpose ------- Mergels multiple operations into one kernel: q = u - alpha v_hat t = u + q Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] alpha float scalar @param[in] v_hat magmaFloat_ptr vector @param[in] u magmaFloat_ptr vector @param[in,out] q magmaFloat_ptr vector @param[in,out] t magmaFloat_ptr vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_sgegpuk ********************************************************************/ extern "C" magma_int_t magma_scgs_3( magma_int_t num_rows, magma_int_t num_cols, float alpha, magmaFloat_ptr v_hat, magmaFloat_ptr u, magmaFloat_ptr q, magmaFloat_ptr t, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_scgs_3_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, alpha, v_hat, u, q, t ); return MAGMA_SUCCESS; } __global__ void magma_scgs_4_kernel( int num_rows, int num_cols, float alpha, float *u_hat, float *t, float *x, float *r ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ x[ i+j*num_rows ] = x[ i+j*num_rows ] + alpha * u_hat[ i+j*num_rows ]; r[ i+j*num_rows ] = r[ i+j*num_rows ] - alpha * t[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: x = x + alpha u_hat r = r -alpha*A u_hat = r -alpha*t Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] alpha float scalar @param[in] u_hat magmaFloat_ptr vector @param[in] t magmaFloat_ptr vector @param[in,out] x magmaFloat_ptr vector @param[in,out] r magmaFloat_ptr vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_sgegpuk ********************************************************************/ extern "C" magma_int_t magma_scgs_4( magma_int_t num_rows, magma_int_t num_cols, float alpha, magmaFloat_ptr u_hat, magmaFloat_ptr t, magmaFloat_ptr x, magmaFloat_ptr r, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_scgs_4_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, alpha, u_hat, t, x, r ); return MAGMA_SUCCESS; }
85892878237863b0704a1710104ce04d3e48bac1.cu
/* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @generated from sparse/blas/zmergecgs.cu, normal z -> s, Wed Jan 2 14:18:53 2019 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 #define PRECISION_s // These routines merge multiple kernels from scgs into one. /* -------------------------------------------------------------------------- */ __global__ void magma_scgs_1_kernel( int num_rows, int num_cols, float beta, float *r, float *q, float *u, float *p ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ float tmp; tmp = r[ i+j*num_rows ] + beta * q[ i+j*num_rows ]; p[ i+j*num_rows ] = tmp + beta * q[ i+j*num_rows ] + beta * beta * p[ i+j*num_rows ]; u[ i+j*num_rows ] = tmp; } } } /** Purpose ------- Mergels multiple operations into one kernel: u = r + beta q p = u + beta*(q + beta*p) Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] beta float scalar @param[in] r magmaFloat_ptr vector @param[in] q magmaFloat_ptr vector @param[in,out] u magmaFloat_ptr vector @param[in,out] p magmaFloat_ptr vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_sgegpuk ********************************************************************/ extern "C" magma_int_t magma_scgs_1( magma_int_t num_rows, magma_int_t num_cols, float beta, magmaFloat_ptr r, magmaFloat_ptr q, magmaFloat_ptr u, magmaFloat_ptr p, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_scgs_1_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, beta, r, q, u, p ); return MAGMA_SUCCESS; } __global__ void magma_scgs_2_kernel( int num_rows, int num_cols, float *r, float *u, float *p ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ float tmp; tmp = r[ i+j*num_rows ]; u[ i+j*num_rows ] = tmp; p[ i+j*num_rows ] = tmp; } } } /** Purpose ------- Mergels multiple operations into one kernel: u = r p = r Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] r magmaFloat_ptr vector @param[in,out] u magmaFloat_ptr vector @param[in,out] p magmaFloat_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_sgegpuk ********************************************************************/ extern "C" magma_int_t magma_scgs_2( magma_int_t num_rows, magma_int_t num_cols, magmaFloat_ptr r, magmaFloat_ptr u, magmaFloat_ptr p, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_scgs_2_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, r, u, p); return MAGMA_SUCCESS; } __global__ void magma_scgs_3_kernel( int num_rows, int num_cols, float alpha, float *v_hat, float *u, float *q, float *t ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ float uloc, tmp; uloc = u[ i+j*num_rows ]; tmp = uloc - alpha * v_hat[ i+j*num_rows ]; t[ i+j*num_rows ] = tmp + uloc; q[ i+j*num_rows ] = tmp; } } } /** Purpose ------- Mergels multiple operations into one kernel: q = u - alpha v_hat t = u + q Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] alpha float scalar @param[in] v_hat magmaFloat_ptr vector @param[in] u magmaFloat_ptr vector @param[in,out] q magmaFloat_ptr vector @param[in,out] t magmaFloat_ptr vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_sgegpuk ********************************************************************/ extern "C" magma_int_t magma_scgs_3( magma_int_t num_rows, magma_int_t num_cols, float alpha, magmaFloat_ptr v_hat, magmaFloat_ptr u, magmaFloat_ptr q, magmaFloat_ptr t, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_scgs_3_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, alpha, v_hat, u, q, t ); return MAGMA_SUCCESS; } __global__ void magma_scgs_4_kernel( int num_rows, int num_cols, float alpha, float *u_hat, float *t, float *x, float *r ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ x[ i+j*num_rows ] = x[ i+j*num_rows ] + alpha * u_hat[ i+j*num_rows ]; r[ i+j*num_rows ] = r[ i+j*num_rows ] - alpha * t[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: x = x + alpha u_hat r = r -alpha*A u_hat = r -alpha*t Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] alpha float scalar @param[in] u_hat magmaFloat_ptr vector @param[in] t magmaFloat_ptr vector @param[in,out] x magmaFloat_ptr vector @param[in,out] r magmaFloat_ptr vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_sgegpuk ********************************************************************/ extern "C" magma_int_t magma_scgs_4( magma_int_t num_rows, magma_int_t num_cols, float alpha, magmaFloat_ptr u_hat, magmaFloat_ptr t, magmaFloat_ptr x, magmaFloat_ptr r, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_scgs_4_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, alpha, u_hat, t, x, r ); return MAGMA_SUCCESS; }
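The four wrappers above all map one thread to one row and loop over num_cols inside the thread, so the only launch parameter is BLOCK_SIZE. Below is a minimal host-side sketch of driving magma_scgs_1; it is not taken from MAGMA's test suite, the header names, the 1024-row size, and the beta value are assumptions, and filling the vectors is omitted since this file only defines the fused kernels.

#include "magma_v2.h"
#include "magmasparse.h"

int main( void )
{
    magma_init();

    magma_device_t dev;
    magma_queue_t queue;
    magma_getdevice( &dev );
    magma_queue_create( dev, &queue );

    // Hypothetical sizes; num_cols > 1 would process several right-hand sides at once.
    magma_int_t num_rows = 1024, num_cols = 1;
    float beta = 0.5f;

    magmaFloat_ptr d_r, d_q, d_u, d_p;
    magma_smalloc( &d_r, num_rows*num_cols );
    magma_smalloc( &d_q, num_rows*num_cols );
    magma_smalloc( &d_u, num_rows*num_cols );
    magma_smalloc( &d_p, num_rows*num_cols );
    // ... fill r, q, u, p on the device (omitted) ...

    // One fused launch: u = r + beta*q and p = u + beta*(q + beta*p).
    magma_scgs_1( num_rows, num_cols, beta, d_r, d_q, d_u, d_p, queue );

    magma_free( d_r );  magma_free( d_q );
    magma_free( d_u );  magma_free( d_p );
    magma_queue_destroy( queue );
    magma_finalize();
    return 0;
}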
6e53f6461307fe01fbd4bb72ff0f74eab58525c9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "thrust/device_vector.h" #include "../filler.hpp" #include "./normalize_layer.hpp" #include "../util/math_functions.hpp" namespace caffe { // divid a matrix with vector template <typename Dtype> __global__ void DivBsx(const int nthreads, const Dtype* A, const Dtype* v, const int rows, const int cols, const CBLAS_TRANSPOSE trans, Dtype* B) { CUDA_KERNEL_LOOP(index, nthreads) { int c = index % cols; int r = (index / cols) % rows; if (trans == CblasNoTrans) { B[index] = A[index] / v[c]; } else { B[index] = A[index] / v[r]; } } } template <typename Dtype> __global__ void MulBsx(const int nthreads, const Dtype* A, const Dtype* v, const int rows, const int cols, const CBLAS_TRANSPOSE trans, Dtype* B) { CUDA_KERNEL_LOOP(index, nthreads) { int c = index % cols; int r = (index / cols) % rows; if (trans == CblasNoTrans) { B[index] = A[index] * v[c]; } else { B[index] = A[index] * v[r]; } } } void NormalizeLayer::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top) { const real_t* bottom_data = bottom[0]->gpu_data(); real_t* top_data = top[0]->mutable_gpu_data(); real_t* buffer_data = buffer_.mutable_gpu_data(); real_t* norm_data; if (across_spatial_) { // need to index it norm_data = norm_.mutable_cpu_data(); } else { norm_data = norm_.mutable_gpu_data(); // add eps to avoid overflow caffe_gpu_set(norm_.count(), real_t(eps_), norm_data); } const real_t* scale; if (channel_shared_) { scale = this->blobs_[0]->cpu_data(); } else { scale = this->blobs_[0]->gpu_data(); } const real_t* sum_channel_multiplier = sum_channel_multiplier_.gpu_data(); int num = bottom[0]->num(); int dim = bottom[0]->count() / num; int spatial_dim = bottom[0]->height() * bottom[0]->width(); int channels = bottom[0]->channels(); for (int n = 0; n < num; ++n) { caffe_gpu_powx(dim, bottom_data, real_t(2), buffer_data); if (across_spatial_) { real_t normsqr; caffe_gpu_asum(dim, buffer_data, &normsqr); // add eps to avoid overflow norm_data[n] = pow(normsqr+eps_, real_t(0.5)); caffe_gpu_scale(dim, real_t(1.0 / norm_data[n]), bottom_data, top_data); } else { // compute norm caffe_gpu_gemv(CblasTrans, channels, spatial_dim, real_t(1), buffer_data, sum_channel_multiplier, real_t(1), norm_data); caffe_gpu_powx(spatial_dim, norm_data, real_t(0.5), norm_data); // scale the layer // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( DivBsx), dim3(CAFFE_GET_BLOCKS(dim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, dim, bottom_data, norm_data, channels, spatial_dim, CblasNoTrans, top_data); CUDA_POST_KERNEL_CHECK; norm_data += spatial_dim; } // scale the output if (channel_shared_) { caffe_gpu_scal(dim, scale[0], top_data); } else { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MulBsx), dim3(CAFFE_GET_BLOCKS(dim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, dim, top_data, scale, channels, spatial_dim, CblasTrans, top_data); CUDA_POST_KERNEL_CHECK; } bottom_data += dim; top_data += dim; } } } // namespace caffe
6e53f6461307fe01fbd4bb72ff0f74eab58525c9.cu
#include <algorithm> #include <cfloat> #include <vector> #include "thrust/device_vector.h" #include "../filler.hpp" #include "./normalize_layer.hpp" #include "../util/math_functions.hpp" namespace caffe { // divid a matrix with vector template <typename Dtype> __global__ void DivBsx(const int nthreads, const Dtype* A, const Dtype* v, const int rows, const int cols, const CBLAS_TRANSPOSE trans, Dtype* B) { CUDA_KERNEL_LOOP(index, nthreads) { int c = index % cols; int r = (index / cols) % rows; if (trans == CblasNoTrans) { B[index] = A[index] / v[c]; } else { B[index] = A[index] / v[r]; } } } template <typename Dtype> __global__ void MulBsx(const int nthreads, const Dtype* A, const Dtype* v, const int rows, const int cols, const CBLAS_TRANSPOSE trans, Dtype* B) { CUDA_KERNEL_LOOP(index, nthreads) { int c = index % cols; int r = (index / cols) % rows; if (trans == CblasNoTrans) { B[index] = A[index] * v[c]; } else { B[index] = A[index] * v[r]; } } } void NormalizeLayer::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top) { const real_t* bottom_data = bottom[0]->gpu_data(); real_t* top_data = top[0]->mutable_gpu_data(); real_t* buffer_data = buffer_.mutable_gpu_data(); real_t* norm_data; if (across_spatial_) { // need to index it norm_data = norm_.mutable_cpu_data(); } else { norm_data = norm_.mutable_gpu_data(); // add eps to avoid overflow caffe_gpu_set(norm_.count(), real_t(eps_), norm_data); } const real_t* scale; if (channel_shared_) { scale = this->blobs_[0]->cpu_data(); } else { scale = this->blobs_[0]->gpu_data(); } const real_t* sum_channel_multiplier = sum_channel_multiplier_.gpu_data(); int num = bottom[0]->num(); int dim = bottom[0]->count() / num; int spatial_dim = bottom[0]->height() * bottom[0]->width(); int channels = bottom[0]->channels(); for (int n = 0; n < num; ++n) { caffe_gpu_powx(dim, bottom_data, real_t(2), buffer_data); if (across_spatial_) { real_t normsqr; caffe_gpu_asum(dim, buffer_data, &normsqr); // add eps to avoid overflow norm_data[n] = pow(normsqr+eps_, real_t(0.5)); caffe_gpu_scale(dim, real_t(1.0 / norm_data[n]), bottom_data, top_data); } else { // compute norm caffe_gpu_gemv(CblasTrans, channels, spatial_dim, real_t(1), buffer_data, sum_channel_multiplier, real_t(1), norm_data); caffe_gpu_powx(spatial_dim, norm_data, real_t(0.5), norm_data); // scale the layer // NOLINT_NEXT_LINE(whitespace/operators) DivBsx<<<CAFFE_GET_BLOCKS(dim), CAFFE_CUDA_NUM_THREADS>>>( dim, bottom_data, norm_data, channels, spatial_dim, CblasNoTrans, top_data); CUDA_POST_KERNEL_CHECK; norm_data += spatial_dim; } // scale the output if (channel_shared_) { caffe_gpu_scal(dim, scale[0], top_data); } else { // NOLINT_NEXT_LINE(whitespace/operators) MulBsx<<<CAFFE_GET_BLOCKS(dim), CAFFE_CUDA_NUM_THREADS>>>( dim, top_data, scale, channels, spatial_dim, CblasTrans, top_data); CUDA_POST_KERNEL_CHECK; } bottom_data += dim; top_data += dim; } } } // namespace caffe
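DivBsx and MulBsx implement a broadcast divide/multiply over a row-major rows x cols matrix: with CblasNoTrans the vector is indexed by column, with CblasTrans by row. That is how Forward_gpu divides each channels x spatial_dim block by the per-pixel norm (by column) and then multiplies by the per-channel scale (by row). A hedged host-side reference of that convention, plain C++ and not part of the layer:

#include <cstddef>
#include <vector>

enum class Bcast { ByColumn, ByRow };   // stands in for CblasNoTrans / CblasTrans

// B[r][c] = A[r][c] / v[c or r], matching DivBsx for a row-major rows x cols matrix.
std::vector<float> div_bsx_ref(const std::vector<float>& A, const std::vector<float>& v,
                               std::size_t rows, std::size_t cols, Bcast mode)
{
    std::vector<float> B(rows * cols);
    for (std::size_t r = 0; r < rows; ++r)
        for (std::size_t c = 0; c < cols; ++c)
            B[r * cols + c] = A[r * cols + c] / (mode == Bcast::ByColumn ? v[c] : v[r]);
    return B;
}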
b1fffb16413f732e3da84968df7047e5a496eab1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2011 Kerem KAT // // http://dissipatedheat.com/ // Do not hesisate to contact me about usage of the code or to make comments // about the code. Your feedback will be appreciated. // keremkat<@>gmail<.>com // // Kodun kullanımı hakkında veya yorum yapmak için benimle iletişim kurmaktan // çekinmeyiniz. Geri bildirimleriniz değerlendirilecektir. // keremkat<@>gmail<.>com // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to // deal in the Software without restriction, including without limitation the // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or // sell copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. #include "tileFlip.h" /** \file tileFlip.cu CUDA tile flip kernelinin launcher metodunu ve kernelini tanımlar. */ /** Kernel 1 griddeki blok boyutu ( BLOCK_SIZE x BLOCK_SIZE kare bloklar ). */ #define BLOCK_SIZE (32) /** GPU zamanını ölçmek için 1 yapınız. */ #define ENABLE_TIMING_CODE 1 /** Görüntüyü blok blok çeviren kernel. \param image [0, 1] aralığına normalize edilmiş, BGR kanal sıralı görüntünün GPU belleğindeki adresi. \param width Görüntünün piksel olarak genişliği \param height Görüntünün piksel olarak yüksekliği Metod GPU üzerinde çalışır, çıktısını image parametresinin üzerine yazar. */__global__ void gpuTileFlip( float* image, int width, int height ) { int row = blockIdx.y * BLOCK_SIZE + threadIdx.y; // satır No. int col = blockIdx.x * BLOCK_SIZE + threadIdx.x; // sütun No. int cIdx = ( row * width + col ) * 3; // 3 ile çarpım RGB için, linearIndex. /* *( image + linearIndex ): Blue, in [0, 1] *( image + linearIndex + 1 ): Green, in [0, 1] *( image + linearIndex + 2 ): Red, in [0, 1] */ __shared__ float smBlockB[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float smBlockG[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float smBlockR[BLOCK_SIZE][BLOCK_SIZE]; smBlockB[threadIdx.x][threadIdx.y] = image[ cIdx ]; smBlockG[threadIdx.x][threadIdx.y] = image[ cIdx + 1 ]; smBlockR[threadIdx.x][threadIdx.y] = image[ cIdx + 2 ]; __syncthreads(); image[ cIdx ] = smBlockB[threadIdx.y][threadIdx.x]; image[ cIdx + 1 ] = smBlockG[threadIdx.y][threadIdx.x]; image[ cIdx + 2 ] = smBlockR[threadIdx.y][threadIdx.x]; //image[ cIdxRight + 2 ] = 0; /**( image + cIdx ) = abs((*( image + cIdx ) - *( image + cIdxRight ))); *( image + cIdx + 1 ) = abs((*( image + cIdx + 1 ) - *( image + cIdxRight + 1 ))); *( image + cIdx + 2 ) = abs((*( image + cIdx + 2 ) - *( image + cIdxRight + 2 )));*/ } /** \ref ptKernelLauncher tipinde metod. \param d_Image [0, 1] aralığına normalize edilmiş, BGR kanal sıralı görüntünün GPU belleğindeki adresi.
\param width Görüntünün piksel olarak genişliği \param height Görüntünün piksel olarak yüksekliği \ref gpuTileFlip kernelini Grid ve Block boyutlarını ayarlayarak çağıran metod. */ void deviceTileFlipLaunch( float *d_Image, int width, int height ) { // launch kernel dim3 dimBlock( BLOCK_SIZE, BLOCK_SIZE ); dim3 dimGrid( width / dimBlock.x, height / dimBlock.y ); #if ENABLE_TIMING_CODE hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); #endif hipLaunchKernelGGL(( gpuTileFlip), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_Image, width, height); #if ENABLE_TIMING_CODE hipEventRecord(stop, 0); hipEventSynchronize(stop); float elapsedTime; hipEventElapsedTime(&elapsedTime, start, stop); // block until the device has completed hipDeviceSynchronize(); printf("kernel time: %.3f ms\n", elapsedTime); #endif hipDeviceSynchronize(); // check if kernel execution generated an error // Check for any CUDA errors checkCUDAError("kernel invocation"); }
b1fffb16413f732e3da84968df7047e5a496eab1.cu
// Copyright (c) 2011 Kerem KAT // // http://dissipatedheat.com/ // Do not hesisate to contact me about usage of the code or to make comments // about the code. Your feedback will be appreciated. // keremkat<@>gmail<.>com // // Kodun kullanımı hakkında veya yorum yapmak için benimle iletişim kurmaktan // çekinmeyiniz. Geri bildirimleriniz değerlendirilecektir. // keremkat<@>gmail<.>com // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to // deal in the Software without restriction, including without limitation the // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or // sell copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. #include "tileFlip.h" /** \file tileFlip.cu CUDA tile flip kernelinin launcher metodunu ve kernelini tanımlar. */ /** Kernel 1 griddeki blok boyutu ( BLOCK_SIZE x BLOCK_SIZE kare bloklar ). */ #define BLOCK_SIZE (32) /** GPU zamanını ölçmek için 1 yapınız. */ #define ENABLE_TIMING_CODE 1 /** Görüntüyü blok blok çeviren kernel. \param image [0, 1] aralığına normalize edilmiş, BGR kanal sıralı görüntünün GPU belleğindeki adresi. \param width Görüntünün piksel olarak genişliği \param height Görüntünün piksel olarak yüksekliği Metod GPU üzerinde çalışır, çıktısını image parametresinin üzerine yazar. */__global__ void gpuTileFlip( float* image, int width, int height ) { int row = blockIdx.y * BLOCK_SIZE + threadIdx.y; // satır No. int col = blockIdx.x * BLOCK_SIZE + threadIdx.x; // sütun No. int cIdx = ( row * width + col ) * 3; // 3 ile çarpım RGB için, linearIndex. /* *( image + linearIndex ): Blue, in [0, 1] *( image + linearIndex + 1 ): Green, in [0, 1] *( image + linearIndex + 2 ): Red, in [0, 1] */ __shared__ float smBlockB[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float smBlockG[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float smBlockR[BLOCK_SIZE][BLOCK_SIZE]; smBlockB[threadIdx.x][threadIdx.y] = image[ cIdx ]; smBlockG[threadIdx.x][threadIdx.y] = image[ cIdx + 1 ]; smBlockR[threadIdx.x][threadIdx.y] = image[ cIdx + 2 ]; __syncthreads(); image[ cIdx ] = smBlockB[threadIdx.y][threadIdx.x]; image[ cIdx + 1 ] = smBlockG[threadIdx.y][threadIdx.x]; image[ cIdx + 2 ] = smBlockR[threadIdx.y][threadIdx.x]; //image[ cIdxRight + 2 ] = 0; /**( image + cIdx ) = abs((*( image + cIdx ) - *( image + cIdxRight ))); *( image + cIdx + 1 ) = abs((*( image + cIdx + 1 ) - *( image + cIdxRight + 1 ))); *( image + cIdx + 2 ) = abs((*( image + cIdx + 2 ) - *( image + cIdxRight + 2 )));*/ } /** \ref ptKernelLauncher tipinde metod. \param d_Image [0, 1] aralığına normalize edilmiş, BGR kanal sıralı görüntünün GPU belleğindeki adresi. 
\param width Görüntünün piksel olarak genişliği \param height Görüntünün piksel olarak yüksekliği \ref gpuTileFlip kernelini Grid ve Block boyutlarını ayarlayarak çağıran metod. */ void deviceTileFlipLaunch( float *d_Image, int width, int height ) { // launch kernel dim3 dimBlock( BLOCK_SIZE, BLOCK_SIZE ); dim3 dimGrid( width / dimBlock.x, height / dimBlock.y ); #if ENABLE_TIMING_CODE cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); #endif gpuTileFlip<<< dimGrid, dimBlock >>>( d_Image, width, height); #if ENABLE_TIMING_CODE cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float elapsedTime; cudaEventElapsedTime(&elapsedTime, start, stop); // block until the device has completed cudaThreadSynchronize(); printf("kernel time: %.3f ms\n", elapsedTime); #endif cudaThreadSynchronize(); // check if kernel execution generated an error // Check for any CUDA errors checkCUDAError("kernel invocation"); }
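Functionally, gpuTileFlip transposes each BLOCK_SIZE x BLOCK_SIZE tile in place, once per BGR channel, and the launcher's grid of width/BLOCK_SIZE by height/BLOCK_SIZE blocks implicitly assumes both dimensions are multiples of 32. A hedged single-channel host reference of the same per-tile transpose, illustrative only and not from the library:

#include <utility>
#include <vector>

const int TILE = 32;   // mirrors BLOCK_SIZE in the kernel

// Transpose every TILE x TILE tile of one width x height channel in place,
// assuming width and height are exact multiples of TILE (as the launcher's grid does).
void tile_flip_ref(std::vector<float>& img, int width, int height)
{
    for (int ty = 0; ty < height; ty += TILE)
        for (int tx = 0; tx < width; tx += TILE)
            for (int y = 0; y < TILE; ++y)
                for (int x = y + 1; x < TILE; ++x)
                    std::swap(img[(ty + y) * width + (tx + x)],
                              img[(ty + x) * width + (tx + y)]);
}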
f6e1f65f7bb191d8a224eb9bc4390439dc6069e1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define TILE_SIZE 32 #define WARPS_PER_GROUP (THREAD_BLOCK_SIZE/TILE_SIZE) typedef struct { real3 pos, force; float radius, epsilon, padding; } AtomData; inline __device__ void loadAtomData(AtomData& data, int atom, const real4* __restrict__ posq, const float2* __restrict__ radiusEpsilon) { real4 atomPosq = posq[atom]; data.pos = make_real3(atomPosq.x, atomPosq.y, atomPosq.z); float2 temp = radiusEpsilon[atom]; data.radius = temp.x; data.epsilon = temp.y; } __device__ void initParticleParameters(float radius, float epsilon, real& rmixo, real& rmixh, real& emixo, real& emixh) { real sqrtEps = SQRT(epsilon); real denominator = SQRT(EPSO) + sqrtEps; emixo = 4*EPSO*epsilon / (denominator*denominator); denominator = SQRT(EPSH) + sqrtEps; emixh = 4*EPSH*epsilon / (denominator*denominator); real radius2 = radius*radius; real rmino2 = RMINO*RMINO; rmixo = 2*(rmino2*RMINO + radius2*radius) / (rmino2 + radius2); real rminh2 = RMINH*RMINH; rmixh = 2*(rminh2*RMINH + radius2*radius) / (rminh2+radius2); } __device__ void computeOneInteraction(AtomData& atom1, AtomData& atom2, real rmixo, real rmixh, real emixo, real emixh, real3& force, real& energy) { // get deltaR and r between 2 atoms force = atom2.pos - atom1.pos; real r2 = dot(force, force); if (r2 <= 0) { force = make_real3(0); energy = 0; return; } real rI = RSQRT(r2); real r = RECIP(rI); real sk = atom2.radius*SHCTD; real sk2 = sk*sk; if (atom1.radius >= (r+sk)) { force = make_real3(0); energy = 0; return; } real rmax = atom1.radius > (r - sk) ? atom1.radius : (r - sk); real lik = rmax; real lik2 = lik*lik; real lik3 = lik2*lik; real lik4 = lik2*lik2; real uik = (r+sk) < rmixo ? (r+sk) : rmixo; real uik2 = uik*uik; real uik3 = uik2*uik; real uik4 = uik2*uik2; real term = 4*M_PI/(48*r)*(3*(lik4-uik4) - 8*r*(lik3-uik3) + 6*(r2-sk2)*(lik2-uik2)); real r3 = r2*r; real dl1 = lik2*(-lik2 + 2*(r2 + sk2)); real dl2 = lik*(-lik3 + 4*lik2*r - 6*lik*r2 + 2*lik*sk2 + 4*r3 - 4*r*sk2); real dl = atom1.radius > (r-sk)? dl1 : dl2; real du1 = uik2*(-uik2 + 2*(r2 + sk2)); real du2 = uik*(-uik3 + 4*uik2*r - 2*uik*(3*r2 - sk2) + 4*r*(r2 - sk2)); real du = (r+sk) > rmixo ? -du1 : -du2; real mask2 = lik < rmixo ? 1 : 0; real sum = -mask2*(emixo*term); real de = -mask2*emixo*M_PI*(dl+du)/(4*r2); uik = (r+sk) < rmixh ? (r+sk) : rmixh; uik2 = uik*uik; uik3 = uik2*uik; uik4 = uik2*uik2; term = (M_PI)/ (12*r) * (3*(lik4-uik4) - 8*r*(lik3-uik3) + 6*(r2-sk2)*(lik2-uik2)); dl1 = lik2*(-lik2 + 2*r2 + 2*sk2); dl2 = lik*(-lik3 + 4*lik2*r - 6*lik*r2 + 2*lik*sk2 + 4*r3 - 4*r*sk2); dl = atom1.radius > (r-sk) ? dl1 : dl2; du1 = -uik2*(-uik2 + 2*r2 + 2*sk2); du2 = -uik*(-uik3 + 4*uik2*r - 6*uik*r2 + 2*uik*sk2 + 4*r3 - 4*r*sk2); du = (r+sk) > rmixh ? du1 : du2; mask2 = lik < rmixh ? 1 : 0; sum -= mask2*(2*emixh*term); de -= mask2*(2*emixh*M_PI*(dl+du)/(4*r2)); uik = r + sk; uik2 = uik*uik; uik3 = uik2*uik; uik4 = uik2*uik2; real uik5 = uik4*uik; real uik6 = uik3*uik3; real uik10 = uik5*uik5; real uik11 = uik10*uik; real uik12 = uik6*uik6; real uik13 = uik12*uik; lik = rmax > rmixo ? 
rmax : rmixo; lik2 = lik*lik; lik3 = lik2*lik; lik4 = lik2*lik2; real lik5 = lik4*lik; real lik6 = lik3*lik3; real lik10 = lik5*lik5; real lik11 = lik10*lik; real lik12 = lik6*lik6; real lik13 = lik12*lik; term = 4*M_PI/(120*r*lik5*uik5)*(15*uik*lik*r*(uik4-lik4) - 10*uik2*lik2*(uik3-lik3) + 6*(sk2-r2)*(uik5-lik5)); dl1 = (-5*lik2 + 3*r2 + 3*sk2)/lik5; dl2 = (5*lik3 - 33*lik*r2 - 3*lik*sk2 + 15*(lik2*r+r3-r*sk2))/lik6; dl = (atom1.radius > (r-sk)) || (rmax < rmixo) ? -dl1 : dl2; du = (-5*uik3 + 33*uik*r2 + 3*uik*sk2 - 15*(uik2*r+r3-r*sk2))/uik6; real rmixo7 = rmixo*rmixo*rmixo; rmixo7 = rmixo7*rmixo7*rmixo; real ao = emixo*rmixo7; real idisp = -2*ao*term; mask2 = uik > rmixo ? 1 : 0; de -= mask2*(2*ao*M_PI*(dl + du)/(15*r2)); term = 4*M_PI/(2640*r*lik12*uik12) * (120*uik*lik*r*(uik11-lik11) - 66*uik2*lik2*(uik10-lik10) + 55*(sk2-r2)*(uik12-lik12)); dl1 = (6*lik2 - 5*r2 - 5*sk2)/lik12; dl2 = (6*lik3 - 125*lik*r2 - 5*lik*sk2 + 60*(lik2*r+r3-r*sk2))/lik13; dl = (atom1.radius > (r-sk)) || (rmax < rmixo) ? dl1 : dl2; du = (-6*uik3 + 125*uik*r2 + 5*uik*sk2 - 60*(uik2*r+r3-r*sk2))/uik13; de += mask2*(ao*rmixo7*M_PI*(dl + du)/(60*r2)); real irep = ao*rmixo7*term; sum += mask2*(irep + idisp); lik = rmax > rmixh ? rmax : rmixh; lik2 = lik*lik; lik3 = lik2*lik; lik4 = lik2*lik2; lik5 = lik4*lik; lik6 = lik3*lik3; lik10 = lik5*lik5; lik11 = lik10*lik; lik12 = lik6*lik6; lik13 = lik12*lik; term = 4*M_PI / (120*r*lik5*uik5) * (15*uik*lik*r*(uik4-lik4) - 10*uik2*lik2*(uik3-lik3) + 6*(sk2-r2)*(uik5-lik5)); dl1 = (-5*lik2 + 3*r2 + 3*sk2)/lik5; dl2 = (5*lik3 - 33*lik*r2 - 3*lik*sk2+ 15*(lik2*r+r3-r*sk2))/lik6; dl = (atom1.radius > (r-sk)) || (rmax < rmixh) ? -dl1 : dl2; du = -(5*uik3 - 33*uik*r2 - 3*uik*sk2 + 15*(uik2*r+r3-r*sk2))/uik6; real rmixh7 = rmixh*rmixh*rmixh; rmixh7 = rmixh7*rmixh7*rmixh; real ah = emixh*rmixh7; idisp = -4*ah*term; mask2 = uik > rmixh ? 1 : 0; de -= mask2*(4*ah*M_PI*(dl + du)/(15*r2)); term = 4*M_PI / (2640*r*lik12*uik12) * (120*uik*lik*r*(uik11-lik11) - 66*uik2*lik2*(uik10-lik10) + 55*(sk2-r2)*(uik12-lik12)); dl1 = -(-6*lik2 + 5*r2 + 5*sk2)/lik12; dl2 = (6*lik3 - 125*lik*r2 - 5*lik*sk2 + 60*(lik2*r+r3-r*sk2))/lik13; dl = ((atom1.radius > (r-sk)) || (rmax < rmixh)) ? dl1 : dl2; du = -(6*uik3 - 125*uik*r2 -5*uik*sk2 + 60*(uik2*r+r3-r*sk2))/uik13; irep = 2*ah*rmixh7*term; de += mask2*(ah*rmixh7*M_PI*(dl+du)/(30*r2)); sum += mask2*(irep+idisp); energy = sum; de *= -AWATER*rI; force *= de; } /** * Compute WCA interaction. 
*/ extern "C" __global__ void computeWCAForce(unsigned long long* __restrict__ forceBuffers, mixed* __restrict__ energyBuffer, const real4* __restrict__ posq, unsigned int startTileIndex, unsigned int numTileIndices, const float2* __restrict__ radiusEpsilon) { unsigned int totalWarps = (blockDim.x*gridDim.x)/TILE_SIZE; unsigned int warp = (blockIdx.x*blockDim.x+threadIdx.x)/TILE_SIZE; const unsigned int numTiles = numTileIndices; unsigned int pos = (unsigned int) (startTileIndex+warp*(long long)numTiles/totalWarps); unsigned int end = (unsigned int) (startTileIndex+(warp+1)*(long long)numTiles/totalWarps); mixed energy = 0; __shared__ AtomData localData[THREAD_BLOCK_SIZE]; do { // Extract the coordinates of this tile const unsigned int tgx = threadIdx.x & (TILE_SIZE-1); const unsigned int tbx = threadIdx.x - tgx; const unsigned int localGroupIndex = threadIdx.x/TILE_SIZE; int x, y; AtomData data; if (pos < end) { y = (int) floor(NUM_BLOCKS+0.5f-SQRT((NUM_BLOCKS+0.5f)*(NUM_BLOCKS+0.5f)-2*pos)); x = (pos-y*NUM_BLOCKS+y*(y+1)/2); if (x < y || x >= NUM_BLOCKS) { // Occasionally happens due to roundoff error. y += (x < y ? -1 : 1); x = (pos-y*NUM_BLOCKS+y*(y+1)/2); } unsigned int atom1 = x*TILE_SIZE + tgx; loadAtomData(data, atom1, posq, radiusEpsilon); loadAtomData(localData[threadIdx.x], y*TILE_SIZE+tgx, posq, radiusEpsilon); real emixo, emixh, rmixo, rmixh; initParticleParameters(data.radius, data.epsilon, rmixo, rmixh, emixo, emixh); data.force = make_real3(0); localData[threadIdx.x].force = make_real3(0); // Compute forces. unsigned int tj = tgx; for (unsigned int j = 0; j < TILE_SIZE; j++) { int atom2 = y*TILE_SIZE+tj; if (atom1 != atom2 && atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) { real3 tempForce; real tempEnergy; computeOneInteraction(data, localData[tbx+tj], rmixo, rmixh, emixo, emixh, tempForce, tempEnergy); data.force += tempForce; localData[tbx+tj].force -= tempForce; energy += (x == y ? 0.5f*tempEnergy : tempEnergy); real emjxo, emjxh, rmjxo, rmjxh; initParticleParameters(localData[tbx+tj].radius, localData[tbx+tj].epsilon, rmjxo, rmjxh, emjxo, emjxh); computeOneInteraction(localData[tbx+tj], data, rmjxo, rmjxh, emjxo, emjxh, tempForce, tempEnergy); data.force -= tempForce; localData[tbx+tj].force += tempForce; energy += (x == y ? 0.5f*tempEnergy : tempEnergy); } tj = (tj+1) & (TILE_SIZE-1); } unsigned int offset = x*TILE_SIZE + tgx; atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (data.force.x*0x100000000))); atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.force.y*0x100000000))); atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.force.z*0x100000000))); if (x != y) { offset = y*TILE_SIZE + tgx; atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.x*0x100000000))); atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.y*0x100000000))); atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.z*0x100000000))); } } pos++; } while (pos < end); energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] -= AWATER*energy; }
f6e1f65f7bb191d8a224eb9bc4390439dc6069e1.cu
#define TILE_SIZE 32 #define WARPS_PER_GROUP (THREAD_BLOCK_SIZE/TILE_SIZE) typedef struct { real3 pos, force; float radius, epsilon, padding; } AtomData; inline __device__ void loadAtomData(AtomData& data, int atom, const real4* __restrict__ posq, const float2* __restrict__ radiusEpsilon) { real4 atomPosq = posq[atom]; data.pos = make_real3(atomPosq.x, atomPosq.y, atomPosq.z); float2 temp = radiusEpsilon[atom]; data.radius = temp.x; data.epsilon = temp.y; } __device__ void initParticleParameters(float radius, float epsilon, real& rmixo, real& rmixh, real& emixo, real& emixh) { real sqrtEps = SQRT(epsilon); real denominator = SQRT(EPSO) + sqrtEps; emixo = 4*EPSO*epsilon / (denominator*denominator); denominator = SQRT(EPSH) + sqrtEps; emixh = 4*EPSH*epsilon / (denominator*denominator); real radius2 = radius*radius; real rmino2 = RMINO*RMINO; rmixo = 2*(rmino2*RMINO + radius2*radius) / (rmino2 + radius2); real rminh2 = RMINH*RMINH; rmixh = 2*(rminh2*RMINH + radius2*radius) / (rminh2+radius2); } __device__ void computeOneInteraction(AtomData& atom1, AtomData& atom2, real rmixo, real rmixh, real emixo, real emixh, real3& force, real& energy) { // get deltaR and r between 2 atoms force = atom2.pos - atom1.pos; real r2 = dot(force, force); if (r2 <= 0) { force = make_real3(0); energy = 0; return; } real rI = RSQRT(r2); real r = RECIP(rI); real sk = atom2.radius*SHCTD; real sk2 = sk*sk; if (atom1.radius >= (r+sk)) { force = make_real3(0); energy = 0; return; } real rmax = atom1.radius > (r - sk) ? atom1.radius : (r - sk); real lik = rmax; real lik2 = lik*lik; real lik3 = lik2*lik; real lik4 = lik2*lik2; real uik = (r+sk) < rmixo ? (r+sk) : rmixo; real uik2 = uik*uik; real uik3 = uik2*uik; real uik4 = uik2*uik2; real term = 4*M_PI/(48*r)*(3*(lik4-uik4) - 8*r*(lik3-uik3) + 6*(r2-sk2)*(lik2-uik2)); real r3 = r2*r; real dl1 = lik2*(-lik2 + 2*(r2 + sk2)); real dl2 = lik*(-lik3 + 4*lik2*r - 6*lik*r2 + 2*lik*sk2 + 4*r3 - 4*r*sk2); real dl = atom1.radius > (r-sk)? dl1 : dl2; real du1 = uik2*(-uik2 + 2*(r2 + sk2)); real du2 = uik*(-uik3 + 4*uik2*r - 2*uik*(3*r2 - sk2) + 4*r*(r2 - sk2)); real du = (r+sk) > rmixo ? -du1 : -du2; real mask2 = lik < rmixo ? 1 : 0; real sum = -mask2*(emixo*term); real de = -mask2*emixo*M_PI*(dl+du)/(4*r2); uik = (r+sk) < rmixh ? (r+sk) : rmixh; uik2 = uik*uik; uik3 = uik2*uik; uik4 = uik2*uik2; term = (M_PI)/ (12*r) * (3*(lik4-uik4) - 8*r*(lik3-uik3) + 6*(r2-sk2)*(lik2-uik2)); dl1 = lik2*(-lik2 + 2*r2 + 2*sk2); dl2 = lik*(-lik3 + 4*lik2*r - 6*lik*r2 + 2*lik*sk2 + 4*r3 - 4*r*sk2); dl = atom1.radius > (r-sk) ? dl1 : dl2; du1 = -uik2*(-uik2 + 2*r2 + 2*sk2); du2 = -uik*(-uik3 + 4*uik2*r - 6*uik*r2 + 2*uik*sk2 + 4*r3 - 4*r*sk2); du = (r+sk) > rmixh ? du1 : du2; mask2 = lik < rmixh ? 1 : 0; sum -= mask2*(2*emixh*term); de -= mask2*(2*emixh*M_PI*(dl+du)/(4*r2)); uik = r + sk; uik2 = uik*uik; uik3 = uik2*uik; uik4 = uik2*uik2; real uik5 = uik4*uik; real uik6 = uik3*uik3; real uik10 = uik5*uik5; real uik11 = uik10*uik; real uik12 = uik6*uik6; real uik13 = uik12*uik; lik = rmax > rmixo ? rmax : rmixo; lik2 = lik*lik; lik3 = lik2*lik; lik4 = lik2*lik2; real lik5 = lik4*lik; real lik6 = lik3*lik3; real lik10 = lik5*lik5; real lik11 = lik10*lik; real lik12 = lik6*lik6; real lik13 = lik12*lik; term = 4*M_PI/(120*r*lik5*uik5)*(15*uik*lik*r*(uik4-lik4) - 10*uik2*lik2*(uik3-lik3) + 6*(sk2-r2)*(uik5-lik5)); dl1 = (-5*lik2 + 3*r2 + 3*sk2)/lik5; dl2 = (5*lik3 - 33*lik*r2 - 3*lik*sk2 + 15*(lik2*r+r3-r*sk2))/lik6; dl = (atom1.radius > (r-sk)) || (rmax < rmixo) ? 
-dl1 : dl2; du = (-5*uik3 + 33*uik*r2 + 3*uik*sk2 - 15*(uik2*r+r3-r*sk2))/uik6; real rmixo7 = rmixo*rmixo*rmixo; rmixo7 = rmixo7*rmixo7*rmixo; real ao = emixo*rmixo7; real idisp = -2*ao*term; mask2 = uik > rmixo ? 1 : 0; de -= mask2*(2*ao*M_PI*(dl + du)/(15*r2)); term = 4*M_PI/(2640*r*lik12*uik12) * (120*uik*lik*r*(uik11-lik11) - 66*uik2*lik2*(uik10-lik10) + 55*(sk2-r2)*(uik12-lik12)); dl1 = (6*lik2 - 5*r2 - 5*sk2)/lik12; dl2 = (6*lik3 - 125*lik*r2 - 5*lik*sk2 + 60*(lik2*r+r3-r*sk2))/lik13; dl = (atom1.radius > (r-sk)) || (rmax < rmixo) ? dl1 : dl2; du = (-6*uik3 + 125*uik*r2 + 5*uik*sk2 - 60*(uik2*r+r3-r*sk2))/uik13; de += mask2*(ao*rmixo7*M_PI*(dl + du)/(60*r2)); real irep = ao*rmixo7*term; sum += mask2*(irep + idisp); lik = rmax > rmixh ? rmax : rmixh; lik2 = lik*lik; lik3 = lik2*lik; lik4 = lik2*lik2; lik5 = lik4*lik; lik6 = lik3*lik3; lik10 = lik5*lik5; lik11 = lik10*lik; lik12 = lik6*lik6; lik13 = lik12*lik; term = 4*M_PI / (120*r*lik5*uik5) * (15*uik*lik*r*(uik4-lik4) - 10*uik2*lik2*(uik3-lik3) + 6*(sk2-r2)*(uik5-lik5)); dl1 = (-5*lik2 + 3*r2 + 3*sk2)/lik5; dl2 = (5*lik3 - 33*lik*r2 - 3*lik*sk2+ 15*(lik2*r+r3-r*sk2))/lik6; dl = (atom1.radius > (r-sk)) || (rmax < rmixh) ? -dl1 : dl2; du = -(5*uik3 - 33*uik*r2 - 3*uik*sk2 + 15*(uik2*r+r3-r*sk2))/uik6; real rmixh7 = rmixh*rmixh*rmixh; rmixh7 = rmixh7*rmixh7*rmixh; real ah = emixh*rmixh7; idisp = -4*ah*term; mask2 = uik > rmixh ? 1 : 0; de -= mask2*(4*ah*M_PI*(dl + du)/(15*r2)); term = 4*M_PI / (2640*r*lik12*uik12) * (120*uik*lik*r*(uik11-lik11) - 66*uik2*lik2*(uik10-lik10) + 55*(sk2-r2)*(uik12-lik12)); dl1 = -(-6*lik2 + 5*r2 + 5*sk2)/lik12; dl2 = (6*lik3 - 125*lik*r2 - 5*lik*sk2 + 60*(lik2*r+r3-r*sk2))/lik13; dl = ((atom1.radius > (r-sk)) || (rmax < rmixh)) ? dl1 : dl2; du = -(6*uik3 - 125*uik*r2 -5*uik*sk2 + 60*(uik2*r+r3-r*sk2))/uik13; irep = 2*ah*rmixh7*term; de += mask2*(ah*rmixh7*M_PI*(dl+du)/(30*r2)); sum += mask2*(irep+idisp); energy = sum; de *= -AWATER*rI; force *= de; } /** * Compute WCA interaction. */ extern "C" __global__ void computeWCAForce(unsigned long long* __restrict__ forceBuffers, mixed* __restrict__ energyBuffer, const real4* __restrict__ posq, unsigned int startTileIndex, unsigned int numTileIndices, const float2* __restrict__ radiusEpsilon) { unsigned int totalWarps = (blockDim.x*gridDim.x)/TILE_SIZE; unsigned int warp = (blockIdx.x*blockDim.x+threadIdx.x)/TILE_SIZE; const unsigned int numTiles = numTileIndices; unsigned int pos = (unsigned int) (startTileIndex+warp*(long long)numTiles/totalWarps); unsigned int end = (unsigned int) (startTileIndex+(warp+1)*(long long)numTiles/totalWarps); mixed energy = 0; __shared__ AtomData localData[THREAD_BLOCK_SIZE]; do { // Extract the coordinates of this tile const unsigned int tgx = threadIdx.x & (TILE_SIZE-1); const unsigned int tbx = threadIdx.x - tgx; const unsigned int localGroupIndex = threadIdx.x/TILE_SIZE; int x, y; AtomData data; if (pos < end) { y = (int) floor(NUM_BLOCKS+0.5f-SQRT((NUM_BLOCKS+0.5f)*(NUM_BLOCKS+0.5f)-2*pos)); x = (pos-y*NUM_BLOCKS+y*(y+1)/2); if (x < y || x >= NUM_BLOCKS) { // Occasionally happens due to roundoff error. y += (x < y ? -1 : 1); x = (pos-y*NUM_BLOCKS+y*(y+1)/2); } unsigned int atom1 = x*TILE_SIZE + tgx; loadAtomData(data, atom1, posq, radiusEpsilon); loadAtomData(localData[threadIdx.x], y*TILE_SIZE+tgx, posq, radiusEpsilon); real emixo, emixh, rmixo, rmixh; initParticleParameters(data.radius, data.epsilon, rmixo, rmixh, emixo, emixh); data.force = make_real3(0); localData[threadIdx.x].force = make_real3(0); // Compute forces. 
unsigned int tj = tgx; for (unsigned int j = 0; j < TILE_SIZE; j++) { int atom2 = y*TILE_SIZE+tj; if (atom1 != atom2 && atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) { real3 tempForce; real tempEnergy; computeOneInteraction(data, localData[tbx+tj], rmixo, rmixh, emixo, emixh, tempForce, tempEnergy); data.force += tempForce; localData[tbx+tj].force -= tempForce; energy += (x == y ? 0.5f*tempEnergy : tempEnergy); real emjxo, emjxh, rmjxo, rmjxh; initParticleParameters(localData[tbx+tj].radius, localData[tbx+tj].epsilon, rmjxo, rmjxh, emjxo, emjxh); computeOneInteraction(localData[tbx+tj], data, rmjxo, rmjxh, emjxo, emjxh, tempForce, tempEnergy); data.force -= tempForce; localData[tbx+tj].force += tempForce; energy += (x == y ? 0.5f*tempEnergy : tempEnergy); } tj = (tj+1) & (TILE_SIZE-1); } unsigned int offset = x*TILE_SIZE + tgx; atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (data.force.x*0x100000000))); atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.force.y*0x100000000))); atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.force.z*0x100000000))); if (x != y) { offset = y*TILE_SIZE + tgx; atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.x*0x100000000))); atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.y*0x100000000))); atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.z*0x100000000))); } } pos++; } while (pos < end); energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] -= AWATER*energy; }
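Both versions accumulate forces into forceBuffers as 64-bit fixed-point numbers: each component is scaled by 2^32, truncated to a signed integer, and added with atomicAdd on the unsigned buffer, which makes the accumulation exact and independent of thread ordering. A small hedged host illustration of that encode/accumulate/decode round trip (the sample value is arbitrary):

#include <cstdio>

int main()
{
    double fx = -0.3125;                                   // example force component
    long long encoded = (long long)(fx * 0x100000000LL);   // scale by 2^32 and truncate
    unsigned long long buffer = 0;                         // one slot of forceBuffers
    buffer += (unsigned long long)encoded;                 // what each atomicAdd contributes
    double decoded = (double)(long long)buffer / (double)0x100000000LL;  // read-back scaling
    printf("%f -> %lld -> %f\n", fx, encoded, decoded);    // prints -0.312500 at both ends
    return 0;
}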
316f81b568a1751f838b140e10e005b43eff6616.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void matrixMultiply(double *a, double *b, double *c, int cr, int cc, int ac, int bc){ long x = blockIdx.x * blockDim.x + threadIdx.x; // col long y = blockIdx.y * blockDim.y + threadIdx.y; // row double sum = 0; if(x < cc && y < cr){ for(int k = 0; k<ac; k++){ sum+= a[y*ac+k] * b[k*bc+x]; } c[y * cc + x] = sum; } }
316f81b568a1751f838b140e10e005b43eff6616.cu
#include "includes.h" __global__ void matrixMultiply(double *a, double *b, double *c, int cr, int cc, int ac, int bc){ long x = blockIdx.x * blockDim.x + threadIdx.x; // col long y = blockIdx.y * blockDim.y + threadIdx.y; // row double sum = 0; if(x < cc && y < cr){ for(int k = 0; k<ac; k++){ sum+= a[y*ac+k] * b[k*bc+x]; } c[y * cc + x] = sum; } }
5ac5e344d43c0dc793ad9087025408922b61a063.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __global__ void calcNeurons( float t, int layer_size, int *spike_count, int *spikes, float *in_syn, float *V, bool *fired, int threshold) { int id = BLOCK_SIZE * blockIdx.x + threadIdx.x; if (id < layer_size) { __shared__ int spikes_block[BLOCK_SIZE]; __shared__ volatile int spike_count_block; __shared__ volatile int spikes_idx; if (threadIdx.x == 0) { spike_count_block = 0; } __syncthreads(); V[id] += in_syn[id]; in_syn[id] = 0; bool fire = false; //////////////////////////////////////////////////////////// // begin //////////////////////////////////////////////////////////// if (V[id] > threshold && !fired[id]) { fired[id] = true; fire = true; } //////////////////////////////////////////////////////////// // end //////////////////////////////////////////////////////////// if (fire) spikes_block[atomicAdd((int *)&spike_count_block, 1)] = id; __syncthreads(); if (threadIdx.x == 0) { if (spike_count_block > 0) { spikes_idx = atomicAdd(&spike_count[0], spike_count_block); } } __syncthreads(); if (threadIdx.x < spike_count_block) { spikes[spikes_idx + threadIdx.x] = spikes_block[threadIdx.x]; } } } __global__ void calcSynapses( float t, int layer_size_post, int *spike_count_pre, int *spikes_pre, float *in_syn_post, bool *g) { int id_post = BLOCK_SIZE * blockIdx.x + threadIdx.x; if (id_post < layer_size_post) { float lin_syn_post = in_syn_post[id_post]; for (int i = 0; i < spike_count_pre[0]; i++) { //////////////////////////////////////////////////////////// // begin //////////////////////////////////////////////////////////// lin_syn_post += g[spikes_pre[i] * layer_size_post + id_post]; //////////////////////////////////////////////////////////// // end //////////////////////////////////////////////////////////// } in_syn_post[id_post] = lin_syn_post; } }
5ac5e344d43c0dc793ad9087025408922b61a063.cu
#include <stdio.h> __global__ void calcNeurons( float t, int layer_size, int *spike_count, int *spikes, float *in_syn, float *V, bool *fired, int threshold) { int id = BLOCK_SIZE * blockIdx.x + threadIdx.x; if (id < layer_size) { __shared__ int spikes_block[BLOCK_SIZE]; __shared__ volatile int spike_count_block; __shared__ volatile int spikes_idx; if (threadIdx.x == 0) { spike_count_block = 0; } __syncthreads(); V[id] += in_syn[id]; in_syn[id] = 0; bool fire = false; //////////////////////////////////////////////////////////// // begin //////////////////////////////////////////////////////////// if (V[id] > threshold && !fired[id]) { fired[id] = true; fire = true; } //////////////////////////////////////////////////////////// // end //////////////////////////////////////////////////////////// if (fire) spikes_block[atomicAdd((int *)&spike_count_block, 1)] = id; __syncthreads(); if (threadIdx.x == 0) { if (spike_count_block > 0) { spikes_idx = atomicAdd(&spike_count[0], spike_count_block); } } __syncthreads(); if (threadIdx.x < spike_count_block) { spikes[spikes_idx + threadIdx.x] = spikes_block[threadIdx.x]; } } } __global__ void calcSynapses( float t, int layer_size_post, int *spike_count_pre, int *spikes_pre, float *in_syn_post, bool *g) { int id_post = BLOCK_SIZE * blockIdx.x + threadIdx.x; if (id_post < layer_size_post) { float lin_syn_post = in_syn_post[id_post]; for (int i = 0; i < spike_count_pre[0]; i++) { //////////////////////////////////////////////////////////// // begin //////////////////////////////////////////////////////////// lin_syn_post += g[spikes_pre[i] * layer_size_post + id_post]; //////////////////////////////////////////////////////////// // end //////////////////////////////////////////////////////////// } in_syn_post[id_post] = lin_syn_post; } }
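BLOCK_SIZE is referenced but never defined in this file, so it is presumably injected by the surrounding code generator, and spike_count has to be reset before each simulation step or the shared spikes_idx offset keeps growing. A hedged host-side step helper follows, assuming it is appended to the generated file (which supplies BLOCK_SIZE and the kernel) and that every d_* pointer is an existing device allocation:

// One thread per neuron, rounded up to whole blocks; spike_count holds a single int
// that is zeroed here so the spike buffer is rewritten from the start each step.
void stepNeurons(float t, int layer_size, int *d_spike_count, int *d_spikes,
                 float *d_in_syn, float *d_V, bool *d_fired, int threshold)
{
    int grid = (layer_size + BLOCK_SIZE - 1) / BLOCK_SIZE;
    cudaMemset(d_spike_count, 0, sizeof(int));
    calcNeurons<<<grid, BLOCK_SIZE>>>(t, layer_size, d_spike_count, d_spikes,
                                      d_in_syn, d_V, d_fired, threshold);
}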
8de81643affcd47f48d674272b9fe3323ead6a9f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void calcConvolutionForwardPaddedInGPU( float *in, float *padded_in, int batch_size, int in_size_x, int in_size_y, int in_size_z, int padding) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if( id < batch_size * in_size_x * in_size_y * in_size_z ){ int in_index = id; int x = id % in_size_x; id /= in_size_x; int y = id % in_size_y; id /= in_size_y; int z = id % in_size_z; id /= in_size_z; int b = id; int pad_index = b * (in_size_z * (in_size_x + 2*padding) * (in_size_y + 2*padding) ) + z * ((in_size_x + 2*padding) * (in_size_y + 2*padding)) + (y+padding) * (in_size_x + 2*padding) + (x+padding) ; padded_in[pad_index] = in[in_index]; } /* original code for ( int b = 0; b < in.size.b; ++b ){ for ( int z = 0; z < in.size.z; ++z ){ for ( int y = 0; y < in.size.y; ++y ){ for ( int x = 0; x < in.size.x; ++x ){ padded_in( b, padding+x, padding+y, z ) = in( b, x, y, z ); } } } } */ }
8de81643affcd47f48d674272b9fe3323ead6a9f.cu
#include "includes.h" __global__ void calcConvolutionForwardPaddedInGPU( float *in, float *padded_in, int batch_size, int in_size_x, int in_size_y, int in_size_z, int padding) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if( id < batch_size * in_size_x * in_size_y * in_size_z ){ int in_index = id; int x = id % in_size_x; id /= in_size_x; int y = id % in_size_y; id /= in_size_y; int z = id % in_size_z; id /= in_size_z; int b = id; int pad_index = b * (in_size_z * (in_size_x + 2*padding) * (in_size_y + 2*padding) ) + z * ((in_size_x + 2*padding) * (in_size_y + 2*padding)) + (y+padding) * (in_size_x + 2*padding) + (x+padding) ; padded_in[pad_index] = in[in_index]; } /* original code for ( int b = 0; b < in.size.b; ++b ){ for ( int z = 0; z < in.size.z; ++z ){ for ( int y = 0; y < in.size.y; ++y ){ for ( int x = 0; x < in.size.x; ++x ){ padded_in( b, padding+x, padding+y, z ) = in( b, x, y, z ); } } } } */ }
bc5ee1f69702173cd7c8a5a06dd01d353605f053.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common/Constants.h" #include "core/geometry/WarpFieldExtender.h" #include <device_launch_parameters.h> namespace surfelwarp { namespace device { /* Kernel and method for choosing node candidate from init knn array (not field) */ __global__ void labelVertexCandidateKernel( const DeviceArrayView<float4> vertex_confid_array, const ushort4* vertex_knn_array, const float4* node_coords_array, unsigned* vertex_candidate_label ) { const auto idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= vertex_confid_array.Size()) return; //Obtain vertex and its knn const float4 vertex_confid = vertex_confid_array[idx]; const float4 vertex = make_float4(vertex_confid.x, vertex_confid.y, vertex_confid.z, 1.0); const ushort4 knn = vertex_knn_array[idx]; //Check its distance to node float4 node; float dist_square; bool covered = false; //knn-0 node = node_coords_array[knn.x]; dist_square = squared_norm_xyz(node - vertex); if (dist_square < d_node_radius_square) { covered = true; } //knn-1 node = node_coords_array[knn.y]; dist_square = squared_norm_xyz(node - vertex); if (dist_square < d_node_radius_square) { covered = true; } //knn-2 node = node_coords_array[knn.z]; dist_square = squared_norm_xyz(node - vertex); if (dist_square < d_node_radius_square) { covered = true; } //knn-3 node = node_coords_array[knn.w]; dist_square = squared_norm_xyz(node - vertex); if (dist_square < d_node_radius_square) { covered = true; } //Write it to output unsigned label = 1; if (covered) { label = 0; } vertex_candidate_label[idx] = label; } __global__ void compactCandidateKernel( const DeviceArrayView<unsigned> candidate_validity_label, const unsigned* prefixsum_validity_label, const float4* vertex_array, float4* valid_candidate_vertex ) { const auto idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= candidate_validity_label.Size()) return; if (candidate_validity_label[idx] > 0) { const float4 vertex = vertex_array[idx]; valid_candidate_vertex[prefixsum_validity_label[idx] - 1] = make_float4(vertex.x, vertex.y, vertex.z, 1.0); } } } // device } // surfelwarp void surfelwarp::WarpFieldExtender::labelCollectUncoveredNodeCandidate( const DeviceArrayView<float4>& vertex_array, const DeviceArrayView<ushort4>& vertex_knn, const DeviceArrayView<float4>& node_coordinates, hipStream_t stream ) { m_candidate_validity_indicator.ResizeArrayOrException(vertex_array.Size()); dim3 blk(64); dim3 grid(divUp(vertex_array.Size(), blk.x)); hipLaunchKernelGGL(( device::labelVertexCandidateKernel), dim3(grid), dim3(blk), 0, stream, vertex_array, vertex_knn.RawPtr(), node_coordinates.RawPtr(), m_candidate_validity_indicator.Ptr() ); //Sync and check error #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(hipStreamSynchronize(stream)); cudaSafeCall(hipGetLastError()); #endif //Do a prefix sum SURFELWARP_CHECK(vertex_array.Size() == m_candidate_validity_indicator.ArraySize()); m_validity_indicator_prefixsum.InclusiveSum(m_candidate_validity_indicator.ArrayView(), stream); //Do compaction hipLaunchKernelGGL(( device::compactCandidateKernel), dim3(grid), dim3(blk), 0, stream, m_candidate_validity_indicator.ArrayView(), m_validity_indicator_prefixsum.valid_prefixsum_array.ptr(), vertex_array.RawPtr(), m_candidate_vertex_array.DevicePtr() ); //Sync and check error #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(hipStreamSynchronize(stream)); cudaSafeCall(hipGetLastError()); #endif } void surfelwarp::WarpFieldExtender::syncQueryUncoveredNodeCandidateSize( 
hipStream_t stream ) { //Check the size const auto& prefixsum_array = m_validity_indicator_prefixsum.valid_prefixsum_array; SURFELWARP_CHECK(prefixsum_array.size() == m_candidate_validity_indicator.ArraySize()); //The device ptr const unsigned* candidate_size_dev = prefixsum_array.ptr() + prefixsum_array.size() - 1; unsigned candidate_size; cudaSafeCall(hipMemcpyAsync( &candidate_size, candidate_size_dev, sizeof(unsigned), hipMemcpyDeviceToHost, stream )); //Sync and check the size cudaSafeCall(hipStreamSynchronize(stream)); m_candidate_vertex_array.ResizeArrayOrException(candidate_size); if(candidate_size != 0) m_candidate_vertex_array.SynchronizeToHost(stream, true); //Debug method //LOG(INFO) << "The number of node candidates is " << m_candidate_vertex_array.DeviceArraySize(); }
bc5ee1f69702173cd7c8a5a06dd01d353605f053.cu
#include "common/Constants.h" #include "core/geometry/WarpFieldExtender.h" #include <device_launch_parameters.h> namespace surfelwarp { namespace device { /* Kernel and method for choosing node candidate from init knn array (not field) */ __global__ void labelVertexCandidateKernel( const DeviceArrayView<float4> vertex_confid_array, const ushort4* vertex_knn_array, const float4* node_coords_array, unsigned* vertex_candidate_label ) { const auto idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= vertex_confid_array.Size()) return; //Obtain vertex and its knn const float4 vertex_confid = vertex_confid_array[idx]; const float4 vertex = make_float4(vertex_confid.x, vertex_confid.y, vertex_confid.z, 1.0); const ushort4 knn = vertex_knn_array[idx]; //Check its distance to node float4 node; float dist_square; bool covered = false; //knn-0 node = node_coords_array[knn.x]; dist_square = squared_norm_xyz(node - vertex); if (dist_square < d_node_radius_square) { covered = true; } //knn-1 node = node_coords_array[knn.y]; dist_square = squared_norm_xyz(node - vertex); if (dist_square < d_node_radius_square) { covered = true; } //knn-2 node = node_coords_array[knn.z]; dist_square = squared_norm_xyz(node - vertex); if (dist_square < d_node_radius_square) { covered = true; } //knn-3 node = node_coords_array[knn.w]; dist_square = squared_norm_xyz(node - vertex); if (dist_square < d_node_radius_square) { covered = true; } //Write it to output unsigned label = 1; if (covered) { label = 0; } vertex_candidate_label[idx] = label; } __global__ void compactCandidateKernel( const DeviceArrayView<unsigned> candidate_validity_label, const unsigned* prefixsum_validity_label, const float4* vertex_array, float4* valid_candidate_vertex ) { const auto idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= candidate_validity_label.Size()) return; if (candidate_validity_label[idx] > 0) { const float4 vertex = vertex_array[idx]; valid_candidate_vertex[prefixsum_validity_label[idx] - 1] = make_float4(vertex.x, vertex.y, vertex.z, 1.0); } } } // device } // surfelwarp void surfelwarp::WarpFieldExtender::labelCollectUncoveredNodeCandidate( const DeviceArrayView<float4>& vertex_array, const DeviceArrayView<ushort4>& vertex_knn, const DeviceArrayView<float4>& node_coordinates, cudaStream_t stream ) { m_candidate_validity_indicator.ResizeArrayOrException(vertex_array.Size()); dim3 blk(64); dim3 grid(divUp(vertex_array.Size(), blk.x)); device::labelVertexCandidateKernel<<<grid, blk, 0, stream>>>( vertex_array, vertex_knn.RawPtr(), node_coordinates.RawPtr(), m_candidate_validity_indicator.Ptr() ); //Sync and check error #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(cudaStreamSynchronize(stream)); cudaSafeCall(cudaGetLastError()); #endif //Do a prefix sum SURFELWARP_CHECK(vertex_array.Size() == m_candidate_validity_indicator.ArraySize()); m_validity_indicator_prefixsum.InclusiveSum(m_candidate_validity_indicator.ArrayView(), stream); //Do compaction device::compactCandidateKernel<<<grid, blk, 0, stream>>>( m_candidate_validity_indicator.ArrayView(), m_validity_indicator_prefixsum.valid_prefixsum_array.ptr(), vertex_array.RawPtr(), m_candidate_vertex_array.DevicePtr() ); //Sync and check error #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(cudaStreamSynchronize(stream)); cudaSafeCall(cudaGetLastError()); #endif } void surfelwarp::WarpFieldExtender::syncQueryUncoveredNodeCandidateSize( cudaStream_t stream ) { //Check the size const auto& prefixsum_array = m_validity_indicator_prefixsum.valid_prefixsum_array; 
SURFELWARP_CHECK(prefixsum_array.size() == m_candidate_validity_indicator.ArraySize()); //The device ptr const unsigned* candidate_size_dev = prefixsum_array.ptr() + prefixsum_array.size() - 1; unsigned candidate_size; cudaSafeCall(cudaMemcpyAsync( &candidate_size, candidate_size_dev, sizeof(unsigned), cudaMemcpyDeviceToHost, stream )); //Sync and check the size cudaSafeCall(cudaStreamSynchronize(stream)); m_candidate_vertex_array.ResizeArrayOrException(candidate_size); if(candidate_size != 0) m_candidate_vertex_array.SynchronizeToHost(stream, true); //Debug method //LOG(INFO) << "The number of node candidates is " << m_candidate_vertex_array.DeviceArraySize(); }
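labelCollectUncoveredNodeCandidate follows the usual label / inclusive-prefix-sum / scatter recipe for stream compaction: a vertex farther than the node radius from all four of its KNN nodes gets label 1, the prefix sum assigns each survivor its output slot (prefixsum[i] - 1), and compactCandidateKernel scatters it there. For reference, the same selection expressed with Thrust is sketched below; this is an independent illustration, not how surfelwarp does it (the hand-rolled version lets it reuse its own buffers, prefix-sum helper, and stream).

#include <thrust/copy.h>
#include <thrust/device_vector.h>

struct IsCandidate {
    __host__ __device__ bool operator()(unsigned label) const { return label > 0; }
};

// Keep vertices[i] whenever labels[i] > 0, preserving order, as the scatter above does.
thrust::device_vector<float4> compact_candidates(
        const thrust::device_vector<float4>& vertices,
        const thrust::device_vector<unsigned>& labels)
{
    thrust::device_vector<float4> out(vertices.size());
    auto end = thrust::copy_if(vertices.begin(), vertices.end(),
                               labels.begin(), out.begin(), IsCandidate());
    out.resize(end - out.begin());
    return out;
}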
a3b3dfa94de986ae9cd25449f2fceeb0a0e7061f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #define Width 32 // size of Width x Width matrix #define TILE_WIDTH 16 __global__ void MatrixMulKernel (float* Md, float* Nd, float* Pd, int ncols){ int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; //Pvalue is used to store the element of the output matrix // that is computed by the thread float Pvalue = 0; for (int k = 0; k < ncols; ++k){ float Melement = Md[row*ncols+k]; float Nelement = Nd[k*ncols+col]; Pvalue += Melement * Nelement; } Pd[row*ncols+col] = Pvalue; } int main (int argc, char *argv[]){ int i,j; int size = Width * Width * sizeof(float); float M[Width][Width], N[Width][Width], P[Width][Width]; float* Md, *Nd, *Pd; for (i = 0; i < Width; i++){ for (j = 0; j < Width; j++){ M[i][j] = 1; N[i][j] = 2; } } hipMalloc( (void**)&Md, size); hipMalloc( (void**)&Nd, size); hipMalloc( (void**)&Pd, size); hipMemcpy( Md, M, size, hipMemcpyHostToDevice); hipMemcpy( Nd, N, size, hipMemcpyHostToDevice); //Setup the execution configuration dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); dim3 dimGrid(Width/TILE_WIDTH, Width/TILE_WIDTH); //Launch the device computation threads! hipLaunchKernelGGL(( MatrixMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, Md, Nd, Pd, Width); //Read P from the device hipMemcpy( P, Pd, size, hipMemcpyDeviceToHost); //Free device matrices hipFree( Md); hipFree( Nd); hipFree( Pd); for (i = 0; i < Width; i++){ for (j = 0; j < Width; j++){ printf("%.2f ",P[i][j]); } printf("\n"); } }
a3b3dfa94de986ae9cd25449f2fceeb0a0e7061f.cu
#include <stdio.h> #define Width 32 // size of Width x Width matrix #define TILE_WIDTH 16 __global__ void MatrixMulKernel (float* Md, float* Nd, float* Pd, int ncols){ int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; //Pvalue is used to store the element of the output matrix // that is computed by the thread float Pvalue = 0; for (int k = 0; k < ncols; ++k){ float Melement = Md[row*ncols+k]; float Nelement = Nd[k*ncols+col]; Pvalue += Melement * Nelement; } Pd[row*ncols+col] = Pvalue; } int main (int argc, char *argv[]){ int i,j; int size = Width * Width * sizeof(float); float M[Width][Width], N[Width][Width], P[Width][Width]; float* Md, *Nd, *Pd; for (i = 0; i < Width; i++){ for (j = 0; j < Width; j++){ M[i][j] = 1; N[i][j] = 2; } } cudaMalloc( (void**)&Md, size); cudaMalloc( (void**)&Nd, size); cudaMalloc( (void**)&Pd, size); cudaMemcpy( Md, M, size, cudaMemcpyHostToDevice); cudaMemcpy( Nd, N, size, cudaMemcpyHostToDevice); //Setup the execution configuration dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); dim3 dimGrid(Width/TILE_WIDTH, Width/TILE_WIDTH); //Launch the device computation threads! MatrixMulKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd, Width); //Read P from the device cudaMemcpy( P, Pd, size, cudaMemcpyDeviceToHost); //Free device matrices cudaFree( Md); cudaFree( Nd); cudaFree( Pd); for (i = 0; i < Width; i++){ for (j = 0; j < Width; j++){ printf("%.2f ",P[i][j]); } printf("\n"); } }
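Because M is filled with 1s and N with 2s, every element of P is a sum of Width products of 1 * 2, i.e. 2 * Width = 64, so the program should print a 32 x 32 grid of 64.00. A hedged check that could be appended to the program above:

// Every element of P should be exactly 2 * Width = 64 (the sum is exact in float).
bool verify(const float P[Width][Width])
{
    for (int i = 0; i < Width; ++i)
        for (int j = 0; j < Width; ++j)
            if (P[i][j] != 2.0f * Width) return false;
    return true;
}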
f6abb06e8f1fb5c633bd5a38c46a0bcab65b9096.hip
// !!! This is a file automatically generated by hipify!!! /* // This source file contains the Cuda Code for Amplification of a source Image. // It is a part of Cuda Image Processing Library . // Copyright (C) 2011 Remaldeep Singh // This library is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public // License as published by the Free Software Foundation; either // version 2.1 of the License, or (at your option) any later version. // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // Lesser General Public License for more details. // You should have received a copy of the GNU Lesser General Public // License along with this library; if not, write to the Free Software // Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "hip/hip_runtime.h" #include "../API/api.h" #include "assert.h" #include "stdio.h" __global__ void amplify( unsigned char *in, float ampValue, int size) { int x = threadIdx.x + __mul24(blockIdx.x,blockDim.x); int y = threadIdx.y + __mul24(blockIdx.y,blockDim.y); int offset = x + y * __mul24(blockDim.x, gridDim.x); float temp; if( offset < size ) { temp = (float)in[offset]; in[offset] = ((temp * (float)ampValue) > 255) ? 255 : (unsigned char)(temp * (float)ampValue); } } gpu_error_t gpu_amplify(gpu_context_t *ctx, float ampValue) { assert(ampValue); assert(ctx); float elapsedtime; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); gpu_error_t error = GPU_OK; int threadsX = ctx->threadsX; int threadsY = ctx->threadsY; int temp1 = ((ctx->width % threadsX) != 0 ? (ctx->width / threadsX) + 1 : ctx->width / threadsX ); int temp2 = ((ctx->height % threadsY) != 0 ? (ctx->height / threadsY) + 1 : ctx->height / threadsY ); /////////////////////////////// Amplify call //////////////////////////////////// dim3 threads(threadsX,threadsY); dim3 blocks(temp1,temp2); hipLaunchKernelGGL(( amplify), dim3(blocks),dim3(threads), 0, 0, ctx->gpu_buffer_1, ampValue, (ctx->width * ctx->height)); ////////////////////////////////////////////////////////////////////////////////// if(hipSuccess != hipMemcpy( ctx->output_buffer_1, ctx->gpu_buffer_1, ctx->width * ctx->height, hipMemcpyDeviceToHost)) error = GPU_ERR_MEM; hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedtime,start,stop); hipEventDestroy(start); hipEventDestroy(stop); //FILE *file; //file = fopen("../timing.txt","a+"); fprintf(stderr,"Amplify:%lf \n",elapsedtime); //fclose(file); return error; }
f6abb06e8f1fb5c633bd5a38c46a0bcab65b9096.cu
/* // This source file contains the Cuda Code for Amplification of a source Image. // It is a part of Cuda Image Processing Library . // Copyright (C) 2011 Remaldeep Singh // This library is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public // License as published by the Free Software Foundation; either // version 2.1 of the License, or (at your option) any later version. // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // Lesser General Public License for more details. // You should have received a copy of the GNU Lesser General Public // License along with this library; if not, write to the Free Software // Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "cuda.h" #include "../API/api.h" #include "assert.h" #include "stdio.h" __global__ void amplify( unsigned char *in, float ampValue, int size) { int x = threadIdx.x + __mul24(blockIdx.x,blockDim.x); int y = threadIdx.y + __mul24(blockIdx.y,blockDim.y); int offset = x + y * __mul24(blockDim.x, gridDim.x); float temp; if( offset < size ) { temp = (float)in[offset]; in[offset] = ((temp * (float)ampValue) > 255) ? 255 : (unsigned char)(temp * (float)ampValue); } } gpu_error_t gpu_amplify(gpu_context_t *ctx, float ampValue) { assert(ampValue); assert(ctx); float elapsedtime; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); gpu_error_t error = GPU_OK; int threadsX = ctx->threadsX; int threadsY = ctx->threadsY; int temp1 = ((ctx->width % threadsX) != 0 ? (ctx->width / threadsX) + 1 : ctx->width / threadsX ); int temp2 = ((ctx->height % threadsY) != 0 ? (ctx->height / threadsY) + 1 : ctx->height / threadsY ); /////////////////////////////// Amplify call //////////////////////////////////// dim3 threads(threadsX,threadsY); dim3 blocks(temp1,temp2); amplify<<<blocks,threads>>>( ctx->gpu_buffer_1, ampValue, (ctx->width * ctx->height)); ////////////////////////////////////////////////////////////////////////////////// if(cudaSuccess != cudaMemcpy( ctx->output_buffer_1, ctx->gpu_buffer_1, ctx->width * ctx->height, cudaMemcpyDeviceToHost)) error = GPU_ERR_MEM; cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedtime,start,stop); cudaEventDestroy(start); cudaEventDestroy(stop); //FILE *file; //file = fopen("../timing.txt","a+"); fprintf(stderr,"Amplify:%lf \n",elapsedtime); //fclose(file); return error; }
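The temp1/temp2 expressions in gpu_amplify are the usual ceiling-division launch geometry; below is a minimal host-only sketch of the same rounding, using a hypothetical divUp helper that is not part of this library.
#include <cstdio>
// divUp is a hypothetical helper; it reproduces the "(a % b != 0) ? a/b + 1 : a/b" expressions above.
static inline int divUp(int a, int b) { return (a + b - 1) / b; }
int main() {
    int width = 1921, height = 1080, threadsX = 16, threadsY = 16;
    // A 1921x1080 image with 16x16 blocks needs a 121x68 grid (both dimensions rounded up).
    printf("grid = %d x %d\n", divUp(width, threadsX), divUp(height, threadsY));
    return 0;
}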
bd71d63d7e3445bc20abe06f2cefdea52d2c254f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cfloat> #include <vector> #include "caffe/layers/eltwiseself_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a, const Dtype* bottom_data_b, const int blob_idx, Dtype* top_data, int* mask) { CUDA_KERNEL_LOOP(index, nthreads) { Dtype maxval = -FLT_MAX; int maxidx = -1; if (bottom_data_a[index] > bottom_data_b[index]) { // only update for very first bottom_data blob (blob_idx == 0) if (blob_idx == 0) { maxval = bottom_data_a[index]; top_data[index] = maxval; maxidx = blob_idx; mask[index] = maxidx; } } else { maxval = bottom_data_b[index]; top_data[index] = maxval; maxidx = blob_idx + 1; mask[index] = maxidx; } } } template <typename Dtype> void EltwiseSelfLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int* mask = NULL; const int count = top[0]->count(); Dtype* top_data = top[0]->mutable_gpu_data(); switch (op_) { case EltwiseSelfParameter_EltwiseOp_PROD: caffe_gpu_mul(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), top_data); for (int i = 2; i < bottom.size(); ++i) { caffe_gpu_mul(count, top_data, bottom[i]->gpu_data(), top_data); } break; case EltwiseSelfParameter_EltwiseOp_SUM: caffe_gpu_set(count, Dtype(0.), top_data); // TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1? for (int i = 0; i < bottom.size(); ++i) { caffe_gpu_axpy(count, coeffs_[i], bottom[i]->gpu_data(), top_data); } break; case EltwiseSelfParameter_EltwiseOp_MAX: mask = max_idx_.mutable_gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxForward<Dtype>) , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), 0, top_data, mask); for (int i = 2; i < bottom.size(); ++i) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_data, bottom[i]->gpu_data(), i-1, top_data, mask); } break; default: LOG(FATAL) << "Unknown elementwise operation."; } } template <typename Dtype> __global__ void MaxBackward(const int nthreads, const Dtype* top_diff, const int blob_idx, const int* mask, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { Dtype gradient = 0; if (mask[index] == blob_idx) { gradient += top_diff[index]; } bottom_diff[index] = gradient; } } template <typename Dtype> void EltwiseSelfLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const int* mask = NULL; const int count = top[0]->count(); const Dtype* top_data = top[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); for (int i = 0; i < bottom.size(); ++i) { if (propagate_down[i]) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); switch (op_) { case EltwiseSelfParameter_EltwiseOp_PROD: if (stable_prod_grad_) { bool initialized = false; for (int j = 0; j < bottom.size(); ++j) { if (i == j) { continue; } if (!initialized) { caffe_copy(count, bottom[j]->gpu_data(), bottom_diff); initialized = true; } else { caffe_gpu_mul(count, bottom[j]->gpu_data(), bottom_diff, bottom_diff); } } } else { caffe_gpu_div(count, top_data, bottom_data, bottom_diff); } caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff); break; case EltwiseSelfParameter_EltwiseOp_SUM: if 
(coeffs_[i] == Dtype(1.)) { caffe_copy(count, top_diff, bottom_diff); } else { caffe_gpu_scale(count, coeffs_[i], top_diff, bottom_diff); } break; case EltwiseSelfParameter_EltwiseOp_MAX: mask = max_idx_.gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, i, mask, bottom_diff); break; default: LOG(FATAL) << "Unknown elementwise operation."; } } } } INSTANTIATE_LAYER_GPU_FUNCS(EltwiseSelfLayer); } // namespace caffe
bd71d63d7e3445bc20abe06f2cefdea52d2c254f.cu
#include <cfloat> #include <vector> #include "caffe/layers/eltwiseself_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a, const Dtype* bottom_data_b, const int blob_idx, Dtype* top_data, int* mask) { CUDA_KERNEL_LOOP(index, nthreads) { Dtype maxval = -FLT_MAX; int maxidx = -1; if (bottom_data_a[index] > bottom_data_b[index]) { // only update for very first bottom_data blob (blob_idx == 0) if (blob_idx == 0) { maxval = bottom_data_a[index]; top_data[index] = maxval; maxidx = blob_idx; mask[index] = maxidx; } } else { maxval = bottom_data_b[index]; top_data[index] = maxval; maxidx = blob_idx + 1; mask[index] = maxidx; } } } template <typename Dtype> void EltwiseSelfLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int* mask = NULL; const int count = top[0]->count(); Dtype* top_data = top[0]->mutable_gpu_data(); switch (op_) { case EltwiseSelfParameter_EltwiseOp_PROD: caffe_gpu_mul(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), top_data); for (int i = 2; i < bottom.size(); ++i) { caffe_gpu_mul(count, top_data, bottom[i]->gpu_data(), top_data); } break; case EltwiseSelfParameter_EltwiseOp_SUM: caffe_gpu_set(count, Dtype(0.), top_data); // TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1? for (int i = 0; i < bottom.size(); ++i) { caffe_gpu_axpy(count, coeffs_[i], bottom[i]->gpu_data(), top_data); } break; case EltwiseSelfParameter_EltwiseOp_MAX: mask = max_idx_.mutable_gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) MaxForward<Dtype> <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), 0, top_data, mask); for (int i = 2; i < bottom.size(); ++i) { // NOLINT_NEXT_LINE(whitespace/operators) MaxForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_data, bottom[i]->gpu_data(), i-1, top_data, mask); } break; default: LOG(FATAL) << "Unknown elementwise operation."; } } template <typename Dtype> __global__ void MaxBackward(const int nthreads, const Dtype* top_diff, const int blob_idx, const int* mask, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { Dtype gradient = 0; if (mask[index] == blob_idx) { gradient += top_diff[index]; } bottom_diff[index] = gradient; } } template <typename Dtype> void EltwiseSelfLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const int* mask = NULL; const int count = top[0]->count(); const Dtype* top_data = top[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); for (int i = 0; i < bottom.size(); ++i) { if (propagate_down[i]) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); switch (op_) { case EltwiseSelfParameter_EltwiseOp_PROD: if (stable_prod_grad_) { bool initialized = false; for (int j = 0; j < bottom.size(); ++j) { if (i == j) { continue; } if (!initialized) { caffe_copy(count, bottom[j]->gpu_data(), bottom_diff); initialized = true; } else { caffe_gpu_mul(count, bottom[j]->gpu_data(), bottom_diff, bottom_diff); } } } else { caffe_gpu_div(count, top_data, bottom_data, bottom_diff); } caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff); break; case EltwiseSelfParameter_EltwiseOp_SUM: if (coeffs_[i] == Dtype(1.)) { caffe_copy(count, top_diff, bottom_diff); } else { caffe_gpu_scale(count, coeffs_[i], top_diff, bottom_diff); } break; case 
EltwiseSelfParameter_EltwiseOp_MAX: mask = max_idx_.gpu_data(); MaxBackward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, i, mask, bottom_diff); break; default: LOG(FATAL) << "Unknown elementwise operation."; } } } } INSTANTIATE_LAYER_GPU_FUNCS(EltwiseSelfLayer); } // namespace caffe
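Both kernels in this pair are written around Caffe's CUDA_KERNEL_LOOP / CAFFE_GET_BLOCKS launch idiom; the following is a minimal self-contained sketch of that grid-stride pattern (the scaleKernel name and the 512-thread block size are illustrative, not Caffe's exact definitions).
#include <cuda_runtime.h>
#include <cstdio>

// Grid-stride loop: each thread covers indices i, i + stride, i + 2*stride, ... up to n.
__global__ void scaleKernel(const float* in, float* out, float alpha, int n) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
    out[i] = alpha * in[i];
}

int main() {
  const int n = 1 << 20, threads = 512;
  const int blocks = (n + threads - 1) / threads;  // same ceil-division that CAFFE_GET_BLOCKS performs
  float *d_in = NULL, *d_out = NULL;
  cudaMalloc(&d_in, n * sizeof(float));
  cudaMalloc(&d_out, n * sizeof(float));
  cudaMemset(d_in, 0, n * sizeof(float));
  scaleKernel<<<blocks, threads>>>(d_in, d_out, 2.0f, n);
  cudaDeviceSynchronize();
  printf("launched %d blocks of %d threads\n", blocks, threads);
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}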
c0df2d41d55539b87307af7c39c362468720c097.hip
// !!! This is a file automatically generated by hipify!!! /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #include "opencv2/opencv_modules.hpp" #ifndef HAVE_OPENCV_CUDEV #error "opencv_cudev is required" #else #include "opencv2/cudaarithm.hpp" #include "opencv2/cudev.hpp" #include "opencv2/core/private.cuda.hpp" using namespace cv; using namespace cv::cuda; using namespace cv::cudev; namespace { template <typename T1, typename T2, typename D, typename S> struct AddWeightedOp : binary_function<T1, T2, D> { S alpha; S beta; S gamma; __device__ __forceinline__ D operator ()(T1 a, T2 b) const { return cudev::saturate_cast<D>(a * alpha + b * beta + gamma); } }; template <typename ScalarDepth> struct TransformPolicy : DefaultTransformPolicy { }; template <> struct TransformPolicy<double> : DefaultTransformPolicy { enum { shift = 1 }; }; template <typename T1, typename T2, typename D> void addWeightedImpl(const GpuMat& src1, double alpha, const GpuMat& src2, double beta, double gamma, GpuMat& dst, Stream& stream) { typedef typename LargerType<T1, T2>::type larger_type1; typedef typename LargerType<larger_type1, D>::type larger_type2; typedef typename LargerType<larger_type2, float>::type scalar_type; AddWeightedOp<T1, T2, D, scalar_type> op; op.alpha = static_cast<scalar_type>(alpha); op.beta = static_cast<scalar_type>(beta); op.gamma = static_cast<scalar_type>(gamma); gridTransformBinary_< TransformPolicy<scalar_type> >(globPtr<T1>(src1), globPtr<T2>(src2), globPtr<D>(dst), op, stream); } } void cv::cuda::addWeighted(InputArray _src1, double alpha, InputArray _src2, double beta, double gamma, OutputArray _dst, int ddepth, Stream& stream) { typedef void (*func_t)(const GpuMat& src1, double alpha, const GpuMat& src2, double beta, double gamma, GpuMat& dst, Stream& stream); static const func_t funcs[7][7][7] = { { { addWeightedImpl<uchar, uchar, uchar >, addWeightedImpl<uchar, uchar, schar >, addWeightedImpl<uchar, uchar, ushort>, addWeightedImpl<uchar, uchar, short >, addWeightedImpl<uchar, uchar, int >, addWeightedImpl<uchar, uchar, float >, addWeightedImpl<uchar, uchar, double> }, { addWeightedImpl<uchar, schar, uchar >, addWeightedImpl<uchar, schar, schar >, addWeightedImpl<uchar, schar, ushort>, addWeightedImpl<uchar, schar, short >, addWeightedImpl<uchar, schar, int >, addWeightedImpl<uchar, schar, float >, addWeightedImpl<uchar, schar, double> }, { addWeightedImpl<uchar, ushort, uchar >, addWeightedImpl<uchar, ushort, schar >, addWeightedImpl<uchar, ushort, ushort>, addWeightedImpl<uchar, ushort, short >, addWeightedImpl<uchar, ushort, int >, addWeightedImpl<uchar, ushort, float >, addWeightedImpl<uchar, ushort, double> }, { addWeightedImpl<uchar, short, uchar >, addWeightedImpl<uchar, short, schar >, addWeightedImpl<uchar, short, ushort>, addWeightedImpl<uchar, short, short >, addWeightedImpl<uchar, short, int >, addWeightedImpl<uchar, short, float >, addWeightedImpl<uchar, short, double> }, { addWeightedImpl<uchar, int, uchar >, addWeightedImpl<uchar, int, schar >, addWeightedImpl<uchar, int, ushort>, addWeightedImpl<uchar, int, short >, addWeightedImpl<uchar, int, int >, addWeightedImpl<uchar, int, float >, addWeightedImpl<uchar, int, double> }, { addWeightedImpl<uchar, float, uchar >, addWeightedImpl<uchar, float, schar >, addWeightedImpl<uchar, float, ushort>, addWeightedImpl<uchar, float, short >, addWeightedImpl<uchar, float, int >, addWeightedImpl<uchar, float, float >, addWeightedImpl<uchar, float, double> }, { addWeightedImpl<uchar, double, uchar >, addWeightedImpl<uchar, double, schar >, addWeightedImpl<uchar, double, ushort>, addWeightedImpl<uchar, double, short >, 
addWeightedImpl<uchar, double, int >, addWeightedImpl<uchar, double, float >, addWeightedImpl<uchar, double, double> } }, { { 0/*addWeightedImpl<schar, uchar, uchar >*/, 0/*addWeightedImpl<schar, uchar, schar >*/, 0/*addWeightedImpl<schar, uchar, ushort>*/, 0/*addWeightedImpl<schar, uchar, short >*/, 0/*addWeightedImpl<schar, uchar, int >*/, 0/*addWeightedImpl<schar, uchar, float >*/, 0/*addWeightedImpl<schar, uchar, double>*/ }, { addWeightedImpl<schar, schar, uchar >, addWeightedImpl<schar, schar, schar >, addWeightedImpl<schar, schar, ushort>, addWeightedImpl<schar, schar, short >, addWeightedImpl<schar, schar, int >, addWeightedImpl<schar, schar, float >, addWeightedImpl<schar, schar, double> }, { addWeightedImpl<schar, ushort, uchar >, addWeightedImpl<schar, ushort, schar >, addWeightedImpl<schar, ushort, ushort>, addWeightedImpl<schar, ushort, short >, addWeightedImpl<schar, ushort, int >, addWeightedImpl<schar, ushort, float >, addWeightedImpl<schar, ushort, double> }, { addWeightedImpl<schar, short, uchar >, addWeightedImpl<schar, short, schar >, addWeightedImpl<schar, short, ushort>, addWeightedImpl<schar, short, short >, addWeightedImpl<schar, short, int >, addWeightedImpl<schar, short, float >, addWeightedImpl<schar, short, double> }, { addWeightedImpl<schar, int, uchar >, addWeightedImpl<schar, int, schar >, addWeightedImpl<schar, int, ushort>, addWeightedImpl<schar, int, short >, addWeightedImpl<schar, int, int >, addWeightedImpl<schar, int, float >, addWeightedImpl<schar, int, double> }, { addWeightedImpl<schar, float, uchar >, addWeightedImpl<schar, float, schar >, addWeightedImpl<schar, float, ushort>, addWeightedImpl<schar, float, short >, addWeightedImpl<schar, float, int >, addWeightedImpl<schar, float, float >, addWeightedImpl<schar, float, double> }, { addWeightedImpl<schar, double, uchar >, addWeightedImpl<schar, double, schar >, addWeightedImpl<schar, double, ushort>, addWeightedImpl<schar, double, short >, addWeightedImpl<schar, double, int >, addWeightedImpl<schar, double, float >, addWeightedImpl<schar, double, double> } }, { { 0/*addWeightedImpl<ushort, uchar, uchar >*/, 0/*addWeightedImpl<ushort, uchar, schar >*/, 0/*addWeightedImpl<ushort, uchar, ushort>*/, 0/*addWeightedImpl<ushort, uchar, short >*/, 0/*addWeightedImpl<ushort, uchar, int >*/, 0/*addWeightedImpl<ushort, uchar, float >*/, 0/*addWeightedImpl<ushort, uchar, double>*/ }, { 0/*addWeightedImpl<ushort, schar, uchar >*/, 0/*addWeightedImpl<ushort, schar, schar >*/, 0/*addWeightedImpl<ushort, schar, ushort>*/, 0/*addWeightedImpl<ushort, schar, short >*/, 0/*addWeightedImpl<ushort, schar, int >*/, 0/*addWeightedImpl<ushort, schar, float >*/, 0/*addWeightedImpl<ushort, schar, double>*/ }, { addWeightedImpl<ushort, ushort, uchar >, addWeightedImpl<ushort, ushort, schar >, addWeightedImpl<ushort, ushort, ushort>, addWeightedImpl<ushort, ushort, short >, addWeightedImpl<ushort, ushort, int >, addWeightedImpl<ushort, ushort, float >, addWeightedImpl<ushort, ushort, double> }, { addWeightedImpl<ushort, short, uchar >, addWeightedImpl<ushort, short, schar >, addWeightedImpl<ushort, short, ushort>, addWeightedImpl<ushort, short, short >, addWeightedImpl<ushort, short, int >, addWeightedImpl<ushort, short, float >, addWeightedImpl<ushort, short, double> }, { addWeightedImpl<ushort, int, uchar >, addWeightedImpl<ushort, int, schar >, addWeightedImpl<ushort, int, ushort>, addWeightedImpl<ushort, int, short >, addWeightedImpl<ushort, int, int >, addWeightedImpl<ushort, int, float >, addWeightedImpl<ushort, int, 
double> }, { addWeightedImpl<ushort, float, uchar >, addWeightedImpl<ushort, float, schar >, addWeightedImpl<ushort, float, ushort>, addWeightedImpl<ushort, float, short >, addWeightedImpl<ushort, float, int >, addWeightedImpl<ushort, float, float >, addWeightedImpl<ushort, float, double> }, { addWeightedImpl<ushort, double, uchar >, addWeightedImpl<ushort, double, schar >, addWeightedImpl<ushort, double, ushort>, addWeightedImpl<ushort, double, short >, addWeightedImpl<ushort, double, int >, addWeightedImpl<ushort, double, float >, addWeightedImpl<ushort, double, double> } }, { { 0/*addWeightedImpl<short, uchar, uchar >*/, 0/*addWeightedImpl<short, uchar, schar >*/, 0/*addWeightedImpl<short, uchar, ushort>*/, 0/*addWeightedImpl<short, uchar, short >*/, 0/*addWeightedImpl<short, uchar, int >*/, 0/*addWeightedImpl<short, uchar, float >*/, 0/*addWeightedImpl<short, uchar, double>*/ }, { 0/*addWeightedImpl<short, schar, uchar >*/, 0/*addWeightedImpl<short, schar, schar >*/, 0/*addWeightedImpl<short, schar, ushort>*/, 0/*addWeightedImpl<short, schar, short >*/, 0/*addWeightedImpl<short, schar, int >*/, 0/*addWeightedImpl<short, schar, float >*/, 0/*addWeightedImpl<short, schar, double>*/ }, { 0/*addWeightedImpl<short, ushort, uchar >*/, 0/*addWeightedImpl<short, ushort, schar >*/, 0/*addWeightedImpl<short, ushort, ushort>*/, 0/*addWeightedImpl<short, ushort, short >*/, 0/*addWeightedImpl<short, ushort, int >*/, 0/*addWeightedImpl<short, ushort, float >*/, 0/*addWeightedImpl<short, ushort, double>*/ }, { addWeightedImpl<short, short, uchar >, addWeightedImpl<short, short, schar >, addWeightedImpl<short, short, ushort>, addWeightedImpl<short, short, short >, addWeightedImpl<short, short, int >, addWeightedImpl<short, short, float >, addWeightedImpl<short, short, double> }, { addWeightedImpl<short, int, uchar >, addWeightedImpl<short, int, schar >, addWeightedImpl<short, int, ushort>, addWeightedImpl<short, int, short >, addWeightedImpl<short, int, int >, addWeightedImpl<short, int, float >, addWeightedImpl<short, int, double> }, { addWeightedImpl<short, float, uchar >, addWeightedImpl<short, float, schar >, addWeightedImpl<short, float, ushort>, addWeightedImpl<short, float, short >, addWeightedImpl<short, float, int >, addWeightedImpl<short, float, float >, addWeightedImpl<short, float, double> }, { addWeightedImpl<short, double, uchar >, addWeightedImpl<short, double, schar >, addWeightedImpl<short, double, ushort>, addWeightedImpl<short, double, short >, addWeightedImpl<short, double, int >, addWeightedImpl<short, double, float >, addWeightedImpl<short, double, double> } }, { { 0/*addWeightedImpl<int, uchar, uchar >*/, 0/*addWeightedImpl<int, uchar, schar >*/, 0/*addWeightedImpl<int, uchar, ushort>*/, 0/*addWeightedImpl<int, uchar, short >*/, 0/*addWeightedImpl<int, uchar, int >*/, 0/*addWeightedImpl<int, uchar, float >*/, 0/*addWeightedImpl<int, uchar, double>*/ }, { 0/*addWeightedImpl<int, schar, uchar >*/, 0/*addWeightedImpl<int, schar, schar >*/, 0/*addWeightedImpl<int, schar, ushort>*/, 0/*addWeightedImpl<int, schar, short >*/, 0/*addWeightedImpl<int, schar, int >*/, 0/*addWeightedImpl<int, schar, float >*/, 0/*addWeightedImpl<int, schar, double>*/ }, { 0/*addWeightedImpl<int, ushort, uchar >*/, 0/*addWeightedImpl<int, ushort, schar >*/, 0/*addWeightedImpl<int, ushort, ushort>*/, 0/*addWeightedImpl<int, ushort, short >*/, 0/*addWeightedImpl<int, ushort, int >*/, 0/*addWeightedImpl<int, ushort, float >*/, 0/*addWeightedImpl<int, ushort, double>*/ }, { 0/*addWeightedImpl<int, short, uchar 
>*/, 0/*addWeightedImpl<int, short, schar >*/, 0/*addWeightedImpl<int, short, ushort>*/, 0/*addWeightedImpl<int, short, short >*/, 0/*addWeightedImpl<int, short, int >*/, 0/*addWeightedImpl<int, short, float >*/, 0/*addWeightedImpl<int, short, double>*/ }, { addWeightedImpl<int, int, uchar >, addWeightedImpl<int, int, schar >, addWeightedImpl<int, int, ushort>, addWeightedImpl<int, int, short >, addWeightedImpl<int, int, int >, addWeightedImpl<int, int, float >, addWeightedImpl<int, int, double> }, { addWeightedImpl<int, float, uchar >, addWeightedImpl<int, float, schar >, addWeightedImpl<int, float, ushort>, addWeightedImpl<int, float, short >, addWeightedImpl<int, float, int >, addWeightedImpl<int, float, float >, addWeightedImpl<int, float, double> }, { addWeightedImpl<int, double, uchar >, addWeightedImpl<int, double, schar >, addWeightedImpl<int, double, ushort>, addWeightedImpl<int, double, short >, addWeightedImpl<int, double, int >, addWeightedImpl<int, double, float >, addWeightedImpl<int, double, double> } }, { { 0/*addWeightedImpl<float, uchar, uchar >*/, 0/*addWeightedImpl<float, uchar, schar >*/, 0/*addWeightedImpl<float, uchar, ushort>*/, 0/*addWeightedImpl<float, uchar, short >*/, 0/*addWeightedImpl<float, uchar, int >*/, 0/*addWeightedImpl<float, uchar, float >*/, 0/*addWeightedImpl<float, uchar, double>*/ }, { 0/*addWeightedImpl<float, schar, uchar >*/, 0/*addWeightedImpl<float, schar, schar >*/, 0/*addWeightedImpl<float, schar, ushort>*/, 0/*addWeightedImpl<float, schar, short >*/, 0/*addWeightedImpl<float, schar, int >*/, 0/*addWeightedImpl<float, schar, float >*/, 0/*addWeightedImpl<float, schar, double>*/ }, { 0/*addWeightedImpl<float, ushort, uchar >*/, 0/*addWeightedImpl<float, ushort, schar >*/, 0/*addWeightedImpl<float, ushort, ushort>*/, 0/*addWeightedImpl<float, ushort, short >*/, 0/*addWeightedImpl<float, ushort, int >*/, 0/*addWeightedImpl<float, ushort, float >*/, 0/*addWeightedImpl<float, ushort, double>*/ }, { 0/*addWeightedImpl<float, short, uchar >*/, 0/*addWeightedImpl<float, short, schar >*/, 0/*addWeightedImpl<float, short, ushort>*/, 0/*addWeightedImpl<float, short, short >*/, 0/*addWeightedImpl<float, short, int >*/, 0/*addWeightedImpl<float, short, float >*/, 0/*addWeightedImpl<float, short, double>*/ }, { 0/*addWeightedImpl<float, int, uchar >*/, 0/*addWeightedImpl<float, int, schar >*/, 0/*addWeightedImpl<float, int, ushort>*/, 0/*addWeightedImpl<float, int, short >*/, 0/*addWeightedImpl<float, int, int >*/, 0/*addWeightedImpl<float, int, float >*/, 0/*addWeightedImpl<float, int, double>*/ }, { addWeightedImpl<float, float, uchar >, addWeightedImpl<float, float, schar >, addWeightedImpl<float, float, ushort>, addWeightedImpl<float, float, short >, addWeightedImpl<float, float, int >, addWeightedImpl<float, float, float >, addWeightedImpl<float, float, double> }, { addWeightedImpl<float, double, uchar >, addWeightedImpl<float, double, schar >, addWeightedImpl<float, double, ushort>, addWeightedImpl<float, double, short >, addWeightedImpl<float, double, int >, addWeightedImpl<float, double, float >, addWeightedImpl<float, double, double> } }, { { 0/*addWeightedImpl<double, uchar, uchar >*/, 0/*addWeightedImpl<double, uchar, schar >*/, 0/*addWeightedImpl<double, uchar, ushort>*/, 0/*addWeightedImpl<double, uchar, short >*/, 0/*addWeightedImpl<double, uchar, int >*/, 0/*addWeightedImpl<double, uchar, float >*/, 0/*addWeightedImpl<double, uchar, double>*/ }, { 0/*addWeightedImpl<double, schar, uchar >*/, 0/*addWeightedImpl<double, schar, schar >*/, 
0/*addWeightedImpl<double, schar, ushort>*/, 0/*addWeightedImpl<double, schar, short >*/, 0/*addWeightedImpl<double, schar, int >*/, 0/*addWeightedImpl<double, schar, float >*/, 0/*addWeightedImpl<double, schar, double>*/ }, { 0/*addWeightedImpl<double, ushort, uchar >*/, 0/*addWeightedImpl<double, ushort, schar >*/, 0/*addWeightedImpl<double, ushort, ushort>*/, 0/*addWeightedImpl<double, ushort, short >*/, 0/*addWeightedImpl<double, ushort, int >*/, 0/*addWeightedImpl<double, ushort, float >*/, 0/*addWeightedImpl<double, ushort, double>*/ }, { 0/*addWeightedImpl<double, short, uchar >*/, 0/*addWeightedImpl<double, short, schar >*/, 0/*addWeightedImpl<double, short, ushort>*/, 0/*addWeightedImpl<double, short, short >*/, 0/*addWeightedImpl<double, short, int >*/, 0/*addWeightedImpl<double, short, float >*/, 0/*addWeightedImpl<double, short, double>*/ }, { 0/*addWeightedImpl<double, int, uchar >*/, 0/*addWeightedImpl<double, int, schar >*/, 0/*addWeightedImpl<double, int, ushort>*/, 0/*addWeightedImpl<double, int, short >*/, 0/*addWeightedImpl<double, int, int >*/, 0/*addWeightedImpl<double, int, float >*/, 0/*addWeightedImpl<double, int, double>*/ }, { 0/*addWeightedImpl<double, float, uchar >*/, 0/*addWeightedImpl<double, float, schar >*/, 0/*addWeightedImpl<double, float, ushort>*/, 0/*addWeightedImpl<double, float, short >*/, 0/*addWeightedImpl<double, float, int >*/, 0/*addWeightedImpl<double, float, float >*/, 0/*addWeightedImpl<double, float, double>*/ }, { addWeightedImpl<double, double, uchar >, addWeightedImpl<double, double, schar >, addWeightedImpl<double, double, ushort>, addWeightedImpl<double, double, short >, addWeightedImpl<double, double, int >, addWeightedImpl<double, double, float >, addWeightedImpl<double, double, double> } } }; GpuMat src1 = getInputMat(_src1, stream); GpuMat src2 = getInputMat(_src2, stream); int sdepth1 = src1.depth(); int sdepth2 = src2.depth(); ddepth = ddepth >= 0 ? CV_MAT_DEPTH(ddepth) : ::max(sdepth1, sdepth2); const int cn = src1.channels(); CV_Assert( src2.size() == src1.size() && src2.channels() == cn ); CV_Assert( sdepth1 <= CV_64F && sdepth2 <= CV_64F && ddepth <= CV_64F ); GpuMat dst = getOutputMat(_dst, src1.size(), CV_MAKE_TYPE(ddepth, cn), stream); GpuMat src1_single = src1.reshape(1); GpuMat src2_single = src2.reshape(1); GpuMat dst_single = dst.reshape(1); if (sdepth1 > sdepth2) { src1_single.swap(src2_single); std::swap(alpha, beta); std::swap(sdepth1, sdepth2); } const func_t func = funcs[sdepth1][sdepth2][ddepth]; if (!func) CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported combination of source and destination types"); func(src1_single, alpha, src2_single, beta, gamma, dst_single, stream); syncOutput(dst, _dst, stream); } #endif
c0df2d41d55539b87307af7c39c362468720c097.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #include "opencv2/opencv_modules.hpp" #ifndef HAVE_OPENCV_CUDEV #error "opencv_cudev is required" #else #include "opencv2/cudaarithm.hpp" #include "opencv2/cudev.hpp" #include "opencv2/core/private.cuda.hpp" using namespace cv; using namespace cv::cuda; using namespace cv::cudev; namespace { template <typename T1, typename T2, typename D, typename S> struct AddWeightedOp : binary_function<T1, T2, D> { S alpha; S beta; S gamma; __device__ __forceinline__ D operator ()(T1 a, T2 b) const { return cudev::saturate_cast<D>(a * alpha + b * beta + gamma); } }; template <typename ScalarDepth> struct TransformPolicy : DefaultTransformPolicy { }; template <> struct TransformPolicy<double> : DefaultTransformPolicy { enum { shift = 1 }; }; template <typename T1, typename T2, typename D> void addWeightedImpl(const GpuMat& src1, double alpha, const GpuMat& src2, double beta, double gamma, GpuMat& dst, Stream& stream) { typedef typename LargerType<T1, T2>::type larger_type1; typedef typename LargerType<larger_type1, D>::type larger_type2; typedef typename LargerType<larger_type2, float>::type scalar_type; AddWeightedOp<T1, T2, D, scalar_type> op; op.alpha = static_cast<scalar_type>(alpha); op.beta = static_cast<scalar_type>(beta); op.gamma = static_cast<scalar_type>(gamma); gridTransformBinary_< TransformPolicy<scalar_type> >(globPtr<T1>(src1), globPtr<T2>(src2), globPtr<D>(dst), op, stream); } } void cv::cuda::addWeighted(InputArray _src1, double alpha, InputArray _src2, double beta, double gamma, OutputArray _dst, int ddepth, Stream& stream) { typedef void (*func_t)(const GpuMat& src1, double alpha, const GpuMat& src2, double beta, double gamma, GpuMat& dst, Stream& stream); static const func_t funcs[7][7][7] = { { { addWeightedImpl<uchar, uchar, uchar >, addWeightedImpl<uchar, uchar, schar >, addWeightedImpl<uchar, uchar, ushort>, addWeightedImpl<uchar, uchar, short >, addWeightedImpl<uchar, uchar, int >, addWeightedImpl<uchar, uchar, float >, addWeightedImpl<uchar, uchar, double> }, { addWeightedImpl<uchar, schar, uchar >, addWeightedImpl<uchar, schar, schar >, addWeightedImpl<uchar, schar, ushort>, addWeightedImpl<uchar, schar, short >, addWeightedImpl<uchar, schar, int >, addWeightedImpl<uchar, schar, float >, addWeightedImpl<uchar, schar, double> }, { addWeightedImpl<uchar, ushort, uchar >, addWeightedImpl<uchar, ushort, schar >, addWeightedImpl<uchar, ushort, ushort>, addWeightedImpl<uchar, ushort, short >, addWeightedImpl<uchar, ushort, int >, addWeightedImpl<uchar, ushort, float >, addWeightedImpl<uchar, ushort, double> }, { addWeightedImpl<uchar, short, uchar >, addWeightedImpl<uchar, short, schar >, addWeightedImpl<uchar, short, ushort>, addWeightedImpl<uchar, short, short >, addWeightedImpl<uchar, short, int >, addWeightedImpl<uchar, short, float >, addWeightedImpl<uchar, short, double> }, { addWeightedImpl<uchar, int, uchar >, addWeightedImpl<uchar, int, schar >, addWeightedImpl<uchar, int, ushort>, addWeightedImpl<uchar, int, short >, addWeightedImpl<uchar, int, int >, addWeightedImpl<uchar, int, float >, addWeightedImpl<uchar, int, double> }, { addWeightedImpl<uchar, float, uchar >, addWeightedImpl<uchar, float, schar >, addWeightedImpl<uchar, float, ushort>, addWeightedImpl<uchar, float, short >, addWeightedImpl<uchar, float, int >, addWeightedImpl<uchar, float, float >, addWeightedImpl<uchar, float, double> }, { addWeightedImpl<uchar, double, uchar >, addWeightedImpl<uchar, double, schar >, addWeightedImpl<uchar, double, ushort>, addWeightedImpl<uchar, double, short >, 
addWeightedImpl<uchar, double, int >, addWeightedImpl<uchar, double, float >, addWeightedImpl<uchar, double, double> } }, { { 0/*addWeightedImpl<schar, uchar, uchar >*/, 0/*addWeightedImpl<schar, uchar, schar >*/, 0/*addWeightedImpl<schar, uchar, ushort>*/, 0/*addWeightedImpl<schar, uchar, short >*/, 0/*addWeightedImpl<schar, uchar, int >*/, 0/*addWeightedImpl<schar, uchar, float >*/, 0/*addWeightedImpl<schar, uchar, double>*/ }, { addWeightedImpl<schar, schar, uchar >, addWeightedImpl<schar, schar, schar >, addWeightedImpl<schar, schar, ushort>, addWeightedImpl<schar, schar, short >, addWeightedImpl<schar, schar, int >, addWeightedImpl<schar, schar, float >, addWeightedImpl<schar, schar, double> }, { addWeightedImpl<schar, ushort, uchar >, addWeightedImpl<schar, ushort, schar >, addWeightedImpl<schar, ushort, ushort>, addWeightedImpl<schar, ushort, short >, addWeightedImpl<schar, ushort, int >, addWeightedImpl<schar, ushort, float >, addWeightedImpl<schar, ushort, double> }, { addWeightedImpl<schar, short, uchar >, addWeightedImpl<schar, short, schar >, addWeightedImpl<schar, short, ushort>, addWeightedImpl<schar, short, short >, addWeightedImpl<schar, short, int >, addWeightedImpl<schar, short, float >, addWeightedImpl<schar, short, double> }, { addWeightedImpl<schar, int, uchar >, addWeightedImpl<schar, int, schar >, addWeightedImpl<schar, int, ushort>, addWeightedImpl<schar, int, short >, addWeightedImpl<schar, int, int >, addWeightedImpl<schar, int, float >, addWeightedImpl<schar, int, double> }, { addWeightedImpl<schar, float, uchar >, addWeightedImpl<schar, float, schar >, addWeightedImpl<schar, float, ushort>, addWeightedImpl<schar, float, short >, addWeightedImpl<schar, float, int >, addWeightedImpl<schar, float, float >, addWeightedImpl<schar, float, double> }, { addWeightedImpl<schar, double, uchar >, addWeightedImpl<schar, double, schar >, addWeightedImpl<schar, double, ushort>, addWeightedImpl<schar, double, short >, addWeightedImpl<schar, double, int >, addWeightedImpl<schar, double, float >, addWeightedImpl<schar, double, double> } }, { { 0/*addWeightedImpl<ushort, uchar, uchar >*/, 0/*addWeightedImpl<ushort, uchar, schar >*/, 0/*addWeightedImpl<ushort, uchar, ushort>*/, 0/*addWeightedImpl<ushort, uchar, short >*/, 0/*addWeightedImpl<ushort, uchar, int >*/, 0/*addWeightedImpl<ushort, uchar, float >*/, 0/*addWeightedImpl<ushort, uchar, double>*/ }, { 0/*addWeightedImpl<ushort, schar, uchar >*/, 0/*addWeightedImpl<ushort, schar, schar >*/, 0/*addWeightedImpl<ushort, schar, ushort>*/, 0/*addWeightedImpl<ushort, schar, short >*/, 0/*addWeightedImpl<ushort, schar, int >*/, 0/*addWeightedImpl<ushort, schar, float >*/, 0/*addWeightedImpl<ushort, schar, double>*/ }, { addWeightedImpl<ushort, ushort, uchar >, addWeightedImpl<ushort, ushort, schar >, addWeightedImpl<ushort, ushort, ushort>, addWeightedImpl<ushort, ushort, short >, addWeightedImpl<ushort, ushort, int >, addWeightedImpl<ushort, ushort, float >, addWeightedImpl<ushort, ushort, double> }, { addWeightedImpl<ushort, short, uchar >, addWeightedImpl<ushort, short, schar >, addWeightedImpl<ushort, short, ushort>, addWeightedImpl<ushort, short, short >, addWeightedImpl<ushort, short, int >, addWeightedImpl<ushort, short, float >, addWeightedImpl<ushort, short, double> }, { addWeightedImpl<ushort, int, uchar >, addWeightedImpl<ushort, int, schar >, addWeightedImpl<ushort, int, ushort>, addWeightedImpl<ushort, int, short >, addWeightedImpl<ushort, int, int >, addWeightedImpl<ushort, int, float >, addWeightedImpl<ushort, int, 
double> }, { addWeightedImpl<ushort, float, uchar >, addWeightedImpl<ushort, float, schar >, addWeightedImpl<ushort, float, ushort>, addWeightedImpl<ushort, float, short >, addWeightedImpl<ushort, float, int >, addWeightedImpl<ushort, float, float >, addWeightedImpl<ushort, float, double> }, { addWeightedImpl<ushort, double, uchar >, addWeightedImpl<ushort, double, schar >, addWeightedImpl<ushort, double, ushort>, addWeightedImpl<ushort, double, short >, addWeightedImpl<ushort, double, int >, addWeightedImpl<ushort, double, float >, addWeightedImpl<ushort, double, double> } }, { { 0/*addWeightedImpl<short, uchar, uchar >*/, 0/*addWeightedImpl<short, uchar, schar >*/, 0/*addWeightedImpl<short, uchar, ushort>*/, 0/*addWeightedImpl<short, uchar, short >*/, 0/*addWeightedImpl<short, uchar, int >*/, 0/*addWeightedImpl<short, uchar, float >*/, 0/*addWeightedImpl<short, uchar, double>*/ }, { 0/*addWeightedImpl<short, schar, uchar >*/, 0/*addWeightedImpl<short, schar, schar >*/, 0/*addWeightedImpl<short, schar, ushort>*/, 0/*addWeightedImpl<short, schar, short >*/, 0/*addWeightedImpl<short, schar, int >*/, 0/*addWeightedImpl<short, schar, float >*/, 0/*addWeightedImpl<short, schar, double>*/ }, { 0/*addWeightedImpl<short, ushort, uchar >*/, 0/*addWeightedImpl<short, ushort, schar >*/, 0/*addWeightedImpl<short, ushort, ushort>*/, 0/*addWeightedImpl<short, ushort, short >*/, 0/*addWeightedImpl<short, ushort, int >*/, 0/*addWeightedImpl<short, ushort, float >*/, 0/*addWeightedImpl<short, ushort, double>*/ }, { addWeightedImpl<short, short, uchar >, addWeightedImpl<short, short, schar >, addWeightedImpl<short, short, ushort>, addWeightedImpl<short, short, short >, addWeightedImpl<short, short, int >, addWeightedImpl<short, short, float >, addWeightedImpl<short, short, double> }, { addWeightedImpl<short, int, uchar >, addWeightedImpl<short, int, schar >, addWeightedImpl<short, int, ushort>, addWeightedImpl<short, int, short >, addWeightedImpl<short, int, int >, addWeightedImpl<short, int, float >, addWeightedImpl<short, int, double> }, { addWeightedImpl<short, float, uchar >, addWeightedImpl<short, float, schar >, addWeightedImpl<short, float, ushort>, addWeightedImpl<short, float, short >, addWeightedImpl<short, float, int >, addWeightedImpl<short, float, float >, addWeightedImpl<short, float, double> }, { addWeightedImpl<short, double, uchar >, addWeightedImpl<short, double, schar >, addWeightedImpl<short, double, ushort>, addWeightedImpl<short, double, short >, addWeightedImpl<short, double, int >, addWeightedImpl<short, double, float >, addWeightedImpl<short, double, double> } }, { { 0/*addWeightedImpl<int, uchar, uchar >*/, 0/*addWeightedImpl<int, uchar, schar >*/, 0/*addWeightedImpl<int, uchar, ushort>*/, 0/*addWeightedImpl<int, uchar, short >*/, 0/*addWeightedImpl<int, uchar, int >*/, 0/*addWeightedImpl<int, uchar, float >*/, 0/*addWeightedImpl<int, uchar, double>*/ }, { 0/*addWeightedImpl<int, schar, uchar >*/, 0/*addWeightedImpl<int, schar, schar >*/, 0/*addWeightedImpl<int, schar, ushort>*/, 0/*addWeightedImpl<int, schar, short >*/, 0/*addWeightedImpl<int, schar, int >*/, 0/*addWeightedImpl<int, schar, float >*/, 0/*addWeightedImpl<int, schar, double>*/ }, { 0/*addWeightedImpl<int, ushort, uchar >*/, 0/*addWeightedImpl<int, ushort, schar >*/, 0/*addWeightedImpl<int, ushort, ushort>*/, 0/*addWeightedImpl<int, ushort, short >*/, 0/*addWeightedImpl<int, ushort, int >*/, 0/*addWeightedImpl<int, ushort, float >*/, 0/*addWeightedImpl<int, ushort, double>*/ }, { 0/*addWeightedImpl<int, short, uchar 
>*/, 0/*addWeightedImpl<int, short, schar >*/, 0/*addWeightedImpl<int, short, ushort>*/, 0/*addWeightedImpl<int, short, short >*/, 0/*addWeightedImpl<int, short, int >*/, 0/*addWeightedImpl<int, short, float >*/, 0/*addWeightedImpl<int, short, double>*/ }, { addWeightedImpl<int, int, uchar >, addWeightedImpl<int, int, schar >, addWeightedImpl<int, int, ushort>, addWeightedImpl<int, int, short >, addWeightedImpl<int, int, int >, addWeightedImpl<int, int, float >, addWeightedImpl<int, int, double> }, { addWeightedImpl<int, float, uchar >, addWeightedImpl<int, float, schar >, addWeightedImpl<int, float, ushort>, addWeightedImpl<int, float, short >, addWeightedImpl<int, float, int >, addWeightedImpl<int, float, float >, addWeightedImpl<int, float, double> }, { addWeightedImpl<int, double, uchar >, addWeightedImpl<int, double, schar >, addWeightedImpl<int, double, ushort>, addWeightedImpl<int, double, short >, addWeightedImpl<int, double, int >, addWeightedImpl<int, double, float >, addWeightedImpl<int, double, double> } }, { { 0/*addWeightedImpl<float, uchar, uchar >*/, 0/*addWeightedImpl<float, uchar, schar >*/, 0/*addWeightedImpl<float, uchar, ushort>*/, 0/*addWeightedImpl<float, uchar, short >*/, 0/*addWeightedImpl<float, uchar, int >*/, 0/*addWeightedImpl<float, uchar, float >*/, 0/*addWeightedImpl<float, uchar, double>*/ }, { 0/*addWeightedImpl<float, schar, uchar >*/, 0/*addWeightedImpl<float, schar, schar >*/, 0/*addWeightedImpl<float, schar, ushort>*/, 0/*addWeightedImpl<float, schar, short >*/, 0/*addWeightedImpl<float, schar, int >*/, 0/*addWeightedImpl<float, schar, float >*/, 0/*addWeightedImpl<float, schar, double>*/ }, { 0/*addWeightedImpl<float, ushort, uchar >*/, 0/*addWeightedImpl<float, ushort, schar >*/, 0/*addWeightedImpl<float, ushort, ushort>*/, 0/*addWeightedImpl<float, ushort, short >*/, 0/*addWeightedImpl<float, ushort, int >*/, 0/*addWeightedImpl<float, ushort, float >*/, 0/*addWeightedImpl<float, ushort, double>*/ }, { 0/*addWeightedImpl<float, short, uchar >*/, 0/*addWeightedImpl<float, short, schar >*/, 0/*addWeightedImpl<float, short, ushort>*/, 0/*addWeightedImpl<float, short, short >*/, 0/*addWeightedImpl<float, short, int >*/, 0/*addWeightedImpl<float, short, float >*/, 0/*addWeightedImpl<float, short, double>*/ }, { 0/*addWeightedImpl<float, int, uchar >*/, 0/*addWeightedImpl<float, int, schar >*/, 0/*addWeightedImpl<float, int, ushort>*/, 0/*addWeightedImpl<float, int, short >*/, 0/*addWeightedImpl<float, int, int >*/, 0/*addWeightedImpl<float, int, float >*/, 0/*addWeightedImpl<float, int, double>*/ }, { addWeightedImpl<float, float, uchar >, addWeightedImpl<float, float, schar >, addWeightedImpl<float, float, ushort>, addWeightedImpl<float, float, short >, addWeightedImpl<float, float, int >, addWeightedImpl<float, float, float >, addWeightedImpl<float, float, double> }, { addWeightedImpl<float, double, uchar >, addWeightedImpl<float, double, schar >, addWeightedImpl<float, double, ushort>, addWeightedImpl<float, double, short >, addWeightedImpl<float, double, int >, addWeightedImpl<float, double, float >, addWeightedImpl<float, double, double> } }, { { 0/*addWeightedImpl<double, uchar, uchar >*/, 0/*addWeightedImpl<double, uchar, schar >*/, 0/*addWeightedImpl<double, uchar, ushort>*/, 0/*addWeightedImpl<double, uchar, short >*/, 0/*addWeightedImpl<double, uchar, int >*/, 0/*addWeightedImpl<double, uchar, float >*/, 0/*addWeightedImpl<double, uchar, double>*/ }, { 0/*addWeightedImpl<double, schar, uchar >*/, 0/*addWeightedImpl<double, schar, schar >*/, 
0/*addWeightedImpl<double, schar, ushort>*/, 0/*addWeightedImpl<double, schar, short >*/, 0/*addWeightedImpl<double, schar, int >*/, 0/*addWeightedImpl<double, schar, float >*/, 0/*addWeightedImpl<double, schar, double>*/ }, { 0/*addWeightedImpl<double, ushort, uchar >*/, 0/*addWeightedImpl<double, ushort, schar >*/, 0/*addWeightedImpl<double, ushort, ushort>*/, 0/*addWeightedImpl<double, ushort, short >*/, 0/*addWeightedImpl<double, ushort, int >*/, 0/*addWeightedImpl<double, ushort, float >*/, 0/*addWeightedImpl<double, ushort, double>*/ }, { 0/*addWeightedImpl<double, short, uchar >*/, 0/*addWeightedImpl<double, short, schar >*/, 0/*addWeightedImpl<double, short, ushort>*/, 0/*addWeightedImpl<double, short, short >*/, 0/*addWeightedImpl<double, short, int >*/, 0/*addWeightedImpl<double, short, float >*/, 0/*addWeightedImpl<double, short, double>*/ }, { 0/*addWeightedImpl<double, int, uchar >*/, 0/*addWeightedImpl<double, int, schar >*/, 0/*addWeightedImpl<double, int, ushort>*/, 0/*addWeightedImpl<double, int, short >*/, 0/*addWeightedImpl<double, int, int >*/, 0/*addWeightedImpl<double, int, float >*/, 0/*addWeightedImpl<double, int, double>*/ }, { 0/*addWeightedImpl<double, float, uchar >*/, 0/*addWeightedImpl<double, float, schar >*/, 0/*addWeightedImpl<double, float, ushort>*/, 0/*addWeightedImpl<double, float, short >*/, 0/*addWeightedImpl<double, float, int >*/, 0/*addWeightedImpl<double, float, float >*/, 0/*addWeightedImpl<double, float, double>*/ }, { addWeightedImpl<double, double, uchar >, addWeightedImpl<double, double, schar >, addWeightedImpl<double, double, ushort>, addWeightedImpl<double, double, short >, addWeightedImpl<double, double, int >, addWeightedImpl<double, double, float >, addWeightedImpl<double, double, double> } } }; GpuMat src1 = getInputMat(_src1, stream); GpuMat src2 = getInputMat(_src2, stream); int sdepth1 = src1.depth(); int sdepth2 = src2.depth(); ddepth = ddepth >= 0 ? CV_MAT_DEPTH(ddepth) : std::max(sdepth1, sdepth2); const int cn = src1.channels(); CV_Assert( src2.size() == src1.size() && src2.channels() == cn ); CV_Assert( sdepth1 <= CV_64F && sdepth2 <= CV_64F && ddepth <= CV_64F ); GpuMat dst = getOutputMat(_dst, src1.size(), CV_MAKE_TYPE(ddepth, cn), stream); GpuMat src1_single = src1.reshape(1); GpuMat src2_single = src2.reshape(1); GpuMat dst_single = dst.reshape(1); if (sdepth1 > sdepth2) { src1_single.swap(src2_single); std::swap(alpha, beta); std::swap(sdepth1, sdepth2); } const func_t func = funcs[sdepth1][sdepth2][ddepth]; if (!func) CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported combination of source and destination types"); func(src1_single, alpha, src2_single, beta, gamma, dst_single, stream); syncOutput(dst, _dst, stream); } #endif
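The funcs[7][7][7] table in both versions above is indexed directly by the OpenCV depth codes (CV_8U=0 through CV_64F=6), with null entries marking combinations that were deliberately not instantiated; here is a minimal sketch of that dispatch pattern with a made-up table entry and implementation name.
#include <cstdio>
typedef void (*func_t)(int);                         // stand-in for the real addWeighted implementation signature
static void impl_u8_f32_f32(int n) { printf("uchar/float -> float path, %d elements\n", n); }
int main() {
  static func_t funcs[7][7][7] = {};                 // zero-initialized: 0 means "unsupported combination"
  funcs[0][5][5] = impl_u8_f32_f32;                  // register (sdepth1=CV_8U, sdepth2=CV_32F, ddepth=CV_32F)
  int sdepth1 = 0, sdepth2 = 5, ddepth = 5;
  func_t func = funcs[sdepth1][sdepth2][ddepth];
  if (!func) { printf("unsupported combination\n"); return 1; }
  func(1024);
  return 0;
}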
3ab443cb012dfa97455a8040c003f713bc4dc6bb.hip
// !!! This is a file automatically generated by hipify!!! /* -- MAGMA (version 1.3.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2012 @generated d Wed Nov 14 22:53:54 2012 @author Ichitaro Yamazaki */ #include "common_magma.h" #define PRECISION_d #include "commonblas.h" // // m, n - dimensions in the source (input) matrix. // This routine copies the ha matrix from the CPU // to dat on the GPU. In addition, the output matrix // is transposed. The routine uses a buffer of size // 2*lddb*nb pointed to by dB (lddb > m) on the GPU. // Note that lda >= m and lddat >= n. // extern "C" void magmablas_dsetmatrix_transpose_mgpu( magma_int_t ngpus, hipStream_t stream[][2], const double *ha, magma_int_t lda, double **dat, magma_int_t ldda, double **db, magma_int_t lddb, magma_int_t m, magma_int_t n, magma_int_t nb) { #define A(j) (ha + (j)*lda) #define dB(d, j) (db[(d)] + (j)*nb*lddb) #define dAT(d, j) (dat[(d)] + (j)*nb) int nstreams = 2, d, j, j_local, id, ib; /* Quick return */ if ( (m == 0) || (n == 0) ) return; if (lda < m || ngpus*ldda < n || lddb < m){ printf( "Wrong arguments in magmablas_dsetmatrix_transpose_mgpu (%d<%d), (%d*%d<%d), or (%d<%d).\n", (int) lda, (int) m, (int) ngpus, (int) ldda, (int) n, (int) lddb, (int) m ); return; } /* Move data from CPU to GPU by block columns and transpose it */ for(j=0; j<n; j+=nb){ d = (j/nb)%ngpus; j_local = (j/nb)/ngpus; id = j_local%nstreams; magma_setdevice(d); ib = min(n-j, nb); magma_dsetmatrix_async( m, ib, A(j), lda, dB(d, id), lddb, stream[d][id] ); magmablasSetKernelStream(stream[d][id]); magmablas_dtranspose2(dAT(d, j_local), ldda, dB(d, id), lddb, m, ib); } }
3ab443cb012dfa97455a8040c003f713bc4dc6bb.cu
/* -- MAGMA (version 1.3.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2012 @generated d Wed Nov 14 22:53:54 2012 @author Ichitaro Yamazaki */ #include "common_magma.h" #define PRECISION_d #include "commonblas.h" // // m, n - dimensions in the source (input) matrix. // This routine copies the ha matrix from the CPU // to dat on the GPU. In addition, the output matrix // is transposed. The routine uses a buffer of size // 2*lddb*nb pointed to by dB (lddb > m) on the GPU. // Note that lda >= m and lddat >= n. // extern "C" void magmablas_dsetmatrix_transpose_mgpu( magma_int_t ngpus, cudaStream_t stream[][2], const double *ha, magma_int_t lda, double **dat, magma_int_t ldda, double **db, magma_int_t lddb, magma_int_t m, magma_int_t n, magma_int_t nb) { #define A(j) (ha + (j)*lda) #define dB(d, j) (db[(d)] + (j)*nb*lddb) #define dAT(d, j) (dat[(d)] + (j)*nb) int nstreams = 2, d, j, j_local, id, ib; /* Quick return */ if ( (m == 0) || (n == 0) ) return; if (lda < m || ngpus*ldda < n || lddb < m){ printf( "Wrong arguments in magmablas_dsetmatrix_transpose_mgpu (%d<%d), (%d*%d<%d), or (%d<%d).\n", (int) lda, (int) m, (int) ngpus, (int) ldda, (int) n, (int) lddb, (int) m ); return; } /* Move data from CPU to GPU by block columns and transpose it */ for(j=0; j<n; j+=nb){ d = (j/nb)%ngpus; j_local = (j/nb)/ngpus; id = j_local%nstreams; magma_setdevice(d); ib = min(n-j, nb); magma_dsetmatrix_async( m, ib, A(j), lda, dB(d, id), lddb, stream[d][id] ); magmablasSetKernelStream(stream[d][id]); magmablas_dtranspose2(dAT(d, j_local), ldda, dB(d, id), lddb, m, ib); } }
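The j-loop above hands block columns to the GPUs in round-robin order and alternates between the two per-device streams/buffers; below is a small host-only sketch of just that index bookkeeping (the matrix and block sizes are made up for illustration).
#include <cstdio>
int main() {
  const int n = 8 * 64, nb = 64, ngpus = 2, nstreams = 2;
  for (int j = 0; j < n; j += nb) {
    int blk     = j / nb;             // global block-column index
    int d       = blk % ngpus;        // owning GPU (round-robin), as in d = (j/nb)%ngpus
    int j_local = blk / ngpus;        // block-column index local to that GPU
    int id      = j_local % nstreams; // which of the two streams / dB buffers is used
    printf("block column %d -> gpu %d, local column %d, stream %d\n", blk, d, j_local, id);
  }
  return 0;
}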
4c533487f4ad0bd1c6109b81ae79de1bc88f87da.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <hip/driver_types.h> #include "RegTools.h" #include <hipfft.h> #define _USE_MATH_DEFINES #include "math.h" // for M_PI_2, cos, sin #include <float.h> // thrust functions // important note: thrust is not compatible with multi-thread environment, thus not compativle with multi-GPU in this library #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/transform.h> #include <thrust/fill.h> #include <thrust/copy.h> #include <thrust/functional.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/count.h> #include <assert.h> #define USE_TEXTURE 0 // When we use texture for small image (less than 256x256 maybe), padDataClampToBorder() (SET_FLOAT_BASE) crashes, for some reason #if(USE_TEXTURE) texture<float, 1, hipReadModeElementType> texFloat; #define LOAD_FLOAT(i) tex1Dfetch(texFloat, i) #define SET_FLOAT_BASE cutilSafeCall( hipBindTexture(0, texFloat, d_Src) ) #else #define LOAD_FLOAT(i) d_Src[i] #define SET_FLOAT_BASE #endif extern FILE *m_LogFile; //////////////////////////////////////////////////////////////////////////////// // 2D R2C / C2R post/preprocessing kernels //////////////////////////////////////////////////////////////////////////////// #if(USE_TEXTURE) texture<fComplex, 1, hipReadModeElementType> texComplexA; texture<fComplex, 1, hipReadModeElementType> texComplexB; #define LOAD_FCOMPLEX(i) tex1Dfetch(texComplexA, i) #define LOAD_FCOMPLEX_A(i) tex1Dfetch(texComplexA, i) #define LOAD_FCOMPLEX_B(i) tex1Dfetch(texComplexB, i) #define SET_FCOMPLEX_BASE cutilSafeCall( hipBindTexture(0, texComplexA, d_Src) ) #define SET_FCOMPLEX_BASE_A cutilSafeCall( hipBindTexture(0, texComplexA, d_SrcA) ) #define SET_FCOMPLEX_BASE_B cutilSafeCall( hipBindTexture(0, texComplexB, d_SrcB) ) #else #define LOAD_FCOMPLEX(i) d_Src[i] #define LOAD_FCOMPLEX_A(i) d_SrcA[i] #define LOAD_FCOMPLEX_B(i) d_SrcB[i] #define SET_FCOMPLEX_BASE #define SET_FCOMPLEX_BASE_A #define SET_FCOMPLEX_BASE_B #endif extern "C" int snapTransformSize(int dataSize){ int hiBit; unsigned int lowPOT, hiPOT; dataSize = iAlignUp(dataSize, 16); for(hiBit = 31; hiBit >= 0; hiBit--) if(dataSize & (1U << hiBit)) break; lowPOT = 1U << hiBit; if(lowPOT == dataSize) return dataSize; hiPOT = 1U << (hiBit + 1); if(hiPOT <= 1024) return hiPOT; else return iAlignUp(dataSize, 512); } extern "C" void computeKernelSpectrumGPU(float *h_Kernel, int dataH, int dataW, int kernelH, int kernelW, int kernelY, int kernelX , fComplex *d_KernelSpectrum, hipfftHandle fftPlanFwd) { // this function is copied from CUDA SDK (convolutionFFT2D) and modified by Yoshito Otake // Note: make sure that there is no 'bad' number in d_Data (e.g., Inf, Nan, etc.) // if there is any 'bad' number, it breaks the entire result. 
const int fftH = snapTransformSize(dataH + kernelH - 1); const int fftW = snapTransformSize(dataW + kernelW - 1); float *d_Kernel, *d_PaddedKernel; cutilSafeCall( hipMalloc(&(d_Kernel), kernelH*kernelW*sizeof(float)) ); cutilSafeCall( hipMalloc(&(d_PaddedKernel), fftH*fftW*sizeof(float)) ); // fComplex *d_KernelSpectrum = temp_spectrum+ (fftH * (fftW / 2 + 1)); //print_and_log("kernelH: %d, kernelW: %d, fftH: %d, fftW: %d\n", kernelH, kernelW, fftH, fftW); cutilSafeCall( hipMemcpy(d_Kernel, h_Kernel, kernelH * kernelW * sizeof(float), hipMemcpyHostToDevice) ); cutilSafeCall( hipMemset(d_PaddedKernel, 0, fftH * fftW * sizeof(float)) ); #if defined RegTools_VERBOSE_MESSAGE print_and_log("h_Kernel:\n"); for(int i=0;i<kernelW;i++){ for(int j=0;j<kernelH;j++) print_and_log("%f ", h_Kernel[i*kernelH+j]); print_and_log("\n"); } #endif padKernel( d_PaddedKernel, d_Kernel, fftH, fftW, kernelH, kernelW, kernelY, kernelX ); #if defined RegTools_VERBOSE_MESSAGE print_and_log("h_PaddedKernel:\n"); float *h_PaddedKernel = new float[fftH*fftW]; hipMemcpy(h_PaddedKernel, d_PaddedKernel, fftH*fftW*sizeof(float), hipMemcpyDeviceToHost); for(int i=0;i<fftW;i++){ for(int j=0;j<fftH;j++) print_and_log("%f ", h_PaddedKernel[i*fftH+j]); print_and_log("\n"); } delete[] h_PaddedKernel; #endif int error; if( (error=hipfftExecR2C(fftPlanFwd, (hipfftReal *)d_PaddedKernel, (hipfftComplex *)d_KernelSpectrum)) != HIPFFT_SUCCESS) { print_and_log("ERROR at hipfftExecR2C in computeKernelSpectrumGPU (6 means HIPFFT_EXEC_FAILED), error code: %d\n", error); } cutilSafeCall( hipFree( d_Kernel ) ); cutilSafeCall( hipFree( d_PaddedKernel ) ); } extern "C" unsigned int getKernelSize(float sigma) { // compute kernel size based on sigma const float epsilon = 1e-2; return ceil(sigma*sqrt(-2*log(sqrt(2*M_PI)*sigma*epsilon))); } extern "C" bool computeGaussianGradientGPUMulti(float *d_input_images, int *dim, int num_image_sets, float *d_output_imagesX, float *d_output_imagesY, float sigma , fComplex *d_x_kernel_spectrum, fComplex *d_y_kernel_spectrum, float *temp_padded, fComplex *temp_spectrum , hipfftHandle fftPlanManyFwd, hipfftHandle fftPlanManyInv, float GI_threshold ) { const unsigned int kernelY = getKernelSize(sigma), kernelX = kernelY; const unsigned int kernelH = 2*kernelY+1, kernelW = kernelH; const int fftH = snapTransformSize(dim[1] + kernelH - 1), dataH = dim[1]; const int fftW = snapTransformSize(dim[0] + kernelW - 1), dataW = dim[0]; const int spectrum_size = fftH*(fftW/2+1), padded_size = fftH*fftW; const int y_offset = dim[2]*num_image_sets; // FFT/iFFT for all images simultaneously (fftPlanMany needs to be created for exactly the same number of images) padDataClampToBorder( temp_padded, d_input_images, fftH, fftW, dataH, dataW, kernelH, kernelW, kernelY, kernelX, dim[2]*num_image_sets ); // Note: the following FFT fails if input image contains NaN (even only one pixel!) 
cufftSafeCall( hipfftExecR2C( fftPlanManyFwd, (hipfftReal *)temp_padded, (hipfftComplex *)temp_spectrum ) ); cutilSafeCall( hipMemcpy(temp_spectrum+spectrum_size*y_offset, temp_spectrum, spectrum_size*y_offset*sizeof(fComplex), hipMemcpyDeviceToDevice) ); // modulate all spectrums modulateAndNormalize(temp_spectrum, d_x_kernel_spectrum, fftH, fftW, dim[2]*num_image_sets, 1); modulateAndNormalize(temp_spectrum+spectrum_size*y_offset, d_y_kernel_spectrum, fftH, fftW, dim[2]*num_image_sets, 1); // print_and_log("computeGaussianGradientGPUMulti(), dim: %dx%dx%d, fft size: %dx%d, kernelXY: %dx%d, kernelWH: %dx%d, spectrum_size: %d, padded_size: %d, fftPlanManyInv: %d, GI_threshold: %f\n", // dim[0], dim[1], dim[2], fftW, fftH, kernelX, kernelY, kernelW, kernelH, spectrum_size, padded_size, fftPlanManyInv, GI_threshold); // inverse FFT cufftSafeCall( hipfftExecC2R(fftPlanManyInv, (hipfftComplex *)temp_spectrum, (hipfftReal *)temp_padded) ); #if defined RegTools_GI_BACKGROUND_EDGE_CHECK // fill zero in (kernelW x kernelH) pixels of the gradient image around the pixel which has an intensity lower than threshold value in the original image fillEdges( temp_padded, d_input_images, fftH, fftW, dataH, dataW, kernelH, kernelW, kernelY, kernelX, dim[2]*num_image_sets, GI_threshold ); fillEdges( temp_padded+padded_size*y_offset, d_input_images, fftH, fftW, dataH, dataW, kernelH, kernelW, kernelY, kernelX, dim[2]*num_image_sets, GI_threshold ); #endif // unPad all images unPadDataClampToBorder(d_output_imagesX, temp_padded, fftH, fftW, dataH, dataW, kernelY, kernelX, dim[2]*num_image_sets); unPadDataClampToBorder(d_output_imagesY, temp_padded+padded_size*y_offset, fftH, fftW, dataH, dataW, kernelY, kernelX, dim[2]*num_image_sets); return true; } struct sqrt_inverse_functor { __host__ __device__ float operator()(const float &x) const { return 1/sqrt(x); } }; struct square_functor { __host__ __device__ float operator()(const float &x) const { return x*x; } }; __global__ void apply_floating_mask_kernel(float *d_images, float *d_floating_mask, int size) { const int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= size) return; if(d_floating_mask[index]<1e-6) d_images[index] = 0; } __global__ void square_functor_kernel(float *d_input, float *d_output, int size) { const int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= size) return; d_output[index] = d_input[index] * d_input[index]; } __global__ void sqrt_inverse_functor_kernel(float *d_input, float *d_output, int size) { const int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= size) return; d_output[index] = 1/sqrt(d_input[index]); } extern "C" bool zeromean_stddivide_Images(float *d_images, float *d_temp, int image_size, int num_image_sets, hipblasHandle_t cublasHandle, float *d_mean_std, float *d_OneVector, float h_NormalizationFactor, float *d_mask_weight, float *d_floating_mask) { float alpha, beta; // (OPTIONAL) apply mask to each image (column). 
Note that mask_weight is a single image set (a single column vector) //print_and_log("zeromean_stddivide_Images(), pass0, d_images: %d, d_temp: %d, image_size: %d, num_image_sets: %d, cublasHandle: %d\n", d_images, d_temp, image_size, num_image_sets, cublasHandle); if(d_mask_weight) hipblasSdgmm(cublasHandle, HIPBLAS_SIDE_LEFT, image_size, num_image_sets, d_images, image_size, d_mask_weight, 1, d_images, image_size); // compute mean of each image set (dim[2] images are considered as one set) beta = 0.0; hipblasSgemv(cublasHandle, HIPBLAS_OP_T, image_size, num_image_sets, &h_NormalizationFactor, d_images, image_size, d_OneVector, 1, &beta, d_mean_std, 1); //print_and_log("zeromean_stddivide_Images(), pass1\n"); // subtract mean from each image set (column-wise subtraction) alpha = -1.0f; beta = 1.0; hipblasSgemm(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, image_size, num_image_sets, 1, &alpha, d_OneVector, image_size, d_mean_std, 1, &beta, d_images, image_size); // is this correct? maybe below? (d_mean_std and d_OneVector should be opposite order) //hipblasSgemm(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, image_size, num_image_sets, 1, &alpha, d_mean_std, image_size, d_OneVector, 1, &beta, d_images, image_size); //print_and_log("zeromean_stddivide_Images(), pass2, image_size: %d, num_image_sets: %d\n", image_size, num_image_sets); const int b = BLOCK_SIZE_X * BLOCK_SIZE_Y * BLOCK_SIZE_Z; // apply floating mask if needed (set zero for the pixels with d_floating_mask>0) if(d_floating_mask) apply_floating_mask_kernel << <iDivUp(image_size*num_image_sets, b), b >> > (d_images, d_floating_mask, image_size*num_image_sets); // compute std of each image set (column-wise) //thrust::transform(thrust::device_ptr<float>(d_images), thrust::device_ptr<float>(d_images)+image_size*num_image_sets, thrust::device_ptr<float>(d_temp), square_functor()); square_functor_kernel << <iDivUp(image_size*num_image_sets, b), b >> > (d_images, d_temp, image_size*num_image_sets); //print_and_log("zeromean_stddivide_Images(), pass3\n"); alpha = 1.0f; beta = 0.0; hipblasSgemv(cublasHandle, HIPBLAS_OP_T, image_size, num_image_sets, &alpha, d_temp, image_size, d_OneVector, 1, &beta, d_mean_std, 1); //print_and_log("zeromean_stddivide_Images(), pass4\n"); //thrust::transform(thrust::device_ptr<float>(d_mean_std), thrust::device_ptr<float>(d_mean_std)+num_image_sets, thrust::device_ptr<float>(d_mean_std), sqrt_inverse_functor()); sqrt_inverse_functor_kernel << <iDivUp(num_image_sets, b), b >> > (d_mean_std, d_mean_std, num_image_sets); // divide each image set by std (column-wise division) hipblasSdgmm(cublasHandle, HIPBLAS_SIDE_RIGHT, image_size, num_image_sets, d_images, image_size, d_mean_std, 1, d_images, image_size); /* float *h_mean_std = new float[num_image_sets]; cutilSafeCall( hipMemcpy(h_mean_std, d_mean_std, num_image_sets*sizeof(float), hipMemcpyDeviceToHost) ); for(int i=0;i<num_image_sets;i++){ print_and_log("d_images[%d] mean: %f\n", i, h_mean_std[i]); } delete[] h_mean_std; */ return true; } extern "C" bool computeNormalizedCrossCorrelation_Pixels(float *d_images1_zeromean_multi, float *d_images2_zeromean_single, int image_size, int num_image_sets, hipblasHandle_t cublasHandle) { // (numerator) compute normalized covariance (pixel-wise multiplication between images1 and images2) // overwrite onto images1 hipblasSdgmm(cublasHandle, HIPBLAS_SIDE_LEFT, image_size, num_image_sets, d_images1_zeromean_multi, image_size, d_images2_zeromean_single, 1, d_images1_zeromean_multi, image_size); return true; } extern "C" bool
computeMeanSquaredError(float *d_images1_multi, float *d_images2_single, float *d_OneVector, double *MSE, int image_size, int num_image_sets, float *d_temp_MSE, float *h_temp_MSE, hipblasHandle_t cublasHandle) { // subtract images2_single from each image set in d_images1_multi (column-wise subtraction) float alpha = -1.0f, beta = 1.0f; // hipMemcpy(d_images1_multi, d_images2_single, image_size*sizeof(float), hipMemcpyDeviceToDevice); hipblasSgemm(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, image_size, num_image_sets, 1, &alpha, d_images2_single, image_size, d_OneVector, 1, &beta, d_images1_multi, image_size); // square d_images1_multi //thrust::transform(thrust::device_ptr<float>(d_images1_multi), thrust::device_ptr<float>(d_images1_multi)+image_size*num_image_sets, thrust::device_ptr<float>(d_images1_multi), square_functor()); const int b = BLOCK_SIZE_X * BLOCK_SIZE_Y * BLOCK_SIZE_Z; square_functor_kernel << <iDivUp(image_size*num_image_sets, b), b >> > (d_images1_multi, d_images1_multi, image_size*num_image_sets); // average over each image alpha = 1.0f/(float)image_size; beta = 0.0; hipblasSgemv(cublasHandle, HIPBLAS_OP_T, image_size, num_image_sets, &alpha, d_images1_multi, image_size, d_OneVector, 1, &beta, d_temp_MSE, 1); // copy the results back to host cutilSafeCall( hipMemcpy(h_temp_MSE, d_temp_MSE, num_image_sets*sizeof(float), hipMemcpyDeviceToHost) ); for(int i=0;i<num_image_sets;i++) MSE[i] += (double)(h_temp_MSE[i]); // float -> double conversion (result is accumulated. make sure zeroing the initial array) return true; } struct absolute_value_functor { __host__ __device__ float operator()(const float &x) const { return fabs(x); } }; extern "C" bool computeNormalizedCrossCorrelation_Sum(float *d_images1_multi, float *d_images2_multi, float *d_output_multi, double *NCC, int image_size, int num_image_sets , hipblasHandle_t cublasHandle, float *d_temp_NCC, float *h_temp_NCC, float *d_OneVector) { if(d_images2_multi){ float alpha = 0.5f; // pixel-wise mean (for Gradient Correlation) hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, image_size, num_image_sets, &alpha, d_images1_multi, image_size, &alpha, d_images2_multi, image_size, d_output_multi, image_size); // compute absolute value // thrust::transform( thrust::device_ptr<float>(d_output_multi), thrust::device_ptr<float>(d_output_multi)+image_size*num_image_sets, // thrust::device_ptr<float>(d_output_multi), absolute_value_functor() ); } else if(d_images1_multi){ // just memory copy (for NCC) cutilSafeCall( hipMemcpy(d_output_multi, d_images1_multi, num_image_sets*image_size*sizeof(float), hipMemcpyDeviceToDevice) ); } // summation (column-wise) float alpha = 1.0f, beta = 0.0f; hipblasSgemv(cublasHandle, HIPBLAS_OP_T, image_size, num_image_sets, &alpha, d_output_multi, image_size, d_OneVector, 1, &beta, d_temp_NCC, 1); // copy the results back to host // print_and_log("computeNormalizedCrossCorrelation_Sum(), d_temp_NCC: %d\n", d_temp_NCC); // for (int i = 0; i < num_image_sets; i++) print_and_log("%f, ", h_temp_NCC[i]); // print_and_log("\n"); //cutilSafeCall(hipMemcpy(d_temp_NCC, h_temp_NCC, num_image_sets * sizeof(float), hipMemcpyHostToDevice)); cutilSafeCall( hipMemcpy(h_temp_NCC, d_temp_NCC, num_image_sets*sizeof(float), hipMemcpyDeviceToHost) ); for(int i=0;i<num_image_sets;i++) NCC[i] += (double)(h_temp_NCC[i]); // float -> double conversion (result is accumulated. 
make sure zeroing the initial array) // for(int i=0;i<num_image_sets;i++){ print_and_log("NCC[%d]: %f\n", i, NCC[i]); } return true; } struct subtract_square_functor { __host__ __device__ float operator()(const float &x, const float &y) const { return (x-y)*(x-y); } }; __global__ void subtract_square_functor_kernel(float *d_input1, float *d_input2, float *d_output, int size) { const int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= size) return; d_output[index] = (d_input1[index]-d_input2[index]) * (d_input1[index] - d_input2[index]); } extern "C" bool computeGaussianGPUMulti(float *d_input_images, int *dim, int num_image_sets, float *d_output_mu, float *d_output_sigma_sq, float sigma , fComplex *d_kernel_spectrum, float *temp_padded, fComplex *temp_spectrum , hipfftHandle fftPlanManyFwd, hipfftHandle fftPlanManyInv ) { const unsigned int kernelY = getKernelSize(sigma), kernelX = kernelY; const unsigned int kernelH = 2*kernelY+1, kernelW = kernelH; const int fftH = snapTransformSize(dim[1] + kernelH - 1), dataH = dim[1]; const int fftW = snapTransformSize(dim[0] + kernelW - 1), dataW = dim[0]; const int spectrum_size = fftH*(fftW/2+1), padded_size = fftH*fftW; const int total_size = dim[0]*dim[1]*dim[2]*num_image_sets; // compute mean images // FFT/iFFT for all images simultaneously (fftPlanMany needs to be created for exactly the same number of images) // Note: the following FFT fails if input image contains NaN (even only one pixel!) // compute mu images padDataClampToBorder( temp_padded, d_input_images, fftH, fftW, dataH, dataW, kernelH, kernelW, kernelY, kernelX, dim[2]*num_image_sets ); cufftSafeCall( hipfftExecR2C( fftPlanManyFwd, (hipfftReal *)temp_padded, (hipfftComplex *)temp_spectrum ) ); modulateAndNormalize(temp_spectrum, d_kernel_spectrum, fftH, fftW, dim[2]*num_image_sets, 1); cufftSafeCall( hipfftExecC2R(fftPlanManyInv, (hipfftComplex *)temp_spectrum, (hipfftReal *)temp_padded) ); unPadDataClampToBorder(d_output_mu, temp_padded, fftH, fftW, dataH, dataW, kernelY, kernelX, dim[2]*num_image_sets); // compute sigma images (gaussian filter after mean subtraction and square. 
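// i.e., with G denoting the Gaussian filter: mu = G*I (computed above) and sigma_sq = G*((I - mu)^2),
// a Gaussian-weighted local variance around each pixel; the subtract_square kernel below forms (I - mu)^2
// and the FFT convolution that follows applies G.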
// thrust::transform( thrust::device_ptr<float>(d_input_images), thrust::device_ptr<float>(d_input_images)+total_size, // thrust::device_ptr<float>(d_output_mu), thrust::device_ptr<float>(d_output_sigma_sq), subtract_square_functor() ); const int b = BLOCK_SIZE_X * BLOCK_SIZE_Y * BLOCK_SIZE_Z; subtract_square_functor_kernel << <iDivUp(total_size, b), b >> > (d_input_images, d_output_mu, d_output_sigma_sq, total_size); padDataClampToBorder( temp_padded, d_output_sigma_sq, fftH, fftW, dataH, dataW, kernelH, kernelW, kernelY, kernelX, dim[2]*num_image_sets ); cufftSafeCall( hipfftExecR2C( fftPlanManyFwd, (hipfftReal *)temp_padded, (hipfftComplex *)temp_spectrum ) ); // fft modulateAndNormalize(temp_spectrum, d_kernel_spectrum, fftH, fftW, dim[2]*num_image_sets, 1); cufftSafeCall( hipfftExecC2R(fftPlanManyInv, (hipfftComplex *)temp_spectrum, (hipfftReal *)temp_padded) ); // inverse fft unPadDataClampToBorder(d_output_sigma_sq, temp_padded, fftH, fftW, dataH, dataW, kernelY, kernelX, dim[2]*num_image_sets); // print_and_log("computeGaussianGPUMulti(), dim: %dx%dx%d, fft size: %dx%d, kernelXY: %dx%d, kernelWH: %dx%d, spectrum_size: %d, padded_size: %d, fftPlanManyInv: %d, total_size: %d\n", // dim[0], dim[1], dim[2], fftW, fftH, kernelX, kernelY, kernelW, kernelH, spectrum_size, padded_size, fftPlanManyInv, total_size); return true; } struct sqrt_op { __host__ __device__ float operator()(const float& x) const { return x < 1e-8f ? 0.0f : sqrt(x); } }; struct checked_div { __host__ __device__ float operator()(const float& numerator, const float& denominator) const { return abs(denominator) < 1e-8f ? 0.0f : numerator / denominator; } }; extern "C" bool computeLocalContrastNormalizationGPUMulti(float *d_input_images, int *dim, int num_image_sets, float *d_output_centered, float *d_output_std, float *d_output, float sigma , fComplex *d_kernel_spectrum, float *temp_padded, fComplex *temp_spectrum , hipfftHandle fftPlanManyFwd, hipfftHandle fftPlanManyInv) { float *d_temp_std = d_output_std ? d_output_std : d_output; // d_output_std is just for debugging. set to NULL if no std image output is necessary const unsigned int kernelY = getKernelSize(sigma), kernelX = kernelY; const unsigned int kernelH = 2 * kernelY + 1, kernelW = kernelH; const int fftH = snapTransformSize(dim[1] + kernelH - 1), dataH = dim[1]; const int fftW = snapTransformSize(dim[0] + kernelW - 1), dataW = dim[0]; const int spectrum_size = fftH*(fftW / 2 + 1), padded_size = fftH*fftW; const int total_size = dim[0] * dim[1] * dim[2] * num_image_sets; // compute centered images // FFT/iFFT for all images simultaneously (fftPlanMany needs to be created for exactly the same number of images) // Note: the following FFT fails if input image contains NaN (even only one pixel!) 
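// Summary of the normalization implemented below (G = Gaussian filter): centered = I - G*I;
// std = sqrt(G*(centered^2)); output = centered / std, where sqrt_op() returns 0 for values below 1e-8
// and checked_div() returns 0 wherever std is (near) zero.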
// compute mu images padDataClampToBorder(temp_padded, d_input_images, fftH, fftW, dataH, dataW, kernelH, kernelW, kernelY, kernelX, dim[2] * num_image_sets); cufftSafeCall(hipfftExecR2C(fftPlanManyFwd, (hipfftReal *)temp_padded, (hipfftComplex *)temp_spectrum)); modulateAndNormalize(temp_spectrum, d_kernel_spectrum, fftH, fftW, dim[2] * num_image_sets, 1); cufftSafeCall(hipfftExecC2R(fftPlanManyInv, (hipfftComplex *)temp_spectrum, (hipfftReal *)temp_padded)); unPadDataClampToBorder(d_output_centered, temp_padded, fftH, fftW, dataH, dataW, kernelY, kernelX, dim[2] * num_image_sets); thrust::transform(thrust::device_ptr<float>(d_input_images), thrust::device_ptr<float>(d_input_images) + total_size, thrust::device_ptr<float>(d_output_centered), thrust::device_ptr<float>(d_output_centered), thrust::minus<float>()); // compute std images (gaussian filtering on squared centered images). thrust::transform(thrust::device_ptr<float>(d_output_centered), thrust::device_ptr<float>(d_output_centered) + total_size, thrust::device_ptr<float>(d_temp_std), thrust::square<float>()); padDataClampToBorder(temp_padded, d_temp_std, fftH, fftW, dataH, dataW, kernelH, kernelW, kernelY, kernelX, dim[2] * num_image_sets); cufftSafeCall(hipfftExecR2C(fftPlanManyFwd, (hipfftReal *)temp_padded, (hipfftComplex *)temp_spectrum)); // fft modulateAndNormalize(temp_spectrum, d_kernel_spectrum, fftH, fftW, dim[2] * num_image_sets, 1); cufftSafeCall(hipfftExecC2R(fftPlanManyInv, (hipfftComplex *)temp_spectrum, (hipfftReal *)temp_padded)); // inverse fft unPadDataClampToBorder(d_temp_std, temp_padded, fftH, fftW, dataH, dataW, kernelY, kernelX, dim[2] * num_image_sets); thrust::transform(thrust::device_ptr<float>(d_temp_std), thrust::device_ptr<float>(d_temp_std) + total_size, thrust::device_ptr<float>(d_temp_std), sqrt_op()); thrust::transform(thrust::device_ptr<float>(d_output_centered), thrust::device_ptr<float>(d_output_centered) + total_size, thrust::device_ptr<float>(d_temp_std), thrust::device_ptr<float>(d_output), checked_div()); // thrust::transform(thrust::device_ptr<float>(d_input_images), thrust::device_ptr<float>(d_input_images) + total_size, // thrust::device_ptr<float>(d_input_images), thrust::device_ptr<float>(d_output), checked_div()); // print_and_log("computeLocalContrastNormalizationGPUMulti(), dim: %dx%dx%d, fft size: %dx%d, kernelXY: %dx%d, kernelWH: %dx%d, spectrum_size: %d, padded_size: %d, fftPlanManyInv: %d, total_size: %d\n", // dim[0], dim[1], dim[2], fftW, fftH, kernelX, kernelY, kernelW, kernelH, spectrum_size, padded_size, fftPlanManyInv, total_size); return true; } extern "C" bool computeCovarianceGPUMulti(float *d_input_images1_multi, float *d_mu1_multi, float *d_input_images2_single, float *d_mu2_single, int *dim, int num_image_sets , float *d_output_images_multi, float sigma, fComplex *d_kernel_spectrum, float *temp_padded, fComplex *temp_spectrum , hipfftHandle fftPlanManyFwd, hipfftHandle fftPlanManyInv ) { const int b = BLOCK_SIZE_X * BLOCK_SIZE_Y * BLOCK_SIZE_Z; const int image_size = dim[0]*dim[1]*dim[2]; hipLaunchKernelGGL(( computeCovariance_kernel), dim3(iDivUp(image_size*num_image_sets, b)), dim3(b), 0, 0, d_input_images1_multi, d_mu1_multi, d_input_images2_single, d_mu2_single, d_output_images_multi, image_size, num_image_sets); const unsigned int kernelY = getKernelSize(sigma), kernelX = kernelY; const unsigned int kernelH = 2*kernelY+1, kernelW = kernelH; const int fftH = snapTransformSize(dim[1] + kernelH - 1), dataH = dim[1]; const int fftW = snapTransformSize(dim[0] + kernelW 
- 1), dataW = dim[0]; const int spectrum_size = fftH*(fftW/2+1), padded_size = fftH*fftW; // gaussian filter (local sum) padDataClampToBorder( temp_padded, d_output_images_multi, fftH, fftW, dataH, dataW, kernelH, kernelW, kernelY, kernelX, dim[2]*num_image_sets ); cufftSafeCall( hipfftExecR2C( fftPlanManyFwd, (hipfftReal *)temp_padded, (hipfftComplex *)temp_spectrum ) ); // fft modulateAndNormalize(temp_spectrum, d_kernel_spectrum, fftH, fftW, dim[2]*num_image_sets, 1); // one kernel spectrum is repeatedly multiplied with a set of images cufftSafeCall( hipfftExecC2R(fftPlanManyInv, (hipfftComplex *)temp_spectrum, (hipfftReal *)temp_padded) ); // inverse fft unPadDataClampToBorder(d_output_images_multi, temp_padded, fftH, fftW, dataH, dataW, kernelY, kernelX, dim[2]*num_image_sets); // print_and_log("computeCovarianceGPUMulti(), dim: %dx%dx%d, fft size: %dx%d, kernelXY: %dx%d, kernelWH: %dx%d, spectrum_size: %d, padded_size: %d, fftPlanManyInv: %d\n", // dim[0], dim[1], dim[2], fftW, fftH, kernelX, kernelY, kernelW, kernelH, spectrum_size, padded_size, fftPlanManyInv); return true; } __global__ void computeCovariance_kernel(float *d_img1_multi, float *d_mu1_multi, float *d_img2_single, float *d_mu2_single, float *d_output_multi, int image_size, int num_image_sets) { const int index = blockDim.x * blockIdx.x + threadIdx.x; if(index >= image_size*num_image_sets) return; const int index2 = index % image_size; d_output_multi[index] = (d_img2_single[index2]-d_mu2_single[index2])*(d_img1_multi[index]-d_mu1_multi[index]); } extern "C" value_type gauss(value_type x, value_type sigma) { // Gaussian return exp(-x*x/(2*sigma*sigma)) / (sigma*sqrt(2*M_PI)); } extern "C" value_type dgauss(value_type x, value_type sigma) { // first order derivative of Gaussian return -x * gauss(x,sigma) / (sigma*sigma); } extern "C" int getGaussianGradientKernel(const value_type sigma, const int kernel_size, value_type *h_kernel) { // we assume kernel_size is an odd number, which is computed by something like the following // epsilon=1e-2; // halfsize=ceil(sigma*sqrt(-2*log(sqrt(2*pi)*sigma*epsilon))); // size=2*halfsize+1; int half_size = (kernel_size-1)/2; value_type sum = 0.0; for(int i=0;i<kernel_size;i++){ for(int j=0;j<kernel_size;j++){ int index = j*kernel_size+i; h_kernel[index] = gauss(i-half_size, sigma)*dgauss(j-half_size, sigma); sum += abs(h_kernel[index])*abs(h_kernel[index]); } } sum = sqrt(sum); value_type* end_ptr = &(h_kernel[kernel_size*kernel_size]); for(value_type *ptr = h_kernel;ptr != end_ptr;ptr++) *ptr /= sum; return true; } extern "C" int getGaussianKernel(const value_type sigma, const int kernel_size, value_type *h_kernel) { // we assume kernel_size is an odd number, which is computed by something like the following value_type half_size = (kernel_size-1)/2; value_type sum = 0.0; for(int i=0;i<kernel_size;i++){ for(int j=0;j<kernel_size;j++){ int index = j*kernel_size+i; h_kernel[index] = gauss(sqrt((i-half_size)*(i-half_size)+(j-half_size)*(j-half_size)), sigma); sum += h_kernel[index]; } } value_type* end_ptr = &(h_kernel[kernel_size*kernel_size]); for(value_type *ptr = h_kernel;ptr != end_ptr;ptr++) *ptr /= sum; return true; } extern "C" bool computeGaussianGradientKernelSpectrum(float sigma, int x_dim, int y_dim, int z_dim, fComplex *x_kernel_spectrum, fComplex *y_kernel_spectrum, hipfftHandle fftPlanFwd) { unsigned int halfsize = getKernelSize(sigma); unsigned int kernel_size=2*halfsize+1; // print_and_log("computeGaussianGradientKernel(), kernel_size: %d\n", kernel_size); float *x_kernel = new
float[kernel_size*kernel_size], *y_kernel = new float[kernel_size*kernel_size]; // compute gaussian gradient kernel for x direction getGaussianGradientKernel(sigma, kernel_size, x_kernel); // compute gaussian gradient kernel for y direction (just transpose x kernel) for(unsigned int i=0;i<kernel_size;i++) for(unsigned int j=0;j<kernel_size;j++) y_kernel[j*kernel_size+i] = x_kernel[i*kernel_size+j]; computeKernelSpectrumGPU( x_kernel, y_dim, x_dim, kernel_size, kernel_size, halfsize, halfsize, x_kernel_spectrum, fftPlanFwd ); computeKernelSpectrumGPU( y_kernel, y_dim, x_dim, kernel_size, kernel_size, halfsize, halfsize, y_kernel_spectrum, fftPlanFwd ); delete[] x_kernel; delete[] y_kernel; return true; } extern "C" bool computeGaussianKernelSpectrum(float sigma, int x_dim, int y_dim, int z_dim, fComplex *kernel_spectrum, hipfftHandle fftPlanFwd) { unsigned int halfsize = getKernelSize(sigma); unsigned int kernel_size=2*halfsize+1; //print_and_log("computeGaussianKernel(), kernel_size: %d, sigma: %f\n", kernel_size, sigma); float *kernel = new float[kernel_size*kernel_size]; // compute gaussian gradient kernel getGaussianKernel(sigma, kernel_size, kernel); //for (int i = 0; i < kernel_size*kernel_size; i++) print_and_log("%f,", kernel[i]); //print_and_log("\n"); computeKernelSpectrumGPU( kernel, y_dim, x_dim, kernel_size, kernel_size, halfsize, halfsize, kernel_spectrum, fftPlanFwd ); delete[] kernel; return true; } extern "C" void computeGradientInformation(float *d_grad1X, float *d_grad1Y, float *d_grad2X, float *d_grad2Y, float *d_output, int image_size , float *d_mask_weight, bool isSingleModality, double *gradient_information, int num_image_sets , hipblasHandle_t cublasHandle, float *d_temp_SM, float *h_temp_SM, float *d_OneVector, float h_NormalizationFactor, int exclusive_norm) { const int b = BLOCK_SIZE_X * BLOCK_SIZE_Y * BLOCK_SIZE_Z; hipLaunchKernelGGL(( computeGradientInformation_kernel), dim3(iDivUp(image_size*num_image_sets, b)), dim3(b), 0, 0, d_grad1X, d_grad1Y, d_grad2X, d_grad2Y, d_output, image_size, isSingleModality, d_mask_weight, num_image_sets, exclusive_norm); // compute sum of each gradient information image // to parallelize the computation, we consider d_output as a matrix of (image_size x num_image_sets)' and multiply one vector of image_size element // computeImageSum_kernel<<<iDivUp(num_image_sets, b), b>>>(d_output, image_size, num_image_sets, d_OneVector, d_temp_SM); // cutilSafeCall( hipMemcpy(h_temp_SM, d_output, num_image_sets*sizeof(float), hipMemcpyDeviceToHost) ); // for(int i=0;i<num_image_sets;i++){ print_and_log("d_output[%d] = %f\n", i, h_temp_SM[i]); } float beta = 0.0f; hipblasStatus_t status = hipblasSgemv(cublasHandle, HIPBLAS_OP_T, image_size, num_image_sets, &h_NormalizationFactor, d_output, image_size, d_OneVector, 1, &beta, d_temp_SM, 1); if (status != HIPBLAS_STATUS_SUCCESS){ print_and_log("!!!! 
hipblasSgemv execution error\n"); } cutilSafeCall( hipMemcpy(h_temp_SM, d_temp_SM, num_image_sets*sizeof(float), hipMemcpyDeviceToHost) ); for(int i=0;i<num_image_sets;i++) gradient_information[i] = (double)(h_temp_SM[i]); // float -> double conversion // for(int i=0;i<num_image_sets;i++){ print_and_log("GI[%d] = %f\n", i, gradient_information[i]); } } extern "C" void computeGradientInformation_StdNorm(float *d_grad1X, float *d_grad1Y, float *d_grad2X, float *d_grad2Y, float *d_output, int image_size , float *d_mask_weight, bool isSingleModality, double *gradient_information, int num_image_sets , hipblasHandle_t cublasHandle, float *d_temp_SM, float *h_temp_SM, float *d_OneVector, float h_NormalizationFactor) { /* // Note: d_grad1X, d_grad1Y and d_output have multiple image sets, while d_grad2X and d_grad2Y have a single image set. hipblasSdgmm(cublasHandle, HIPBLAS_SIDE_LEFT, image_size, num_image_sets, d_grad1X, image_size, d_mask_weight, 1, d_images, image_size); const int b = BLOCK_SIZE_X * BLOCK_SIZE_Y * BLOCK_SIZE_Z; hipLaunchKernelGGL(( computeGradientInformation_kernel), dim3(iDivUp(image_size*num_image_sets, b)), dim3(b), 0, 0, d_grad1X, d_grad1Y, d_grad2X, d_grad2Y, d_output, image_size, isSingleModality, d_mask_weight, num_image_sets); float beta = 0.0f; hipblasStatus_t status = hipblasSgemv(cublasHandle, HIPBLAS_OP_T, image_size, num_image_sets, &h_NormalizationFactor, d_output, image_size, d_OneVector, 1, &beta, d_temp_SM, 1); if (status != HIPBLAS_STATUS_SUCCESS){ print_and_log("!!!! hipblasSgemv execution error\n"); } cutilSafeCall( hipMemcpy(h_temp_SM, d_temp_SM, num_image_sets*sizeof(float), hipMemcpyDeviceToHost) ); for(int i=0;i<num_image_sets;i++) gradient_information[i] = (double)(h_temp_SM[i]); // float -> double conversion // for(int i=0;i<num_image_sets;i++){ print_and_log("GI[%d] = %f\n", i, gradient_information[i]); } */ } __global__ void computeGradientInformation_kernel(float *d_grad1X, float *d_grad1Y, float *d_grad2X, float *d_grad2Y, float *d_output, int image_size, bool isSingleModality , float *d_mask_weight, int num_image_sets, int exclusive_norm) { // image1 has multiple image sets, image2 has only one image set const int index = blockDim.x * blockIdx.x + threadIdx.x; if(index >= image_size*num_image_sets) return; const int index2 = index % image_size; const float epsilon = 1e-6; // compute inner product of gradient vectors float inner_product = (d_grad1X[index]*d_grad2X[index2]) + (d_grad1Y[index]*d_grad2Y[index2]); // compute product of norm float norm[2] = {sqrt( d_grad1X[index]*d_grad1X[index]+d_grad1Y[index]*d_grad1Y[index] ), sqrt( d_grad2X[index2]*d_grad2X[index2]+d_grad2Y[index2]*d_grad2Y[index2] )}; float norm_prod = norm[0] * norm[1]; float norm_angle_cos = inner_product / norm_prod; // compute angle between two gradient vector and compute weight // if norm_prod is small, weight is 0 float weight; if(norm_prod<epsilon) weight = 0; else if(norm_angle_cos>1.0 || norm_angle_cos<-1.0) weight = 1; else if(isSingleModality) weight = ( cos( acos(norm_angle_cos) ) + 1 ) / 2; // for single-modality registration (zero weight on 180 degrees) else weight = ( cos( 2*acos(norm_angle_cos) ) + 1 ) / 2; // for multi-modality registration (high weight on 180 degrees) // compute gradient angle if(d_mask_weight) weight *= d_mask_weight[index2]; // multiply mask (for the case when we exclude part of the image from contribution to the similarity metric) if(exclusive_norm >= 0) d_output[index] = weight * norm[exclusive_norm]; // if specified, we always use the norm 
else d_output[index] = weight * MIN( norm[0], norm[1] ); } extern "C" void computeSSIM(float *d_mu1_multi, float *d_sigma_sq1_multi, float *d_mu2_single, float *d_sigma_sq2_single, float *d_output_multi, int image_size , float *d_mask_weight, double *SSIM, int num_image_sets , hipblasHandle_t cublasHandle, float *d_temp_SSIM, float *h_temp_SSIM, float *d_OneVector, float h_DynamicRange) { // image1 has multiple image sets, image2 has only one image set const int b = BLOCK_SIZE_X * BLOCK_SIZE_Y * BLOCK_SIZE_Z; // d_output needs to be set as the covariance image prior to this function call float K1 = 0.01, K2 = 0.03; float C1 = (K1*h_DynamicRange)*(K1*h_DynamicRange), C2 = (K2*h_DynamicRange)*(K2*h_DynamicRange); // print_and_log("computeSSIM(), dynamic range: %f, C1: %f, C2: %f\n", h_DynamicRange, C1, C2); hipLaunchKernelGGL(( computeSSIM_kernel), dim3(iDivUp(image_size*num_image_sets, b)), dim3(b), 0, 0, d_mu1_multi, d_sigma_sq1_multi, d_mu2_single, d_sigma_sq2_single, d_output_multi, image_size, d_mask_weight, num_image_sets, C1, C2); // compute sum of each SSIM image // to parallelize the computation, we consider d_output as a matrix of (image_size x num_image_sets)' and multiply one vector of image_size element // computeImageSum_kernel<<<iDivUp(num_image_sets, b), b>>>(d_output, image_size, num_image_sets, d_temp_SSIM); float alpha = 1.0f, beta = 0.0f; hipblasStatus_t status = hipblasSgemv(cublasHandle, HIPBLAS_OP_T, image_size, num_image_sets, &alpha, d_output_multi, image_size, d_OneVector, 1, &beta, d_temp_SSIM, 1); if (status != HIPBLAS_STATUS_SUCCESS){ print_and_log("!!!! hipblasSgemv execution error\n"); } cutilSafeCall( hipMemcpy(h_temp_SSIM, d_temp_SSIM, num_image_sets*sizeof(float), hipMemcpyDeviceToHost) ); for(int i=0;i<num_image_sets;i++) SSIM[i] = (double)(h_temp_SSIM[i]); // float -> double conversion // for(int i=0;i<num_image_sets;i++){ print_and_log("SSIM[%d] = %f\n", i, SSIM[i]); } } __global__ void computeSSIM_kernel(float *d_mu1_multi, float *d_sigma_sq1_multi, float *d_mu2_single, float *d_sigma_sq2_single, float *d_output_multi, int image_size , float *d_mask_weight, int num_image_sets, float C1, float C2) { const int index = blockDim.x * blockIdx.x + threadIdx.x; if(index >= image_size*num_image_sets) return; const int index2 = index % image_size; d_output_multi[index] = ( (2*d_mu1_multi[index]*d_mu2_single[index2]+C1) * (2*d_output_multi[index] + C2) )/ ( (d_mu1_multi[index]*d_mu1_multi[index]+d_mu2_single[index2]*d_mu2_single[index2] + C1) * (d_sigma_sq1_multi[index] + d_sigma_sq2_single[index2] + C2) ); if(d_mask_weight) d_output_multi[index] *= d_mask_weight[index2]; // d_output[index] = (2*d_mu1[index]*d_mu2[index]+C1)/ (d_mu1[index]*d_mu1[index]+d_mu2[index]*d_mu2[index] + C1); // d_output[index] = (2*d_output[index] + C2)/ (d_sigma_sq1[index] + d_sigma_sq2[index] + C2); } __global__ void computeImageSum_kernel(float *d_image, int image_size, int num_image_sets, float *d_weight, float *d_sum) { const int image_indx = blockDim.x * blockIdx.x + threadIdx.x; if(image_indx >= num_image_sets) return; float sum = 0, *start_ptr = d_image+image_indx*image_size, *end_ptr = start_ptr+image_size; for(float *ptr=start_ptr;ptr!=end_ptr;ptr++) sum += (*ptr); d_sum[image_indx] = sum*d_weight[image_indx]; } struct p_log_p : public thrust::unary_function<float, float> { __host__ __device__ float operator()(float a){ return (a == 0) ? 
0 : -a*logf(a); } }; extern "C" float computeJointEntropy(float *d_data1, float *d_data2, int length, int *d_joint_hist, float *d_joint_pdf) { computeJointPDF(d_data1, d_data2, length, d_joint_hist, d_joint_pdf); thrust::device_vector<float> pdf_vec(MI_NUMBER_OF_BINS*MI_NUMBER_OF_BINS); thrust::copy_n( thrust::device_ptr<float>(d_joint_pdf), MI_NUMBER_OF_BINS*MI_NUMBER_OF_BINS, pdf_vec.begin() ); // make_transform_iterator could not be applied directly to device_ptr<float>, so we need to make a temporary copy... return thrust::reduce( make_transform_iterator( pdf_vec.begin(), p_log_p() ), make_transform_iterator( pdf_vec.end(), p_log_p() ), 0.0f, thrust::plus<float>() ); } extern "C" bool computeJointPDF(float *d_data1, float *d_data2, int length, int *d_joint_hist, float *d_joint_pdf) { const int block_size = BLOCK_SIZE_X * BLOCK_SIZE_Y * BLOCK_SIZE_Z; dim3 dimBlock(block_size, 1); int block_num = iDivUp(length, block_size); int size_sq = static_cast<int>(ceil( sqrtf(static_cast<float>(block_num)) )); dim3 dimDataGrid(size_sq, iDivUp(block_num, size_sq), 1); // print_and_log("computeJointPDF(), length of data: %d, size of joint_pdf: %d, sqrt of size: %f(%d), grid = (%d, %d), block = (%d, %d)\n" // , length, MI_NUMBER_OF_BINS*MI_NUMBER_OF_BINS, sqrtf(block_num), size_sq, dimDataGrid.x, dimDataGrid.y, dimBlock.x, dimBlock.y); cutilSafeCall( hipMemset(d_joint_hist, 0, MI_NUMBER_OF_BINS*MI_NUMBER_OF_BINS*sizeof(int)) ); hipLaunchKernelGGL(( computeJointHistogramKernel), dim3(dimDataGrid), dim3(dimBlock), 0 , 0, d_data1, d_data2, length, d_joint_hist, MI_NUMBER_OF_BINS); // normalize histogram (divide each element by length) thrust::transform( thrust::device_ptr<int>(d_joint_hist), thrust::device_ptr<int>(d_joint_hist) + MI_NUMBER_OF_BINS*MI_NUMBER_OF_BINS, thrust::make_constant_iterator<float>(length), thrust::device_ptr<float>(d_joint_pdf), thrust::divides<float>()); return true; } extern "C" float computeEntropy(float *d_data, int length, int *d_hist, float *d_pdf) { computePDF(d_data, length, d_hist, d_pdf); thrust::device_vector<float> pdf_vec(MI_NUMBER_OF_BINS); thrust::copy_n( thrust::device_ptr<float>(d_pdf), MI_NUMBER_OF_BINS, pdf_vec.begin() ); // make_transform_iterator could not be applied directly to device_ptr<float>, so we need to make a temporary copy... 
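// Both computeJointEntropy() and computeEntropy() reduce H = -sum_i p_i*log(p_i) over the PDF bins, with
// 0*log(0) treated as 0 by p_log_p; mutual information can then be assembled on the host as
// MI = H(A) + H(B) - H(A,B) (see the sketch appended at the end of this file).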
return thrust::reduce( make_transform_iterator( pdf_vec.begin(), p_log_p() ), make_transform_iterator( pdf_vec.end(), p_log_p() ), 0.0f, thrust::plus<float>() ); } extern "C" bool computePDF(float *d_data, int length, int *d_hist, float *d_pdf) { const int block_size = BLOCK_SIZE_X * BLOCK_SIZE_Y * BLOCK_SIZE_Z; dim3 dimBlock(block_size, 1); int block_num = iDivUp(length, block_size); int size_sq = static_cast<int>(ceil( sqrtf(static_cast<float>(block_num)) )); dim3 dimDataGrid(size_sq, iDivUp(block_num, size_sq), 1); cutilSafeCall( hipMemset(d_hist, 0, MI_NUMBER_OF_BINS*sizeof(int)) ); hipLaunchKernelGGL(( computeHistogramKernel), dim3(dimDataGrid), dim3(dimBlock), 0 , 0, d_data, length, d_hist, MI_NUMBER_OF_BINS); // normalize histogram (divide each element by length) thrust::transform( thrust::device_ptr<int>(d_hist), thrust::device_ptr<int>(d_hist) + MI_NUMBER_OF_BINS, thrust::make_constant_iterator<float>(length), thrust::device_ptr<float>(d_pdf), thrust::divides<float>()); return true; } __global__ void computeJointHistogramKernel(float *d_data1, float *d_data2, int length, int *d_joint_hist, int num_bins) { int index = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x; // blockDim.y = 1 if(index < length && index>=0){ int accumulate_index = floor(d_data1[index]*num_bins)*num_bins+floor(d_data2[index]*num_bins); // d_data has to be normalized to [0 1] in advance if(accumulate_index < num_bins*num_bins) atomicAdd(d_joint_hist+accumulate_index, 1); } } __global__ void computeHistogramKernel(float *d_data, int length, int *d_hist, int num_bins) { int index = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x; // blockDim.y = 1 if(index < length && index>=0){ int accumulate_index = floor(d_data[index]*num_bins); // d_data has to be normalized to [0 1] in advance if(accumulate_index < num_bins) atomicAdd(d_hist+accumulate_index, 1); } } __global__ void normalizeHistogramKernel(int *d_hist, float *d_pdf, int size, float denominator) { int index = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x; // blockDim.y = 1 if(index < size) d_pdf[index] = (float)d_hist[index]/denominator; } //////////////////////////////////////////////////////////////////////////////// /// Position convolution kernel center at (0, 0) in the image //////////////////////////////////////////////////////////////////////////////// __global__ void padKernel_kernel(float *d_Dst, float *d_Src, int fftH, int fftW, int kernelH, int kernelW, int kernelY, int kernelX) { const int y = blockDim.y * blockIdx.y + threadIdx.y; const int x = blockDim.x * blockIdx.x + threadIdx.x; if(y < kernelH && x < kernelW){ int ky = y - kernelY; if(ky < 0) ky += fftH; int kx = x - kernelX; if(kx < 0) kx += fftW; d_Dst[ky * fftW + kx] = LOAD_FLOAT(y * kernelW + x); } } //////////////////////////////////////////////////////////////////////////////// // Prepare data for "pad to border" addressing mode //////////////////////////////////////////////////////////////////////////////// __global__ void padDataClampToBorder_kernel(float *d_Dst, float *d_Src, int fftH, int fftW, int dataH, int dataW, int kernelH, int kernelW, int kernelY, int kernelX, int imageRepeat) { const int y = blockDim.y * blockIdx.y + threadIdx.y; const int total_x = blockDim.x * blockIdx.x + threadIdx.x; const int borderH = dataH + kernelY; const int borderW = dataW + kernelX; const int x = total_x % fftW; const int imageID = (total_x-x)/fftW; // division in 'int' if(imageID>=imageRepeat || imageID<0) return; if(y < fftH && x < fftW){ 
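// clamp-to-border mapping: rows/columns inside the kernel apron (dataH <= y < dataH+kernelY, and likewise for x)
// replicate the last image row/column, while the remaining FFT padding wraps back to row/column 0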
const int dy = (y<dataH) ? y : ( (y>=dataH && y<borderH) ? (dataH-1) : 0 ); const int dx = (x<dataW) ? x : ( (x>=dataW && x<borderW) ? (dataW-1) : 0 ); d_Dst[y * fftW + x + fftH*fftW*imageID] = LOAD_FLOAT(dy * dataW + dx + dataH*dataW*imageID); //d_Dst[y * fftW + x + fftH*fftW*imageID] = 0; // LOAD_FLOAT(dy * dataW + dx + dataH*dataW*imageID); } } __global__ void fillEdges_kernel(float *d_Dst, float *d_Ref, int fftH, int fftW, int dataH, int dataW, int kernelH, int kernelW, int kernelY, int kernelX, int imageRepeat, float threshold) { const int y = blockDim.y * blockIdx.y + threadIdx.y; const int total_x = blockDim.x * blockIdx.x + threadIdx.x; const int borderH = dataH + kernelY; const int borderW = dataW + kernelX; const int x = total_x % fftW; const int imageID = (total_x-x)/fftW; // division in 'int' if(imageID>=imageRepeat || imageID<0) return; if(y < fftH && x < fftW){ const int dy = (y<dataH) ? y : ( (y>=dataH && y<borderH) ? (dataH-1) : 0 ); const int dx = (x<dataW) ? x : ( (x>=dataW && x<borderW) ? (dataW-1) : 0 ); if(d_Ref[dy * dataW + dx + dataH*dataW*imageID]<threshold){ // if the pixel in the reference image is smaller than threshold, fill (kernelH x kernelW) pixels around the pixel for(int i=-kernelX;i<=kernelX;i++){ for(int j=-kernelY;j<=kernelY;j++){ if((y+j>=0) && (y+j<fftH) && (x+i>=0) && (x+i<fftW)) d_Dst[(y+j) * fftW + (x+i) + fftH*fftW*imageID] = 0; } } // for(int i=-0;i<=0;i++) for(int j=-0;j<=0;j++) d_Dst[(y+j) * fftW + (x+i) + fftH*fftW*imageID] = 0; } } } __global__ void unPadDataClampToBorder_kernel(float *d_Dst, float *d_Src, int fftH, int fftW, int dataH, int dataW, int kernelY, int kernelX, int imageRepeat) { const int y = blockDim.y * blockIdx.y + threadIdx.y; const int total_x = blockDim.x * blockIdx.x + threadIdx.x; const int borderH = dataH + kernelY; const int borderW = dataW + kernelX; const int x = total_x % dataW; const int imageID = (total_x-x)/dataW; // division in 'int' if(imageID>=imageRepeat || imageID<0) return; if(y < dataH && x < dataW){ const int dy = (y<dataH) ? y : ( (y>=dataH && y<borderH) ? (dataH-1) : 0 ); const int dx = (x<dataW) ? x : ( (x>=dataW && x<borderW) ?
(dataW-1) : 0 ); d_Dst[dy*dataW+dx + dataH*dataW*imageID] = LOAD_FLOAT(y*fftW+x + fftH*fftW*imageID); } } //////////////////////////////////////////////////////////////////////////////// /// Position convolution kernel center at (0, 0) in the image //////////////////////////////////////////////////////////////////////////////// extern "C" void padKernel(float *d_Dst, float *d_Src, int fftH, int fftW, int kernelH, int kernelW, int kernelY, int kernelX) { assert(d_Src != d_Dst); dim3 threads(32, 8); dim3 grid(iDivUp(kernelW, threads.x), iDivUp(kernelH, threads.y)); SET_FLOAT_BASE; hipLaunchKernelGGL(( padKernel_kernel), dim3(grid), dim3(threads), 0, 0, d_Dst, d_Src, fftH, fftW, kernelH, kernelW, kernelY, kernelX); } //////////////////////////////////////////////////////////////////////////////// // Prepare data for "pad to border" addressing mode //////////////////////////////////////////////////////////////////////////////// extern "C" void padDataClampToBorder(float *d_Dst, float *d_Src, int fftH, int fftW, int dataH, int dataW, int kernelW, int kernelH, int kernelY, int kernelX, int imageRepeat) { assert(d_Src != d_Dst); dim3 threads(32, 8); dim3 grid(iDivUp(fftW*imageRepeat, threads.x), iDivUp(fftH, threads.y)); // print_and_log("padDataClampToBorder, grid size: (%dx%d), block size: (%dx%d), fftH: %d, fftW: %d\n", grid.x, grid.y, threads.x, threads.y, fftH, fftW); // print_and_log("dataH: %d, dataW: %d, kernelH: %d, kernelW: %d, kernelY: %d, kernelX: %d, imageRepeat: %d\n", dataH, dataW, kernelH, kernelW, kernelY, kernelX, imageRepeat); SET_FLOAT_BASE; // When we use texture for small image (less than 256x256 maybe), this line causes crashes, for some reason... hipLaunchKernelGGL(( padDataClampToBorder_kernel), dim3(grid), dim3(threads), 0, 0, d_Dst, d_Src, fftH, fftW, dataH, dataW, kernelH, kernelW, kernelY, kernelX, imageRepeat); } extern "C" void fillEdges(float *d_Dst, float *d_Ref, int fftH, int fftW, int dataH, int dataW, int kernelW, int kernelH, int kernelY, int kernelX, int imageRepeat, float threshold) { assert(d_Ref != d_Dst); dim3 threads(32, 8); dim3 grid(iDivUp(fftW*imageRepeat, threads.x), iDivUp(fftH, threads.y)); // print_and_log("fillEdges, grid size: (%dx%d), block size: (%dx%d)\n", grid.x, grid.y, threads.x, threads.y); SET_FLOAT_BASE; // When we use texture for small image (less than 256x256 maybe), this line causes crashes, for some reason... 
hipLaunchKernelGGL(( fillEdges_kernel), dim3(grid), dim3(threads), 0, 0, d_Dst, d_Ref, fftH, fftW, dataH, dataW, kernelH, kernelW, kernelY, kernelX, imageRepeat, threshold); } extern "C" void unPadDataClampToBorder(float *d_Dst, float *d_Src, int fftH, int fftW, int dataH, int dataW, int kernelY, int kernelX, int imageRepeat) { dim3 threads(32, 8); dim3 grid(iDivUp(dataW*imageRepeat, threads.x), iDivUp(dataH, threads.y)); // print_and_log("unPadDataClampToBorder, grid size: (%dx%d), block size: (%dx%d)\n", grid.x, grid.y, threads.x, threads.y); SET_FLOAT_BASE; hipLaunchKernelGGL(( unPadDataClampToBorder_kernel), dim3(grid), dim3(threads), 0, 0, d_Dst, d_Src, fftH, fftW, dataH, dataW, kernelY, kernelX, imageRepeat); } //////////////////////////////////////////////////////////////////////////////// // Modulate Fourier image of padded data by Fourier image of padded kernel // and normalize by FFT size //////////////////////////////////////////////////////////////////////////////// __global__ void modulateAndNormalize_kernel(fComplex *d_Dst, fComplex *d_Src, int dataSize, int imageRepeat, float c) { const int i = blockDim.x * blockIdx.x + threadIdx.x; if(i >= (dataSize*imageRepeat)) return; fComplex a = d_Src[i%dataSize]; // the same d_Src is repeatedly multiplied to d_Dst fComplex b = d_Dst[i]; mulAndScale(a, b, c); d_Dst[i] = a; } extern "C" void modulateAndNormalize(fComplex *d_Dst, fComplex *d_Src, int fftH, int fftW, int imageRepeat, int padding) { assert( fftW % 2 == 0 ); const int dataSize = fftH * (fftW / 2 + padding); const int b = 256; //BLOCK_SIZE_X * BLOCK_SIZE_Y * BLOCK_SIZE_Z; hipLaunchKernelGGL(( modulateAndNormalize_kernel), dim3(iDivUp(dataSize*imageRepeat, b)), dim3(b), 0, 0, d_Dst, d_Src, dataSize, imageRepeat, 1.0f / (float)(fftW * fftH) ); } extern "C" void normalizeImages(float *d_images, int size, float norm_max, float norm_min) { if(norm_max == 0 && norm_min == 0){ // compute maximum and minimum if needed ComputeMaxMin(d_images, size, &norm_max, &norm_min); // print_and_log("(min,max) = (%f, %f)\n", norm_min, norm_max); } // normalize images NormalizeData(d_images, size, norm_max, norm_min); } extern "C" void maskImages( float *d_images, float *d_mask_weight, int image_size, int num_image_sets, hipblasHandle_t cublasHandle ) { hipblasSdgmm(cublasHandle, HIPBLAS_SIDE_LEFT, image_size, num_image_sets, d_images, image_size, d_mask_weight, 1, d_images, image_size); } extern "C" int countZeroPixels(float *d_image, int image_size) { return thrust::count( thrust::device_ptr<float>(d_image), thrust::device_ptr<float>(d_image)+image_size, 0.0f); }
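// ---------------------------------------------------------------------------
// Hypothetical usage sketch (added for illustration, not part of the original
// file): combining the entropy helpers above into a mutual-information score.
// It assumes d_fixed and d_moving are device images of 'length' pixels already
// normalized to [0,1] (e.g. via normalizeImages()), and that the histogram/PDF
// buffers were allocated with MI_NUMBER_OF_BINS bins (squared for the joint ones).
extern "C" float computeMutualInformationSketch(float *d_fixed, float *d_moving, int length, int *d_hist, float *d_pdf, int *d_joint_hist, float *d_joint_pdf)
{
  float H_fixed = computeEntropy(d_fixed, length, d_hist, d_pdf); // H(A)
  float H_moving = computeEntropy(d_moving, length, d_hist, d_pdf); // H(B); the scratch buffers are reused
  float H_joint = computeJointEntropy(d_fixed, d_moving, length, d_joint_hist, d_joint_pdf); // H(A,B)
  return H_fixed + H_moving - H_joint; // MI(A,B) = H(A) + H(B) - H(A,B)
}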
4c533487f4ad0bd1c6109b81ae79de1bc88f87da.cu
#include <driver_types.h> #include "RegTools.h" #include <cufft.h> #define _USE_MATH_DEFINES #include "math.h" // for M_PI_2, cos, sin #include <float.h> // thrust functions // important note: thrust is not compatible with multi-thread environment, thus not compativle with multi-GPU in this library #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/transform.h> #include <thrust/fill.h> #include <thrust/copy.h> #include <thrust/functional.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/count.h> #include <assert.h> #define USE_TEXTURE 0 // When we use texture for small image (less than 256x256 maybe), padDataClampToBorder() (SET_FLOAT_BASE) crashes, for some reason #if(USE_TEXTURE) texture<float, 1, cudaReadModeElementType> texFloat; #define LOAD_FLOAT(i) tex1Dfetch(texFloat, i) #define SET_FLOAT_BASE cutilSafeCall( cudaBindTexture(0, texFloat, d_Src) ) #else #define LOAD_FLOAT(i) d_Src[i] #define SET_FLOAT_BASE #endif extern FILE *m_LogFile; //////////////////////////////////////////////////////////////////////////////// // 2D R2C / C2R post/preprocessing kernels //////////////////////////////////////////////////////////////////////////////// #if(USE_TEXTURE) texture<fComplex, 1, cudaReadModeElementType> texComplexA; texture<fComplex, 1, cudaReadModeElementType> texComplexB; #define LOAD_FCOMPLEX(i) tex1Dfetch(texComplexA, i) #define LOAD_FCOMPLEX_A(i) tex1Dfetch(texComplexA, i) #define LOAD_FCOMPLEX_B(i) tex1Dfetch(texComplexB, i) #define SET_FCOMPLEX_BASE cutilSafeCall( cudaBindTexture(0, texComplexA, d_Src) ) #define SET_FCOMPLEX_BASE_A cutilSafeCall( cudaBindTexture(0, texComplexA, d_SrcA) ) #define SET_FCOMPLEX_BASE_B cutilSafeCall( cudaBindTexture(0, texComplexB, d_SrcB) ) #else #define LOAD_FCOMPLEX(i) d_Src[i] #define LOAD_FCOMPLEX_A(i) d_SrcA[i] #define LOAD_FCOMPLEX_B(i) d_SrcB[i] #define SET_FCOMPLEX_BASE #define SET_FCOMPLEX_BASE_A #define SET_FCOMPLEX_BASE_B #endif extern "C" int snapTransformSize(int dataSize){ int hiBit; unsigned int lowPOT, hiPOT; dataSize = iAlignUp(dataSize, 16); for(hiBit = 31; hiBit >= 0; hiBit--) if(dataSize & (1U << hiBit)) break; lowPOT = 1U << hiBit; if(lowPOT == dataSize) return dataSize; hiPOT = 1U << (hiBit + 1); if(hiPOT <= 1024) return hiPOT; else return iAlignUp(dataSize, 512); } extern "C" void computeKernelSpectrumGPU(float *h_Kernel, int dataH, int dataW, int kernelH, int kernelW, int kernelY, int kernelX , fComplex *d_KernelSpectrum, cufftHandle fftPlanFwd) { // this function is copied from CUDA SDK (convolutionFFT2D) and modified by Yoshito Otake // Note: make sure that there is no 'bad' number in d_Data (e.g., Inf, Nan, etc.) // if there is any 'bad' number, it breaks the entire result. 
const int fftH = snapTransformSize(dataH + kernelH - 1); const int fftW = snapTransformSize(dataW + kernelW - 1); float *d_Kernel, *d_PaddedKernel; cutilSafeCall( cudaMalloc(&(d_Kernel), kernelH*kernelW*sizeof(float)) ); cutilSafeCall( cudaMalloc(&(d_PaddedKernel), fftH*fftW*sizeof(float)) ); // fComplex *d_KernelSpectrum = temp_spectrum+ (fftH * (fftW / 2 + 1)); //print_and_log("kernelH: %d, kernelW: %d, fftH: %d, fftW: %d\n", kernelH, kernelW, fftH, fftW); cutilSafeCall( cudaMemcpy(d_Kernel, h_Kernel, kernelH * kernelW * sizeof(float), cudaMemcpyHostToDevice) ); cutilSafeCall( cudaMemset(d_PaddedKernel, 0, fftH * fftW * sizeof(float)) ); #if defined RegTools_VERBOSE_MESSAGE print_and_log("h_Kernel:\n"); for(int i=0;i<kernelW;i++){ for(int j=0;j<kernelH;j++) print_and_log("%f ", h_Kernel[i*kernelH+j]); print_and_log("\n"); } #endif padKernel( d_PaddedKernel, d_Kernel, fftH, fftW, kernelH, kernelW, kernelY, kernelX ); #if defined RegTools_VERBOSE_MESSAGE print_and_log("h_PaddedKernel:\n"); float *h_PaddedKernel = new float[fftH*fftW]; cudaMemcpy(h_PaddedKernel, d_PaddedKernel, fftH*fftW*sizeof(float), cudaMemcpyDeviceToHost); for(int i=0;i<fftW;i++){ for(int j=0;j<fftH;j++) print_and_log("%f ", h_PaddedKernel[i*fftH+j]); print_and_log("\n"); } delete[] h_PaddedKernel; #endif int error; if( (error=cufftExecR2C(fftPlanFwd, (cufftReal *)d_PaddedKernel, (cufftComplex *)d_KernelSpectrum)) != CUFFT_SUCCESS) { print_and_log("ERROR at cufftExecR2C in computeKernelSpectrumGPU (6 means CUFFT_EXEC_FAILED), error code: %d\n", error); } cutilSafeCall( cudaFree( d_Kernel ) ); cutilSafeCall( cudaFree( d_PaddedKernel ) ); } extern "C" unsigned int getKernelSize(float sigma) { // compute kernel size based on sigma const float epsilon = 1e-2; return ceil(sigma*sqrt(-2*log(sqrt(2*M_PI)*sigma*epsilon))); } extern "C" bool computeGaussianGradientGPUMulti(float *d_input_images, int *dim, int num_image_sets, float *d_output_imagesX, float *d_output_imagesY, float sigma , fComplex *d_x_kernel_spectrum, fComplex *d_y_kernel_spectrum, float *temp_padded, fComplex *temp_spectrum , cufftHandle fftPlanManyFwd, cufftHandle fftPlanManyInv, float GI_threshold ) { const unsigned int kernelY = getKernelSize(sigma), kernelX = kernelY; const unsigned int kernelH = 2*kernelY+1, kernelW = kernelH; const int fftH = snapTransformSize(dim[1] + kernelH - 1), dataH = dim[1]; const int fftW = snapTransformSize(dim[0] + kernelW - 1), dataW = dim[0]; const int spectrum_size = fftH*(fftW/2+1), padded_size = fftH*fftW; const int y_offset = dim[2]*num_image_sets; // FFT/iFFT for all images simultaneously (fftPlanMany needs to be created for exactly the same number of images) padDataClampToBorder( temp_padded, d_input_images, fftH, fftW, dataH, dataW, kernelH, kernelW, kernelY, kernelX, dim[2]*num_image_sets ); // Note: the following FFT fails if input image contains NaN (even only one pixel!) 
cufftSafeCall( cufftExecR2C( fftPlanManyFwd, (cufftReal *)temp_padded, (cufftComplex *)temp_spectrum ) ); cutilSafeCall( cudaMemcpy(temp_spectrum+spectrum_size*y_offset, temp_spectrum, spectrum_size*y_offset*sizeof(fComplex), cudaMemcpyDeviceToDevice) ); // modulate all spectrums modulateAndNormalize(temp_spectrum, d_x_kernel_spectrum, fftH, fftW, dim[2]*num_image_sets, 1); modulateAndNormalize(temp_spectrum+spectrum_size*y_offset, d_y_kernel_spectrum, fftH, fftW, dim[2]*num_image_sets, 1); // print_and_log("computeGaussianGradientGPUMulti(), dim: %dx%dx%d, fft size: %dx%d, kernelXY: %dx%d, kernelWH: %dx%d, spectrum_size: %d, padded_size: %d, fftPlanManyInv: %d, GI_threshold: %f\n", // dim[0], dim[1], dim[2], fftW, fftH, kernelX, kernelY, kernelW, kernelH, spectrum_size, padded_size, fftPlanManyInv, GI_threshold); // inverse FFT cufftSafeCall( cufftExecC2R(fftPlanManyInv, (cufftComplex *)temp_spectrum, (cufftReal *)temp_padded) ); #if defined RegTools_GI_BACKGROUND_EDGE_CHECK // fill zero in (kernelW x kernelH) pixels of the gradient image around the pixel which has an intensity lower than threshold value in the original image fillEdges( temp_padded, d_input_images, fftH, fftW, dataH, dataW, kernelH, kernelW, kernelY, kernelX, dim[2]*num_image_sets, GI_threshold ); fillEdges( temp_padded+padded_size*y_offset, d_input_images, fftH, fftW, dataH, dataW, kernelH, kernelW, kernelY, kernelX, dim[2]*num_image_sets, GI_threshold ); #endif // unPad all images unPadDataClampToBorder(d_output_imagesX, temp_padded, fftH, fftW, dataH, dataW, kernelY, kernelX, dim[2]*num_image_sets); unPadDataClampToBorder(d_output_imagesY, temp_padded+padded_size*y_offset, fftH, fftW, dataH, dataW, kernelY, kernelX, dim[2]*num_image_sets); return true; } struct sqrt_inverse_functor { __host__ __device__ float operator()(const float &x) const { return 1/sqrt(x); } }; struct square_functor { __host__ __device__ float operator()(const float &x) const { return x*x; } }; __global__ void apply_floating_mask_kernel(float *d_images, float *d_floating_mask, int size) { const int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= size) return; if(d_floating_mask[index]<1e-6) d_images[index] = 0; } __global__ void square_functor_kernel(float *d_input, float *d_output, int size) { const int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= size) return; d_output[index] = d_input[index] * d_input[index]; } __global__ void sqrt_inverse_functor_kernel(float *d_input, float *d_output, int size) { const int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= size) return; d_output[index] = 1/sqrt(d_input[index]); } extern "C" bool zeromean_stddivide_Images(float *d_images, float *d_temp, int image_size, int num_image_sets, cublasHandle_t cublasHandle, float *d_mean_std, float *d_OneVector, float h_NormalizationFactor, float *d_mask_weight, float *d_floating_mask) { float alpha, beta; // (OPTIONAL) apply mask to each image (column). 
Note that mask_weight is single image set (single column vector) //print_and_log("zeromean_stddivide_Images(), pass0, d_images: %d, d_temp: %d, image_size: %d, num_image_sets: %d, cublasHandle: %d\n", d_images, d_temp, image_size, num_image_sets, cublasHandle); if(d_mask_weight) cublasSdgmm(cublasHandle, CUBLAS_SIDE_LEFT, image_size, num_image_sets, d_images, image_size, d_mask_weight, 1, d_images, image_size); // compute mean of each image set (dim[2] images are considered as one set) beta = 0.0; cublasSgemv(cublasHandle, CUBLAS_OP_T, image_size, num_image_sets, &h_NormalizationFactor, d_images, image_size, d_OneVector, 1, &beta, d_mean_std, 1); //print_and_log("zeromean_stddivide_Images(), pass1\n"); // subtract mean from each image set (column-wise subtraction) alpha = -1.0f; beta = 1.0; cublasSgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, image_size, num_image_sets, 1, &alpha, d_OneVector, image_size, d_mean_std, 1, &beta, d_images, image_size); // is this correct? maybe below? (d_mean_std and d_OneVector should be opposite order) //cublasSgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, image_size, num_image_sets, 1, &alpha, d_mean_std, image_size, d_OneVector, 1, &beta, d_images, image_size); //print_and_log("zeromean_stddivide_Images(), pass2, image_size: %d, num_image_sets: %d\n", image_size, num_image_sets); const int b = BLOCK_SIZE_X * BLOCK_SIZE_Y * BLOCK_SIZE_Z; // apply floating mask if needed (set zero for the pixels with d_floating_mask>0) if(d_floating_mask) apply_floating_mask_kernel << <iDivUp(image_size*num_image_sets, b), b >> > (d_images, d_floating_mask, image_size*num_image_sets); // compute std of each image set (column-wise) //thrust::transform(thrust::device_ptr<float>(d_images), thrust::device_ptr<float>(d_images)+image_size*num_image_sets, thrust::device_ptr<float>(d_temp), square_functor()); square_functor_kernel << <iDivUp(image_size*num_image_sets, b), b >> > (d_images, d_temp, image_size*num_image_sets); //print_and_log("zeromean_stddivide_Images(), pass3\n"); alpha = 1.0f; beta = 0.0; cublasSgemv(cublasHandle, CUBLAS_OP_T, image_size, num_image_sets, &alpha, d_temp, image_size, d_OneVector, 1, &beta, d_mean_std, 1); //print_and_log("zeromean_stddivide_Images(), pass4\n"); //thrust::transform(thrust::device_ptr<float>(d_mean_std), thrust::device_ptr<float>(d_mean_std)+num_image_sets, thrust::device_ptr<float>(d_mean_std), sqrt_inverse_functor()); sqrt_inverse_functor_kernel << <iDivUp(num_image_sets, b), b >> > (d_mean_std, d_mean_std, num_image_sets); // devide each image set by std (column-wise division) cublasSdgmm(cublasHandle, CUBLAS_SIDE_RIGHT, image_size, num_image_sets, d_images, image_size, d_mean_std, 1, d_images, image_size); /* float *h_mean_std = new float[num_image_sets]; cutilSafeCall( cudaMemcpy(h_mean_std, d_mean_std, num_image_sets*sizeof(float), cudaMemcpyDeviceToHost) ); for(int i=0;i<num_image_sets;i++){ print_and_log("d_images[%d] mean: %f\n", i, h_mean_std[i]); } delete[] h_mean_std; */ return true; } extern "C" bool computeNormalizedCrossCorrelation_Pixels(float *d_images1_zeromean_multi, float *d_images2_zeromean_single, int image_size, int num_image_sets, cublasHandle_t cublasHandle) { // (numerator) compute normalized covariance (pixel-wise multiplication between images1 and images2) // overwrite onto images1 cublasSdgmm(cublasHandle, CUBLAS_SIDE_LEFT, image_size, num_image_sets, d_images1_zeromean_multi, image_size, d_images2_zeromean_single, 1, d_images1_zeromean_multi, image_size); return true; } extern "C" bool 
computeMeanSquaredError(float *d_images1_multi, float *d_images2_single, float *d_OneVector, double *MSE, int image_size, int num_image_sets, float *d_temp_MSE, float *h_temp_MSE, cublasHandle_t cublasHandle) { // subtract images2_single from each image set in d_images1_multi (column-wise subtraction) float alpha = -1.0f, beta = 1.0f; // cudaMemcpy(d_images1_multi, d_images2_single, image_size*sizeof(float), cudaMemcpyDeviceToDevice); cublasSgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, image_size, num_image_sets, 1, &alpha, d_images2_single, image_size, d_OneVector, 1, &beta, d_images1_multi, image_size); // square d_images1_multi //thrust::transform(thrust::device_ptr<float>(d_images1_multi), thrust::device_ptr<float>(d_images1_multi)+image_size*num_image_sets, thrust::device_ptr<float>(d_images1_multi), square_functor()); const int b = BLOCK_SIZE_X * BLOCK_SIZE_Y * BLOCK_SIZE_Z; square_functor_kernel << <iDivUp(image_size*num_image_sets, b), b >> > (d_images1_multi, d_images1_multi, image_size*num_image_sets); // average over each image alpha = 1.0f/(float)image_size; beta = 0.0; cublasSgemv(cublasHandle, CUBLAS_OP_T, image_size, num_image_sets, &alpha, d_images1_multi, image_size, d_OneVector, 1, &beta, d_temp_MSE, 1); // copy the results back to host cutilSafeCall( cudaMemcpy(h_temp_MSE, d_temp_MSE, num_image_sets*sizeof(float), cudaMemcpyDeviceToHost) ); for(int i=0;i<num_image_sets;i++) MSE[i] += (double)(h_temp_MSE[i]); // float -> double conversion (result is accumulated. make sure zeroing the initial array) return true; } struct absolute_value_functor { __host__ __device__ float operator()(const float &x) const { return fabs(x); } }; extern "C" bool computeNormalizedCrossCorrelation_Sum(float *d_images1_multi, float *d_images2_multi, float *d_output_multi, double *NCC, int image_size, int num_image_sets , cublasHandle_t cublasHandle, float *d_temp_NCC, float *h_temp_NCC, float *d_OneVector) { if(d_images2_multi){ float alpha = 0.5f; // pixel-wise mean (for Gradient Correlation) cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, image_size, num_image_sets, &alpha, d_images1_multi, image_size, &alpha, d_images2_multi, image_size, d_output_multi, image_size); // compute absolute value // thrust::transform( thrust::device_ptr<float>(d_output_multi), thrust::device_ptr<float>(d_output_multi)+image_size*num_image_sets, // thrust::device_ptr<float>(d_output_multi), absolute_value_functor() ); } else if(d_images1_multi){ // just memory copy (for NCC) cutilSafeCall( cudaMemcpy(d_output_multi, d_images1_multi, num_image_sets*image_size*sizeof(float), cudaMemcpyDeviceToDevice) ); } // summation (column-wise) float alpha = 1.0f, beta = 0.0f; cublasSgemv(cublasHandle, CUBLAS_OP_T, image_size, num_image_sets, &alpha, d_output_multi, image_size, d_OneVector, 1, &beta, d_temp_NCC, 1); // copy the results back to host // print_and_log("computeNormalizedCrossCorrelation_Sum(), d_temp_NCC: %d\n", d_temp_NCC); // for (int i = 0; i < num_image_sets; i++) print_and_log("%f, ", h_temp_NCC[i]); // print_and_log("\n"); //cutilSafeCall(cudaMemcpy(d_temp_NCC, h_temp_NCC, num_image_sets * sizeof(float), cudaMemcpyHostToDevice)); cutilSafeCall( cudaMemcpy(h_temp_NCC, d_temp_NCC, num_image_sets*sizeof(float), cudaMemcpyDeviceToHost) ); for(int i=0;i<num_image_sets;i++) NCC[i] += (double)(h_temp_NCC[i]); // float -> double conversion (result is accumulated. 
make sure zeroing the initial array) // for(int i=0;i<num_image_sets;i++){ print_and_log("NCC[%d]: %f\n", i, NCC[i]); } return true; } struct subtract_square_functor { __host__ __device__ float operator()(const float &x, const float &y) const { return (x-y)*(x-y); } }; __global__ void subtract_square_functor_kernel(float *d_input1, float *d_input2, float *d_output, int size) { const int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= size) return; d_output[index] = (d_input1[index]-d_input2[index]) * (d_input1[index] - d_input2[index]); } extern "C" bool computeGaussianGPUMulti(float *d_input_images, int *dim, int num_image_sets, float *d_output_mu, float *d_output_sigma_sq, float sigma , fComplex *d_kernel_spectrum, float *temp_padded, fComplex *temp_spectrum , cufftHandle fftPlanManyFwd, cufftHandle fftPlanManyInv ) { const unsigned int kernelY = getKernelSize(sigma), kernelX = kernelY; const unsigned int kernelH = 2*kernelY+1, kernelW = kernelH; const int fftH = snapTransformSize(dim[1] + kernelH - 1), dataH = dim[1]; const int fftW = snapTransformSize(dim[0] + kernelW - 1), dataW = dim[0]; const int spectrum_size = fftH*(fftW/2+1), padded_size = fftH*fftW; const int total_size = dim[0]*dim[1]*dim[2]*num_image_sets; // compute mean images // FFT/iFFT for all images simultaneously (fftPlanMany needs to be created for exactly the same number of images) // Note: the following FFT fails if input image contains NaN (even only one pixel!) // compute mu images padDataClampToBorder( temp_padded, d_input_images, fftH, fftW, dataH, dataW, kernelH, kernelW, kernelY, kernelX, dim[2]*num_image_sets ); cufftSafeCall( cufftExecR2C( fftPlanManyFwd, (cufftReal *)temp_padded, (cufftComplex *)temp_spectrum ) ); modulateAndNormalize(temp_spectrum, d_kernel_spectrum, fftH, fftW, dim[2]*num_image_sets, 1); cufftSafeCall( cufftExecC2R(fftPlanManyInv, (cufftComplex *)temp_spectrum, (cufftReal *)temp_padded) ); unPadDataClampToBorder(d_output_mu, temp_padded, fftH, fftW, dataH, dataW, kernelY, kernelX, dim[2]*num_image_sets); // compute sigma images (gaussian filter after mean subtraction and square. 
// thrust::transform( thrust::device_ptr<float>(d_input_images), thrust::device_ptr<float>(d_input_images)+total_size, // thrust::device_ptr<float>(d_output_mu), thrust::device_ptr<float>(d_output_sigma_sq), subtract_square_functor() ); const int b = BLOCK_SIZE_X * BLOCK_SIZE_Y * BLOCK_SIZE_Z; subtract_square_functor_kernel << <iDivUp(total_size, b), b >> > (d_input_images, d_output_mu, d_output_sigma_sq, total_size); padDataClampToBorder( temp_padded, d_output_sigma_sq, fftH, fftW, dataH, dataW, kernelH, kernelW, kernelY, kernelX, dim[2]*num_image_sets ); cufftSafeCall( cufftExecR2C( fftPlanManyFwd, (cufftReal *)temp_padded, (cufftComplex *)temp_spectrum ) ); // fft modulateAndNormalize(temp_spectrum, d_kernel_spectrum, fftH, fftW, dim[2]*num_image_sets, 1); cufftSafeCall( cufftExecC2R(fftPlanManyInv, (cufftComplex *)temp_spectrum, (cufftReal *)temp_padded) ); // inverse fft unPadDataClampToBorder(d_output_sigma_sq, temp_padded, fftH, fftW, dataH, dataW, kernelY, kernelX, dim[2]*num_image_sets); // print_and_log("computeGaussianGPUMulti(), dim: %dx%dx%d, fft size: %dx%d, kernelXY: %dx%d, kernelWH: %dx%d, spectrum_size: %d, padded_size: %d, fftPlanManyInv: %d, total_size: %d\n", // dim[0], dim[1], dim[2], fftW, fftH, kernelX, kernelY, kernelW, kernelH, spectrum_size, padded_size, fftPlanManyInv, total_size); return true; } struct sqrt_op { __host__ __device__ float operator()(const float& x) const { return x < 1e-8f ? 0.0f : sqrt(x); } }; struct checked_div { __host__ __device__ float operator()(const float& numerator, const float& denominator) const { return abs(denominator) < 1e-8f ? 0.0f : numerator / denominator; } }; extern "C" bool computeLocalContrastNormalizationGPUMulti(float *d_input_images, int *dim, int num_image_sets, float *d_output_centered, float *d_output_std, float *d_output, float sigma , fComplex *d_kernel_spectrum, float *temp_padded, fComplex *temp_spectrum , cufftHandle fftPlanManyFwd, cufftHandle fftPlanManyInv) { float *d_temp_std = d_output_std ? d_output_std : d_output; // d_output_std is just for debugging. set to NULL if no std image output is necessary const unsigned int kernelY = getKernelSize(sigma), kernelX = kernelY; const unsigned int kernelH = 2 * kernelY + 1, kernelW = kernelH; const int fftH = snapTransformSize(dim[1] + kernelH - 1), dataH = dim[1]; const int fftW = snapTransformSize(dim[0] + kernelW - 1), dataW = dim[0]; const int spectrum_size = fftH*(fftW / 2 + 1), padded_size = fftH*fftW; const int total_size = dim[0] * dim[1] * dim[2] * num_image_sets; // compute centered images // FFT/iFFT for all images simultaneously (fftPlanMany needs to be created for exactly the same number of images) // Note: the following FFT fails if input image contains NaN (even only one pixel!) 
// compute mu images padDataClampToBorder(temp_padded, d_input_images, fftH, fftW, dataH, dataW, kernelH, kernelW, kernelY, kernelX, dim[2] * num_image_sets); cufftSafeCall(cufftExecR2C(fftPlanManyFwd, (cufftReal *)temp_padded, (cufftComplex *)temp_spectrum)); modulateAndNormalize(temp_spectrum, d_kernel_spectrum, fftH, fftW, dim[2] * num_image_sets, 1); cufftSafeCall(cufftExecC2R(fftPlanManyInv, (cufftComplex *)temp_spectrum, (cufftReal *)temp_padded)); unPadDataClampToBorder(d_output_centered, temp_padded, fftH, fftW, dataH, dataW, kernelY, kernelX, dim[2] * num_image_sets); thrust::transform(thrust::device_ptr<float>(d_input_images), thrust::device_ptr<float>(d_input_images) + total_size, thrust::device_ptr<float>(d_output_centered), thrust::device_ptr<float>(d_output_centered), thrust::minus<float>()); // compute std images (gaussian filtering on squared centered images). thrust::transform(thrust::device_ptr<float>(d_output_centered), thrust::device_ptr<float>(d_output_centered) + total_size, thrust::device_ptr<float>(d_temp_std), thrust::square<float>()); padDataClampToBorder(temp_padded, d_temp_std, fftH, fftW, dataH, dataW, kernelH, kernelW, kernelY, kernelX, dim[2] * num_image_sets); cufftSafeCall(cufftExecR2C(fftPlanManyFwd, (cufftReal *)temp_padded, (cufftComplex *)temp_spectrum)); // fft modulateAndNormalize(temp_spectrum, d_kernel_spectrum, fftH, fftW, dim[2] * num_image_sets, 1); cufftSafeCall(cufftExecC2R(fftPlanManyInv, (cufftComplex *)temp_spectrum, (cufftReal *)temp_padded)); // inverse fft unPadDataClampToBorder(d_temp_std, temp_padded, fftH, fftW, dataH, dataW, kernelY, kernelX, dim[2] * num_image_sets); thrust::transform(thrust::device_ptr<float>(d_temp_std), thrust::device_ptr<float>(d_temp_std) + total_size, thrust::device_ptr<float>(d_temp_std), sqrt_op()); thrust::transform(thrust::device_ptr<float>(d_output_centered), thrust::device_ptr<float>(d_output_centered) + total_size, thrust::device_ptr<float>(d_temp_std), thrust::device_ptr<float>(d_output), checked_div()); // thrust::transform(thrust::device_ptr<float>(d_input_images), thrust::device_ptr<float>(d_input_images) + total_size, // thrust::device_ptr<float>(d_input_images), thrust::device_ptr<float>(d_output), checked_div()); // print_and_log("computeLocalContrastNormalizationGPUMulti(), dim: %dx%dx%d, fft size: %dx%d, kernelXY: %dx%d, kernelWH: %dx%d, spectrum_size: %d, padded_size: %d, fftPlanManyInv: %d, total_size: %d\n", // dim[0], dim[1], dim[2], fftW, fftH, kernelX, kernelY, kernelW, kernelH, spectrum_size, padded_size, fftPlanManyInv, total_size); return true; } extern "C" bool computeCovarianceGPUMulti(float *d_input_images1_multi, float *d_mu1_multi, float *d_input_images2_single, float *d_mu2_single, int *dim, int num_image_sets , float *d_output_images_multi, float sigma, fComplex *d_kernel_spectrum, float *temp_padded, fComplex *temp_spectrum , cufftHandle fftPlanManyFwd, cufftHandle fftPlanManyInv ) { const int b = BLOCK_SIZE_X * BLOCK_SIZE_Y * BLOCK_SIZE_Z; const int image_size = dim[0]*dim[1]*dim[2]; computeCovariance_kernel<<<iDivUp(image_size*num_image_sets, b), b>>> (d_input_images1_multi, d_mu1_multi, d_input_images2_single, d_mu2_single, d_output_images_multi, image_size, num_image_sets); const unsigned int kernelY = getKernelSize(sigma), kernelX = kernelY; const unsigned int kernelH = 2*kernelY+1, kernelW = kernelH; const int fftH = snapTransformSize(dim[1] + kernelH - 1), dataH = dim[1]; const int fftW = snapTransformSize(dim[0] + kernelW - 1), dataW = dim[0]; const int spectrum_size = 
fftH*(fftW/2+1), padded_size = fftH*fftW; // gaussian filter (local sum) padDataClampToBorder( temp_padded, d_output_images_multi, fftH, fftW, dataH, dataW, kernelH, kernelW, kernelY, kernelX, dim[2]*num_image_sets ); cufftSafeCall( cufftExecR2C( fftPlanManyFwd, (cufftReal *)temp_padded, (cufftComplex *)temp_spectrum ) ); // fft modulateAndNormalize(temp_spectrum, d_kernel_spectrum, fftH, fftW, dim[2]*num_image_sets, 1); // one kernel spectrum is repeatedly mulplied to a set of images cufftSafeCall( cufftExecC2R(fftPlanManyInv, (cufftComplex *)temp_spectrum, (cufftReal *)temp_padded) ); // inverse fft unPadDataClampToBorder(d_output_images_multi, temp_padded, fftH, fftW, dataH, dataW, kernelY, kernelX, dim[2]*num_image_sets); // print_and_log("computeCovarianceGPUMulti(), dim: %dx%dx%d, fft size: %dx%d, kernelXY: %dx%d, kernelWH: %dx%d, spectrum_size: %d, padded_size: %d, fftPlanManyInv: %d\n", // dim[0], dim[1], dim[2], fftW, fftH, kernelX, kernelY, kernelW, kernelH, spectrum_size, padded_size, fftPlanManyInv); return true; } __global__ void computeCovariance_kernel(float *d_img1_multi, float *d_mu1_multi, float *d_img2_single, float *d_mu2_single, float *d_output_multi, int image_size, int num_image_sets) { const int index = blockDim.x * blockIdx.x + threadIdx.x; if(index >= image_size*num_image_sets) return; const int index2 = index % image_size; d_output_multi[index] = (d_img2_single[index2]-d_mu2_single[index2])*(d_img1_multi[index]-d_mu1_multi[index]); } extern "C" value_type gauss(value_type x, value_type sigma) { // Gaussian return exp(-x*x/(2*sigma*sigma)) / (sigma*sqrt(2*M_PI)); } extern "C" value_type dgauss(value_type x, value_type sigma) { // first order derivative of Gaussian return -x * gauss(x,sigma) / (sigma*sigma); } extern "C" int getGaussianGradientKernel(const value_type sigma, const int kernel_size, value_type *h_kernel) { // we assume kernel_size is odd number, which is computed by something like the following // epsilon=1e-2; // halfsize=ceil(sigma*sqrt(-2*log(sqrt(2*pi)*sigma*epsilon))); // size=2*halfsize+1; int half_size = (kernel_size-1)/2; value_type sum = 0.0; for(int i=0;i<kernel_size;i++){ for(int j=0;j<kernel_size;j++){ int index = j*kernel_size+i; h_kernel[index] = gauss(i-half_size, sigma)*dgauss(j-half_size, sigma); sum += abs(h_kernel[index])*abs(h_kernel[index]); } } sum = sqrt(sum); value_type* end_ptr = &(h_kernel[kernel_size*kernel_size]); for(value_type *ptr = h_kernel;ptr != end_ptr;ptr++) *ptr /= sum; return true; } extern "C" int getGaussianKernel(const value_type sigma, const int kernel_size, value_type *h_kernel) { // we assume kernel_size is odd number, which is computed by something like the following value_type half_size = (kernel_size-1)/2; value_type sum = 0.0; for(int i=0;i<kernel_size;i++){ for(int j=0;j<kernel_size;j++){ int index = j*kernel_size+i; h_kernel[index] = gauss(sqrt((i-half_size)*(i-half_size)+(j-half_size)*(j-half_size)), sigma); sum += h_kernel[index]; } } value_type* end_ptr = &(h_kernel[kernel_size*kernel_size]); for(value_type *ptr = h_kernel;ptr != end_ptr;ptr++) *ptr /= sum; return true; } extern "C" bool computeGaussianGradientKernelSpectrum(float sigma, int x_dim, int y_dim, int z_dim, fComplex *x_kernel_spectrum, fComplex *y_kernel_spectrum, cufftHandle fftPlanFwd) { unsigned int halfsize = getKernelSize(sigma); unsigned int kernel_size=2*halfsize+1; // print_and_log("computeGaussianGradientKernel(), kernel_size: %d\n", kernel_size); float *x_kernel = new float[kernel_size*kernel_size], *y_kernel = new 
float[kernel_size*kernel_size]; // compute gaussian gradient kernel for x direction getGaussianGradientKernel(sigma, kernel_size, x_kernel); // compute gaussian gradient kernel for y direction (just transpose x kernel) for(unsigned int i=0;i<kernel_size;i++) for(unsigned int j=0;j<kernel_size;j++) y_kernel[j*kernel_size+i] = x_kernel[i*kernel_size+j]; computeKernelSpectrumGPU( x_kernel, y_dim, x_dim, kernel_size, kernel_size, halfsize, halfsize, x_kernel_spectrum, fftPlanFwd ); computeKernelSpectrumGPU( y_kernel, y_dim, x_dim, kernel_size, kernel_size, halfsize, halfsize, y_kernel_spectrum, fftPlanFwd ); delete[] x_kernel; delete[] y_kernel; return true; } extern "C" bool computeGaussianKernelSpectrum(float sigma, int x_dim, int y_dim, int z_dim, fComplex *kernel_spectrum, cufftHandle fftPlanFwd) { unsigned int halfsize = getKernelSize(sigma); unsigned int kernel_size=2*halfsize+1; //print_and_log("computeGaussianKernel(), kernel_size: %d, sigma: %f\n", kernel_size, sigma); float *kernel = new float[kernel_size*kernel_size]; // compute gaussian gradient kernel getGaussianKernel(sigma, kernel_size, kernel); //for (int i = 0; i < kernel_size*kernel_size; i++) print_and_log("%f,", kernel[i]); //print_and_log("\n"); computeKernelSpectrumGPU( kernel, y_dim, x_dim, kernel_size, kernel_size, halfsize, halfsize, kernel_spectrum, fftPlanFwd ); delete[] kernel; return true; } extern "C" void computeGradientInformation(float *d_grad1X, float *d_grad1Y, float *d_grad2X, float *d_grad2Y, float *d_output, int image_size , float *d_mask_weight, bool isSingleModality, double *gradient_information, int num_image_sets , cublasHandle_t cublasHandle, float *d_temp_SM, float *h_temp_SM, float *d_OneVector, float h_NormalizationFactor, int exclusive_norm) { const int b = BLOCK_SIZE_X * BLOCK_SIZE_Y * BLOCK_SIZE_Z; computeGradientInformation_kernel<<<iDivUp(image_size*num_image_sets, b), b>>> (d_grad1X, d_grad1Y, d_grad2X, d_grad2Y, d_output, image_size, isSingleModality, d_mask_weight, num_image_sets, exclusive_norm); // compute sum of each gradient information image // to parallelize the computation, we consider d_output as a matrix of (image_size x num_image_sets)' and multiply one vector of image_size element // computeImageSum_kernel<<<iDivUp(num_image_sets, b), b>>>(d_output, image_size, num_image_sets, d_OneVector, d_temp_SM); // cutilSafeCall( cudaMemcpy(h_temp_SM, d_output, num_image_sets*sizeof(float), cudaMemcpyDeviceToHost) ); // for(int i=0;i<num_image_sets;i++){ print_and_log("d_output[%d] = %f\n", i, h_temp_SM[i]); } float beta = 0.0f; cublasStatus_t status = cublasSgemv(cublasHandle, CUBLAS_OP_T, image_size, num_image_sets, &h_NormalizationFactor, d_output, image_size, d_OneVector, 1, &beta, d_temp_SM, 1); if (status != CUBLAS_STATUS_SUCCESS){ print_and_log("!!!! 
cublasSgemv execution error\n"); } cutilSafeCall( cudaMemcpy(h_temp_SM, d_temp_SM, num_image_sets*sizeof(float), cudaMemcpyDeviceToHost) ); for(int i=0;i<num_image_sets;i++) gradient_information[i] = (double)(h_temp_SM[i]); // float -> double conversion // for(int i=0;i<num_image_sets;i++){ print_and_log("GI[%d] = %f\n", i, gradient_information[i]); } } extern "C" void computeGradientInformation_StdNorm(float *d_grad1X, float *d_grad1Y, float *d_grad2X, float *d_grad2Y, float *d_output, int image_size , float *d_mask_weight, bool isSingleModality, double *gradient_information, int num_image_sets , cublasHandle_t cublasHandle, float *d_temp_SM, float *h_temp_SM, float *d_OneVector, float h_NormalizationFactor) { /* // Note: d_grad1X, d_grad1Y and d_output have multiple image sets, while d_grad2X and d_grad2Y have a single image set. cublasSdgmm(cublasHandle, CUBLAS_SIDE_LEFT, image_size, num_image_sets, d_grad1X, image_size, d_mask_weight, 1, d_images, image_size); const int b = BLOCK_SIZE_X * BLOCK_SIZE_Y * BLOCK_SIZE_Z; computeGradientInformation_kernel<<<iDivUp(image_size*num_image_sets, b), b>>> (d_grad1X, d_grad1Y, d_grad2X, d_grad2Y, d_output, image_size, isSingleModality, d_mask_weight, num_image_sets); float beta = 0.0f; cublasStatus_t status = cublasSgemv(cublasHandle, CUBLAS_OP_T, image_size, num_image_sets, &h_NormalizationFactor, d_output, image_size, d_OneVector, 1, &beta, d_temp_SM, 1); if (status != CUBLAS_STATUS_SUCCESS){ print_and_log("!!!! cublasSgemv execution error\n"); } cutilSafeCall( cudaMemcpy(h_temp_SM, d_temp_SM, num_image_sets*sizeof(float), cudaMemcpyDeviceToHost) ); for(int i=0;i<num_image_sets;i++) gradient_information[i] = (double)(h_temp_SM[i]); // float -> double conversion // for(int i=0;i<num_image_sets;i++){ print_and_log("GI[%d] = %f\n", i, gradient_information[i]); } */ } __global__ void computeGradientInformation_kernel(float *d_grad1X, float *d_grad1Y, float *d_grad2X, float *d_grad2Y, float *d_output, int image_size, bool isSingleModality , float *d_mask_weight, int num_image_sets, int exclusive_norm) { // image1 has multiple image sets, image2 has only one image set const int index = blockDim.x * blockIdx.x + threadIdx.x; if(index >= image_size*num_image_sets) return; const int index2 = index % image_size; const float epsilon = 1e-6; // compute inner product of gradient vectors float inner_product = (d_grad1X[index]*d_grad2X[index2]) + (d_grad1Y[index]*d_grad2Y[index2]); // compute product of norm float norm[2] = {sqrt( d_grad1X[index]*d_grad1X[index]+d_grad1Y[index]*d_grad1Y[index] ), sqrt( d_grad2X[index2]*d_grad2X[index2]+d_grad2Y[index2]*d_grad2Y[index2] )}; float norm_prod = norm[0] * norm[1]; float norm_angle_cos = inner_product / norm_prod; // compute angle between two gradient vector and compute weight // if norm_prod is small, weight is 0 float weight; if(norm_prod<epsilon) weight = 0; else if(norm_angle_cos>1.0 || norm_angle_cos<-1.0) weight = 1; else if(isSingleModality) weight = ( cos( acos(norm_angle_cos) ) + 1 ) / 2; // for single-modality registration (zero weight on 180 degrees) else weight = ( cos( 2*acos(norm_angle_cos) ) + 1 ) / 2; // for multi-modality registration (high weight on 180 degrees) // compute gradient angle if(d_mask_weight) weight *= d_mask_weight[index2]; // multiply mask (for the case when we exclude part of the image from contribution to the similarity metric) if(exclusive_norm >= 0) d_output[index] = weight * norm[exclusive_norm]; // if specified, we always use the norm else d_output[index] = weight * MIN( 
norm[0], norm[1] ); } extern "C" void computeSSIM(float *d_mu1_multi, float *d_sigma_sq1_multi, float *d_mu2_single, float *d_sigma_sq2_single, float *d_output_multi, int image_size , float *d_mask_weight, double *SSIM, int num_image_sets , cublasHandle_t cublasHandle, float *d_temp_SSIM, float *h_temp_SSIM, float *d_OneVector, float h_DynamicRange) { // image1 has multiple image sets, image2 has only one image set const int b = BLOCK_SIZE_X * BLOCK_SIZE_Y * BLOCK_SIZE_Z; // d_output needs to be set as the covariance image prior to this function call float K1 = 0.01, K2 = 0.03; float C1 = (K1*h_DynamicRange)*(K1*h_DynamicRange), C2 = (K2*h_DynamicRange)*(K2*h_DynamicRange); // print_and_log("computeSSIM(), dynamic range: %f, C1: %f, C2: %f\n", h_DynamicRange, C1, C2); computeSSIM_kernel<<<iDivUp(image_size*num_image_sets, b), b>>> (d_mu1_multi, d_sigma_sq1_multi, d_mu2_single, d_sigma_sq2_single, d_output_multi, image_size, d_mask_weight, num_image_sets, C1, C2); // compute sum of each SSIM image // to parallelize the computation, we consider d_output as a matrix of (image_size x num_image_sets)' and multiply one vector of image_size element // computeImageSum_kernel<<<iDivUp(num_image_sets, b), b>>>(d_output, image_size, num_image_sets, d_temp_SSIM); float alpha = 1.0f, beta = 0.0f; cublasStatus_t status = cublasSgemv(cublasHandle, CUBLAS_OP_T, image_size, num_image_sets, &alpha, d_output_multi, image_size, d_OneVector, 1, &beta, d_temp_SSIM, 1); if (status != CUBLAS_STATUS_SUCCESS){ print_and_log("!!!! cublasSgemv execution error\n"); } cutilSafeCall( cudaMemcpy(h_temp_SSIM, d_temp_SSIM, num_image_sets*sizeof(float), cudaMemcpyDeviceToHost) ); for(int i=0;i<num_image_sets;i++) SSIM[i] = (double)(h_temp_SSIM[i]); // float -> double conversion // for(int i=0;i<num_image_sets;i++){ print_and_log("SSIM[%d] = %f\n", i, SSIM[i]); } } __global__ void computeSSIM_kernel(float *d_mu1_multi, float *d_sigma_sq1_multi, float *d_mu2_single, float *d_sigma_sq2_single, float *d_output_multi, int image_size , float *d_mask_weight, int num_image_sets, float C1, float C2) { const int index = blockDim.x * blockIdx.x + threadIdx.x; if(index >= image_size*num_image_sets) return; const int index2 = index % image_size; d_output_multi[index] = ( (2*d_mu1_multi[index]*d_mu2_single[index2]+C1) * (2*d_output_multi[index] + C2) )/ ( (d_mu1_multi[index]*d_mu1_multi[index]+d_mu2_single[index2]*d_mu2_single[index2] + C1) * (d_sigma_sq1_multi[index] + d_sigma_sq2_single[index2] + C2) ); if(d_mask_weight) d_output_multi[index] *= d_mask_weight[index2]; // d_output[index] = (2*d_mu1[index]*d_mu2[index]+C1)/ (d_mu1[index]*d_mu1[index]+d_mu2[index]*d_mu2[index] + C1); // d_output[index] = (2*d_output[index] + C2)/ (d_sigma_sq1[index] + d_sigma_sq2[index] + C2); } __global__ void computeImageSum_kernel(float *d_image, int image_size, int num_image_sets, float *d_weight, float *d_sum) { const int image_indx = blockDim.x * blockIdx.x + threadIdx.x; if(image_indx >= num_image_sets) return; float sum = 0, *start_ptr = d_image+image_indx*image_size, *end_ptr = start_ptr+image_size; for(float *ptr=start_ptr;ptr!=end_ptr;ptr++) sum += (*ptr); d_sum[image_indx] = sum*d_weight[image_indx]; } struct p_log_p : public thrust::unary_function<float, float> { __host__ __device__ float operator()(float a){ return (a == 0) ? 
0 : -a*logf(a); } }; extern "C" float computeJointEntropy(float *d_data1, float *d_data2, int length, int *d_joint_hist, float *d_joint_pdf) { computeJointPDF(d_data1, d_data2, length, d_joint_hist, d_joint_pdf); thrust::device_vector<float> pdf_vec(MI_NUMBER_OF_BINS*MI_NUMBER_OF_BINS); thrust::copy_n( thrust::device_ptr<float>(d_joint_pdf), MI_NUMBER_OF_BINS*MI_NUMBER_OF_BINS, pdf_vec.begin() ); // make_transform_iterator could not be applied directly to device_ptr<float>, so we need to make a temporary copy... return thrust::reduce( make_transform_iterator( pdf_vec.begin(), p_log_p() ), make_transform_iterator( pdf_vec.end(), p_log_p() ), 0.0f, thrust::plus<float>() ); } extern "C" bool computeJointPDF(float *d_data1, float *d_data2, int length, int *d_joint_hist, float *d_joint_pdf) { const int block_size = BLOCK_SIZE_X * BLOCK_SIZE_Y * BLOCK_SIZE_Z; dim3 dimBlock(block_size, 1); int block_num = iDivUp(length, block_size); int size_sq = static_cast<int>(ceil( sqrtf(static_cast<float>(block_num)) )); dim3 dimDataGrid(size_sq, iDivUp(block_num, size_sq), 1); // print_and_log("computeJointPDF(), length of data: %d, size of joint_pdf: %d, sqrt of size: %f(%d), grid = (%d, %d), block = (%d, %d)\n" // , length, MI_NUMBER_OF_BINS*MI_NUMBER_OF_BINS, sqrtf(block_num), size_sq, dimDataGrid.x, dimDataGrid.y, dimBlock.x, dimBlock.y); cutilSafeCall( cudaMemset(d_joint_hist, 0, MI_NUMBER_OF_BINS*MI_NUMBER_OF_BINS*sizeof(int)) ); computeJointHistogramKernel<<< dimDataGrid, dimBlock, 0 >>> (d_data1, d_data2, length, d_joint_hist, MI_NUMBER_OF_BINS); // normalize histogram (divide each element by length) thrust::transform( thrust::device_ptr<int>(d_joint_hist), thrust::device_ptr<int>(d_joint_hist) + MI_NUMBER_OF_BINS*MI_NUMBER_OF_BINS, thrust::make_constant_iterator<float>(length), thrust::device_ptr<float>(d_joint_pdf), thrust::divides<float>()); return true; } extern "C" float computeEntropy(float *d_data, int length, int *d_hist, float *d_pdf) { computePDF(d_data, length, d_hist, d_pdf); thrust::device_vector<float> pdf_vec(MI_NUMBER_OF_BINS); thrust::copy_n( thrust::device_ptr<float>(d_pdf), MI_NUMBER_OF_BINS, pdf_vec.begin() ); // make_transform_iterator could not be applied directly to device_ptr<float>, so we need to make a temporary copy... 
return thrust::reduce( make_transform_iterator( pdf_vec.begin(), p_log_p() ), make_transform_iterator( pdf_vec.end(), p_log_p() ), 0.0f, thrust::plus<float>() ); } extern "C" bool computePDF(float *d_data, int length, int *d_hist, float *d_pdf) { const int block_size = BLOCK_SIZE_X * BLOCK_SIZE_Y * BLOCK_SIZE_Z; dim3 dimBlock(block_size, 1); int block_num = iDivUp(length, block_size); int size_sq = static_cast<int>(ceil( sqrtf(static_cast<float>(block_num)) )); dim3 dimDataGrid(size_sq, iDivUp(block_num, size_sq), 1); cutilSafeCall( cudaMemset(d_hist, 0, MI_NUMBER_OF_BINS*sizeof(int)) ); computeHistogramKernel<<< dimDataGrid, dimBlock, 0 >>> (d_data, length, d_hist, MI_NUMBER_OF_BINS); // normalize histogram (divide each element by length) thrust::transform( thrust::device_ptr<int>(d_hist), thrust::device_ptr<int>(d_hist) + MI_NUMBER_OF_BINS, thrust::make_constant_iterator<float>(length), thrust::device_ptr<float>(d_pdf), thrust::divides<float>()); return true; } __global__ void computeJointHistogramKernel(float *d_data1, float *d_data2, int length, int *d_joint_hist, int num_bins) { int index = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x; // blockDim.y = 1 if(index < length && index>=0){ int accumulate_index = floor(d_data1[index]*num_bins)*num_bins+floor(d_data2[index]*num_bins); // d_data has to be normalized to [0 1] in advance if(accumulate_index < num_bins*num_bins) atomicAdd(d_joint_hist+accumulate_index, 1); } } __global__ void computeHistogramKernel(float *d_data, int length, int *d_hist, int num_bins) { int index = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x; // blockDim.y = 1 if(index < length && index>=0){ int accumulate_index = floor(d_data[index]*num_bins); // d_data has to be normalized to [0 1] in advance if(accumulate_index < num_bins) atomicAdd(d_hist+accumulate_index, 1); } } __global__ void normalizeHistogramKernel(int *d_hist, float *d_pdf, int size, float denominator) { int index = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x; // blockDim.y = 1 if(index < size) d_pdf[index] = (float)d_hist[index]/denominator; } //////////////////////////////////////////////////////////////////////////////// /// Position convolution kernel center at (0, 0) in the image //////////////////////////////////////////////////////////////////////////////// __global__ void padKernel_kernel(float *d_Dst, float *d_Src, int fftH, int fftW, int kernelH, int kernelW, int kernelY, int kernelX) { const int y = blockDim.y * blockIdx.y + threadIdx.y; const int x = blockDim.x * blockIdx.x + threadIdx.x; if(y < kernelH && x < kernelW){ int ky = y - kernelY; if(ky < 0) ky += fftH; int kx = x - kernelX; if(kx < 0) kx += fftW; d_Dst[ky * fftW + kx] = LOAD_FLOAT(y * kernelW + x); } } //////////////////////////////////////////////////////////////////////////////// // Prepare data for "pad to border" addressing mode //////////////////////////////////////////////////////////////////////////////// __global__ void padDataClampToBorder_kernel(float *d_Dst, float *d_Src, int fftH, int fftW, int dataH, int dataW, int kernelH, int kernelW, int kernelY, int kernelX, int imageRepeat) { const int y = blockDim.y * blockIdx.y + threadIdx.y; const int total_x = blockDim.x * blockIdx.x + threadIdx.x; const int borderH = dataH + kernelY; const int borderW = dataW + kernelX; const int x = total_x % fftW; const int imageID = (total_x-x)/fftW; // division in 'int' if(imageID>=imageRepeat || imageID<0) return; if(y < fftH && x < fftW){ const int dy = (y<dataH) ? 
y : ( (y>=dataH && y<borderH) ? (dataH-1) : 0 ); const int dx = (x<dataW) ? x : ( (x>=dataW && x<borderW) ? (dataW-1) : 0 ); d_Dst[y * fftW + x + fftH*fftW*imageID] = LOAD_FLOAT(dy * dataW + dx + dataH*dataW*imageID); //d_Dst[y * fftW + x + fftH*fftW*imageID] = 0; // LOAD_FLOAT(dy * dataW + dx + dataH*dataW*imageID); } } __global__ void fillEdges_kernel(float *d_Dst, float *d_Ref, int fftH, int fftW, int dataH, int dataW, int kernelH, int kernelW, int kernelY, int kernelX, int imageRepeat, float threshold) { const int y = blockDim.y * blockIdx.y + threadIdx.y; const int total_x = blockDim.x * blockIdx.x + threadIdx.x; const int borderH = dataH + kernelY; const int borderW = dataW + kernelX; const int x = total_x % fftW; const int imageID = (total_x-x)/fftW; // division in 'int' if(imageID>=imageRepeat || imageID<0) return; if(y < fftH && x < fftW){ const int dy = (y<dataH) ? y : ( (y>=dataH && y<borderH) ? (dataH-1) : 0 ); const int dx = (x<dataW) ? x : ( (x>=dataW && x<borderW) ? (dataW-1) : 0 ); if(d_Ref[dy * dataW + dx + dataH*dataW*imageID]<threshold){ // if the pixel in the reference image is smaller than threshold, fill (kernelH x kernenW) pixels around the pixel for(int i=-kernelX;i<=kernelX;i++){ for(int j=-kernelY;j<=kernelY;j++){ if((y+j>=0) && (y+j<fftH) && (x+i>=0) && (x+i<fftW)) d_Dst[(y+j) * fftW + (x+i) + fftH*fftW*imageID] = 0; } } // for(int i=-0;i<=0;i++) for(int j=-0;j<=0;j++) d_Dst[(y+j) * fftW + (x+i) + fftH*fftW*imageID] = 0; } } } __global__ void unPadDataClampToBorder_kernel(float *d_Dst, float *d_Src, int fftH, int fftW, int dataH, int dataW, int kernelY, int kernelX, int imageRepeat) { const int y = blockDim.y * blockIdx.y + threadIdx.y; const int total_x = blockDim.x * blockIdx.x + threadIdx.x; const int borderH = dataH + kernelY; const int borderW = dataW + kernelX; const int x = total_x % dataW; const int imageID = (total_x-x)/dataW; // division in 'int' if(imageID>=imageRepeat || imageID<0) return; if(y < dataH && x < dataW){ const int dy = (y<dataH) ? y : ( (y>=dataH && y<borderH) ? (dataH-1) : 0 ); const int dx = (x<dataW) ? x : ( (x>=dataW && x<borderW) ? 
(dataW-1) : 0 ); d_Dst[dy*dataW+dx + dataH*dataW*imageID] = LOAD_FLOAT(y*fftW+x + fftH*fftW*imageID); } } //////////////////////////////////////////////////////////////////////////////// /// Position convolution kernel center at (0, 0) in the image //////////////////////////////////////////////////////////////////////////////// extern "C" void padKernel(float *d_Dst, float *d_Src, int fftH, int fftW, int kernelH, int kernelW, int kernelY, int kernelX) { assert(d_Src != d_Dst); dim3 threads(32, 8); dim3 grid(iDivUp(kernelW, threads.x), iDivUp(kernelH, threads.y)); SET_FLOAT_BASE; padKernel_kernel<<<grid, threads>>>(d_Dst, d_Src, fftH, fftW, kernelH, kernelW, kernelY, kernelX); } //////////////////////////////////////////////////////////////////////////////// // Prepare data for "pad to border" addressing mode //////////////////////////////////////////////////////////////////////////////// extern "C" void padDataClampToBorder(float *d_Dst, float *d_Src, int fftH, int fftW, int dataH, int dataW, int kernelW, int kernelH, int kernelY, int kernelX, int imageRepeat) { assert(d_Src != d_Dst); dim3 threads(32, 8); dim3 grid(iDivUp(fftW*imageRepeat, threads.x), iDivUp(fftH, threads.y)); // print_and_log("padDataClampToBorder, grid size: (%dx%d), block size: (%dx%d), fftH: %d, fftW: %d\n", grid.x, grid.y, threads.x, threads.y, fftH, fftW); // print_and_log("dataH: %d, dataW: %d, kernelH: %d, kernelW: %d, kernelY: %d, kernelX: %d, imageRepeat: %d\n", dataH, dataW, kernelH, kernelW, kernelY, kernelX, imageRepeat); SET_FLOAT_BASE; // When we use texture for small image (less than 256x256 maybe), this line causes crashes, for some reason... padDataClampToBorder_kernel<<<grid, threads>>>(d_Dst, d_Src, fftH, fftW, dataH, dataW, kernelH, kernelW, kernelY, kernelX, imageRepeat); } extern "C" void fillEdges(float *d_Dst, float *d_Ref, int fftH, int fftW, int dataH, int dataW, int kernelW, int kernelH, int kernelY, int kernelX, int imageRepeat, float threshold) { assert(d_Ref != d_Dst); dim3 threads(32, 8); dim3 grid(iDivUp(fftW*imageRepeat, threads.x), iDivUp(fftH, threads.y)); // print_and_log("fillEdges, grid size: (%dx%d), block size: (%dx%d)\n", grid.x, grid.y, threads.x, threads.y); SET_FLOAT_BASE; // When we use texture for small image (less than 256x256 maybe), this line causes crashes, for some reason... 
fillEdges_kernel<<<grid, threads>>>(d_Dst, d_Ref, fftH, fftW, dataH, dataW, kernelH, kernelW, kernelY, kernelX, imageRepeat, threshold); } extern "C" void unPadDataClampToBorder(float *d_Dst, float *d_Src, int fftH, int fftW, int dataH, int dataW, int kernelY, int kernelX, int imageRepeat) { dim3 threads(32, 8); dim3 grid(iDivUp(dataW*imageRepeat, threads.x), iDivUp(dataH, threads.y)); // print_and_log("unPadDataClampToBorder, grid size: (%dx%d), block size: (%dx%d)\n", grid.x, grid.y, threads.x, threads.y); SET_FLOAT_BASE; unPadDataClampToBorder_kernel<<<grid, threads>>>(d_Dst, d_Src, fftH, fftW, dataH, dataW, kernelY, kernelX, imageRepeat); } //////////////////////////////////////////////////////////////////////////////// // Modulate Fourier image of padded data by Fourier image of padded kernel // and normalize by FFT size //////////////////////////////////////////////////////////////////////////////// __global__ void modulateAndNormalize_kernel(fComplex *d_Dst, fComplex *d_Src, int dataSize, int imageRepeat, float c) { const int i = blockDim.x * blockIdx.x + threadIdx.x; if(i >= (dataSize*imageRepeat)) return; fComplex a = d_Src[i%dataSize]; // the same d_Src is repeatedly multiplied to d_Dst fComplex b = d_Dst[i]; mulAndScale(a, b, c); d_Dst[i] = a; } extern "C" void modulateAndNormalize(fComplex *d_Dst, fComplex *d_Src, int fftH, int fftW, int imageRepeat, int padding) { assert( fftW % 2 == 0 ); const int dataSize = fftH * (fftW / 2 + padding); const int b = 256; //BLOCK_SIZE_X * BLOCK_SIZE_Y * BLOCK_SIZE_Z; modulateAndNormalize_kernel<<<iDivUp(dataSize*imageRepeat, b), b>>>( d_Dst, d_Src, dataSize, imageRepeat, 1.0f / (float)(fftW * fftH) ); } extern "C" void normalizeImages(float *d_images, int size, float norm_max, float norm_min) { if(norm_max == 0 && norm_min == 0){ // compute maximum and minimum if needed ComputeMaxMin(d_images, size, &norm_max, &norm_min); // print_and_log("(min,max) = (%f, %f)\n", norm_min, norm_max); } // normalize images NormalizeData(d_images, size, norm_max, norm_min); } extern "C" void maskImages( float *d_images, float *d_mask_weight, int image_size, int num_image_sets, cublasHandle_t cublasHandle ) { cublasSdgmm(cublasHandle, CUBLAS_SIDE_LEFT, image_size, num_image_sets, d_images, image_size, d_mask_weight, 1, d_images, image_size); } extern "C" int countZeroPixels(float *d_image, int image_size) { return thrust::count( thrust::device_ptr<float>(d_image), thrust::device_ptr<float>(d_image)+image_size, 0.0f); }
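// ---------------------------------------------------------------------------
// [Editor's sketch - not part of the original source] A minimal, hedged usage
// sketch of the FFT-convolution helpers defined above, shown for a single
// dataW x dataH image. It mirrors the pad -> R2C FFT -> modulateAndNormalize
// -> C2R FFT -> unpad chain used by computeGaussianGPUMulti(). The function
// name gaussianFilterSingleSketch and the buffer names d_src / d_dst /
// d_padded / d_spectrum are illustrative assumptions; d_kernel_spectrum is
// assumed to be precomputed for the same fftH x fftW size (e.g. via
// computeGaussianKernelSpectrum), and the two cuFFT plans are assumed to be
// single-image R2C / C2R plans of that size.
static void gaussianFilterSingleSketch(float *d_src, float *d_dst,
                                       int dataW, int dataH, float sigma,
                                       fComplex *d_kernel_spectrum,
                                       cufftHandle fftPlanFwd, cufftHandle fftPlanInv)
{
    const int halfsize = (int)getKernelSize(sigma);            // kernel "radius"
    const int kernelW  = 2*halfsize + 1, kernelH = kernelW;    // square kernel
    const int fftW = snapTransformSize(dataW + kernelW - 1);   // padded FFT width
    const int fftH = snapTransformSize(dataH + kernelH - 1);   // padded FFT height

    float    *d_padded   = NULL;   // fftH x fftW real workspace
    fComplex *d_spectrum = NULL;   // fftH x (fftW/2+1) complex workspace
    cudaMalloc((void **)&d_padded,   fftH * fftW * sizeof(float));
    cudaMalloc((void **)&d_spectrum, fftH * (fftW/2 + 1) * sizeof(fComplex));

    // 1) clamp-to-border pad into the FFT-sized buffer (imageRepeat = 1)
    padDataClampToBorder(d_padded, d_src, fftH, fftW, dataH, dataW,
                         kernelH, kernelW, halfsize, halfsize, 1);
    // 2) forward FFT of the padded image
    cufftExecR2C(fftPlanFwd, (cufftReal *)d_padded, (cufftComplex *)d_spectrum);
    // 3) pointwise multiply by the kernel spectrum and normalize by fftW*fftH
    modulateAndNormalize(d_spectrum, d_kernel_spectrum, fftH, fftW, 1, 1);
    // 4) inverse FFT back to the padded spatial domain
    cufftExecC2R(fftPlanInv, (cufftComplex *)d_spectrum, (cufftReal *)d_padded);
    // 5) crop the dataH x dataW result back out of the padded buffer
    unPadDataClampToBorder(d_dst, d_padded, fftH, fftW, dataH, dataW,
                           halfsize, halfsize, 1);

    cudaFree(d_padded);
    cudaFree(d_spectrum);
}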
d0caa0e848db539c6e5a08df867e3291d3f473e6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stddef.h> #include <stdint.h> #include "model_gpu_utils.h" #include "mixed_tentusscher_myo_epi_2004_S2_2.h" extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) { print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium GPU model\n\n"); // execution configuration const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t size = num_volumes*sizeof(real); check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ)); check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t))); // Get the mapping array uint32_t *mapping = NULL; uint32_t *mapping_device = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; check_cuda_error(hipMalloc((void **)&mapping_device, extra_data_bytes_size)); check_cuda_error(hipMemcpy(mapping_device, mapping, extra_data_bytes_size, hipMemcpyHostToDevice)); } hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, mapping_device, num_volumes); check_cuda_error( hipPeekAtLastError() ); hipDeviceSynchronize(); check_cuda_error(hipFree(mapping_device)); return pitch_h; } extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) { // execution configuration const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t stim_currents_size = sizeof(real)*num_cells_to_solve; size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve; real *stims_currents_device; check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size)); check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice)); //the array cells to solve is passed when we are using and adapative mesh uint32_t *cells_to_solve_device = NULL; if(cells_to_solve != NULL) { check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size)); check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice)); } // Get the mapping array uint32_t *mapping = NULL; uint32_t *mapping_device = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; check_cuda_error(hipMalloc((void **)&mapping_device, extra_data_bytes_size)); check_cuda_error(hipMemcpy(mapping_device, mapping, extra_data_bytes_size, hipMemcpyHostToDevice)); } else { print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n"); } hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, mapping_device, num_cells_to_solve, num_steps); check_cuda_error( hipPeekAtLastError() ); check_cuda_error(hipFree(stims_currents_device)); if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device)); if(mapping_device) check_cuda_error(hipFree(mapping_device)); } __global__ void kernel_set_model_inital_conditions(real *sv, uint32_t *mapping, int num_volumes) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; if (threadID < num_volumes) { // Initial conditions for TenTusscher 2004 myocardium if (mapping[threadID] == 0) { // Default initial conditions /* *((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt *((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M *((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H *((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J *((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1 *((real * 
)((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2 *((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs *((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S *((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R *((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D *((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F *((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa *((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G *((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai *((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR *((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai *((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633}; for (uint32_t i = 0; i < NEQ; i++) *((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i]; } // Initial conditions for TenTusscher 2004 epicardium else { // Default initial conditions /* *((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt *((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M *((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H *((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J *((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1 *((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2 *((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs *((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S *((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R *((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D *((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F *((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa *((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G *((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai *((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR *((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai *((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.5236591284772,0.00130241284471985,0.778613483022969,0.778472769811598,0.000175875277625194,0.484626058693879,0.00294965177778795,0.999998333317616,1.94791112184908e-08,1.90234417053386e-05,0.999779558473224,1.00713872511970,0.999995965310622,4.41551215458988e-05,0.567040008888733,10.2464162625462,139.303734550690}; for (uint32_t i = 0; i < NEQ; i++) *((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i]; } } } // Solving the model for each cell in the tissue matrix ni x nj __global__ void solve_gpu(real dt, real *sv, real* stim_currents, uint32_t *cells_to_solve, uint32_t *mapping, uint32_t num_cells_to_solve, int num_steps) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; int sv_id; // Each thread solves one cell model if(threadID < num_cells_to_solve) { if(cells_to_solve) sv_id = cells_to_solve[threadID]; else sv_id = threadID; real rDY[NEQ]; for (int n = 0; n < num_steps; ++n) { if (mapping[sv_id] == 0) { RHS_gpu_myo(sv, rDY, stim_currents[threadID], sv_id, dt); for(int i = 0; i < NEQ; i++) { *((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char 
*) sv + pitch * i) + sv_id); } } else { RHS_gpu_epi(sv, rDY, stim_currents[threadID], sv_id, dt); for (int i = 0; i < NEQ; i++) { *((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id); } } } } } inline __device__ void RHS_gpu_myo (real *sv_, real *rDY_, real stim_current, int threadID_, real dt) { // State variables real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_); real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_); real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_); real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_); real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_); real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_); real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_); real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_); real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_); real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_); real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_); real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_); real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_); real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_); real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_); real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_); real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_); //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Myocardium cell real Gks=0.062; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] 
Myocardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; Irel=A*sd*sg; Ileak=0.00008f*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); 
bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; // [!] Myocardium cell R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; } inline __device__ void RHS_gpu_epi (real *sv_, real *rDY_, real stim_current, int threadID_, real dt) { // State variables real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_); real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_); real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_); real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_); real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_); real sxr2 = *((real*)((char*)sv_ + 
pitch * 5) + threadID_); real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_); real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_); real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_); real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_); real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_); real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_); real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_); real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_); real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_); real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_); real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_); //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Epicardium cell real Gks=0.245; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] Epicardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real parameters []={14.2751110459407,0.000197490405913840,0.000138093676576538,0.000459611951400222,0.248312214169369,0.146550920650185,0.141336894566835,4.51002424199619,0.0147942147525980,1.60874334855823,1098.91591518736,0.000497071049372500,0.357179450926053,0.0190817376935230,0.00515881032161095,3.63348608264117e-05}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real 
BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) 
{ AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
d0caa0e848db539c6e5a08df867e3291d3f473e6.cu
#include <stddef.h> #include <stdint.h> #include "model_gpu_utils.h" #include "mixed_tentusscher_myo_epi_2004_S2_2.h" extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) { print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium GPU model\n\n"); // execution configuration const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t size = num_volumes*sizeof(real); check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ)); check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t))); // Get the mapping array uint32_t *mapping = NULL; uint32_t *mapping_device = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; check_cuda_error(cudaMalloc((void **)&mapping_device, extra_data_bytes_size)); check_cuda_error(cudaMemcpy(mapping_device, mapping, extra_data_bytes_size, cudaMemcpyHostToDevice)); } kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, mapping_device, num_volumes); check_cuda_error( cudaPeekAtLastError() ); cudaDeviceSynchronize(); check_cuda_error(cudaFree(mapping_device)); return pitch_h; } extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) { // execution configuration const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t stim_currents_size = sizeof(real)*num_cells_to_solve; size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve; real *stims_currents_device; check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size)); check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice)); //the array cells to solve is passed when we are using and adapative mesh uint32_t *cells_to_solve_device = NULL; if(cells_to_solve != NULL) { check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size)); check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice)); } // Get the mapping array uint32_t *mapping = NULL; uint32_t *mapping_device = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; check_cuda_error(cudaMalloc((void **)&mapping_device, extra_data_bytes_size)); check_cuda_error(cudaMemcpy(mapping_device, mapping, extra_data_bytes_size, cudaMemcpyHostToDevice)); } else { print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n"); } solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, mapping_device, num_cells_to_solve, num_steps); check_cuda_error( cudaPeekAtLastError() ); check_cuda_error(cudaFree(stims_currents_device)); if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device)); if(mapping_device) check_cuda_error(cudaFree(mapping_device)); } __global__ void kernel_set_model_inital_conditions(real *sv, uint32_t *mapping, int num_volumes) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; if (threadID < num_volumes) { // Initial conditions for TenTusscher 2004 myocardium if (mapping[threadID] == 0) { // Default initial conditions /* *((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt *((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M *((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H *((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J *((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1 *((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2 *((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs *((real * )((char *) sv + pitch * 
7) + threadID) = 1.f; //S *((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R *((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D *((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F *((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa *((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G *((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai *((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR *((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai *((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633}; for (uint32_t i = 0; i < NEQ; i++) *((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i]; } // Initial conditions for TenTusscher 2004 epicardium else { // Default initial conditions /* *((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt *((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M *((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H *((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J *((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1 *((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2 *((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs *((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S *((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R *((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D *((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F *((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa *((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G *((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai *((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR *((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai *((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.5236591284772,0.00130241284471985,0.778613483022969,0.778472769811598,0.000175875277625194,0.484626058693879,0.00294965177778795,0.999998333317616,1.94791112184908e-08,1.90234417053386e-05,0.999779558473224,1.00713872511970,0.999995965310622,4.41551215458988e-05,0.567040008888733,10.2464162625462,139.303734550690}; for (uint32_t i = 0; i < NEQ; i++) *((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i]; } } } // Solving the model for each cell in the tissue matrix ni x nj __global__ void solve_gpu(real dt, real *sv, real* stim_currents, uint32_t *cells_to_solve, uint32_t *mapping, uint32_t num_cells_to_solve, int num_steps) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; int sv_id; // Each thread solves one cell model if(threadID < num_cells_to_solve) { if(cells_to_solve) sv_id = cells_to_solve[threadID]; else sv_id = threadID; real rDY[NEQ]; for (int n = 0; n < num_steps; ++n) { if (mapping[sv_id] == 0) { RHS_gpu_myo(sv, rDY, stim_currents[threadID], sv_id, dt); for(int i = 0; i < NEQ; i++) { *((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id); } } else { RHS_gpu_epi(sv, rDY, stim_currents[threadID], sv_id, dt); for (int i = 0; i < NEQ; i++) { *((real *) 
((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id); } } } } } inline __device__ void RHS_gpu_myo (real *sv_, real *rDY_, real stim_current, int threadID_, real dt) { // State variables real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_); real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_); real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_); real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_); real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_); real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_); real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_); real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_); real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_); real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_); real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_); real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_); real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_); real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_); real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_); real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_); real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_); //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Myocardium cell real Gks=0.062; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] 
Myocardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; Irel=A*sd*sg; Ileak=0.00008f*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); 
bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; // [!] Myocardium cell R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; } inline __device__ void RHS_gpu_epi (real *sv_, real *rDY_, real stim_current, int threadID_, real dt) { // State variables real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_); real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_); real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_); real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_); real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_); real sxr2 = *((real*)((char*)sv_ + 
pitch * 5) + threadID_); real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_); real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_); real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_); real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_); real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_); real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_); real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_); real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_); real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_); real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_); real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_); //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Epicardium cell real Gks=0.245; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] Epicardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real parameters []={14.2751110459407,0.000197490405913840,0.000138093676576538,0.000459611951400222,0.248312214169369,0.146550920650185,0.141336894566835,4.51002424199619,0.0147942147525980,1.60874334855823,1098.91591518736,0.000497071049372500,0.357179450926053,0.0190817376935230,0.00515881032161095,3.63348608264117e-05}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real 
BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) 
{ AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
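Aside (not part of the dataset rows): both TenTusscher files above access every state variable through the same pitched-memory idiom, *((real*)((char*)sv + pitch * i) + threadID), with the pitch obtained from cudaMallocPitch and copied to a __constant__ symbol. The following minimal CUDA sketch shows only that layout; NEQ_DEMO, the kernel name touch, and passing pitch as a kernel argument (instead of a __constant__) are simplifications invented for illustration.

#include <cstdio>
#include <cuda_runtime.h>

typedef float real;          // the rows above build with a configurable `real`
#define NEQ_DEMO 17          // 17 state variables per cell, as in the model above

__global__ void touch(real *sv, size_t pitch, int num_cells) {
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < num_cells) {
        for (int i = 0; i < NEQ_DEMO; i++) {
            // "row" i of the pitched allocation holds variable i for every cell; pitch is in bytes
            real *row = (real *)((char *)sv + pitch * i);
            row[id] = (real)i;
        }
    }
}

int main() {
    int num_cells = 256;
    real *sv = NULL;
    size_t pitch = 0;
    cudaMallocPitch((void **)&sv, &pitch, num_cells * sizeof(real), NEQ_DEMO);
    touch<<<(num_cells + 127) / 128, 128>>>(sv, pitch, num_cells);
    cudaDeviceSynchronize();
    printf("pitch = %zu bytes\n", pitch);
    cudaFree(sv);
    return 0;
}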
39cf9aad97a903db736b86fbcdde49271d3c9496.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

#define N 10

__global__ void add(int *a, int *b, int *c)
{
    int tid = blockIdx.x;
    if (tid < N)
        c[tid] = a[tid] + b[tid];
}

int main()
{
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;

    // allocate memory on the device
    hipMalloc((void**)&dev_a, N * sizeof(int));
    hipMalloc((void**)&dev_b, N * sizeof(int));
    hipMalloc((void**)&dev_c, N * sizeof(int));

    // Initialise data in the host's memory
    for (int i = 0; i < N; i++) {
        a[i] = -i;
        b[i] = i*i;
    }

    // copy over the data to the device
    hipMemcpy(dev_a, a, N * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(dev_b, b, N * sizeof(int), hipMemcpyHostToDevice);

    // launch the kernel
    add << <N, 1 >> > (dev_a, dev_b, dev_c);

    // copy the results back over to the host
    hipMemcpy(c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost);

    for (int i = 0; i < N; i++) {
        printf("%d + %d = %d\n", a[i], b[i], c[i]);
    }

    return 0;
}
39cf9aad97a903db736b86fbcdde49271d3c9496.cu
#include <stdio.h>

#define N 10

__global__ void add(int *a, int *b, int *c)
{
    int tid = blockIdx.x;
    if (tid < N)
        c[tid] = a[tid] + b[tid];
}

int main()
{
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;

    // allocate memory on the device
    cudaMalloc((void**)&dev_a, N * sizeof(int));
    cudaMalloc((void**)&dev_b, N * sizeof(int));
    cudaMalloc((void**)&dev_c, N * sizeof(int));

    // Initialise data in the host's memory
    for (int i = 0; i < N; i++) {
        a[i] = -i;
        b[i] = i*i;
    }

    // copy over the data to the device
    cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);

    // launch the kernel
    add << <N, 1 >> > (dev_a, dev_b, dev_c);

    // copy the results back over to the host
    cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost);

    for (int i = 0; i < N; i++) {
        printf("%d + %d = %d\n", a[i], b[i], c[i]);
    }

    return 0;
}
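Aside (not part of either row): this vector-add pair is the smallest example in the set, and in both versions it ignores every runtime return code and never frees dev_a, dev_b or dev_c. A minimal sketch of the usual hardening follows; the CHECK macro is an illustration and is not defined anywhere in the dataset.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CHECK(call)                                                \
    do {                                                           \
        cudaError_t err_ = (call);                                 \
        if (err_ != cudaSuccess) {                                 \
            fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,     \
                    cudaGetErrorString(err_));                     \
            exit(EXIT_FAILURE);                                    \
        }                                                          \
    } while (0)

int main() {
    int *dev_a = NULL;
    CHECK(cudaMalloc((void **)&dev_a, 10 * sizeof(int)));
    // ... host-to-device copies and the add<<<...>>> launch would go here, as in the row above ...
    CHECK(cudaGetLastError());   // catches launch-configuration errors
    CHECK(cudaFree(dev_a));      // neither version of the row frees its device buffers
    return 0;
}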
72f57a7ec429c2036e751984dc032dfed52ba1f3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <paddle/fluid/platform/device_context.h> #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/operators/partial_sum_op.h" #include "paddle/fluid/platform/float16.h" namespace plat = paddle::platform; namespace paddle { namespace operators { #define CEIL_DIV(x, y) (((x) + (y)-1) / (y)) using LoDTensor = framework::LoDTensor; using Tensor = framework::Tensor; template <class T> __global__ void SumArrayPartialCUDAKernel(T **in, T *out, int64_t lod_length, size_t in_size, int64_t start_index, int64_t length, int64_t row_length) { int id = blockIdx.x * blockDim.x + threadIdx.x; while (id < lod_length) { T total = static_cast<T>(0); int b_id = id / length; int b_offset = id % length; for (int i = 0; i < in_size; ++i) { const T *tmp = in[i]; if (tmp) { total += tmp[start_index + b_id * row_length + b_offset]; } } out[id] = total; id += blockDim.x * gridDim.x; } } template <class T> __global__ void PartialSumGradCUDAKernel(T **res_grad, const T *out_grad, int64_t lod_length, size_t in_size, int64_t start_index, int64_t length, int64_t row_length) { int id = blockIdx.x * blockDim.x + threadIdx.x; while (id < lod_length) { T total = static_cast<T>(0); int b_id = id / length; int b_offset = id % length; for (int i = 0; i < in_size; ++i) { T *tmp = res_grad[i]; tmp[start_index + b_id * row_length + b_offset] = out_grad[i]; } id += blockDim.x * gridDim.x; } } template <typename T> class PartialSumOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { auto in_vars = ctx.MultiInput<Tensor>("X"); Tensor *out = ctx.Output<Tensor>("Out"); PADDLE_ENFORCE_EQ( in_vars[0] != nullptr, true, platform::errors::InvalidArgument("The input should not be null.")); auto place = ctx.GetPlace(); // GPUPlace only now auto start_index = ctx.Attr<int>("start_index"); auto length = ctx.Attr<int>("length"); auto batch_size = in_vars[0]->dims()[0]; if (length == -1) { length = in_vars[0]->dims()[1] - start_index; } constexpr size_t theory_sm_threads = 1024; auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); auto stream = dev_ctx.stream(); auto max_threads = dev_ctx.GetMaxPhysicalThreadCount(); auto sm_count = max_threads / theory_sm_threads; size_t tile_size = 0; dim3 grids; dim3 blocks; auto ComputeKernelParameter = [&](size_t length) { if (length >= max_threads) tile_size = 1024; else if (length < max_threads && length > sm_count * 128) tile_size = 512; else if (length <= sm_count * 128) tile_size = 256; grids = dim3(CEIL_DIV(length, tile_size), 1, 1); blocks = dim3(tile_size, 1, 1); }; auto lod_length = length * batch_size; auto row_length = in_vars[0]->dims()[1]; auto in_num = in_vars.size(); std::vector<const T *> in_data; for (int i = 0; i < in_num; ++i) { in_data.emplace_back(in_vars[i]->data<T>()); 
} if (!in_data.empty()) { auto tmp_in_array = memory::Alloc(dev_ctx, in_data.size() * sizeof(T *)); memory::Copy(dev_ctx.GetPlace(), tmp_in_array->ptr(), platform::CPUPlace(), reinterpret_cast<void *>(in_data.data()), in_data.size() * sizeof(T *), dev_ctx.stream()); T **in_array_data = reinterpret_cast<T **>(tmp_in_array->ptr()); ComputeKernelParameter(lod_length); hipLaunchKernelGGL(( SumArrayPartialCUDAKernel<T>), dim3(grids), dim3(blocks), 0, stream, in_array_data, out->data<T>(), lod_length, in_data.size(), start_index, length, row_length); } } }; template <typename T> class PartialSumGradOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { const Tensor *out_grad = ctx.Input<Tensor>(framework::GradVarName("Out")); auto ins = ctx.MultiInput<LoDTensor>("X"); auto outs = ctx.MultiOutput<LoDTensor>(framework::GradVarName("X")); PADDLE_ENFORCE_EQ( ins[0] != nullptr, true, platform::errors::InvalidArgument("The input should not be null.")); auto start_index = ctx.Attr<int>("start_index"); auto length = ctx.Attr<int>("length"); if (length == -1) { length = ins[0]->dims()[1] - start_index; } // initialize auto &place = *ctx.template device_context<platform::CUDADeviceContext>() .eigen_device(); for (size_t i = 0; i < outs.size(); ++i) { outs[i]->mutable_data<T>(ctx.GetPlace()); auto dxt = framework::EigenVector<T>::Flatten(*outs[i]); dxt.device(place) = dxt.constant(static_cast<T>(0)); } auto batch_size = ins[0]->dims()[0]; if (length == -1) { length = ins[0]->dims()[1] - start_index; } auto lod_length = length * batch_size; auto row_length = ins[0]->dims()[1]; auto out_num = outs.size(); constexpr size_t theory_sm_threads = 1024; auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); auto stream = dev_ctx.stream(); auto max_threads = dev_ctx.GetMaxPhysicalThreadCount(); auto sm_count = max_threads / theory_sm_threads; size_t tile_size = 0; dim3 grids; dim3 blocks; auto ComputeKernelParameter = [&](size_t length) { if (length >= max_threads) tile_size = 1024; else if (length < max_threads && length > sm_count * 128) tile_size = 512; else if (length <= sm_count * 128) tile_size = 256; grids = dim3(CEIL_DIV(length, tile_size), 1, 1); blocks = dim3(tile_size, 1, 1); }; std::vector<const T *> out_data; for (int i = 0; i < out_num; ++i) { out_data.emplace_back(outs[i]->data<T>()); } if (!out_data.empty()) { auto tmp_out_array = memory::Alloc(dev_ctx, out_data.size() * sizeof(T *)); memory::Copy(dev_ctx.GetPlace(), tmp_out_array->ptr(), platform::CPUPlace(), reinterpret_cast<void *>(out_data.data()), out_data.size() * sizeof(T *), dev_ctx.stream()); T **out_grad_data = reinterpret_cast<T **>(tmp_out_array->ptr()); ComputeKernelParameter(lod_length); hipLaunchKernelGGL(( PartialSumGradCUDAKernel<T>), dim3(grids), dim3(blocks), 0, stream, out_grad_data, out_grad->data<T>(), lod_length, out_data.size(), start_index, length, row_length); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(partial_sum, ops::PartialSumOpCUDAKernel<float>, ops::PartialSumOpCUDAKernel<double>, ops::PartialSumOpCUDAKernel<int>, ops::PartialSumOpCUDAKernel<int64_t>, ops::PartialSumOpCUDAKernel<plat::float16>); REGISTER_OP_CUDA_KERNEL(partial_sum_grad, ops::PartialSumGradOpCUDAKernel<float>, ops::PartialSumGradOpCUDAKernel<double>, ops::PartialSumGradOpCUDAKernel<int>, ops::PartialSumGradOpCUDAKernel<int64_t>, ops::PartialSumGradOpCUDAKernel<plat::float16>);
72f57a7ec429c2036e751984dc032dfed52ba1f3.cu
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <paddle/fluid/platform/device_context.h> #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/operators/partial_sum_op.h" #include "paddle/fluid/platform/float16.h" namespace plat = paddle::platform; namespace paddle { namespace operators { #define CEIL_DIV(x, y) (((x) + (y)-1) / (y)) using LoDTensor = framework::LoDTensor; using Tensor = framework::Tensor; template <class T> __global__ void SumArrayPartialCUDAKernel(T **in, T *out, int64_t lod_length, size_t in_size, int64_t start_index, int64_t length, int64_t row_length) { int id = blockIdx.x * blockDim.x + threadIdx.x; while (id < lod_length) { T total = static_cast<T>(0); int b_id = id / length; int b_offset = id % length; for (int i = 0; i < in_size; ++i) { const T *tmp = in[i]; if (tmp) { total += tmp[start_index + b_id * row_length + b_offset]; } } out[id] = total; id += blockDim.x * gridDim.x; } } template <class T> __global__ void PartialSumGradCUDAKernel(T **res_grad, const T *out_grad, int64_t lod_length, size_t in_size, int64_t start_index, int64_t length, int64_t row_length) { int id = blockIdx.x * blockDim.x + threadIdx.x; while (id < lod_length) { T total = static_cast<T>(0); int b_id = id / length; int b_offset = id % length; for (int i = 0; i < in_size; ++i) { T *tmp = res_grad[i]; tmp[start_index + b_id * row_length + b_offset] = out_grad[i]; } id += blockDim.x * gridDim.x; } } template <typename T> class PartialSumOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { auto in_vars = ctx.MultiInput<Tensor>("X"); Tensor *out = ctx.Output<Tensor>("Out"); PADDLE_ENFORCE_EQ( in_vars[0] != nullptr, true, platform::errors::InvalidArgument("The input should not be null.")); auto place = ctx.GetPlace(); // GPUPlace only now auto start_index = ctx.Attr<int>("start_index"); auto length = ctx.Attr<int>("length"); auto batch_size = in_vars[0]->dims()[0]; if (length == -1) { length = in_vars[0]->dims()[1] - start_index; } constexpr size_t theory_sm_threads = 1024; auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); auto stream = dev_ctx.stream(); auto max_threads = dev_ctx.GetMaxPhysicalThreadCount(); auto sm_count = max_threads / theory_sm_threads; size_t tile_size = 0; dim3 grids; dim3 blocks; auto ComputeKernelParameter = [&](size_t length) { if (length >= max_threads) tile_size = 1024; else if (length < max_threads && length > sm_count * 128) tile_size = 512; else if (length <= sm_count * 128) tile_size = 256; grids = dim3(CEIL_DIV(length, tile_size), 1, 1); blocks = dim3(tile_size, 1, 1); }; auto lod_length = length * batch_size; auto row_length = in_vars[0]->dims()[1]; auto in_num = in_vars.size(); std::vector<const T *> in_data; for (int i = 0; i < in_num; ++i) { in_data.emplace_back(in_vars[i]->data<T>()); } if (!in_data.empty()) { auto tmp_in_array = memory::Alloc(dev_ctx, in_data.size() * 
sizeof(T *)); memory::Copy(dev_ctx.GetPlace(), tmp_in_array->ptr(), platform::CPUPlace(), reinterpret_cast<void *>(in_data.data()), in_data.size() * sizeof(T *), dev_ctx.stream()); T **in_array_data = reinterpret_cast<T **>(tmp_in_array->ptr()); ComputeKernelParameter(lod_length); SumArrayPartialCUDAKernel<T><<<grids, blocks, 0, stream>>>( in_array_data, out->data<T>(), lod_length, in_data.size(), start_index, length, row_length); } } }; template <typename T> class PartialSumGradOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { const Tensor *out_grad = ctx.Input<Tensor>(framework::GradVarName("Out")); auto ins = ctx.MultiInput<LoDTensor>("X"); auto outs = ctx.MultiOutput<LoDTensor>(framework::GradVarName("X")); PADDLE_ENFORCE_EQ( ins[0] != nullptr, true, platform::errors::InvalidArgument("The input should not be null.")); auto start_index = ctx.Attr<int>("start_index"); auto length = ctx.Attr<int>("length"); if (length == -1) { length = ins[0]->dims()[1] - start_index; } // initialize auto &place = *ctx.template device_context<platform::CUDADeviceContext>() .eigen_device(); for (size_t i = 0; i < outs.size(); ++i) { outs[i]->mutable_data<T>(ctx.GetPlace()); auto dxt = framework::EigenVector<T>::Flatten(*outs[i]); dxt.device(place) = dxt.constant(static_cast<T>(0)); } auto batch_size = ins[0]->dims()[0]; if (length == -1) { length = ins[0]->dims()[1] - start_index; } auto lod_length = length * batch_size; auto row_length = ins[0]->dims()[1]; auto out_num = outs.size(); constexpr size_t theory_sm_threads = 1024; auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); auto stream = dev_ctx.stream(); auto max_threads = dev_ctx.GetMaxPhysicalThreadCount(); auto sm_count = max_threads / theory_sm_threads; size_t tile_size = 0; dim3 grids; dim3 blocks; auto ComputeKernelParameter = [&](size_t length) { if (length >= max_threads) tile_size = 1024; else if (length < max_threads && length > sm_count * 128) tile_size = 512; else if (length <= sm_count * 128) tile_size = 256; grids = dim3(CEIL_DIV(length, tile_size), 1, 1); blocks = dim3(tile_size, 1, 1); }; std::vector<const T *> out_data; for (int i = 0; i < out_num; ++i) { out_data.emplace_back(outs[i]->data<T>()); } if (!out_data.empty()) { auto tmp_out_array = memory::Alloc(dev_ctx, out_data.size() * sizeof(T *)); memory::Copy(dev_ctx.GetPlace(), tmp_out_array->ptr(), platform::CPUPlace(), reinterpret_cast<void *>(out_data.data()), out_data.size() * sizeof(T *), dev_ctx.stream()); T **out_grad_data = reinterpret_cast<T **>(tmp_out_array->ptr()); ComputeKernelParameter(lod_length); PartialSumGradCUDAKernel<T><<<grids, blocks, 0, stream>>>( out_grad_data, out_grad->data<T>(), lod_length, out_data.size(), start_index, length, row_length); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(partial_sum, ops::PartialSumOpCUDAKernel<float>, ops::PartialSumOpCUDAKernel<double>, ops::PartialSumOpCUDAKernel<int>, ops::PartialSumOpCUDAKernel<int64_t>, ops::PartialSumOpCUDAKernel<plat::float16>); REGISTER_OP_CUDA_KERNEL(partial_sum_grad, ops::PartialSumGradOpCUDAKernel<float>, ops::PartialSumGradOpCUDAKernel<double>, ops::PartialSumGradOpCUDAKernel<int>, ops::PartialSumGradOpCUDAKernel<int64_t>, ops::PartialSumGradOpCUDAKernel<plat::float16>);
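Aside (illustration only): in the partial_sum pair above, the only structural change hipify makes is rewriting each templated launch from Kernel<T><<<grids, blocks, 0, stream>>>(args...) to hipLaunchKernelGGL((Kernel<T>), dim3(grids), dim3(blocks), 0, stream, args...); everything else is a one-to-one rename. The self-contained CUDA sketch below shows that launch shape on a toy kernel; the scale kernel and launch_scale helper are invented names, not part of the operator.

#include <cuda_runtime.h>

template <class T>
__global__ void scale(T *x, T factor, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= factor;
}

void launch_scale(float *d_x, int n, cudaStream_t stream) {
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
    // CUDA triple-chevron form, as in the .cu row:
    scale<float><<<grid, block, 0, stream>>>(d_x, 2.0f, n);
    // Equivalent HIP form produced by hipify, as in the .hip row:
    //   hipLaunchKernelGGL((scale<float>), dim3(grid), dim3(block), 0, stream, d_x, 2.0f, n);
}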
8a80c69f0458cf04562f417f5900041a1b0d2052.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @precisions mixed zc -> ds */ #include "magma_internal.h" #define NB 64 // TODO check precision, as in zlag2c? __global__ void zclaswp_kernel( int n, magmaDoubleComplex *A, int lda, magmaFloatComplex *SA, int ldsa, int m, const magma_int_t *ipiv) { int ind = blockIdx.x*NB + threadIdx.x; int newind; magmaFloatComplex res; if (ind < m) { SA += ind; ipiv += ind; newind = ipiv[0]; for (int i=0; i < n; i++) { res = MAGMA_C_MAKE( (float)MAGMA_Z_REAL( A[newind+i*lda] ), (float)MAGMA_Z_IMAG( A[newind+i*lda] )); SA[i*ldsa] = res; } } } __global__ void zclaswp_inv_kernel( int n, magmaDoubleComplex *A, int lda, magmaFloatComplex *SA, int ldsa, int m, const magma_int_t *ipiv) { int ind = blockIdx.x*NB + threadIdx.x; int newind; magmaDoubleComplex res; if (ind < m) { A += ind; ipiv += ind; newind = ipiv[0]; for (int i=0; i < n; i++) { res = MAGMA_Z_MAKE( (double)MAGMA_C_REAL( SA[newind+i*ldsa] ), (double)MAGMA_C_IMAG( SA[newind+i*ldsa] )); A[i*lda] = res; } } } /***************************************************************************//** Purpose ------- Row i of A is cast to single precision in row ipiv[i] of SA (incx > 0), or row i of SA is cast to double precision in row ipiv[i] of A (incx < 0), for 0 <= i < M. @param[in] n INTEGER. On entry, N specifies the number of columns of the matrix A. @param[in,out] A DOUBLE PRECISION array on the GPU, dimension (LDA,N) On entry, the M-by-N matrix to which the row interchanges will be applied. TODO update docs @param[in] lda INTEGER. LDA specifies the leading dimension of A. @param[in,out] SA REAL array on the GPU, dimension (LDSA,N) On exit, the single precision, permuted matrix. TODO update docs @param[in] ldsa INTEGER. LDSA specifies the leading dimension of SA. @param[in] m The number of rows to be interchanged. @param[in] ipiv INTEGER array on the GPU, dimension (M) The vector of pivot indices. Row i of A is cast to single precision in row ipiv[i] of SA, for 0 <= i < m. @param[in] incx INTEGER If INCX is negative, the pivots are applied in reverse order, otherwise in straight-forward order. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_laswp *******************************************************************************/ extern "C" void magmablas_zclaswp_q( magma_int_t n, magmaDoubleComplex_ptr A, magma_int_t lda, magmaFloatComplex_ptr SA, magma_int_t ldsa, magma_int_t m, const magma_int_t *ipiv, magma_int_t incx, magma_queue_t queue ) { int blocks = magma_ceildiv( m, NB ); dim3 grid( blocks ); dim3 threads( NB ); if (incx >= 0) hipLaunchKernelGGL(( zclaswp_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, A, lda, SA, ldsa, m, ipiv); else hipLaunchKernelGGL(( zclaswp_inv_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, A, lda, SA, ldsa, m, ipiv); }
8a80c69f0458cf04562f417f5900041a1b0d2052.cu
/* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @precisions mixed zc -> ds */ #include "magma_internal.h" #define NB 64 // TODO check precision, as in zlag2c? __global__ void zclaswp_kernel( int n, magmaDoubleComplex *A, int lda, magmaFloatComplex *SA, int ldsa, int m, const magma_int_t *ipiv) { int ind = blockIdx.x*NB + threadIdx.x; int newind; magmaFloatComplex res; if (ind < m) { SA += ind; ipiv += ind; newind = ipiv[0]; for (int i=0; i < n; i++) { res = MAGMA_C_MAKE( (float)MAGMA_Z_REAL( A[newind+i*lda] ), (float)MAGMA_Z_IMAG( A[newind+i*lda] )); SA[i*ldsa] = res; } } } __global__ void zclaswp_inv_kernel( int n, magmaDoubleComplex *A, int lda, magmaFloatComplex *SA, int ldsa, int m, const magma_int_t *ipiv) { int ind = blockIdx.x*NB + threadIdx.x; int newind; magmaDoubleComplex res; if (ind < m) { A += ind; ipiv += ind; newind = ipiv[0]; for (int i=0; i < n; i++) { res = MAGMA_Z_MAKE( (double)MAGMA_C_REAL( SA[newind+i*ldsa] ), (double)MAGMA_C_IMAG( SA[newind+i*ldsa] )); A[i*lda] = res; } } } /***************************************************************************//** Purpose ------- Row i of A is cast to single precision in row ipiv[i] of SA (incx > 0), or row i of SA is cast to double precision in row ipiv[i] of A (incx < 0), for 0 <= i < M. @param[in] n INTEGER. On entry, N specifies the number of columns of the matrix A. @param[in,out] A DOUBLE PRECISION array on the GPU, dimension (LDA,N) On entry, the M-by-N matrix to which the row interchanges will be applied. TODO update docs @param[in] lda INTEGER. LDA specifies the leading dimension of A. @param[in,out] SA REAL array on the GPU, dimension (LDSA,N) On exit, the single precision, permuted matrix. TODO update docs @param[in] ldsa INTEGER. LDSA specifies the leading dimension of SA. @param[in] m The number of rows to be interchanged. @param[in] ipiv INTEGER array on the GPU, dimension (M) The vector of pivot indices. Row i of A is cast to single precision in row ipiv[i] of SA, for 0 <= i < m. @param[in] incx INTEGER If INCX is negative, the pivots are applied in reverse order, otherwise in straight-forward order. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_laswp *******************************************************************************/ extern "C" void magmablas_zclaswp_q( magma_int_t n, magmaDoubleComplex_ptr A, magma_int_t lda, magmaFloatComplex_ptr SA, magma_int_t ldsa, magma_int_t m, const magma_int_t *ipiv, magma_int_t incx, magma_queue_t queue ) { int blocks = magma_ceildiv( m, NB ); dim3 grid( blocks ); dim3 threads( NB ); if (incx >= 0) zclaswp_kernel<<< grid, threads, 0, queue->cuda_stream() >>>(n, A, lda, SA, ldsa, m, ipiv); else zclaswp_inv_kernel<<< grid, threads, 0, queue->cuda_stream() >>>(n, A, lda, SA, ldsa, m, ipiv); }
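Aside (an assumption for illustration, not MAGMA code): what zclaswp_kernel computes for incx >= 0 can be written as a short host-side reference. The sketch below uses plain double/float instead of magmaDoubleComplex/magmaFloatComplex, and keeps the column-major i*lda / i*ldsa strides from the kernel.

void claswp_reference(int n, const double *A, int lda,
                      float *SA, int ldsa, int m, const int *ipiv) {
    for (int ind = 0; ind < m; ++ind) {
        int newind = ipiv[ind];                 // pivot row to read from A
        for (int i = 0; i < n; ++i) {
            // SA(ind, i) = (float) A(newind, i): row ind of SA receives row ipiv[ind] of A,
            // cast to single precision, matching the loop body of zclaswp_kernel
            SA[ind + i * ldsa] = (float)A[newind + i * lda];
        }
    }
}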
dc481c52664eb0284fc68f7074b5291e6cfd1430.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _UNDO_CHEN_H_
#define _UNDO_CHEN_H_

#include <stdint.h>

__device__ inline unsigned char undo_chen_byte_gpu(const unsigned char c,
                                                   const unsigned char cp,
                                                   const unsigned char s) {
  return (unsigned char) (((int) (c ^ s) - (int) cp) % 256);
}

/**
 * Reverses Chen transformation (b_i = (b'_i ^ x'_i - b'_{i-1}) % 256.
 * A block processes a row of the input image.
 * A thread process a complete pixel consisting of three components (R, G, B).
 */
__global__ void undo_chen_gpu(unsigned char *in, unsigned char *out,
                              const size_t pixels_per_row) {
  // Shared memory layout:
  // (R,G,B),(R,G,B),...,(R,G,B)
  // Each thread processes one pixel, i.e., three bytes.
  // First element of array is used to store right-most pixel from previous row.
  extern __shared__ unsigned char s_row[];

  const int row = blockIdx.x;
  const size_t off = row * 3 * pixels_per_row;

  // check if thread is out-of-bounds
  if (threadIdx.x >= pixels_per_row) {
    return;
  }

  // fetch row from global memory and store it in shared memory
  // fetch first third of row, then second third, and finally third third
  // from global memory.
  #pragma unroll 3
  for (int i = 0; i < 3; i++) {
    // leave space for previous row pixel
    s_row[3 + i * pixels_per_row + threadIdx.x] =
        in[off + i * pixels_per_row + threadIdx.x];
  }

  // fetch first pixel (right-most element from previous row or key init
  // values)
  if (row > 0) {
    if (threadIdx.x <= 2) {
      s_row[threadIdx.x] = in[off - 3 + threadIdx.x];
    }
  } else {
    if (threadIdx.x <= 2) {
      s_row[threadIdx.x] = const_p0[threadIdx.x];
    }
  }
  __syncthreads();

  // process R, then G, then B bytes
  #pragma unroll 3
  for (int i = 0; i < 3; i++) {
    // get right sequence (sx, sy, sz) and find correct row in sequence.
    const unsigned char *s = const_seqs[i] + row * pixels_per_row;

    // thread 0 is responsible for 1st pixel
    // thread 1 is responsible for 2nd pixel, and so on
    size_t index = 3 + 3 * threadIdx.x + i;
    unsigned char seq_element = s[threadIdx.x];
    unsigned char new_byte = undo_chen_byte_gpu(s_row[index], s_row[index - 3], seq_element);

    // let every thead obtain its byte before overwriting it (subsequent thread
    // needs old value that predecessor thread updates)
    __syncthreads();
    s_row[index] = new_byte;

    // no __syncthreads required here, since color channel transformation
    // are independent
  }
  __syncthreads();

  // write first third, then second third, and finally third third to global
  // memory.
  #pragma unroll 3
  for (int i = 0; i < 3; i++) {
    // do not write previous pixel to array
    out[off + i * pixels_per_row + threadIdx.x] =
        s_row[3 + i * pixels_per_row + threadIdx.x];
  }
}

#endif // #ifndef _UNDO_CHEN_H_
dc481c52664eb0284fc68f7074b5291e6cfd1430.cu
#ifndef _UNDO_CHEN_H_
#define _UNDO_CHEN_H_

#include <stdint.h>

__device__ inline unsigned char undo_chen_byte_gpu(const unsigned char c,
                                                   const unsigned char cp,
                                                   const unsigned char s) {
  return (unsigned char) (((int) (c ^ s) - (int) cp) % 256);
}

/**
 * Reverses Chen transformation (b_i = (b'_i ^ x'_i - b'_{i-1}) % 256.
 * A block processes a row of the input image.
 * A thread process a complete pixel consisting of three components (R, G, B).
 */
__global__ void undo_chen_gpu(unsigned char *in, unsigned char *out,
                              const size_t pixels_per_row) {
  // Shared memory layout:
  // (R,G,B),(R,G,B),...,(R,G,B)
  // Each thread processes one pixel, i.e., three bytes.
  // First element of array is used to store right-most pixel from previous row.
  extern __shared__ unsigned char s_row[];

  const int row = blockIdx.x;
  const size_t off = row * 3 * pixels_per_row;

  // check if thread is out-of-bounds
  if (threadIdx.x >= pixels_per_row) {
    return;
  }

  // fetch row from global memory and store it in shared memory
  // fetch first third of row, then second third, and finally third third
  // from global memory.
  #pragma unroll 3
  for (int i = 0; i < 3; i++) {
    // leave space for previous row pixel
    s_row[3 + i * pixels_per_row + threadIdx.x] =
        in[off + i * pixels_per_row + threadIdx.x];
  }

  // fetch first pixel (right-most element from previous row or key init
  // values)
  if (row > 0) {
    if (threadIdx.x <= 2) {
      s_row[threadIdx.x] = in[off - 3 + threadIdx.x];
    }
  } else {
    if (threadIdx.x <= 2) {
      s_row[threadIdx.x] = const_p0[threadIdx.x];
    }
  }
  __syncthreads();

  // process R, then G, then B bytes
  #pragma unroll 3
  for (int i = 0; i < 3; i++) {
    // get right sequence (sx, sy, sz) and find correct row in sequence.
    const unsigned char *s = const_seqs[i] + row * pixels_per_row;

    // thread 0 is responsible for 1st pixel
    // thread 1 is responsible for 2nd pixel, and so on
    size_t index = 3 + 3 * threadIdx.x + i;
    unsigned char seq_element = s[threadIdx.x];
    unsigned char new_byte = undo_chen_byte_gpu(s_row[index], s_row[index - 3], seq_element);

    // let every thead obtain its byte before overwriting it (subsequent thread
    // needs old value that predecessor thread updates)
    __syncthreads();
    s_row[index] = new_byte;

    // no __syncthreads required here, since color channel transformation
    // are independent
  }
  __syncthreads();

  // write first third, then second third, and finally third third to global
  // memory.
  #pragma unroll 3
  for (int i = 0; i < 3; i++) {
    // do not write previous pixel to array
    out[off + i * pixels_per_row + threadIdx.x] =
        s_row[3 + i * pixels_per_row + threadIdx.x];
  }
}

#endif // #ifndef _UNDO_CHEN_H_
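Aside (an illustration, not part of the dataset): the per-byte inverse in undo_chen_byte_gpu only makes sense next to its forward step. The host-side sketch below pairs the kernel's exact expression with an assumed forward transform b' = ((b + b_prev) mod 256) ^ s and checks that the two round-trip; the function names are invented for this sketch.

#include <cassert>

// Assumed forward Chen step: b' = ((b + b_prev) mod 256) ^ s
static unsigned char chen_forward_byte(unsigned char b, unsigned char prev, unsigned char s) {
    return (unsigned char)((((int)b + (int)prev) & 0xFF) ^ s);
}

// Identical arithmetic to undo_chen_byte_gpu in the rows above
static unsigned char chen_inverse_byte(unsigned char c, unsigned char prev, unsigned char s) {
    return (unsigned char)(((int)(c ^ s) - (int)prev) % 256);
}

int main() {
    unsigned char prev = 0x5a, s = 0x3c;   // arbitrary previous byte and sequence byte
    for (int b = 0; b < 256; ++b) {
        unsigned char enc = chen_forward_byte((unsigned char)b, prev, s);
        assert(chen_inverse_byte(enc, prev, s) == (unsigned char)b);
    }
    return 0;
}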
f9e972ee7ddf7a69fe4a1c2a97dda3c27a754efe.hip
// !!! This is a file automatically generated by hipify!!!
#include <math.h>
// #include <stdio.h>
#include "hip/hip_runtime.h"
#define NDEBUG1
#include <assert.h>

#define tx threadIdx.x
#define ty threadIdx.y
#define bx blockIdx.x
#define by blockIdx.y

#define NN 64
#define multi 1
#define BSZ0 16
#define BSZ (multi * (BSZ0))
#define GSZ (NN / BSZ)
#define MAX_NSZ 7
#define NSZ 5
#define NSZ2 (NSZ / 2)
#define BSZ_HL (BSZ + NSZ - 1)

#define SER(row, col, ncols) ((row) * (ncols) + (col))

__constant__ float gaussDistW[MAX_NSZ * MAX_NSZ];

// With 4x4 blocks, 1024 threads/block and 4 pixels per thread it comes out to a bit over 32K of shared memory,
// but I cannot have 1024 threads, probably because the limit is 768.
// So should I split it into 256 threads/block? -> work out the number of transfers.
__device__ __forceinline__ void getSharedBlock(float sharedImg[BSZ_HL], const float *globalImg, int I0, int N) {
  int ii = SER(ty, tx, BSZ0); // 2d to 1d index of thread i in the block
  do {
    int I = ii % BSZ_HL;              // x index in block including padding
    int J = ii / BSZ_HL;              // y index in block including padding
    int IGlobal = I0 + SER(J, I, N);  // global input index
    assert(I < BSZ_HL);
    if ( (I < BSZ_HL) && (J < BSZ_HL) && (ii < N * N) ) {
      sharedImg[SER(J, I, BSZ_HL)] = globalImg[IGlobal]; // download from global
    }
    ii += BSZ0 * BSZ0; // next iteration starts THREADNUM positions after
  } while ( ii < BSZ_HL * BSZ_HL ); // only J check needed ?
}

__device__ __forceinline__ void getWeight(
    float foreignBlockImg[BSZ_HL], float sigma, float *weightSum, float *fSum) {
  // Compute block weights with self
  // new tx is (1) blockdim away
  for (int k = 0; k < BSZ; k++) { // Other block
    for (int l = 0; l < BSZ; l++) {
      float partialW = 0;
      for (int m = -NSZ2; m <= NSZ2; m++) {   // Neighbourhood
        for (int n = -NSZ2; n <= NSZ2; n++) {
          float te = foreignBlockImg[1 + SER((l + NSZ2) + n, (k + NSZ2) + m, BSZ_HL)];
          partialW += //gaussDistW[ SER((n + MAX_NSZ / 2), (m + MAX_NSZ / 2), MAX_NSZ)]
              1 * ( (float)(tx - 1.) / tx //blockImg[(txM + NSZ2) + m][(tyM + NSZ2) + n]
                    - te);
        }
      }
      // if (!mx && !my && k==1) printf("%f\n",partialW);
      partialW = expf((-partialW / sigma));
      *weightSum += partialW;
      *fSum += partialW * foreignBlockImg[SER((l + NSZ2), (k + NSZ2), BSZ_HL)];
    }
  }
}

__device__ __forceinline__ void downloadAndCalculate(
    float foreignBlockImg[BSZ_HL], const float *inputImg, float sigma,
    float *weightSum, float *fSum, int N, int I0) {
  getSharedBlock(foreignBlockImg, inputImg, I0, N);
  __syncthreads();
  getWeight(foreignBlockImg, sigma, weightSum, fSum);
}

__global__ void nlm(float const *inputImg, float *outputImg, int N, float sigma) {
  // assert(NN == N);
  int N2 = NN + NSZ - 1; // input image with padding
  // assert(GSZ == gridDim.x);
  // assert(BSZ0 == blockDim.x);
  // __shared__ float blockImg[BSZ_HL][BSZ_HL];
  __shared__ float foreignBlockImg[BSZ_HL * BSZ_HL];

  // if(!tx && !ty && !bx && !by) {
  //   for (int mu = 0; mu < 49; mu++) {
  //     printf("%f ",gaussDistW[mu]);
  //   }
  // }
  // __syncthreads();

  float weightSum = 0, fSum = 0; // Weight sums for multiple pixels per thread.
  // put inside
  int I0 = SER(by * BSZ, bx * BSZ, N2);
  // Download this block's pixels
  //downloadAndCalculate(blockImg, blockImg, inputImg, sigma, weightSum, fSum, N2, I0);

  for (char i = 0; i < GSZ; i++) {   // for each BLOCK in the original image (X)
    for (char j = 0; j < GSZ; j++) {
      if ( !(by == j && bx == i) ) {
        // Download other blocks
        int I1 = SER(j * BSZ, i * BSZ, N2); // first pixel in block.
                                            // Used as a ref point to calculate the block. (pg21)
        // downloadAndCalculate(foreignBlockImg, inputImg, sigma, &weightSum, &fSum, N2, I1);
        getSharedBlock(foreignBlockImg, inputImg, I1, N2);
        __syncthreads();
        getWeight(foreignBlockImg, sigma, &weightSum, &fSum);
      }
    }
  }
  // add NSZ2 to skip the padding pixels
  outputImg[SER(by * BSZ, bx * BSZ, NN)] = fSum / weightSum;
  //blockImg[(txM + NSZ2)][(tyM + NSZ2)]
  //inputImg[I0 + SER(NSZ2, NSZ2, N2) + SER(tyM, txM, NN)]
}
// template __global__ void kernel<false>();
f9e972ee7ddf7a69fe4a1c2a97dda3c27a754efe.cu
#include <math.h>
// #include <stdio.h>
#include "cuda_runtime.h"
#define NDEBUG1
#include <assert.h>

#define tx threadIdx.x
#define ty threadIdx.y
#define bx blockIdx.x
#define by blockIdx.y

#define NN 64
#define multi 1
#define BSZ0 16
#define BSZ (multi * (BSZ0))
#define GSZ (NN / BSZ)
#define MAX_NSZ 7
#define NSZ 5
#define NSZ2 (NSZ / 2)
#define BSZ_HL (BSZ + NSZ - 1)

#define SER(row, col, ncols) ((row) * (ncols) + (col))

__constant__ float gaussDistW[MAX_NSZ * MAX_NSZ];

// With 4x4 blocks, 1024 threads/block and 4 pixels per thread it comes out to a bit over 32K of shared memory,
// but I cannot have 1024 threads, probably because the limit is 768.
// So should I split it into 256 threads/block? -> work out the number of transfers.
__device__ __forceinline__ void getSharedBlock(float sharedImg[BSZ_HL], const float *globalImg, int I0, int N) {
  int ii = SER(ty, tx, BSZ0); // 2d to 1d index of thread i in the block
  do {
    int I = ii % BSZ_HL;              // x index in block including padding
    int J = ii / BSZ_HL;              // y index in block including padding
    int IGlobal = I0 + SER(J, I, N);  // global input index
    assert(I < BSZ_HL);
    if ( (I < BSZ_HL) && (J < BSZ_HL) && (ii < N * N) ) {
      sharedImg[SER(J, I, BSZ_HL)] = globalImg[IGlobal]; // download from global
    }
    ii += BSZ0 * BSZ0; // next iteration starts THREADNUM positions after
  } while ( ii < BSZ_HL * BSZ_HL ); // only J check needed ?
}

__device__ __forceinline__ void getWeight(
    float foreignBlockImg[BSZ_HL], float sigma, float *weightSum, float *fSum) {
  // Compute block weights with self
  // new tx is (1) blockdim away
  for (int k = 0; k < BSZ; k++) { // Other block
    for (int l = 0; l < BSZ; l++) {
      float partialW = 0;
      for (int m = -NSZ2; m <= NSZ2; m++) {   // Neighbourhood
        for (int n = -NSZ2; n <= NSZ2; n++) {
          float te = foreignBlockImg[1 + SER((l + NSZ2) + n, (k + NSZ2) + m, BSZ_HL)];
          partialW += //gaussDistW[ SER((n + MAX_NSZ / 2), (m + MAX_NSZ / 2), MAX_NSZ)]
              1 * ( (float)(tx - 1.) / tx //blockImg[(txM + NSZ2) + m][(tyM + NSZ2) + n]
                    - te);
        }
      }
      // if (!mx && !my && k==1) printf("%f\n",partialW);
      partialW = expf((-partialW / sigma));
      *weightSum += partialW;
      *fSum += partialW * foreignBlockImg[SER((l + NSZ2), (k + NSZ2), BSZ_HL)];
    }
  }
}

__device__ __forceinline__ void downloadAndCalculate(
    float foreignBlockImg[BSZ_HL], const float *inputImg, float sigma,
    float *weightSum, float *fSum, int N, int I0) {
  getSharedBlock(foreignBlockImg, inputImg, I0, N);
  __syncthreads();
  getWeight(foreignBlockImg, sigma, weightSum, fSum);
}

__global__ void nlm(float const *inputImg, float *outputImg, int N, float sigma) {
  // assert(NN == N);
  int N2 = NN + NSZ - 1; // input image with padding
  // assert(GSZ == gridDim.x);
  // assert(BSZ0 == blockDim.x);
  // __shared__ float blockImg[BSZ_HL][BSZ_HL];
  __shared__ float foreignBlockImg[BSZ_HL * BSZ_HL];

  // if(!tx && !ty && !bx && !by) {
  //   for (int mu = 0; mu < 49; mu++) {
  //     printf("%f ",gaussDistW[mu]);
  //   }
  // }
  // __syncthreads();

  float weightSum = 0, fSum = 0; // Weight sums for multiple pixels per thread.
  // put inside
  int I0 = SER(by * BSZ, bx * BSZ, N2);
  // Download this block's pixels
  //downloadAndCalculate(blockImg, blockImg, inputImg, sigma, weightSum, fSum, N2, I0);

  for (char i = 0; i < GSZ; i++) {   // for each BLOCK in the original image (X)
    for (char j = 0; j < GSZ; j++) {
      if ( !(by == j && bx == i) ) {
        // Download other blocks
        int I1 = SER(j * BSZ, i * BSZ, N2); // first pixel in block.
                                            // Used as a ref point to calculate the block. (pg21)
        // downloadAndCalculate(foreignBlockImg, inputImg, sigma, &weightSum, &fSum, N2, I1);
        getSharedBlock(foreignBlockImg, inputImg, I1, N2);
        __syncthreads();
        getWeight(foreignBlockImg, sigma, &weightSum, &fSum);
      }
    }
  }
  // add NSZ2 to skip the padding pixels
  outputImg[SER(by * BSZ, bx * BSZ, NN)] = fSum / weightSum;
  //blockImg[(txM + NSZ2)][(tyM + NSZ2)]
  //inputImg[I0 + SER(NSZ2, NSZ2, N2) + SER(tyM, txM, NN)]
}
// template __global__ void kernel<false>();
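Again purely as an illustration (not part of the dataset): a sketch of how this non-local-means kernel could be driven from the host, assuming the sketch is compiled in the same translation unit as the kernel above (so the NN/BSZ0/GSZ/MAX_NSZ macros and the `gaussDistW` and `nlm` symbols are visible) and that the input has already been padded to (NN + NSZ - 1)^2 floats on the device. The helper name and padding convention are my own assumptions.

// Hypothetical driver; append to the kernel's .cu file so its macros and symbols are in scope.
#include <cstdio>
#include <cuda_runtime.h>

void launch_nlm(const float *d_padded_in, float *d_out,
                const float *h_gauss /* MAX_NSZ * MAX_NSZ window */, float sigma) {
  // The Gaussian window lives in constant memory (its use in getWeight is currently commented out).
  cudaMemcpyToSymbol(gaussDistW, h_gauss, MAX_NSZ * MAX_NSZ * sizeof(float));

  // One block per BSZ x BSZ tile of the NN x NN image, BSZ0 x BSZ0 threads per block,
  // matching the commented-out asserts at the top of the kernel.
  dim3 grid(GSZ, GSZ);
  dim3 block(BSZ0, BSZ0);
  nlm<<<grid, block>>>(d_padded_in, d_out, NN, sigma);

  cudaError_t err = cudaDeviceSynchronize();
  if (err != cudaSuccess) {
    fprintf(stderr, "nlm failed: %s\n", cudaGetErrorString(err));
  }
}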
2616a827b4e8222e4b0814afe9ec92258b57d2ad.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void MatMultipl_naive(float *A, float *B, float *C,
                                 int nColsA, int nColsB, int sizeC) {
  int i_col = blockIdx.x * blockDim.x + threadIdx.x; /// column index (position within a row)
  int i_row = blockIdx.y * blockDim.y + threadIdx.y; /// row index (position within a column)
  int idx = i_row * nColsB + i_col; // # of cols in B = # of cols in C

  float Cvalue = 0;
  // Guard both the column and the flattened index so threads past the last
  // column of C do not write into the following row.
  if (i_col < nColsB && idx < sizeC) {
    for (int e = 0; e < nColsA; e++)
      Cvalue += A[i_row * nColsA + e] * B[e * nColsB + i_col];
    C[idx] = Cvalue;
  }
}
2616a827b4e8222e4b0814afe9ec92258b57d2ad.cu
#include "includes.h" __global__ void MatMultipl_naive (float * A, float * B, float * C , int nColsA , int nColsB , int sizeC ) { int i_col = blockIdx.x * blockDim.x + threadIdx.x; /// index in row int i_row = blockIdx.y * blockDim.y + threadIdx.y; /// index in column int idx = i_row * nColsB + i_col; // # of cols in B = # of cols in C float Cvalue = 0; if (idx < sizeC){ for (int e=0; e < nColsA; e++) Cvalue += A[i_row * nColsA + e] * B[e * nColsB + i_col]; C[idx] = Cvalue; } }
d164df87869c0bcc5b4b249c8cc5c5d508f8166b.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2020, Vijay Thakkar ([email protected]). **************************************************************************************************/ ////////////////////////////////////////////////////////////////////// // THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY // ////////////////////////////////////////////////////////////////////// #include "benchmark/benchmark.h" #include "cuasr/gemm/device/default_srgemm_configuration.h" #include "cuasr/gemm/device/srgemm.h" #include "cuasr/functional.h" #include "harness.h" //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 8 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_8x32x8_8x32x1_2x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using 
EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x32x8_16x32x1_4x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x64x8_16x64x1_4x8_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, 
precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x32x8_32x32x1_8x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 8 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_8x32x8_8x16x1_2x2_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 8 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using 
InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_8x64x8_8x32x1_2x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x32x8_16x16x1_4x2_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = 
cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x64x8_16x32x1_4x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x128x8_16x64x1_4x8_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void 
BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x32x8_32x16x1_4x4_8x4_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x64x8_32x32x1_8x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / 
Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x32x8_16x32x1_4x4_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } 
BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_64x32x8_32x32x1_8x4_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x32x8_8x16x1_2x2_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); 
hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x64x8_8x32x1_2x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x32x8_16x16x1_4x2_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // 
setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x64x8_16x32x1_4x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x128x8_16x64x1_4x8_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // 
precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_64x32x8_32x16x1_4x4_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_64x64x8_32x32x1_8x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = 
cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_128x32x8_64x16x1_8x4_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 16 x 64 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x64x16_8x16x1_2x2_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 16 x 128 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, 
cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x128x16_8x32x1_2x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x32x8_16x8x1_2x2_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; 
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x64x8_16x16x1_4x2_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x128x8_16x32x1_4x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using 
ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_64x64x8_32x16x1_4x4_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x32x8_8x16x1_2x2_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) { 
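// state.range(0) is the square problem size swept by ->RangeMultiplier(2)->Range(256, 4096)
// in the BENCHMARK registration below, so each case runs an N x N x N SRGEMM with M = N = K.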
const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_64x32x8_16x16x1_4x2_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_64x64x8_16x32x1_4x4_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2 // Threadblock: 128 x 32 x 8 #if 
defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_128x32x8_32x16x1_4x4_8x4_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 32 x 64 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x64x16_8x16x1_2x2_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif 
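// The per-benchmark header comments compose as follows: (elements / thread) times
// (threads / warp) gives the warp tile, and the warp tile times (warps / block) gives
// the threadblock tile. For the case above: 2x2 elements * 4x8 threads = an 8x16 warp
// tile, and 8x16 * 4x4 warps = the 32x64 threadblock tile (the trailing 16 is the
// threadblock K dimension from ThreadblockShape).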
//////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 32 x 128 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x128x16_8x32x1_2x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 64 x 32 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, 
benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_64x32x16_16x8x1_2x2_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_64x64x8_16x16x1_4x2_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 128 x 32 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>; using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) 
{ benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_128x32x16_32x8x1_4x2_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif
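// This generated file only registers benchmarks; no main() is defined here. A minimal
// sketch of a standalone driver, assuming nothing else in the build supplies one
// (the file name bench_main.cu below is purely illustrative):
//
//   // bench_main.cu
//   #include "benchmark/benchmark.h"
//   BENCHMARK_MAIN();
//
// A typical run that keeps only this semiring's cases, using Google Benchmark's
// standard filter flag:
//
//   ./<bench_binary> --benchmark_filter='binary_or_binary_and_dsrgemm'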
d164df87869c0bcc5b4b249c8cc5c5d508f8166b.cu
/*************************************************************************************************** * Copyright (c) 2020, Vijay Thakkar ([email protected]). **************************************************************************************************/ ////////////////////////////////////////////////////////////////////// // THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY // ////////////////////////////////////////////////////////////////////// #include "benchmark/benchmark.h" #include "cuasr/gemm/device/default_srgemm_configuration.h" #include "cuasr/gemm/device/srgemm.h" #include "cuasr/functional.h" #include "harness.h" //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 8 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_8x32x8_8x32x1_2x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = 
cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x32x8_16x32x1_4x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x64x8_16x64x1_4x8_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, 
cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x32x8_32x32x1_8x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 8 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_8x32x8_8x16x1_2x2_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 8 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using 
Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_8x64x8_8x32x1_2x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x32x8_16x16x1_4x2_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using 
ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x64x8_16x32x1_4x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x128x8_16x64x1_4x8_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State 
&state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x32x8_32x16x1_4x4_8x4_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x64x8_32x32x1_8x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 32 x 32 x 8 
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x32x8_16x32x1_4x4_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_64x32x8_32x32x1_8x4_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif 
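// About the "Flop/s" counter used throughout: flops_per_itr = 2.0 * N * N * N counts one
// multiply and one add per term of the N x N x N product, and kIsIterationInvariantRate
// tells Google Benchmark the value is constant per iteration, so it reports
// flops_per_itr * iterations / elapsed_time. For this binary_or / binary_and semiring the
// operations are not literal floating-point FLOPs, but the operation count is the same.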
//////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x32x8_8x16x1_2x2_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, 
benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x64x8_8x32x1_2x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x32x8_16x16x1_4x2_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { 
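// DoNotOptimize keeps the compiler from discarding the result of bench.run(), and the
// explicit cudaDeviceSynchronize() makes the timed iteration wait for the asynchronous
// SRGEMM kernel to finish, so the measured time covers the device work.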
benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x64x8_16x32x1_4x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x128x8_16x64x1_4x8_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_64x32x8_32x16x1_4x4_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_64x64x8_32x32x1_8x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, 
cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_128x32x8_64x16x1_8x4_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 16 x 64 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x64x16_8x16x1_2x2_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 16 x 128 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = 
Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_16x128x16_8x32x1_2x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x32x8_16x8x1_2x2_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // 
precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x64x8_16x16x1_4x2_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x128x8_16x32x1_4x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = 
cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_64x64x8_32x16x1_4x4_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x32x8_8x16x1_2x2_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = 
double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_64x32x8_16x16x1_4x2_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_64x64x8_16x32x1_4x4_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void 
BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_128x32x8_32x16x1_4x4_8x4_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 32 x 64 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x64x16_8x16x1_2x2_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // 
Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 32 x 128 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_32x128x16_8x32x1_2x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 64 x 32 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } 
BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_64x32x16_16x8x1_2x2_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_64x64x8_16x16x1_4x2_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 128 x 32 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = double; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>; using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); 
cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_dsrgemm_nn_t_128x32x16_32x8x1_4x2_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif
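Every benchmark in this file expands to the same pattern: pick a (Threadblock, Warp) tile pair, instantiate cuasr::gemm::device::Srgemm with the binary_or/binary_and semiring, drive it through the BenchHarness, and report 2*N^3 operations per iteration (one "add" and one "multiply" per term of the N x N x N product). A minimal sketch of just that benchmarking skeleton follows; run_srgemm_once is a hypothetical placeholder standing in for BenchHarness::run and is not part of cuASR.

#include <benchmark/benchmark.h>
#include <cuda_runtime.h>

// Hypothetical stand-in for cuasr::bench::device::BenchHarness<Srgemm>::run():
// launches one N x N x N semiring GEMM on the default stream and returns its status.
bool run_srgemm_once(int N);

static void BM_srgemm_skeleton(benchmark::State &state) {
  const int N = static_cast<int>(state.range(0));
  for (auto _ : state) {
    benchmark::DoNotOptimize(run_srgemm_once(N)); // enqueue the device-side GEMM
    cudaDeviceSynchronize();                      // include kernel time in the iteration
  }
  // N^3 (add, multiply) pairs -> 2*N^3 operations, reported as a per-iteration rate.
  state.counters["Flop/s"] = benchmark::Counter(
      2.0 * N * N * N, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_srgemm_skeleton)->RangeMultiplier(2)->Range(256, 4096);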
9f11fa4f5631909da10a8988289e4ab8a4904acd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" __global__ void feilei(int n, float *hostInputA, float *hostInputB,float *result) { int i = threadIdx.y * blockDim.x + threadIdx.x; if (i<n) { for(int j = 0; j < n; j++){ if(hostInputA[j]==1.70141E38f){ //chang_tile[j/4]0 result[j] = hostInputA[j]; continue; } if(hostInputA[j]>=4500.0f){ result[j] = 1.0f; //1 } else if(hostInputA[j]>=3500.0f && hostInputA[j]<4500.0f){ result[j]= 2.0f; //2 } else if(hostInputA[j]>=2500.0f && hostInputA[j]<3500.0f){ result[j]= 3.0f; //3 } else if(hostInputA[j]>=1500.0f && hostInputA[j]<2500.0f && hostInputB[j] >= 2.0f){ result[j]= 4.0f; //4 } else if(hostInputA[j]>=1000.0f && hostInputA[j]<1500.0f && hostInputB[j] >= 5.0f){ result[j]= 5.0f; //5 } else if(hostInputA[j]>=300.0f && hostInputA[j]<1000.0f){ result[j]= 6.0f; //6 } else{ result[j] = 0.0f; // } } } }
9f11fa4f5631909da10a8988289e4ab8a4904acd.cu
extern "C" __global__ void feilei(int n, float *hostInputA, float *hostInputB,float *result) { int i = threadIdx.y * blockDim.x + threadIdx.x; if (i<n) { for(int j = 0; j < n; j++){ if(hostInputA[j]==1.70141E38f){ //如果chang_tile[j/4]的值是无效值,则无用值赋为0 result[j] = hostInputA[j]; continue; } if(hostInputA[j]>=4500.0f){ result[j] = 1.0f; //山地类型1 } else if(hostInputA[j]>=3500.0f && hostInputA[j]<4500.0f){ result[j]= 2.0f; //山地类型2 } else if(hostInputA[j]>=2500.0f && hostInputA[j]<3500.0f){ result[j]= 3.0f; //山地类型3 } else if(hostInputA[j]>=1500.0f && hostInputA[j]<2500.0f && hostInputB[j] >= 2.0f){ result[j]= 4.0f; //山地类型4 } else if(hostInputA[j]>=1000.0f && hostInputA[j]<1500.0f && hostInputB[j] >= 5.0f){ result[j]= 5.0f; //山地类型5 } else if(hostInputA[j]>=300.0f && hostInputA[j]<1000.0f){ result[j]= 6.0f; //山地类型6 } else{ result[j] = 0.0f; //非山地 } } } }
8441d08368d81dc881b58d36adcd93a0969068b2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <arbor/gpu/gpu_common.hpp> #include <arbor/gpu/math_cu.hpp> #include <arbor/gpu/reduce_by_key.hpp> #include <arbor/mechanism_abi.h> namespace testing { #define PPACK_IFACE_BLOCK \ auto _pp_var_width __attribute__((unused)) = params_.width;\ auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\ auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\ auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\ auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\ auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\ auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\ auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\ auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\ auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\ auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\ auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\ auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\ auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\ auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\ auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\ auto* _pp_var_weight __attribute__((unused)) = params_.weight;\ auto& _pp_var_events __attribute__((unused)) = params_.events;\ auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\ auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\ auto* _pp_var_g __attribute__((unused)) = params_.state_vars[0];\ auto* _pp_var_tau __attribute__((unused)) = params_.parameters[0];\ auto* _pp_var_e __attribute__((unused)) = params_.parameters[1];\ //End of IFACEBLOCK namespace { using ::arb::gpu::exprelr; using ::arb::gpu::safeinv; using ::arb::gpu::min; using ::arb::gpu::max; __global__ void init(arb_mechanism_ppack params_) { int n_ = params_.width; int tid_ = threadIdx.x + blockDim.x*blockIdx.x; PPACK_IFACE_BLOCK; if (tid_<n_) { _pp_var_g[tid_] = 0.; } } __global__ void multiply(arb_mechanism_ppack params_) { PPACK_IFACE_BLOCK; auto tid_ = threadIdx.x + blockDim.x*blockIdx.x; auto idx_ = blockIdx.y; if(tid_<_pp_var_width) { _pp_var_state_vars[idx_][tid_] *= _pp_var_multiplicity[tid_]; } } __global__ void advance_state(arb_mechanism_ppack params_) { int n_ = params_.width; int tid_ = threadIdx.x + blockDim.x*blockIdx.x; PPACK_IFACE_BLOCK; if (tid_<n_) { auto node_indexi_ = _pp_var_node_index[tid_]; arb_value_type dt = _pp_var_vec_dt[node_indexi_]; arb_value_type a_0_, ll0_, ll1_; ll1_ = 0.; ll0_ = 0.; a_0_ = -1.0/_pp_var_tau[tid_]; ll0_ = a_0_*dt; ll1_ = ( 1.0+ 0.5*ll0_)/( 1.0- 0.5*ll0_); _pp_var_g[tid_] = _pp_var_g[tid_]*ll1_; } } __global__ void compute_currents(arb_mechanism_ppack params_) { int n_ = params_.width; int tid_ = threadIdx.x + blockDim.x*blockIdx.x; unsigned lane_mask_ = arb::gpu::ballot(0xffffffff, tid_<n_); PPACK_IFACE_BLOCK; if (tid_<n_) { auto node_indexi_ = _pp_var_node_index[tid_]; arb_value_type conductivity_ = 0; arb_value_type v = _pp_var_vec_v[node_indexi_]; arb_value_type current_ = 0; arb_value_type i = 0; i = _pp_var_g[tid_]*(v-_pp_var_e[tid_]); current_ = i; conductivity_ = _pp_var_g[tid_]; ::arb::gpu::reduce_by_key(_pp_var_weight[tid_]*conductivity_,_pp_var_vec_g, node_indexi_, lane_mask_); ::arb::gpu::reduce_by_key(_pp_var_weight[tid_]*current_,_pp_var_vec_i, 
node_indexi_, lane_mask_); } } __global__ void apply_events(arb_mechanism_ppack params_, arb_deliverable_event_stream stream) { PPACK_IFACE_BLOCK; auto tid_ = threadIdx.x + blockDim.x*blockIdx.x; if(tid_<stream.n_streams) { auto begin = stream.events + stream.begin[tid_]; auto end = stream.events + stream.end[tid_]; for (auto p = begin; p<end; ++p) { if (p->mech_id==_pp_var_mechanism_id) { auto tid_ = p->mech_index; auto weight = p->weight; _pp_var_g[tid_] = max( 0., min(_pp_var_g[tid_]+weight, 10.0)); } } } } } // namespace void mechanism_non_linear_gpu_init_(arb_mechanism_ppack* p) { auto n = p->width; unsigned block_dim = 128; unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim); hipLaunchKernelGGL(( init), dim3(grid_dim), dim3(block_dim), 0, 0, *p); if (!p->multiplicity) return; hipLaunchKernelGGL(( multiply), dim3(grid_dim, 1), dim3(block_dim), 0, 0, *p); } void mechanism_non_linear_gpu_compute_currents_(arb_mechanism_ppack* p) { auto n = p->width; unsigned block_dim = 128; unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim); hipLaunchKernelGGL(( compute_currents), dim3(grid_dim), dim3(block_dim), 0, 0, *p); } void mechanism_non_linear_gpu_advance_state_(arb_mechanism_ppack* p) { auto n = p->width; unsigned block_dim = 128; unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim); hipLaunchKernelGGL(( advance_state), dim3(grid_dim), dim3(block_dim), 0, 0, *p); } void mechanism_non_linear_gpu_write_ions_(arb_mechanism_ppack* p) {} void mechanism_non_linear_gpu_post_event_(arb_mechanism_ppack* p) {} void mechanism_non_linear_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* stream_ptr) { auto n = stream_ptr->n_streams; unsigned block_dim = 128; unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim); hipLaunchKernelGGL(( apply_events), dim3(grid_dim), dim3(block_dim), 0, 0, *p, *stream_ptr); } } // namespace testing
8441d08368d81dc881b58d36adcd93a0969068b2.cu
#include <arbor/gpu/gpu_common.hpp> #include <arbor/gpu/math_cu.hpp> #include <arbor/gpu/reduce_by_key.hpp> #include <arbor/mechanism_abi.h> namespace testing { #define PPACK_IFACE_BLOCK \ auto _pp_var_width __attribute__((unused)) = params_.width;\ auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\ auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\ auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\ auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\ auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\ auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\ auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\ auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\ auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\ auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\ auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\ auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\ auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\ auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\ auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\ auto* _pp_var_weight __attribute__((unused)) = params_.weight;\ auto& _pp_var_events __attribute__((unused)) = params_.events;\ auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\ auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\ auto* _pp_var_g __attribute__((unused)) = params_.state_vars[0];\ auto* _pp_var_tau __attribute__((unused)) = params_.parameters[0];\ auto* _pp_var_e __attribute__((unused)) = params_.parameters[1];\ //End of IFACEBLOCK namespace { using ::arb::gpu::exprelr; using ::arb::gpu::safeinv; using ::arb::gpu::min; using ::arb::gpu::max; __global__ void init(arb_mechanism_ppack params_) { int n_ = params_.width; int tid_ = threadIdx.x + blockDim.x*blockIdx.x; PPACK_IFACE_BLOCK; if (tid_<n_) { _pp_var_g[tid_] = 0.; } } __global__ void multiply(arb_mechanism_ppack params_) { PPACK_IFACE_BLOCK; auto tid_ = threadIdx.x + blockDim.x*blockIdx.x; auto idx_ = blockIdx.y; if(tid_<_pp_var_width) { _pp_var_state_vars[idx_][tid_] *= _pp_var_multiplicity[tid_]; } } __global__ void advance_state(arb_mechanism_ppack params_) { int n_ = params_.width; int tid_ = threadIdx.x + blockDim.x*blockIdx.x; PPACK_IFACE_BLOCK; if (tid_<n_) { auto node_indexi_ = _pp_var_node_index[tid_]; arb_value_type dt = _pp_var_vec_dt[node_indexi_]; arb_value_type a_0_, ll0_, ll1_; ll1_ = 0.; ll0_ = 0.; a_0_ = -1.0/_pp_var_tau[tid_]; ll0_ = a_0_*dt; ll1_ = ( 1.0+ 0.5*ll0_)/( 1.0- 0.5*ll0_); _pp_var_g[tid_] = _pp_var_g[tid_]*ll1_; } } __global__ void compute_currents(arb_mechanism_ppack params_) { int n_ = params_.width; int tid_ = threadIdx.x + blockDim.x*blockIdx.x; unsigned lane_mask_ = arb::gpu::ballot(0xffffffff, tid_<n_); PPACK_IFACE_BLOCK; if (tid_<n_) { auto node_indexi_ = _pp_var_node_index[tid_]; arb_value_type conductivity_ = 0; arb_value_type v = _pp_var_vec_v[node_indexi_]; arb_value_type current_ = 0; arb_value_type i = 0; i = _pp_var_g[tid_]*(v-_pp_var_e[tid_]); current_ = i; conductivity_ = _pp_var_g[tid_]; ::arb::gpu::reduce_by_key(_pp_var_weight[tid_]*conductivity_,_pp_var_vec_g, node_indexi_, lane_mask_); ::arb::gpu::reduce_by_key(_pp_var_weight[tid_]*current_,_pp_var_vec_i, node_indexi_, lane_mask_); } } __global__ void apply_events(arb_mechanism_ppack params_, 
arb_deliverable_event_stream stream) { PPACK_IFACE_BLOCK; auto tid_ = threadIdx.x + blockDim.x*blockIdx.x; if(tid_<stream.n_streams) { auto begin = stream.events + stream.begin[tid_]; auto end = stream.events + stream.end[tid_]; for (auto p = begin; p<end; ++p) { if (p->mech_id==_pp_var_mechanism_id) { auto tid_ = p->mech_index; auto weight = p->weight; _pp_var_g[tid_] = max( 0., min(_pp_var_g[tid_]+weight, 10.0)); } } } } } // namespace void mechanism_non_linear_gpu_init_(arb_mechanism_ppack* p) { auto n = p->width; unsigned block_dim = 128; unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim); init<<<grid_dim, block_dim>>>(*p); if (!p->multiplicity) return; multiply<<<dim3{grid_dim, 1}, block_dim>>>(*p); } void mechanism_non_linear_gpu_compute_currents_(arb_mechanism_ppack* p) { auto n = p->width; unsigned block_dim = 128; unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim); compute_currents<<<grid_dim, block_dim>>>(*p); } void mechanism_non_linear_gpu_advance_state_(arb_mechanism_ppack* p) { auto n = p->width; unsigned block_dim = 128; unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim); advance_state<<<grid_dim, block_dim>>>(*p); } void mechanism_non_linear_gpu_write_ions_(arb_mechanism_ppack* p) {} void mechanism_non_linear_gpu_post_event_(arb_mechanism_ppack* p) {} void mechanism_non_linear_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* stream_ptr) { auto n = stream_ptr->n_streams; unsigned block_dim = 128; unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim); apply_events<<<grid_dim, block_dim>>>(*p, *stream_ptr); } } // namespace testing
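For reference, the pair above shows the launch-syntax translation hipify performs: a CUDA triple-chevron launch kernel<<<grid, block>>>(args...) maps onto the hipLaunchKernelGGL macro, with the dynamic shared-memory size and the stream becoming explicit 0 arguments when the CUDA launch omits them. A minimal sketch of the correspondence, using this file's multiply kernel and its 2-D grid as the example (assumes the multiply kernel and arb_mechanism_ppack declared in the file above):

void launch_multiply(arb_mechanism_ppack* p, unsigned grid_dim, unsigned block_dim) {
  // CUDA form: multiply<<<dim3{grid_dim, 1}, block_dim>>>(*p);
  // HIP form: grid, block, shared-memory bytes (0) and stream (0) are all explicit.
  hipLaunchKernelGGL(multiply, dim3(grid_dim, 1), dim3(block_dim), 0, 0, *p);
}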
903194cbf37afbc2d6fb99f01b2210caba4eec7c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cuNDFFT.h" #include "cudaDeviceManager.h" using namespace Gadgetron; template<class T> __global__ void timeswitch_kernel(T* data, int dimsize, int batchsize, size_t nelements){ int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x; if (idx < nelements){ int index = (idx/batchsize)%dimsize; if (index & 1) //Check if number is odd data[idx] *= -1; } } template<class T> __global__ void timeswitch_kernel1D(T* data, size_t nelements){ int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x; if (idx < nelements){ data[idx] *= (-int(threadIdx.x & 1)*2+1); //Multiply by -1 if x is odd } } template<class T> __global__ void timeswitch_kernel2D(T* data, size_t nelements){ int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x; if (idx < nelements) data[idx] *= (-int(threadIdx.x & 1)*2+1)*int(-(blockIdx.x & 1)*2+1); //Multiply by -1 if x or y coordinate is odd, but not if both are } template<class T> __global__ void timeswitch_kernel3D(T* data, size_t nelements){ int idx = ((blockIdx.z*gridDim.y+blockIdx.y)*gridDim.x + blockIdx.x)*blockDim.x + threadIdx.x; if (idx < nelements) data[idx] *= (-int(threadIdx.x & 1)*2+1)*(-int(blockIdx.x & 1)*2+1)*(-int(blockIdx.y & 1)*2+1); //Multiply by -1 if x or y coordinate is odd, but not if both are } template<class T> void Gadgetron::timeswitch1D(cuNDArray<complext<T> >* inout){ dim3 dimBlock(inout->get_size(0)); size_t max_grid = cudaDeviceManager::Instance()->max_griddim(); size_t nelements = inout->get_number_of_elements(); size_t gridX = ::max(::min(nelements/dimBlock.x,max_grid),size_t(1)); size_t gridY = ::max(size_t(1),nelements/(gridX*dimBlock.x)); dim3 dimGrid(gridX,gridY); hipLaunchKernelGGL(( timeswitch_kernel1D), dim3(dimGrid),dim3(dimBlock), 0, 0, inout->get_data_ptr(),nelements); } template<class T> void Gadgetron::timeswitch2D(cuNDArray<complext<T> >* inout){ dim3 dimBlock(inout->get_size(0)); size_t max_grid = cudaDeviceManager::Instance()->max_griddim(); size_t nelements = inout->get_number_of_elements(); size_t gridX = inout->get_size(1); size_t gridY = ::max(size_t(1),nelements/(gridX*dimBlock.x)); dim3 dimGrid(gridX,gridY); hipLaunchKernelGGL(( timeswitch_kernel2D), dim3(dimGrid),dim3(dimBlock), 0, 0, inout->get_data_ptr(),nelements); } template<class T> void Gadgetron::timeswitch3D(cuNDArray<complext<T> >* inout){ dim3 dimBlock(inout->get_size(0)); size_t max_grid = cudaDeviceManager::Instance()->max_griddim(); size_t nelements = inout->get_number_of_elements(); size_t gridX = inout->get_size(1); size_t gridY = inout->get_size(2); size_t gridZ = ::max(size_t(1),nelements/(gridX*dimBlock.x*gridY)); dim3 dimGrid(gridX,gridY,gridZ); hipLaunchKernelGGL(( timeswitch_kernel3D), dim3(dimGrid),dim3(dimBlock), 0, 0, inout->get_data_ptr(),nelements); } template<class T> void Gadgetron::timeswitch(cuNDArray<complext<T> >* inout, int dim_to_transform){ size_t batchsize = 1; for (int i = 0; i < dim_to_transform; i++) batchsize *= inout->get_size(i); size_t dimsize = inout->get_size(dim_to_transform); size_t nelements = inout->get_number_of_elements(); size_t max_block = cudaDeviceManager::Instance()->max_blockdim(); dim3 dimBlock(::min(max_block,nelements)); size_t max_grid = cudaDeviceManager::Instance()->max_griddim(); size_t gridX = ::max(::min(nelements/dimBlock.x,max_grid),size_t(1)); size_t gridY = ::max(size_t(1),nelements/(gridX*dimBlock.x)); dim3 dimGrid(gridX,gridY); 
hipLaunchKernelGGL(( timeswitch_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, inout->get_data_ptr(),dimsize,batchsize,nelements); } template EXPORTGPUFFT void Gadgetron::timeswitch<float>(cuNDArray<float_complext>*, int); template EXPORTGPUFFT void Gadgetron::timeswitch<double>(cuNDArray<double_complext>*, int); template EXPORTGPUFFT void Gadgetron::timeswitch1D<float>(cuNDArray<float_complext>*); template EXPORTGPUFFT void Gadgetron::timeswitch1D<double>(cuNDArray<double_complext>*); template EXPORTGPUFFT void Gadgetron::timeswitch2D<float>(cuNDArray<float_complext>*); template EXPORTGPUFFT void Gadgetron::timeswitch2D<double>(cuNDArray<double_complext>*); template EXPORTGPUFFT void Gadgetron::timeswitch3D<float>(cuNDArray<float_complext>*); template EXPORTGPUFFT void Gadgetron::timeswitch3D<double>(cuNDArray<double_complext>*);
903194cbf37afbc2d6fb99f01b2210caba4eec7c.cu
#include "cuNDFFT.h" #include "cudaDeviceManager.h" using namespace Gadgetron; template<class T> __global__ void timeswitch_kernel(T* data, int dimsize, int batchsize, size_t nelements){ int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x; if (idx < nelements){ int index = (idx/batchsize)%dimsize; if (index & 1) //Check if number is odd data[idx] *= -1; } } template<class T> __global__ void timeswitch_kernel1D(T* data, size_t nelements){ int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x; if (idx < nelements){ data[idx] *= (-int(threadIdx.x & 1)*2+1); //Multiply by -1 if x is odd } } template<class T> __global__ void timeswitch_kernel2D(T* data, size_t nelements){ int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x; if (idx < nelements) data[idx] *= (-int(threadIdx.x & 1)*2+1)*int(-(blockIdx.x & 1)*2+1); //Multiply by -1 if x or y coordinate is odd, but not if both are } template<class T> __global__ void timeswitch_kernel3D(T* data, size_t nelements){ int idx = ((blockIdx.z*gridDim.y+blockIdx.y)*gridDim.x + blockIdx.x)*blockDim.x + threadIdx.x; if (idx < nelements) data[idx] *= (-int(threadIdx.x & 1)*2+1)*(-int(blockIdx.x & 1)*2+1)*(-int(blockIdx.y & 1)*2+1); //Multiply by -1 if x or y coordinate is odd, but not if both are } template<class T> void Gadgetron::timeswitch1D(cuNDArray<complext<T> >* inout){ dim3 dimBlock(inout->get_size(0)); size_t max_grid = cudaDeviceManager::Instance()->max_griddim(); size_t nelements = inout->get_number_of_elements(); size_t gridX = std::max(std::min(nelements/dimBlock.x,max_grid),size_t(1)); size_t gridY = std::max(size_t(1),nelements/(gridX*dimBlock.x)); dim3 dimGrid(gridX,gridY); timeswitch_kernel1D<<<dimGrid,dimBlock>>>(inout->get_data_ptr(),nelements); } template<class T> void Gadgetron::timeswitch2D(cuNDArray<complext<T> >* inout){ dim3 dimBlock(inout->get_size(0)); size_t max_grid = cudaDeviceManager::Instance()->max_griddim(); size_t nelements = inout->get_number_of_elements(); size_t gridX = inout->get_size(1); size_t gridY = std::max(size_t(1),nelements/(gridX*dimBlock.x)); dim3 dimGrid(gridX,gridY); timeswitch_kernel2D<<<dimGrid,dimBlock>>>(inout->get_data_ptr(),nelements); } template<class T> void Gadgetron::timeswitch3D(cuNDArray<complext<T> >* inout){ dim3 dimBlock(inout->get_size(0)); size_t max_grid = cudaDeviceManager::Instance()->max_griddim(); size_t nelements = inout->get_number_of_elements(); size_t gridX = inout->get_size(1); size_t gridY = inout->get_size(2); size_t gridZ = std::max(size_t(1),nelements/(gridX*dimBlock.x*gridY)); dim3 dimGrid(gridX,gridY,gridZ); timeswitch_kernel3D<<<dimGrid,dimBlock>>>(inout->get_data_ptr(),nelements); } template<class T> void Gadgetron::timeswitch(cuNDArray<complext<T> >* inout, int dim_to_transform){ size_t batchsize = 1; for (int i = 0; i < dim_to_transform; i++) batchsize *= inout->get_size(i); size_t dimsize = inout->get_size(dim_to_transform); size_t nelements = inout->get_number_of_elements(); size_t max_block = cudaDeviceManager::Instance()->max_blockdim(); dim3 dimBlock(std::min(max_block,nelements)); size_t max_grid = cudaDeviceManager::Instance()->max_griddim(); size_t gridX = std::max(std::min(nelements/dimBlock.x,max_grid),size_t(1)); size_t gridY = std::max(size_t(1),nelements/(gridX*dimBlock.x)); dim3 dimGrid(gridX,gridY); timeswitch_kernel<<<dimGrid,dimBlock>>>(inout->get_data_ptr(),dimsize,batchsize,nelements); } template EXPORTGPUFFT void Gadgetron::timeswitch<float>(cuNDArray<float_complext>*, 
int); template EXPORTGPUFFT void Gadgetron::timeswitch<double>(cuNDArray<double_complext>*, int); template EXPORTGPUFFT void Gadgetron::timeswitch1D<float>(cuNDArray<float_complext>*); template EXPORTGPUFFT void Gadgetron::timeswitch1D<double>(cuNDArray<double_complext>*); template EXPORTGPUFFT void Gadgetron::timeswitch2D<float>(cuNDArray<float_complext>*); template EXPORTGPUFFT void Gadgetron::timeswitch2D<double>(cuNDArray<double_complext>*); template EXPORTGPUFFT void Gadgetron::timeswitch3D<float>(cuNDArray<float_complext>*); template EXPORTGPUFFT void Gadgetron::timeswitch3D<double>(cuNDArray<double_complext>*);
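The timeswitch kernels in this pair apply the standard DFT shift identity: negating every sample whose coordinate parity (x + y + z) is odd multiplies the data by (-1)^(x+y+z), which for even lengths circularly shifts the transform by half its length along each axis, i.e. it bakes an fftshift into the data instead of reordering memory. A host-side reference of the same 3-D sign flip, assuming x is the fastest-varying index as in the kernels above:

#include <complex>
#include <cstddef>
#include <vector>

// Negate every element whose coordinate parity (x + y + z) is odd; doing this
// before an FFT shifts the spectrum by (nx/2, ny/2, nz/2) -- the "fftshift" trick.
void timeswitch3d_host(std::vector<std::complex<float>>& data,
                       std::size_t nx, std::size_t ny, std::size_t nz) {
  for (std::size_t z = 0; z < nz; ++z)
    for (std::size_t y = 0; y < ny; ++y)
      for (std::size_t x = 0; x < nx; ++x)
        if ((x + y + z) & 1)
          data[(z * ny + y) * nx + x] *= -1.0f;
}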
1fe735d9bcb563761c8124efd61a11291f3bdd7e.hip
// !!! This is a file automatically generated by hipify!!! #include <memory> #include <iostream> #include <hip/hip_runtime.h> int main(void) { int device_count = 0; hipGetDeviceCount(&device_count); std::cout << "There are " << device_count << " gpus on this computer" << std::endl; }
1fe735d9bcb563761c8124efd61a11291f3bdd7e.cu
#include <memory> #include <iostream> #include <cuda_runtime.h> int main(void) { int device_count = 0; cudaGetDeviceCount(&device_count); std::cout << "There are " << device_count << " gpus on this computer" << std::endl; }
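Both versions of this probe discard the status code returned by the device-count query, so a missing driver is indistinguishable from a machine that genuinely reports zero GPUs. A slightly more defensive sketch of the CUDA variant (the HIP one is identical apart from the hip-prefixed names):

#include <iostream>
#include <cuda_runtime.h>

int main() {
  int device_count = 0;
  // cudaGetDeviceCount reports errors such as an absent driver via its return code.
  cudaError_t status = cudaGetDeviceCount(&device_count);
  if (status != cudaSuccess) {
    std::cerr << "cudaGetDeviceCount failed: " << cudaGetErrorString(status) << std::endl;
    return 1;
  }
  std::cout << "There are " << device_count << " gpus on this computer" << std::endl;
  return 0;
}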
271e7daad904125583b0acf9c9d2d02410628a79.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../include/cudaconv2.cuh" #include "../../nvmatrix/include/nvmatrix_kernels.cuh" __device__ __forceinline__ void filterActs_YxX_color_preload_ty_4_tx_32_f_16_cc_3_setImgCoords(int fPidx, int imgLoadModPosY, int imgLoadModPosX, int imgSizeX, int filterSize, int& iPidx) { int x = imgLoadModPosX + (fPidx) % filterSize; int y = imgLoadModPosY + (fPidx) / filterSize; iPidx = y >= 0 && y < imgSizeX && x >= 0 && x < imgSizeX ? y * imgSizeX + x : -1; } #define FA_COLOR3_IMPRELOAD(c,i) imPreload[c][i] = iPidxNext < 0 || (checkImgBounds && myImgIdx + i * B_X >= numImages) ? 0 : mm[c * imgPixels * imgStride + i * B_X]; #define FA_COLOR3_IMPRELOAD_TX(c,i) imPreload[c][i] = iPidxNext < 0 || (checkImgBounds && myImgIdx + i * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imagesOffset2 + c * imgPixels * imgStride + i * B_X); /* * images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModules, numFilterColors, filterPixels, numFilters) otherwise * * targets: (numFilters, numModulesY, numModulesX, numImages) * */ template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int numColors, int pixelCache, bool scale, bool checkImgBounds> //__launch_bounds__(128,3) __global__ void filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_16_px_4_cc_3_tex(hipTextureObject_t images, hipTextureObject_t filters, float* targets, const int numImages, const int numFilters, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int numModulesY, const int numModulesX, const int imgStride, const float scaleTargets, const float scaleOutputs, const bool conv/*, const bool noloads*/) { __shared__ float shFilters[numColors][pixelCache][B_Y * filtersPerThread]; // pre-load 1 pixel from B_Y*filtersPerThread filters __shared__ float shImages[numColors][pixelCache][B_X * imgsPerThread]; // pre-load 1 pixel from B_X*imgsPerThread images const int imgPixels = imgSizeY * imgSizeX; const int filterPixels = filterSize * filterSize; const int blocksPerModule = numFilters / (B_Y*filtersPerThread); const int moduleIdx = blockIdx.y / blocksPerModule; const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule); const int numModules = numModulesX * numModulesY; // Another fun insanity: the % B_X makes things faster, even thought threadIdx.x is // in the range 0..31. It appears that this allows the compiler to optimize? 
const int tx = threadIdx.x % B_X; const int ty = threadIdx.y % B_Y; const int tidx = ty * B_X + threadIdx.x; const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride; const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride; const int shFilterLoadY = tidx / (B_Y * filtersPerThread); const int shFilterLoadX = tidx % (B_Y * filtersPerThread); const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x; // images += myImgIdx; // filters += blockFilterIdx // + shFilterLoadY * numFilters + shFilterLoadX; // if (!conv) { // NOTE: UNTESTED! // filters += moduleIdx * numColors * filterPixels * numFilters; // } const int imagesOffset = myImgIdx; const int filtersOffset = blockFilterIdx + shFilterLoadY * numFilters + shFilterLoadX + (conv ? 0 : moduleIdx * numColors * filterPixels * numFilters); targets += moduleIdx * numImages + (blockFilterIdx + threadIdx.y * filtersPerThread) * numImages * numModules + myImgIdx; float prod[imgsPerThread][filtersPerThread]; #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] = 0; } } int iPidxNext; float imPreload[numColors][imgsPerThread]; float fPreload[numColors][pixelCache*filtersPerThread/B_X]; #pragma unroll for (int c = 0; c < numColors; ++c) { #pragma unroll for (int p = 0; p < pixelCache; p += B_X/filtersPerThread) { if (p + shFilterLoadY < filterPixels) { fPreload[c][p*filtersPerThread/B_X] = tex1Dfetch<float>(filters, filtersOffset + p * numFilters + c * numFilters * filterPixels); } else{ fPreload[c][p*filtersPerThread/B_X] = 0; } } } filterActs_YxX_color_preload_ty_4_tx_32_f_16_cc_3_setImgCoords(ty, imgLoadModPosY, imgLoadModPosX, imgSizeX, filterSize, iPidxNext); #pragma unroll for (int c = 0; c < numColors; ++c) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (iPidxNext >= 0 && (!checkImgBounds || myImgIdx + i * B_X < numImages)) { imPreload[c][i] = tex1Dfetch<float>(images, imagesOffset + (c * imgPixels + iPidxNext) * imgStride + i * B_X); } else { imPreload[c][i] = 0; } } } for (int p = 0; p < filterPixels; p += pixelCache) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int c = 0; c < numColors; ++c) { // NOTE: bank conflicts here! shImages[c][ty][tx * imgsPerThread + i] = imPreload[c][i]; } } const int fPidxNext = p + pixelCache >= filterPixels ? 
0 : p + pixelCache; filterActs_YxX_color_preload_ty_4_tx_32_f_16_cc_3_setImgCoords(fPidxNext + ty, imgLoadModPosY, imgLoadModPosX, imgSizeX, filterSize, iPidxNext); // const float* ff = &filters[numFilters * fPidxNext]; // const float* mm = &images[imgStride * iPidxNext]; const int filtersOffset2 = filtersOffset + numFilters * fPidxNext; const int imagesOffset2 = imagesOffset + imgStride * iPidxNext; FA_COLOR3_IMPRELOAD_TX(0,0); FA_COLOR3_IMPRELOAD_TX(0,1); FA_COLOR3_IMPRELOAD_TX(0,2); FA_COLOR3_IMPRELOAD_TX(0,3); #pragma unroll for (int c = 0; c < numColors; ++c) { #pragma unroll for (int pp = 0; pp < pixelCache; pp += B_X/filtersPerThread) { shFilters[c][pp + shFilterLoadY][shFilterLoadX] = fPreload[c][pp*filtersPerThread/B_X]; } } __syncthreads(); FA_COLOR3_IMPRELOAD_TX(1,0); FA_COLOR3_IMPRELOAD_TX(1,1); FA_COLOR3_IMPRELOAD_TX(1,2); FA_COLOR3_IMPRELOAD_TX(1,3); FA_COLOR3_IMPRELOAD_TX(2,0); FA_COLOR3_IMPRELOAD_TX(2,1); FA_COLOR3_IMPRELOAD_TX(2,2); FA_COLOR3_IMPRELOAD_TX(2,3); #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int pp = 0; pp < 2; pp++) { fPreload[c][pp] = fPidxNext + pp*(B_X/filtersPerThread) + shFilterLoadY >= filterPixels ? 0 : tex1Dfetch<float>(filters, filtersOffset2 + c * numFilters* filterPixels + pp*(B_X/filtersPerThread) * numFilters); } } #pragma unroll for (int pp = 0; pp < pixelCache; pp++) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { #pragma unroll for(int i = 0; i < imgsPerThread; i++) { prod[i][f] += shImages[c][pp][tx * imgsPerThread + i] * shFilters[c][pp][ty * filtersPerThread + f]; } } } } __syncthreads(); } if (scale) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { targets[i * B_X + f * numImages * numModules] = scaleTargets * targets[i * B_X + f * numImages * numModules] + scaleOutputs * prod[i][f]; } } } } else { // Note: reversing order of these loops saves 2 registers, but costs time #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { targets[i * B_X + f * numImages * numModules] = scaleOutputs * prod[i][f]; } } } } } /* * images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModules, numFilterColors, filterPixels, numFilters) otherwise * * targets: (numFilters, numModulesY, numModulesX, numImages) * * This won't be pretty. 
*/ template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int numColors, int pixelCache, bool scale, bool checkImgBounds> __global__ void filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_12_px_4_cc_3_tex(hipTextureObject_t images, hipTextureObject_t filters, float* targets, const int numImages, const int numFilters, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int numModulesY, const int numModulesX, const int imgStride, const float scaleTargets, const float scaleOutputs, const bool conv/*, const bool noloads*/) { __shared__ float shFilters[numColors][pixelCache][B_Y * filtersPerThread]; // pre-load 1 pixel from B_Y*filtersPerThread filters __shared__ float shImages[numColors][pixelCache][B_X * imgsPerThread]; // pre-load 1 pixel from B_X*imgsPerThread images const int imgPixels = imgSizeY * imgSizeX; const int filterPixels = filterSize * filterSize; const int blocksPerModule = numFilters / (B_Y*filtersPerThread); const int moduleIdx = blockIdx.y / blocksPerModule; const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule); const int numModules = numModulesX * numModulesY; // Another fun insanity: the % B_X makes things faster, even though threadIdx.x is // in the range 0..31. It appears that this allows the compiler to optimize? const int tx = threadIdx.x % B_X; const int ty = threadIdx.y % B_Y; const int tidx = ty * B_X + threadIdx.x; const int warp = tidx / 32; const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride; const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride; const int shFilterLoadY = tidx / (B_Y * filtersPerThread); const int shFilterLoadX = tidx % (B_Y * filtersPerThread); const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x; // images += myImgIdx; // filters += blockFilterIdx // + shFilterLoadY * numFilters + shFilterLoadX; // if (!conv) { // NOTE: UNTESTED! // filters += moduleIdx * numColors * filterPixels * numFilters; // } const int imagesOffset = myImgIdx; const int filtersOffset = blockFilterIdx + shFilterLoadY * numFilters + shFilterLoadX + (conv ? 
0 : moduleIdx * numColors * filterPixels * numFilters); targets += moduleIdx * numImages + (blockFilterIdx + threadIdx.y * filtersPerThread) * numImages * numModules + myImgIdx; float prod[imgsPerThread][filtersPerThread]; #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] = 0; } } int iPidxNext; float imPreload[numColors][imgsPerThread]; float fPreload[numColors][DIVUP(pixelCache*filtersPerThread,B_X)]; if (warp < 3) { #pragma unroll for (int c = 0; c < numColors; ++c) { #pragma unroll for (int p = 0; p < pixelCache; p += 2) { if (p + shFilterLoadY < filterPixels) { fPreload[c][p/2] = tex1Dfetch<float>(filters, filtersOffset + p * numFilters + c * numFilters * filterPixels); } else { fPreload[c][p/2] = 0; } } } } filterActs_YxX_color_preload_ty_4_tx_32_f_16_cc_3_setImgCoords(ty, imgLoadModPosY, imgLoadModPosX, imgSizeX, filterSize, iPidxNext); #pragma unroll for (int c = 0; c < numColors; ++c) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (iPidxNext >= 0 && (!checkImgBounds || myImgIdx + i * B_X < numImages)) { imPreload[c][i] = tex1Dfetch<float>(images, imagesOffset + (c * imgPixels + iPidxNext) * imgStride + i * B_X); } else { imPreload[c][i] = 0; } } } for (int p = 0; p < filterPixels; p += pixelCache) { const int fPidxNext = p + pixelCache >= filterPixels ? 0 : p + pixelCache; filterActs_YxX_color_preload_ty_4_tx_32_f_16_cc_3_setImgCoords(fPidxNext + ty, imgLoadModPosY, imgLoadModPosX, imgSizeX, filterSize, iPidxNext); #pragma unroll for (int c = 0; c < numColors; ++c) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { // NOTE: bank conflicts here! shImages[c][ty][tx * imgsPerThread + i] = imPreload[c][i]; } } if (warp < 3) { #pragma unroll for (int c = 0; c < numColors; ++c) { #pragma unroll for (int pp = 0; pp < pixelCache; pp += 2) { shFilters[c][pp + shFilterLoadY][shFilterLoadX] = fPreload[c][pp/2]; } } } __syncthreads(); // const float* ff = &filters[numFilters * fPidxNext]; // const float* mm = &images[imgStride * iPidxNext]; const int filtersOffset2 = filtersOffset + numFilters * fPidxNext; const int imagesOffset2 = imagesOffset + imgStride * iPidxNext; #pragma unroll for (int i = 0; i < imgsPerThread; ++i) { #pragma unroll for (int c = 0; c < numColors; c++) { FA_COLOR3_IMPRELOAD_TX(c,i); } } #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int pp = 0; pp < 2; pp++) { fPreload[c][pp] = warp >= 3 || fPidxNext + pp*2 + shFilterLoadY >= filterPixels ? 
                    0 : tex1Dfetch<float>(filters, filtersOffset2 + c * numFilters* filterPixels + pp*2 * numFilters);
            }
            #pragma unroll
            for (int pp = 0; pp < pixelCache; pp++) {
                #pragma unroll
                for(int i = 0; i < imgsPerThread; i++) {
                    #pragma unroll
                    for(int f = 0; f < filtersPerThread; f++) {
                        prod[i][f] += shImages[c][pp][tx * imgsPerThread + i] * shFilters[c][pp][ty * filtersPerThread + f];
                    }
                }
            }
        }
        __syncthreads();
    }

    if (scale) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            #pragma unroll
            for (int f = 0; f < filtersPerThread; f++) {
                if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
                    targets[i * B_X + f * numImages * numModules] = scaleTargets * targets[i * B_X + f * numImages * numModules] + scaleOutputs * prod[i][f];
                }
            }
        }
    } else {
        // Note: reversing order of these loops costs 2 registers, but saves time
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            #pragma unroll
            for (int f = 0; f < filtersPerThread; f++) {
                if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
                    targets[i * B_X + f * numImages * numModules] = scaleOutputs * prod[i][f];
                }
            }
        }
    }
}

__device__ inline void filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(int filterSize, int imgSizeX, int imgLoadModPosY, int imgLoadModPosX,
                                                                                         int imgY, int imgX, int& fPidx, int& iPidx) {
    int filterPxY = imgY - imgLoadModPosY;
    int filterPxX = imgX - imgLoadModPosX;
    fPidx = filterPxY * filterSize + filterPxX;
    iPidx = imgY * imgSizeX + imgX; // Pixel index in img
}

/*
 * images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given
 * filters: (numFilterColors, filterPixels, numFilters) if conv
 *          (numModules, numFilterColors, filterPixels, numFilters) otherwise
 *
 * targets: (numFilters, numModulesY, numModulesX, numImages)
 *
 * Note: in git there's a 1.5% faster version of this which uses 167 registers instead of 154...
 * it's basically the same thing, but it doesn't do the next-pixel computation. It just avoids
 * pre-loading when it rolls over to the next pixel.
 */
template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int colorCache, bool scale, bool checkImgBounds>
__global__ void filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4(float* images, float* filters, float* targets,
                                       const int numImages, const int numFilters,
                                       const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
                                       const int moduleStride,
                                       const int numModulesY, const int numModulesX, const int imgStride, const int numImgColors,
                                       const int numGroups,
                                       const float scaleTargets, const float scaleOutputs,
                                       const bool conv/*, const bool noloads*/) {
    __shared__ float shFilters[colorCache][B_Y * filtersPerThread]; // pre-load 1 pixel from B_Y*filtersPerThread filters
    __shared__ float shImages[colorCache][B_X * imgsPerThread]; // pre-load 1 pixel from B_X*imgsPerThread images
    const int imgPixels = imgSizeY * imgSizeX;
    const int filterPixels = filterSize * filterSize;
    const int numFilterColors = numImgColors / numGroups;
    const int blocksPerModule = numFilters / (B_Y*filtersPerThread);
    const int moduleIdx = blockIdx.y / blocksPerModule;
    const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule);
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;

    const int numModules = numModulesX * numModulesY;
    const int blockColorIdx = numFilterColors * blockGroupIdx;
    // Another fun insanity: the % B_X makes things faster, even though threadIdx.x is
    // in the range 0..31. It appears that this allows the compiler to optimize?
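    // One plausible reading of the remark above (an observation, not something verified in
    // this file): for the B_X == 32 instantiations used by the caller, the modulo is a
    // compile-time power of two, so
    //     tx = threadIdx.x % 32;   // lowers to threadIdx.x & 31
    // hands the compiler an explicit 0..31 range for tx, which may help it simplify the
    // shared-memory index arithmetic below.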
const int tx = threadIdx.x % B_X; const int ty = threadIdx.y % B_Y; const int tidx = ty * B_X + threadIdx.x; const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride; const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride; const int shFilterLoadY = tidx / (B_Y * filtersPerThread); const int shFilterLoadX = tidx % (B_Y * filtersPerThread); const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x; images += (blockColorIdx + threadIdx.y) * imgPixels * imgStride + myImgIdx; filters +=blockFilterIdx + shFilterLoadY * numFilters * filterPixels + shFilterLoadX; if (!conv) { filters += moduleIdx * numFilterColors * filterPixels * numFilters; } targets += moduleIdx * numImages + (blockFilterIdx + threadIdx.y * filtersPerThread) * numImages * numModules + myImgIdx; float prod[imgsPerThread][filtersPerThread]; // float fCache[filtersPerThread]; #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] = 0; } } // NOTE: these max/min functions increase register usage as compared to my macros const int imgStartX = max(0, imgLoadModPosX); const int imgStartY = max(0, imgLoadModPosY); const int imgEndX = min(imgLoadModPosX + filterSize, imgSizeX); const int imgEndY = min(imgLoadModPosY + filterSize, imgSizeY); // __shared__ int imgPos[] int fPidx, iPidx; float imPreload[imgsPerThread]; float fPreload[colorCache*filtersPerThread/B_X]; // float fCache[filtersPerThread]; filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgStartY, imgStartX, fPidx, iPidx); #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { imPreload[i] = images[imgStride * iPidx + i * B_X]; } else { imPreload[i] = 0; } } if (/*B_X % filtersPerThread == 0 ||*/ shFilterLoadY < B_X/filtersPerThread) { // This if statement reduces reg usage.. #pragma unroll for (int c = 0; c < colorCache; c += B_X/filtersPerThread) { fPreload[c*filtersPerThread/B_X] = filters[(c * filterPixels + fPidx) * numFilters]; } } for (int imgY = imgStartY; imgY < imgEndY; ++imgY) { // const int filterPxY = imgY - imgLoadModPosY; for (int imgX = imgStartX; imgX < imgEndX; ++imgX) { // const int filterPxX = imgX - imgLoadModPosX; // const int p = filterPxY * filterSize + filterPxX; // const int pixIdx = imgY * imgSizeX + imgX;// Pixel index in img // setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgY, imgX, &p, &pixIdx); // float* m = &images[imgStride * pixIdx]; const bool lastPixel = imgY == imgEndY - 1 && imgX == imgEndX - 1; int imgYNext = imgY; int imgXNext = imgX; int fPidxNext, iPidxNext; if (!lastPixel) { imgYNext = imgY + (imgX + 1 == imgEndX); imgXNext = imgX + 1 == imgEndX ? 
imgStartX : imgX + 1; } filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgYNext, imgXNext, fPidxNext, iPidxNext); for (int oc = 0; oc < numFilterColors; oc += colorCache) { // oc stands for outer color (loop) const float* ff = &filters[numFilters * ((oc + colorCache) * filterPixels + fPidx)]; const float* mm = &images[imgStride * ((oc + colorCache) * imgPixels + iPidx)]; if (oc == numFilterColors - colorCache) { ff = &filters[fPidxNext * numFilters]; mm = &images[iPidxNext * imgStride]; fPidx = fPidxNext; iPidx = iPidxNext; } #pragma unroll for (int c = 0; c < colorCache; c += B_X/filtersPerThread) { shFilters[c + shFilterLoadY][shFilterLoadX] = fPreload[c*filtersPerThread/B_X]; } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { // NOTE: bank conflicts here! shImages[ty][tx * imgsPerThread + i] = imPreload[i]; } imPreload[0] = (checkImgBounds && myImgIdx + 0 * B_X >= numImages) ? 0 : mm[0 * B_X]; imPreload[1] = (checkImgBounds && myImgIdx + 1 * B_X >= numImages) ? 0 : mm[1 * B_X]; imPreload[2] = (checkImgBounds && myImgIdx + 2 * B_X >= numImages) ? 0 : mm[2 * B_X]; __syncthreads(); #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[0][threadIdx.x * imgsPerThread + i] * shFilters[0][threadIdx.y * filtersPerThread + f]; } } fPreload[0] = ff[0]; #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[1][threadIdx.x * imgsPerThread + i] * shFilters[1][threadIdx.y * filtersPerThread + f]; } } fPreload[1] = ff[(B_X/filtersPerThread * filterPixels) * numFilters]; #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[2][threadIdx.x * imgsPerThread + i] * shFilters[2][threadIdx.y * filtersPerThread + f]; } } imPreload[3] = (checkImgBounds && myImgIdx + 3 * B_X >= numImages) ? 
0 : mm[3 * B_X]; #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[3][threadIdx.x * imgsPerThread + i] * shFilters[3][threadIdx.y * filtersPerThread + f]; } } __syncthreads(); } } } if (scale) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { targets[i * B_X + f * numImages * numModules] = scaleTargets * targets[i * B_X + f * numImages * numModules] + scaleOutputs * prod[i][f]; } } } } else { // Note: reversing order of these loops saves 2 registers, but costs time #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { targets[i * B_X + f * numImages * numModules] = scaleOutputs * prod[i][f]; } } } } } /* * images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModules, numFilterColors, filterPixels, numFilters) otherwise * * targets: (numFilters, numModulesY, numModulesX, numImages) * */ template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int colorCache, bool scale, bool checkImgBounds> __global__ void filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex(hipTextureObject_t images, hipTextureObject_t filters, float* targets, const int numImages, const int numFilters, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int numModulesY, const int numModulesX, const int imgStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs, const bool conv/*, const bool noloads*/) { __shared__ float shFilters[colorCache][B_Y * filtersPerThread]; // pre-load 1 pixel from B_Y*filtersPerThread filters __shared__ float shImages[colorCache][B_X * imgsPerThread]; // pre-load 1 pixel from B_X*imgsPerThread images const int imgPixels = imgSizeY * imgSizeX; const int filterPixels = filterSize * filterSize; const int numFilterColors = numImgColors / numGroups; const int blocksPerModule = numFilters / (B_Y*filtersPerThread); const int moduleIdx = blockIdx.y / blocksPerModule; const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule); const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numModules = numModulesX * numModulesY; const int blockColorIdx = numFilterColors * blockGroupIdx; // Another fun insanity: the % B_X makes things faster, even thought threadIdx.x is // in the range 0..31. It appears that this allows the compiler to optimize? const int tx = threadIdx.x % B_X; const int ty = threadIdx.y % B_Y; const int tidx = ty * B_X + threadIdx.x; const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride; const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride; const int shFilterLoadY = tidx / (B_Y * filtersPerThread); const int shFilterLoadX = tidx % (B_Y * filtersPerThread); const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x; const int imgOffset = (blockColorIdx + threadIdx.y) * imgPixels * imgStride + myImgIdx; // images += (blockColorIdx + threadIdx.y) * imgPixels * imgStride + myImgIdx; const int filterOffset = blockFilterIdx + shFilterLoadY * numFilters * filterPixels + shFilterLoadX + (conv ? 
0 : moduleIdx * numFilterColors * filterPixels * numFilters); // filters +=blockFilterIdx // + shFilterLoadY * numFilters * filterPixels + shFilterLoadX; // if (!conv) { // filters += moduleIdx * numFilterColors * filterPixels * numFilters; // } targets += moduleIdx * numImages + (blockFilterIdx + threadIdx.y * filtersPerThread) * numImages * numModules + myImgIdx; float prod[imgsPerThread][filtersPerThread]; // float fCache[filtersPerThread]; #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] = 0; } } // NOTE: these max/min functions increase register usage as compared to my macros const int imgStartX = max(0, imgLoadModPosX); const int imgStartY = max(0, imgLoadModPosY); const int imgEndX = min(imgLoadModPosX + filterSize, imgSizeX); const int imgEndY = min(imgLoadModPosY + filterSize, imgSizeY); // __shared__ int imgPos[] int fPidx, iPidx; float imPreload[imgsPerThread]; // [4] float fPreload[colorCache*filtersPerThread/B_X]; // [2] // float fCache[filtersPerThread]; filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgStartY, imgStartX, fPidx, iPidx); #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { imPreload[i] = tex1Dfetch<float>(images, imgOffset + imgStride * iPidx + i * B_X); } else { imPreload[i] = 0; } } if (/*B_X % filtersPerThread == 0 ||*/ shFilterLoadY < B_X/filtersPerThread) { // This if statement reduces reg usage.. #pragma unroll for (int c = 0; c < colorCache; c += B_X/filtersPerThread) { fPreload[c*filtersPerThread/B_X] = tex1Dfetch<float>(filters, filterOffset + (c * filterPixels + fPidx) * numFilters); } } for (int imgY = imgStartY; imgY < imgEndY; ++imgY) { // const int filterPxY = imgY - imgLoadModPosY; for (int imgX = imgStartX; imgX < imgEndX; ++imgX) { // const int filterPxX = imgX - imgLoadModPosX; // const int p = filterPxY * filterSize + filterPxX; // const int pixIdx = imgY * imgSizeX + imgX;// Pixel index in img // setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgY, imgX, &p, &pixIdx); // float* m = &images[imgStride * pixIdx]; const bool lastPixel = imgY == imgEndY - 1 && imgX == imgEndX - 1; int imgYNext = imgY; int imgXNext = imgX; int fPidxNext, iPidxNext; if (!lastPixel) { imgYNext = imgY + (imgX + 1 == imgEndX); imgXNext = imgX + 1 == imgEndX ? imgStartX : imgX + 1; } filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgYNext, imgXNext, fPidxNext, iPidxNext); for (int oc = 0; oc < numFilterColors; oc += colorCache) { // oc stands for outer color (loop) // const float* ff = &filters[numFilters * ((oc + colorCache) * filterPixels + fPidx)]; // const float* mm = &images[imgStride * ((oc + colorCache) * imgPixels + iPidx)]; int imgOffset2 = imgOffset + imgStride * ((oc + colorCache) * imgPixels + iPidx); int filterOffset2 = filterOffset + numFilters * ((oc + colorCache) * filterPixels + fPidx); if (oc == numFilterColors - colorCache) { filterOffset2 = filterOffset + fPidxNext * numFilters; imgOffset2 = imgOffset + iPidxNext * imgStride; fPidx = fPidxNext; iPidx = iPidxNext; } #pragma unroll for (int c = 0; c < colorCache; c += B_X/filtersPerThread) { shFilters[c + shFilterLoadY][shFilterLoadX] = fPreload[c*filtersPerThread/B_X]; } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { // NOTE: bank conflicts here! 
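                // (Spelling out the note above, assuming 32 four-byte shared-memory banks and the
                //  imgsPerThread == 4 instantiation used by the caller: for a fixed i, thread tx writes
                //      shImages[ty][tx * 4 + i];   // bank = (tx * 4 + i) % 32
                //  so threads tx and tx + 8 of a warp hit the same bank, i.e. a 4-way conflict per store.)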
shImages[ty][tx * imgsPerThread + i] = imPreload[i]; } imPreload[0] = (checkImgBounds && myImgIdx + 0 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 0 * B_X); imPreload[1] = (checkImgBounds && myImgIdx + 1 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 1 * B_X); imPreload[2] = (checkImgBounds && myImgIdx + 2 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 2 * B_X); __syncthreads(); #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[0][threadIdx.x * imgsPerThread + i] * shFilters[0][threadIdx.y * filtersPerThread + f]; } } fPreload[0] = tex1Dfetch<float>(filters, filterOffset2 + 0); #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[1][threadIdx.x * imgsPerThread + i] * shFilters[1][threadIdx.y * filtersPerThread + f]; } } fPreload[1] = tex1Dfetch<float>(filters, filterOffset2 + (B_X/filtersPerThread * filterPixels) * numFilters); #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[2][threadIdx.x * imgsPerThread + i] * shFilters[2][threadIdx.y * filtersPerThread + f]; } } imPreload[3] = (checkImgBounds && myImgIdx + 3 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 3 * B_X); #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[3][threadIdx.x * imgsPerThread + i] * shFilters[3][threadIdx.y * filtersPerThread + f]; } } __syncthreads(); } } } if (scale) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { targets[i * B_X + f * numImages * numModules] = scaleTargets * targets[i * B_X + f * numImages * numModules] + scaleOutputs * prod[i][f]; } } } } else { // Note: reversing order of these loops saves 2 registers, but costs time #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { targets[i * B_X + f * numImages * numModules] = scaleOutputs * prod[i][f]; } } } } } /* * Block size B_YxB_X. Each block applies B_Y * filtersPerThread filters to B_X * imgsPerThread images. * threadIdx.x determines image * threadIdx.y determines filter * * blockIdx.x determines image batch of B_X * imgsPerThread * blockIdx.y determines filter batch of module and B_Y * filtersPerThread * * images: (numColors, imgSizeY, imgSizeX, numImages) with stride given * filters: (numColors, filterPixels, numFilters) if conv * (numModules, numColors, filterPixels, numFilters) otherwise * * targets: (numFilters, numModulesY, numModulesX, numImages) * * * Number of filters per module should be divisible by B_Y * filtersPerThread * checkImgBounds indicates whether number of images is divisible by B_X * imgsPerThread * * The imgSize here is the size of the actual image without the padding. 
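 *
 * A small sizing example (the numbers are assumptions chosen for illustration only):
 * with numFilters = 64, B_Y = 4, filtersPerThread = 8, B_X = 32, imgsPerThread = 4 and
 * numImages = 128,
 *     blocksPerModule = numFilters / (B_Y * filtersPerThread) = 64 / 32 = 2
 *     gridDim.y       = numModules * blocksPerModule
 *     gridDim.x       = DIVUP(numImages, B_X * imgsPerThread) = DIVUP(128, 128) = 1
 * so blockIdx.y jointly encodes (module, filter batch) and blockIdx.x selects the image batch,
 * as described above.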
* */ template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int numColors, int pixelCache, bool scale, bool checkImgBounds> __global__ void filterActs_YxX_color(float* images, float* filters, float* targets, const int numImages, const int numFilters, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int numModulesY, const int numModulesX, const int imgStride, const float scaleTargets, const float scaleOutputs, const bool conv) { __shared__ float shFilters[pixelCache*numColors][B_Y * filtersPerThread]; // pre-load pixelCache pixels from B_Y*filtersPerThread filters __shared__ float shImages[pixelCache*numColors][B_X * imgsPerThread]; // pre-load pixelCache pixels from B_X*imgsPerThread images const int imgPixels = imgSizeY * imgSizeX; const int filterPixels = filterSize * filterSize; const int blocksPerModule = numFilters / (B_Y*filtersPerThread); const int moduleIdx = blockIdx.y / blocksPerModule; const int blockFilterIdx = blockIdx.y % blocksPerModule; const int tidx = threadIdx.y * B_X + threadIdx.x; const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride; const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride; const int numModules = numModulesY * numModulesX; const int shFilterLoadY = tidx / (B_Y * filtersPerThread); const int shFilterLoadX = tidx % (B_Y * filtersPerThread); const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x; images += myImgIdx; filters += filtersPerThread * B_Y * blockFilterIdx + shFilterLoadY * numFilters + shFilterLoadX; if (!conv) { filters += moduleIdx * numColors * filterPixels * numFilters; } targets += moduleIdx * numImages + (blockFilterIdx * B_Y * filtersPerThread + threadIdx.y*filtersPerThread) * numImages * numModulesY * numModulesX + myImgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for(int f = 0; f < filtersPerThread; f++) { #pragma unroll for(int g = 0; g < imgsPerThread; g++) { prod[f][g] = 0; } } //float* shImgLoad = &shImages[0][threadIdx.x]; for (int p = 0; p < filterPixels; p += pixelCache) { /* * Load pixelCache pixels from B_Y*filtersPerThread filters * This condition covers the case when B_X is not divisible by filtersPerThread. * In this case, not all of the threads will participate in the loading operation. * This ensures that in each loop iteration, an integer number of rows of shFilters * are filled, which makes indexing simple. */ if (B_X % filtersPerThread == 0 || shFilterLoadY < B_X/filtersPerThread) { #pragma unroll for (int p2 = 0; p2 < pixelCache; p2 += B_X/filtersPerThread) { const bool omit = pixelCache % (B_X / filtersPerThread) == 0; const int preloadPx = shFilterLoadY + p2; if (omit || preloadPx < pixelCache) { if (p + preloadPx < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { shFilters[shFilterLoadY + p2 + c * pixelCache][shFilterLoadX] = filters[(c * filterPixels + p + p2) * numFilters]; } } else { #pragma unroll for (int c = 0; c < numColors; c++) { shFilters[shFilterLoadY + p2 + c * pixelCache][shFilterLoadX] = 0; } } } } } /* * Load pixelCache pixels from B_X*imgsPerThread images. */ #pragma unroll for (int ly = 0; ly < pixelCache; ly += B_Y) { const int preloadPx = ly + threadIdx.y; const int pixIdx = p + preloadPx; const bool omit = pixelCache % B_Y == 0; // Compile-time condition /* * Don't load any image pixels corresponding to filter pixels that don't exist. 
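 * (Restating the comment above rather than adding behaviour: the branch below maps the
 *  filter pixel pixIdx back to padded image coordinates as
 *      x = imgLoadModPosX + pixIdx % filterSize;
 *      y = imgLoadModPosY + pixIdx / filterSize;
 *  and writes zeros into shImages whenever (x, y) lands in the padding region.)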
*/ if (pixIdx < filterPixels && (omit || preloadPx < pixelCache)) { const int x = imgLoadModPosX + pixIdx % filterSize; const int y = imgLoadModPosY + pixIdx / filterSize; if (y >= 0 && y < imgSizeY && x >= 0 && x < imgSizeX) { float* m = &images[imgStride * (y * imgSizeX + x)]; #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { shImages[preloadPx + c * pixelCache][threadIdx.x * imgsPerThread + i] = m[c * imgStride * imgPixels + i * B_X]; } else { shImages[preloadPx + c * pixelCache][threadIdx.x * imgsPerThread + i] = 0; } } } } else { // Padding #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[preloadPx + c * pixelCache][threadIdx.x * imgsPerThread + i] = 0; } } } } } __syncthreads(); #pragma unroll for (int i = 0; i < pixelCache*numColors; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { #pragma unroll for(int g = 0; g < imgsPerThread; g++) { prod[f][g] += shImages[i][g + threadIdx.x * imgsPerThread] * shFilters[i][threadIdx.y * filtersPerThread + f]; } } } __syncthreads(); } if (scale) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int g = 0; g < imgsPerThread; g++) { if (!checkImgBounds || myImgIdx + g * B_X < numImages) { targets[g * B_X + f * numImages * numModules] = scaleTargets * targets[g * B_X + f * numImages * numModules] + scaleOutputs * prod[f][g]; } } } } else { #pragma unroll for (int g = 0; g < imgsPerThread; g++) { if (!checkImgBounds || myImgIdx + g * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[g * B_X + f * numImages * numModules] = scaleOutputs * prod[f][g]; } } } } } /* * Block size B_YxB_X. Each block applies B_Y * filtersPerThread filters to B_X * imgsPerThread images. * threadIdx.x determines image * threadIdx.y determines filter * * blockIdx.x determines image batch of B_X * imgsPerThread * blockIdx.y determines filter batch of B_Y * filtersPerThread * * images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModules, numFilterColors, filterPixels, numFilters) otherwise * * targets: (numFilters, numModulesY, numModulesX, numImages) * * B_Y one of 4, 8, 16 * B_X one of 16, 32 * imgsPerThread one of 1, 2, 4 * filtersPerThread one of 1, 2, 4, 8 * colorCache: how many colors to put into shmem * * numFilters should be divisible by B_Y * filtersPerThread * numImages be divisible by B_X * imgsPerThread * numFilterColors should be divisible by colorCache. * numImgColors must be even. * numFilters must be divisible by numGroups. * no restrictions on pixelCache * The imgSize here is the size of the actual image without the padding. * As always, try to make B_X * imgsPerThread == B_Y * filtersPerThread for maximum efficiency. 
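 *
 * For instance (an illustrative parameter choice, not a recommendation made by this file):
 * B_Y = 4, B_X = 32, imgsPerThread = 1, filtersPerThread = 8, colorCache = 4 satisfies the
 * constraints above whenever numFilterColors is a multiple of 4, and gives
 *     B_X * imgsPerThread = 32 = B_Y * filtersPerThread,
 * the balance suggested in the last line.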
* */ template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int colorCache, bool scale, bool checkImgBounds> __global__ void filterActs_YxX_sparse2(float* images, float* filters, float* targets, const int numImages, const int numFilters, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int numModulesY, const int numModulesX, const int imgStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs, const bool conv) { __shared__ float shFilters[colorCache][B_Y * filtersPerThread]; // pre-load 1 pixel from B_Y*filtersPerThread filters __shared__ float shImages[colorCache][B_X * imgsPerThread]; // pre-load 1 pixel from B_X*imgsPerThread images const int imgPixels = imgSizeY * imgSizeX; const int filterPixels = filterSize * filterSize; const int numFilterColors = numImgColors / numGroups; const int blocksPerModule = numFilters / (B_Y*filtersPerThread); const int moduleIdx = blockIdx.y / blocksPerModule; const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule); const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numModules = numModulesX * numModulesY; const int blockColorIdx = numFilterColors * blockGroupIdx; const int tidx = threadIdx.y * B_X + threadIdx.x; const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride; const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride; const int shFilterLoadY = tidx / (B_Y * filtersPerThread); const int shFilterLoadX = tidx % (B_Y * filtersPerThread); const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x; images += (blockColorIdx + threadIdx.y) * imgPixels * imgStride + myImgIdx; filters +=blockFilterIdx + shFilterLoadY * numFilters * filterPixels + shFilterLoadX; if (!conv) { filters += moduleIdx * numFilterColors * filterPixels * numFilters; } targets += moduleIdx * numImages + (blockFilterIdx + threadIdx.y) * numImages * numModules + myImgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for(int f = 0; f < filtersPerThread; f++) { #pragma unroll for(int g = 0; g < imgsPerThread; g++) { prod[f][g] = 0; } } const int imgStartX = MAX(0, imgLoadModPosX); const int imgStartY = MAX(0, imgLoadModPosY); const int imgEndX = MIN(imgLoadModPosX + filterSize, imgSizeX); const int imgEndY = MIN(imgLoadModPosY + filterSize, imgSizeY); // __shared__ int imgPos[] for (int imgY = imgStartY; imgY < imgEndY; ++imgY) { const int filterPxY = imgY - imgLoadModPosY; for (int imgX = imgStartX; imgX < imgEndX; ++imgX) { const int filterPxX = imgX - imgLoadModPosX; const int p = filterPxY * filterSize + filterPxX; for (int oc = 0; oc < numFilterColors; oc += colorCache) { // oc stands for outer color (loop) /* * Load a pixel from B_Y*filtersPerThread filters * This condition covers the case when B_X is not divisible by filtersPerThread. * In this case, not all of the threads will participate in the loading operation. * This ensures that in each loop iteration, an integer number of rows of shFilters * are filled, which makes indexing simple. * nvcc is behaving in a completely insane way: removing this condition under * template parameters that guarantee it to be true actually slows down * the computation. 
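 * (Concretely, and only as an observation about the instantiations used by the caller
 *  below: filtersPerThread always divides B_X there, so
 *      shFilterLoadY = tidx / (B_Y * filtersPerThread) < B_X / filtersPerThread
 *  holds for every thread and the guard is logically a no-op; it survives purely for the
 *  code-generation effect described above.)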
* */ if (/*B_X % filtersPerThread == 0 ||*/ shFilterLoadY < B_X/filtersPerThread) { #pragma unroll for (int c = 0; c < colorCache; c += B_X/filtersPerThread) { if (colorCache % (B_X/filtersPerThread) == 0 || c + shFilterLoadY < colorCache) { shFilters[c + shFilterLoadY][shFilterLoadX] = filters[((oc+c) * filterPixels + p) * numFilters]; } } } /* * Load a pixel from B_X*imgsPerThread images. */ const int pixIdx = imgY * imgSizeX + imgX;// Pixel index in img float* m = &images[imgStride * (oc * imgPixels + pixIdx)]; #pragma unroll for (int c = 0; c < colorCache; c += B_Y) { if (colorCache % B_Y == 0 || threadIdx.y + c < colorCache) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { shImages[c + threadIdx.y][threadIdx.x + i * B_X] = m[c * imgStride * imgPixels + i * B_X]; } else { shImages[c + threadIdx.y][threadIdx.x + i * B_X] = 0; } } } } __syncthreads(); for (int c = 0; c < colorCache; c++) { #pragma unroll for(int g = 0; g < imgsPerThread; g++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[f][g] += shImages[c][g * B_X + threadIdx.x] * shFilters[c][threadIdx.y + f * B_Y]; } } } __syncthreads(); } } } if (scale) { #pragma unroll for (int g = 0; g < imgsPerThread; g++) { if (!checkImgBounds || myImgIdx + g * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[g * B_X + f * B_Y * numImages * numModules] = scaleTargets * targets[g * B_X + f * B_Y * numImages * numModules] + scaleOutputs * prod[f][g]; } } } } else { // Note: reversing order of these loops saves 2 registers, but costs time #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int g = 0; g < imgsPerThread; g++) { if (!checkImgBounds || myImgIdx + g * B_X < numImages) { targets[g * B_X + f * B_Y * numImages * numModules] = scaleOutputs * prod[f][g]; } } } } } /* * images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModules, numFilterColors, filterPixels, numFilters) otherwise * * targets: (numFilters, numModules, numImages) * * Note: all of these convolution routines are optimized for the case when * the number of images (i.e. the minibatch size) is a multiple of 128. * Other batch sizes will work, but but I made no attempt whatsoever * to make them work fast. */ void _filterActs(THCState* state, THCudaTensor* images, THCudaTensor* filters, THCudaTensor* targets, int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride, int numImgColors, int numGroups, float scaleTargets, float scaleOutput, bool conv) { int numFilterColors = numImgColors / numGroups; int numFilters = filters->size[1]; int numModules = numModulesY * numModulesX; int numImages = images->size[1]; int imgPixels = images->size[0]/numImgColors; int imgSizeX = imgPixels / imgSizeY; int filterModuleMult = conv ? 
1 : numModules; THAssert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 4 == 0))); THAssert(numGroups == 1 || numFilterColors % 4 == 0); THAssert(numFilters % (16 * numGroups) == 0); THAssert(numImgColors % numGroups == 0); //images.printShape("images"); // printf("rows: %d, pixels: %d, colors: %d\n", images->size[0], imgPixels, numImgColors); //images.printShape("images"); THAssert(images->size[0] == imgPixels * numImgColors); THAssert(imgSizeY * imgSizeX == imgPixels); int numFiltersPerGroup = numFilters / numGroups; int imgStride = images->stride[0]; // images does not need to be a contiguous matrix int filterPixels = filters->size[0] / (filterModuleMult * numFilterColors); int filterSize = int(sqrt(filterPixels)); THAssert(filterSize * filterSize == filterPixels); THAssert(filters->size[0] == filterModuleMult * numFilterColors * filterPixels); // These routines don't handle the case when only part of the image is visited in the convolution THAssert(paddingStart <= 0); THAssert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX); THAssert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY); THAssert(moduleStride <= filterSize); THAssert(THCudaTensor_isContiguous(state, filters)); THAssert(THCudaTensor_isContiguous(state, targets)); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1; int filtersPerThread, threadsY = 4; if (numImgColors <= 3) { // Special kernels written for colors = 3, filters = 64 and colors = 3, filters = 48 cases. // The remaining cases use the old routines. // TODO: Modernize the remaining cases if you care about them. filtersPerThread = numFiltersPerGroup % 64 == 0 ? 16 : numFiltersPerGroup % 48 == 0 ? 12 : numFiltersPerGroup % 32 == 0 ? 8 : 4; } else { filtersPerThread = numFiltersPerGroup % 64 == 0 ? 16 : numFiltersPerGroup % 32 == 0 ? 8 : 4; threadsY = numFiltersPerGroup % 128 == 0 && numFilterColors % 8 == 0 && imgsPerThread != 4 ? 8 : 4; } int threadsX = 32; dim3 threads(threadsX, threadsY); dim3 blocks = dim3(DIVUP(numImages, threads.x * imgsPerThread), (numModules * numFilters) / (threads.y * filtersPerThread)); bool checkImgBounds = numImages % (threads.x*imgsPerThread) != 0; bool scale = scaleTargets != 0; /* printf("numFilterColors: %d\n" "numFilters: %d\n" "numModules: %d\n" "numImages: %d\n" "imgPixels: %d\n" "imgSizeX: %d\n" "filterModuleMult: %d\n" "imgStride: %d\n" "filterPixels: %d\n" "filterSize: %d\n" "checkImgBounds: %d\n" "scale: %d\n", numFilterColors, numFilters, numModules, numImages, imgPixels, imgSizeX, filterModuleMult, imgStride, filterPixels, filterSize, checkImgBounds, scale); */ if (scaleTargets == 0) { THCudaTensor_resize2d(state, targets, numFilters * numModules, numImages); } else { THAssert(targets->size[0] == numFilters * numModules); THAssert(targets->size[1] == numImages); } // Auto-generated calling code... // NOTE: The calling code is set up such that if checkImgBounds is true, then imgsPerThread = 1. // In principle it doesn't have to be this way, and you may want to optimize for that case. 
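    // A condensed, descriptive summary of the generated dispatch below; the if/else chain
    // itself is authoritative and this comment adds no new behaviour:
    //
    //     scale            = (scaleTargets != 0)
    //     checkImgBounds   = (numImages % (B_X * imgsPerThread) != 0)
    //     color path       : numFilterColors % 8 == 0, % 4 == 0, == 3, == 2, ...
    //     imgsPerThread    : 4 / 2 / 1           (numImages % 128 / % 64 / % 32)
    //     filtersPerThread : 16 / 12 / 8 / 4     (numFiltersPerGroup % 64 / % 48 / % 32 / else;
    //                                             12 only on the numFilterColors <= 3 paths)
    //
    // The texture-object kernels are used on some of the fastest paths, e.g. gated on
    // THCudaTensor_nElement(state, images) * 4 < TEXTURE_SIZE_MAX in the
    // numFilterColors % 8 == 0 branch.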
if (scale == false) { if (checkImgBounds == false) { if (numFilterColors % 8 == 0) { if (numImages % 128 == 0) { if (numFiltersPerGroup % 128 == 0) { if (THCudaTensor_nElement(state, images) * 4 < TEXTURE_SIZE_MAX) { hipTextureObject_t texImages = THCudaTensor_getTextureObject(state, images); hipTextureObject_t texFilters = THCudaTensor_getTextureObject(state, filters); hipFuncSetCacheConfig(filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, false, false >, hipFuncCachePreferL1); hipLaunchKernelGGL(( filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, texImages, texFilters, THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); checkCudaErrors(hipDestroyTextureObject(texImages)); checkCudaErrors(hipDestroyTextureObject(texFilters)); } else { hipFuncSetCacheConfig(filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4 < 4, 32, 4, 16, 4, false, false >, hipFuncCachePreferL1); hipLaunchKernelGGL(( filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4 < 4, 32, 4, 16, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } else if (numFiltersPerGroup % 64 == 0) { if (THCudaTensor_nElement(state, images) * 4 < TEXTURE_SIZE_MAX) { hipTextureObject_t texImages = THCudaTensor_getTextureObject(state, images); hipTextureObject_t texFilters = THCudaTensor_getTextureObject(state, filters); hipFuncSetCacheConfig(filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, false, false >, hipFuncCachePreferL1); hipLaunchKernelGGL(( filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, texImages, texFilters, THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); checkCudaErrors(hipDestroyTextureObject(texImages)); checkCudaErrors(hipDestroyTextureObject(texFilters)); } else { hipFuncSetCacheConfig(filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4 < 4, 32, 4, 16, 4, false, false >, hipFuncCachePreferL1); hipLaunchKernelGGL(( filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4 < 4, 32, 4, 16, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 8, 8, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 4, 8, 8, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, 
numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 4, 8, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 4, 4, 8, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } else if (numImages % 64 == 0) { if (numFiltersPerGroup % 128 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 8, 32, 2, 16, 8, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 8, 32, 2, 16, 8, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 16, 8, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 2, 16, 8, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 8, 8, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 2, 8, 8, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 4, 8, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 2, 4, 8, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } else if (numImages % 32 == 0) { if (numFiltersPerGroup % 128 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 8, 32, 1, 16, 8, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 8, 32, 1, 16, 8, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 8, false, false >, hipFuncCachePreferShared); 
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 16, 8, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 8, 8, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 8, 8, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 4, 8, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 4, 8, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } } else if (numFilterColors % 4 == 0) { if (numImages % 128 == 0) { if (numFiltersPerGroup % 128 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 16, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 4, 16, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 16, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 4, 16, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 8, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 4, 8, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 4, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 4, 4, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), 
THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } else if (numImages % 64 == 0) { if (numFiltersPerGroup % 128 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 16, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 2, 16, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 16, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 2, 16, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 8, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 2, 8, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 4, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 2, 4, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } else if (numImages % 32 == 0) { if (numFiltersPerGroup % 128 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, 
scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 8, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 8, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 4, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 4, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } } else if (numFilterColors == 3) { if (numImages % 128 == 0) { if (numFiltersPerGroup % 64 == 0) { hipTextureObject_t texImages = THCudaTensor_getTextureObject(state, images); hipTextureObject_t texFilters = THCudaTensor_getTextureObject(state, filters); hipFuncSetCacheConfig(filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_16_px_4_cc_3_tex < 4, 32, 4, 16, 3, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_16_px_4_cc_3_tex < 4, 32, 4, 16, 3, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, texImages, texFilters, THCudaTensor_data(state, targets),numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); checkCudaErrors(hipDestroyTextureObject(texImages)); checkCudaErrors(hipDestroyTextureObject(texFilters)); } else if (numFiltersPerGroup % 48 == 0) { hipTextureObject_t texImages = THCudaTensor_getTextureObject(state, images); hipTextureObject_t texFilters = THCudaTensor_getTextureObject(state, filters); hipFuncSetCacheConfig(filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_12_px_4_cc_3_tex < 4, 32, 4, 12, 3, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_12_px_4_cc_3_tex < 4, 32, 4, 12, 3, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, texImages, texFilters, THCudaTensor_data(state, targets),numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); checkCudaErrors(hipDestroyTextureObject(texImages)); checkCudaErrors(hipDestroyTextureObject(texFilters)); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 8, 3, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 3, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 4, 3, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 
3, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else if (numImages % 64 == 0) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 16, 3, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 16, 3, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 12, 3, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 12, 3, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 8, 3, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 3, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 4, 3, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 3, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else if (numImages % 32 == 0) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 3, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 16, 3, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 3, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 12, 3, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if 
(numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 3, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 3, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 3, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 3, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 16, 2, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 16, 2, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 12, 2, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 12, 2, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 8, 2, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 2, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 4, 2, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 2, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else if (numImages % 64 == 0) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 16, 2, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 16, 2, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, 
filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 12, 2, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 12, 2, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 8, 2, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 2, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 4, 2, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 2, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else if (numImages % 32 == 0) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 2, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 16, 2, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 2, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 12, 2, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 2, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 2, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 2, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( 
filterActs_YxX_color < 4, 32, 1, 4, 2, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } else if (numFilterColors == 1) { if (numImages % 128 == 0) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 16, 1, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 16, 1, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 12, 1, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 12, 1, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 8, 1, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 1, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 4, 1, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 1, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else if (numImages % 64 == 0) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 16, 1, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 16, 1, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 12, 1, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 12, 1, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, 
imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 8, 1, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 1, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 4, 1, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 1, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else if (numImages % 32 == 0) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 1, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 16, 1, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 1, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 12, 1, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 1, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 1, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 1, 4, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 1, 4, false, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } } else if (checkImgBounds == true) { if (numFilterColors % 8 == 0) { if (numImages % 1 == 0) { if (numFiltersPerGroup % 128 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 8, 32, 1, 16, 8, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 8, 32, 1, 16, 8, false, true >) , 
dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 8, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 16, 8, false, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 8, 8, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 8, 8, false, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 4, 8, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 4, 8, false, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } } else if (numFilterColors % 4 == 0) { if (numImages % 1 == 0) { if (numFiltersPerGroup % 128 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, false, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, false, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 8, 4, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 8, 4, false, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, 
moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 4, 4, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 4, 4, false, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } } else if (numFilterColors == 3) { if (numImages % 1 == 0) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 3, 4, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 16, 3, 4, false, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 3, 4, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 12, 3, 4, false, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 3, 4, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 3, 4, false, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 3, 4, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 3, 4, false, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } else if (numFilterColors == 2) { if (numImages % 1 == 0) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 2, 4, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 16, 2, 4, false, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 2, 4, false, true >, hipFuncCachePreferShared); 
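// Bounds-checked (checkImgBounds == true), non-scaled path, numFilterColors == 2, numFiltersPerGroup % 48 case: the launch below uses the same filterActs_YxX_color < 4, 32, 1, 12, 2, 4, false, true > instantiation that was just passed to hipFuncSetCacheConfig, as every branch in this dispatch tree does.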
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 12, 2, 4, false, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 2, 4, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 2, 4, false, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 2, 4, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 2, 4, false, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } else if (numFilterColors == 1) { if (numImages % 1 == 0) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 1, 4, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 16, 1, 4, false, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 1, 4, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 12, 1, 4, false, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 1, 4, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 1, 4, false, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 1, 4, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 1, 4, false, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, 
scaleOutput, conv); } } } } } else if (scale == true) { if (checkImgBounds == false) { if (numFilterColors % 8 == 0) { if (numImages % 128 == 0) { if (numFiltersPerGroup % 128 == 0) { if (THCudaTensor_nElement(state, images) * 4 < TEXTURE_SIZE_MAX) { hipTextureObject_t texImages = THCudaTensor_getTextureObject(state, images); hipTextureObject_t texFilters = THCudaTensor_getTextureObject(state, filters); hipFuncSetCacheConfig(filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, true, false >, hipFuncCachePreferL1); hipLaunchKernelGGL(( filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, texImages, texFilters, THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); checkCudaErrors(hipDestroyTextureObject(texImages)); checkCudaErrors(hipDestroyTextureObject(texFilters)); } else { hipFuncSetCacheConfig(filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4 < 4, 32, 4, 16, 4, true, false >, hipFuncCachePreferL1); hipLaunchKernelGGL(( filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4 < 4, 32, 4, 16, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } else if (numFiltersPerGroup % 64 == 0) { if (THCudaTensor_nElement(state, images) * 4 < TEXTURE_SIZE_MAX) { hipTextureObject_t texImages = THCudaTensor_getTextureObject(state, images); hipTextureObject_t texFilters = THCudaTensor_getTextureObject(state, filters); hipFuncSetCacheConfig(filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, true, false >, hipFuncCachePreferL1); hipLaunchKernelGGL(( filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, texImages, texFilters, THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); checkCudaErrors(hipDestroyTextureObject(texImages)); checkCudaErrors(hipDestroyTextureObject(texFilters)); } else { hipFuncSetCacheConfig(filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4 < 4, 32, 4, 16, 4, true, false >, hipFuncCachePreferL1); hipLaunchKernelGGL(( filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4 < 4, 32, 4, 16, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 8, 8, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 4, 8, 8, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, 
numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 4, 8, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 4, 4, 8, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } else if (numImages % 64 == 0) { if (numFiltersPerGroup % 128 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 8, 32, 2, 16, 8, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 8, 32, 2, 16, 8, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 16, 8, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 2, 16, 8, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 8, 8, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 2, 8, 8, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 4, 8, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 2, 4, 8, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } else if (numImages % 32 == 0) { if (numFiltersPerGroup % 128 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 8, 32, 1, 16, 8, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 8, 32, 1, 16, 8, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 8, true, false >, 
hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 16, 8, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 8, 8, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 8, 8, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 4, 8, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 4, 8, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } } else if (numFilterColors % 4 == 0) { if (numImages % 128 == 0) { if (numFiltersPerGroup % 128 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 16, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 4, 16, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 16, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 4, 16, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 8, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 4, 8, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 4, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 4, 4, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), 
THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } else if (numImages % 64 == 0) { if (numFiltersPerGroup % 128 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 16, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 2, 16, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 16, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 2, 16, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 8, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 2, 8, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 4, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 2, 4, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } else if (numImages % 32 == 0) { if (numFiltersPerGroup % 128 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } 
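// Remaining filter-count cases for the scaled (scale == true), unchecked-bounds path with numFilterColors % 4 == 0 and numImages % 32 == 0: numFiltersPerGroup % 32, then the % 1 fallback.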
else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 8, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 8, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 4, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 4, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } } else if (numFilterColors == 3) { if (numImages % 128 == 0) { if (numFiltersPerGroup % 64 == 0) { hipTextureObject_t texImages = THCudaTensor_getTextureObject(state, images); hipTextureObject_t texFilters = THCudaTensor_getTextureObject(state, filters); hipFuncSetCacheConfig(filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_16_px_4_cc_3_tex < 4, 32, 4, 16, 3, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_16_px_4_cc_3_tex < 4, 32, 4, 16, 3, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, texImages, texFilters, THCudaTensor_data(state, targets),numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); checkCudaErrors(hipDestroyTextureObject(texImages)); checkCudaErrors(hipDestroyTextureObject(texFilters)); } else if (numFiltersPerGroup % 48 == 0) { hipTextureObject_t texImages = THCudaTensor_getTextureObject(state, images); hipTextureObject_t texFilters = THCudaTensor_getTextureObject(state, filters); hipFuncSetCacheConfig(filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_12_px_4_cc_3_tex < 4, 32, 4, 12, 3, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_12_px_4_cc_3_tex < 4, 32, 4, 12, 3, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, texImages, texFilters, THCudaTensor_data(state, targets),numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); checkCudaErrors(hipDestroyTextureObject(texImages)); checkCudaErrors(hipDestroyTextureObject(texFilters)); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 8, 3, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 3, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 4, 3, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 3, 4, true, false >) , 
dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else if (numImages % 64 == 0) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 16, 3, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 16, 3, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 12, 3, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 12, 3, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 8, 3, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 3, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 4, 3, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 3, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else if (numImages % 32 == 0) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 3, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 16, 3, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 3, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 12, 3, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { 
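// scale == true, numFilterColors == 3, numImages % 32 == 0, numFiltersPerGroup % 32 case: color kernel < 4, 32, 1, 8, 3, 4, true, false > (presumably 1 image and 8 filters per thread, going by the kernel's template parameters).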
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 3, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 3, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 3, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 3, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 16, 2, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 16, 2, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 12, 2, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 12, 2, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 8, 2, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 2, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 4, 2, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 2, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else if (numImages % 64 == 0) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 16, 2, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 16, 2, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), 
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 12, 2, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 12, 2, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 8, 2, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 2, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 4, 2, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 2, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else if (numImages % 32 == 0) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 2, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 16, 2, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 2, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 12, 2, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 2, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 2, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 2, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 2, 4, true, false >) , 
dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } else if (numFilterColors == 1) { if (numImages % 128 == 0) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 16, 1, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 16, 1, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 12, 1, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 12, 1, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 8, 1, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 1, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 4, 1, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 1, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else if (numImages % 64 == 0) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 16, 1, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 16, 1, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 12, 1, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 12, 1, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 
32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 8, 1, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 1, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 4, 1, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 1, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else if (numImages % 32 == 0) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 1, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 16, 1, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 1, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 12, 1, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 1, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 1, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 1, 4, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 1, 4, true, false >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } } else if (checkImgBounds == true) { if (numFilterColors % 8 == 0) { if (numImages % 1 == 0) { if (numFiltersPerGroup % 128 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 8, 32, 1, 16, 8, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 8, 32, 1, 16, 8, true, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, 
filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 8, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 16, 8, true, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 8, 8, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 8, 8, true, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 4, 8, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 4, 8, true, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } } else if (numFilterColors % 4 == 0) { if (numImages % 1 == 0) { if (numFiltersPerGroup % 128 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, true, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, true, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 8, 4, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 8, 4, true, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } 
else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 4, 4, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 4, 4, true, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } } else if (numFilterColors == 3) { if (numImages % 1 == 0) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 3, 4, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 16, 3, 4, true, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 3, 4, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 12, 3, 4, true, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 3, 4, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 3, 4, true, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 3, 4, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 3, 4, true, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } else if (numFilterColors == 2) { if (numImages % 1 == 0) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 2, 4, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 16, 2, 4, true, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 2, 4, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 12, 2, 4, true, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, 
images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 2, 4, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 2, 4, true, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 2, 4, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 2, 4, true, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } else if (numFilterColors == 1) { if (numImages % 1 == 0) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 1, 4, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 16, 1, 4, true, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 1, 4, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 12, 1, 4, true, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 1, 4, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 1, 4, true, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 1, 4, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 1, 4, true, true >) , dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } } } getLastCudaError("filterActs: kernel execution failed"); } void convFilterActs(THCState* state, THCudaTensor* images, THCudaTensor* 
filters, THCudaTensor* targets, int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride, int numImgColors, int numGroups) { convFilterActsSt(state, images, filters, targets, imgSizeY, numModulesY, numModulesX, paddingStart, moduleStride, numImgColors, numGroups, 0, 1); } void convFilterActsSt(THCState* state, THCudaTensor* images, THCudaTensor* filters, THCudaTensor* targets, int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride, int numImgColors, int numGroups, float scaleTargets, float scaleOutput) { _filterActs(state, images, filters, targets, imgSizeY, numModulesY, numModulesX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput, true); } void localFilterActs(THCState* state, THCudaTensor* images, THCudaTensor* filters, THCudaTensor* targets, int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride, int numImgColors, int numGroups) { localFilterActsSt(state, images, filters, targets, imgSizeY, numModulesY, numModulesX, paddingStart, moduleStride, numImgColors, numGroups, 0, 1); } void localFilterActsSt(THCState* state, THCudaTensor* images, THCudaTensor* filters, THCudaTensor* targets, int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride, int numImgColors, int numGroups, float scaleTargets, float scaleOutput) { _filterActs(state, images, filters, targets, imgSizeY, numModulesY, numModulesX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput, false); }
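/*
 * Usage sketch (illustrative only, not referenced by the library): a minimal
 * host-side example of how the convFilterActs() wrapper above might be driven.
 * It assumes the cutorch THC allocation helpers (THCudaTensor_new,
 * THCudaTensor_newWithSize2d, THCudaTensor_free) are reachable through the
 * headers this file already includes; all sizes are hypothetical and the
 * tensors are left uninitialized. Layouts follow the comments in this file:
 *   images:  (numImgColors * imgSizeY * imgSizeX, numImages), one column per image
 *   filters: (numFilterColors * filterPixels, numFilters)
 *   targets: resized by _filterActs to (numFilters * numModules, numImages)
 */
static void exampleConvFilterActs(THCState* state) {
    const int numImgColors = 3, imgSize = 32, numImages = 128;   // 32x32 RGB, batch of 128
    const int filterSize = 5, numFilters = 64, numGroups = 1;    // 64 ungrouped 5x5 filters
    const int paddingStart = 0, moduleStride = 1;
    const int numModulesX = (imgSize - filterSize) / moduleStride + 1;  // 28 output positions per row
    const int numModulesY = numModulesX;

    THCudaTensor* images  = THCudaTensor_newWithSize2d(state, numImgColors * imgSize * imgSize, numImages);
    THCudaTensor* filters = THCudaTensor_newWithSize2d(state, numImgColors * filterSize * filterSize, numFilters);
    THCudaTensor* targets = THCudaTensor_new(state);  // resized to (numFilters * numModules, numImages) inside

    // Equivalent to convFilterActsSt(..., scaleTargets = 0, scaleOutput = 1).
    convFilterActs(state, images, filters, targets,
                   imgSize /* imgSizeY */, numModulesY, numModulesX,
                   paddingStart, moduleStride, numImgColors, numGroups);

    THCudaTensor_free(state, targets);
    THCudaTensor_free(state, filters);
    THCudaTensor_free(state, images);
}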
271e7daad904125583b0acf9c9d2d02410628a79.cu
/* * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../include/cudaconv2.cuh" #include "../../nvmatrix/include/nvmatrix_kernels.cuh" __device__ __forceinline__ void filterActs_YxX_color_preload_ty_4_tx_32_f_16_cc_3_setImgCoords(int fPidx, int imgLoadModPosY, int imgLoadModPosX, int imgSizeX, int filterSize, int& iPidx) { int x = imgLoadModPosX + (fPidx) % filterSize; int y = imgLoadModPosY + (fPidx) / filterSize; iPidx = y >= 0 && y < imgSizeX && x >= 0 && x < imgSizeX ? y * imgSizeX + x : -1; } #define FA_COLOR3_IMPRELOAD(c,i) imPreload[c][i] = iPidxNext < 0 || (checkImgBounds && myImgIdx + i * B_X >= numImages) ? 0 : mm[c * imgPixels * imgStride + i * B_X]; #define FA_COLOR3_IMPRELOAD_TX(c,i) imPreload[c][i] = iPidxNext < 0 || (checkImgBounds && myImgIdx + i * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imagesOffset2 + c * imgPixels * imgStride + i * B_X); /* * images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModules, numFilterColors, filterPixels, numFilters) otherwise * * targets: (numFilters, numModulesY, numModulesX, numImages) * */ template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int numColors, int pixelCache, bool scale, bool checkImgBounds> //__launch_bounds__(128,3) __global__ void filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_16_px_4_cc_3_tex(cudaTextureObject_t images, cudaTextureObject_t filters, float* targets, const int numImages, const int numFilters, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int numModulesY, const int numModulesX, const int imgStride, const float scaleTargets, const float scaleOutputs, const bool conv/*, const bool noloads*/) { __shared__ float shFilters[numColors][pixelCache][B_Y * filtersPerThread]; // pre-load 1 pixel from B_Y*filtersPerThread filters __shared__ float shImages[numColors][pixelCache][B_X * imgsPerThread]; // pre-load 1 pixel from B_X*imgsPerThread images const int imgPixels = imgSizeY * imgSizeX; const int filterPixels = filterSize * filterSize; const int blocksPerModule = numFilters / (B_Y*filtersPerThread); const int moduleIdx = blockIdx.y / blocksPerModule; const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule); const int numModules = numModulesX * numModulesY; // Another fun insanity: the % B_X makes things faster, even thought threadIdx.x is // in the range 0..31. It appears that this allows the compiler to optimize? 
const int tx = threadIdx.x % B_X; const int ty = threadIdx.y % B_Y; const int tidx = ty * B_X + threadIdx.x; const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride; const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride; const int shFilterLoadY = tidx / (B_Y * filtersPerThread); const int shFilterLoadX = tidx % (B_Y * filtersPerThread); const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x; // images += myImgIdx; // filters += blockFilterIdx // + shFilterLoadY * numFilters + shFilterLoadX; // if (!conv) { // NOTE: UNTESTED! // filters += moduleIdx * numColors * filterPixels * numFilters; // } const int imagesOffset = myImgIdx; const int filtersOffset = blockFilterIdx + shFilterLoadY * numFilters + shFilterLoadX + (conv ? 0 : moduleIdx * numColors * filterPixels * numFilters); targets += moduleIdx * numImages + (blockFilterIdx + threadIdx.y * filtersPerThread) * numImages * numModules + myImgIdx; float prod[imgsPerThread][filtersPerThread]; #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] = 0; } } int iPidxNext; float imPreload[numColors][imgsPerThread]; float fPreload[numColors][pixelCache*filtersPerThread/B_X]; #pragma unroll for (int c = 0; c < numColors; ++c) { #pragma unroll for (int p = 0; p < pixelCache; p += B_X/filtersPerThread) { if (p + shFilterLoadY < filterPixels) { fPreload[c][p*filtersPerThread/B_X] = tex1Dfetch<float>(filters, filtersOffset + p * numFilters + c * numFilters * filterPixels); } else{ fPreload[c][p*filtersPerThread/B_X] = 0; } } } filterActs_YxX_color_preload_ty_4_tx_32_f_16_cc_3_setImgCoords(ty, imgLoadModPosY, imgLoadModPosX, imgSizeX, filterSize, iPidxNext); #pragma unroll for (int c = 0; c < numColors; ++c) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (iPidxNext >= 0 && (!checkImgBounds || myImgIdx + i * B_X < numImages)) { imPreload[c][i] = tex1Dfetch<float>(images, imagesOffset + (c * imgPixels + iPidxNext) * imgStride + i * B_X); } else { imPreload[c][i] = 0; } } } for (int p = 0; p < filterPixels; p += pixelCache) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int c = 0; c < numColors; ++c) { // NOTE: bank conflicts here! shImages[c][ty][tx * imgsPerThread + i] = imPreload[c][i]; } } const int fPidxNext = p + pixelCache >= filterPixels ? 
0 : p + pixelCache; filterActs_YxX_color_preload_ty_4_tx_32_f_16_cc_3_setImgCoords(fPidxNext + ty, imgLoadModPosY, imgLoadModPosX, imgSizeX, filterSize, iPidxNext); // const float* ff = &filters[numFilters * fPidxNext]; // const float* mm = &images[imgStride * iPidxNext]; const int filtersOffset2 = filtersOffset + numFilters * fPidxNext; const int imagesOffset2 = imagesOffset + imgStride * iPidxNext; FA_COLOR3_IMPRELOAD_TX(0,0); FA_COLOR3_IMPRELOAD_TX(0,1); FA_COLOR3_IMPRELOAD_TX(0,2); FA_COLOR3_IMPRELOAD_TX(0,3); #pragma unroll for (int c = 0; c < numColors; ++c) { #pragma unroll for (int pp = 0; pp < pixelCache; pp += B_X/filtersPerThread) { shFilters[c][pp + shFilterLoadY][shFilterLoadX] = fPreload[c][pp*filtersPerThread/B_X]; } } __syncthreads(); FA_COLOR3_IMPRELOAD_TX(1,0); FA_COLOR3_IMPRELOAD_TX(1,1); FA_COLOR3_IMPRELOAD_TX(1,2); FA_COLOR3_IMPRELOAD_TX(1,3); FA_COLOR3_IMPRELOAD_TX(2,0); FA_COLOR3_IMPRELOAD_TX(2,1); FA_COLOR3_IMPRELOAD_TX(2,2); FA_COLOR3_IMPRELOAD_TX(2,3); #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int pp = 0; pp < 2; pp++) { fPreload[c][pp] = fPidxNext + pp*(B_X/filtersPerThread) + shFilterLoadY >= filterPixels ? 0 : tex1Dfetch<float>(filters, filtersOffset2 + c * numFilters* filterPixels + pp*(B_X/filtersPerThread) * numFilters); } } #pragma unroll for (int pp = 0; pp < pixelCache; pp++) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { #pragma unroll for(int i = 0; i < imgsPerThread; i++) { prod[i][f] += shImages[c][pp][tx * imgsPerThread + i] * shFilters[c][pp][ty * filtersPerThread + f]; } } } } __syncthreads(); } if (scale) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { targets[i * B_X + f * numImages * numModules] = scaleTargets * targets[i * B_X + f * numImages * numModules] + scaleOutputs * prod[i][f]; } } } } else { // Note: reversing order of these loops saves 2 registers, but costs time #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { targets[i * B_X + f * numImages * numModules] = scaleOutputs * prod[i][f]; } } } } } /* * images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModules, numFilterColors, filterPixels, numFilters) otherwise * * targets: (numFilters, numModulesY, numModulesX, numImages) * * This won't be pretty. 
*/ template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int numColors, int pixelCache, bool scale, bool checkImgBounds> __global__ void filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_12_px_4_cc_3_tex(cudaTextureObject_t images, cudaTextureObject_t filters, float* targets, const int numImages, const int numFilters, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int numModulesY, const int numModulesX, const int imgStride, const float scaleTargets, const float scaleOutputs, const bool conv/*, const bool noloads*/) { __shared__ float shFilters[numColors][pixelCache][B_Y * filtersPerThread]; // pre-load 1 pixel from B_Y*filtersPerThread filters __shared__ float shImages[numColors][pixelCache][B_X * imgsPerThread]; // pre-load 1 pixel from B_X*imgsPerThread images const int imgPixels = imgSizeY * imgSizeX; const int filterPixels = filterSize * filterSize; const int blocksPerModule = numFilters / (B_Y*filtersPerThread); const int moduleIdx = blockIdx.y / blocksPerModule; const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule); const int numModules = numModulesX * numModulesY; // Another fun insanity: the % B_X makes things faster, even though threadIdx.x is // in the range 0..31. It appears that this allows the compiler to optimize? const int tx = threadIdx.x % B_X; const int ty = threadIdx.y % B_Y; const int tidx = ty * B_X + threadIdx.x; const int warp = tidx / 32; const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride; const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride; const int shFilterLoadY = tidx / (B_Y * filtersPerThread); const int shFilterLoadX = tidx % (B_Y * filtersPerThread); const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x; // images += myImgIdx; // filters += blockFilterIdx // + shFilterLoadY * numFilters + shFilterLoadX; // if (!conv) { // NOTE: UNTESTED! // filters += moduleIdx * numColors * filterPixels * numFilters; // } const int imagesOffset = myImgIdx; const int filtersOffset = blockFilterIdx + shFilterLoadY * numFilters + shFilterLoadX + (conv ? 
0 : moduleIdx * numColors * filterPixels * numFilters); targets += moduleIdx * numImages + (blockFilterIdx + threadIdx.y * filtersPerThread) * numImages * numModules + myImgIdx; float prod[imgsPerThread][filtersPerThread]; #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] = 0; } } int iPidxNext; float imPreload[numColors][imgsPerThread]; float fPreload[numColors][DIVUP(pixelCache*filtersPerThread,B_X)]; if (warp < 3) { #pragma unroll for (int c = 0; c < numColors; ++c) { #pragma unroll for (int p = 0; p < pixelCache; p += 2) { if (p + shFilterLoadY < filterPixels) { fPreload[c][p/2] = tex1Dfetch<float>(filters, filtersOffset + p * numFilters + c * numFilters * filterPixels); } else { fPreload[c][p/2] = 0; } } } } filterActs_YxX_color_preload_ty_4_tx_32_f_16_cc_3_setImgCoords(ty, imgLoadModPosY, imgLoadModPosX, imgSizeX, filterSize, iPidxNext); #pragma unroll for (int c = 0; c < numColors; ++c) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (iPidxNext >= 0 && (!checkImgBounds || myImgIdx + i * B_X < numImages)) { imPreload[c][i] = tex1Dfetch<float>(images, imagesOffset + (c * imgPixels + iPidxNext) * imgStride + i * B_X); } else { imPreload[c][i] = 0; } } } for (int p = 0; p < filterPixels; p += pixelCache) { const int fPidxNext = p + pixelCache >= filterPixels ? 0 : p + pixelCache; filterActs_YxX_color_preload_ty_4_tx_32_f_16_cc_3_setImgCoords(fPidxNext + ty, imgLoadModPosY, imgLoadModPosX, imgSizeX, filterSize, iPidxNext); #pragma unroll for (int c = 0; c < numColors; ++c) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { // NOTE: bank conflicts here! shImages[c][ty][tx * imgsPerThread + i] = imPreload[c][i]; } } if (warp < 3) { #pragma unroll for (int c = 0; c < numColors; ++c) { #pragma unroll for (int pp = 0; pp < pixelCache; pp += 2) { shFilters[c][pp + shFilterLoadY][shFilterLoadX] = fPreload[c][pp/2]; } } } __syncthreads(); // const float* ff = &filters[numFilters * fPidxNext]; // const float* mm = &images[imgStride * iPidxNext]; const int filtersOffset2 = filtersOffset + numFilters * fPidxNext; const int imagesOffset2 = imagesOffset + imgStride * iPidxNext; #pragma unroll for (int i = 0; i < imgsPerThread; ++i) { #pragma unroll for (int c = 0; c < numColors; c++) { FA_COLOR3_IMPRELOAD_TX(c,i); } } #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int pp = 0; pp < 2; pp++) { fPreload[c][pp] = warp >= 3 || fPidxNext + pp*2 + shFilterLoadY >= filterPixels ? 
0 : tex1Dfetch<float>(filters, filtersOffset2 + c * numFilters* filterPixels + pp*2 * numFilters); } #pragma unroll for (int pp = 0; pp < pixelCache; pp++) { #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[c][pp][tx * imgsPerThread + i] * shFilters[c][pp][ty * filtersPerThread + f]; } } } } __syncthreads(); } if (scale) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { targets[i * B_X + f * numImages * numModules] = scaleTargets * targets[i * B_X + f * numImages * numModules] + scaleOutputs * prod[i][f]; } } } } else { // Note: reversing order of these loops costs 2 registers, but saves time #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { targets[i * B_X + f * numImages * numModules] = scaleOutputs * prod[i][f]; } } } } } __device__ inline void filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(int filterSize, int imgSizeX, int imgLoadModPosY, int imgLoadModPosX, int imgY, int imgX, int& fPidx, int& iPidx) { int filterPxY = imgY - imgLoadModPosY; int filterPxX = imgX - imgLoadModPosX; fPidx = filterPxY * filterSize + filterPxX; iPidx = imgY * imgSizeX + imgX; // Pixel index in img } /* * images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModules, numFilterColors, filterPixels, numFilters) otherwise * * targets: (numFilters, numModulesY, numModulesX, numImages) * * Note: in git there's a 1.5% faster version of this which sues 167 registers instead of 154... * it's basically the same thing, but it doesn't do the next-pixel computation. It just avoids * pre-loading when it rolls over to the next pixel. */ template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int colorCache, bool scale, bool checkImgBounds> __global__ void filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4(float* images, float* filters, float* targets, const int numImages, const int numFilters, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int numModulesY, const int numModulesX, const int imgStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs, const bool conv/*, const bool noloads*/) { __shared__ float shFilters[colorCache][B_Y * filtersPerThread]; // pre-load 1 pixel from B_Y*filtersPerThread filters __shared__ float shImages[colorCache][B_X * imgsPerThread]; // pre-load 1 pixel from B_X*imgsPerThread images const int imgPixels = imgSizeY * imgSizeX; const int filterPixels = filterSize * filterSize; const int numFilterColors = numImgColors / numGroups; const int blocksPerModule = numFilters / (B_Y*filtersPerThread); const int moduleIdx = blockIdx.y / blocksPerModule; const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule); const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numModules = numModulesX * numModulesY; const int blockColorIdx = numFilterColors * blockGroupIdx; // Another fun insanity: the % B_X makes things faster, even thought threadIdx.x is // in the range 0..31. It appears that this allows the compiler to optimize? 
const int tx = threadIdx.x % B_X; const int ty = threadIdx.y % B_Y; const int tidx = ty * B_X + threadIdx.x; const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride; const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride; const int shFilterLoadY = tidx / (B_Y * filtersPerThread); const int shFilterLoadX = tidx % (B_Y * filtersPerThread); const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x; images += (blockColorIdx + threadIdx.y) * imgPixels * imgStride + myImgIdx; filters +=blockFilterIdx + shFilterLoadY * numFilters * filterPixels + shFilterLoadX; if (!conv) { filters += moduleIdx * numFilterColors * filterPixels * numFilters; } targets += moduleIdx * numImages + (blockFilterIdx + threadIdx.y * filtersPerThread) * numImages * numModules + myImgIdx; float prod[imgsPerThread][filtersPerThread]; // float fCache[filtersPerThread]; #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] = 0; } } // NOTE: these max/min functions increase register usage as compared to my macros const int imgStartX = max(0, imgLoadModPosX); const int imgStartY = max(0, imgLoadModPosY); const int imgEndX = min(imgLoadModPosX + filterSize, imgSizeX); const int imgEndY = min(imgLoadModPosY + filterSize, imgSizeY); // __shared__ int imgPos[] int fPidx, iPidx; float imPreload[imgsPerThread]; float fPreload[colorCache*filtersPerThread/B_X]; // float fCache[filtersPerThread]; filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgStartY, imgStartX, fPidx, iPidx); #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { imPreload[i] = images[imgStride * iPidx + i * B_X]; } else { imPreload[i] = 0; } } if (/*B_X % filtersPerThread == 0 ||*/ shFilterLoadY < B_X/filtersPerThread) { // This if statement reduces reg usage.. #pragma unroll for (int c = 0; c < colorCache; c += B_X/filtersPerThread) { fPreload[c*filtersPerThread/B_X] = filters[(c * filterPixels + fPidx) * numFilters]; } } for (int imgY = imgStartY; imgY < imgEndY; ++imgY) { // const int filterPxY = imgY - imgLoadModPosY; for (int imgX = imgStartX; imgX < imgEndX; ++imgX) { // const int filterPxX = imgX - imgLoadModPosX; // const int p = filterPxY * filterSize + filterPxX; // const int pixIdx = imgY * imgSizeX + imgX;// Pixel index in img // setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgY, imgX, &p, &pixIdx); // float* m = &images[imgStride * pixIdx]; const bool lastPixel = imgY == imgEndY - 1 && imgX == imgEndX - 1; int imgYNext = imgY; int imgXNext = imgX; int fPidxNext, iPidxNext; if (!lastPixel) { imgYNext = imgY + (imgX + 1 == imgEndX); imgXNext = imgX + 1 == imgEndX ? 
imgStartX : imgX + 1; } filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgYNext, imgXNext, fPidxNext, iPidxNext); for (int oc = 0; oc < numFilterColors; oc += colorCache) { // oc stands for outer color (loop) const float* ff = &filters[numFilters * ((oc + colorCache) * filterPixels + fPidx)]; const float* mm = &images[imgStride * ((oc + colorCache) * imgPixels + iPidx)]; if (oc == numFilterColors - colorCache) { ff = &filters[fPidxNext * numFilters]; mm = &images[iPidxNext * imgStride]; fPidx = fPidxNext; iPidx = iPidxNext; } #pragma unroll for (int c = 0; c < colorCache; c += B_X/filtersPerThread) { shFilters[c + shFilterLoadY][shFilterLoadX] = fPreload[c*filtersPerThread/B_X]; } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { // NOTE: bank conflicts here! shImages[ty][tx * imgsPerThread + i] = imPreload[i]; } imPreload[0] = (checkImgBounds && myImgIdx + 0 * B_X >= numImages) ? 0 : mm[0 * B_X]; imPreload[1] = (checkImgBounds && myImgIdx + 1 * B_X >= numImages) ? 0 : mm[1 * B_X]; imPreload[2] = (checkImgBounds && myImgIdx + 2 * B_X >= numImages) ? 0 : mm[2 * B_X]; __syncthreads(); #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[0][threadIdx.x * imgsPerThread + i] * shFilters[0][threadIdx.y * filtersPerThread + f]; } } fPreload[0] = ff[0]; #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[1][threadIdx.x * imgsPerThread + i] * shFilters[1][threadIdx.y * filtersPerThread + f]; } } fPreload[1] = ff[(B_X/filtersPerThread * filterPixels) * numFilters]; #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[2][threadIdx.x * imgsPerThread + i] * shFilters[2][threadIdx.y * filtersPerThread + f]; } } imPreload[3] = (checkImgBounds && myImgIdx + 3 * B_X >= numImages) ? 
0 : mm[3 * B_X]; #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[3][threadIdx.x * imgsPerThread + i] * shFilters[3][threadIdx.y * filtersPerThread + f]; } } __syncthreads(); } } } if (scale) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { targets[i * B_X + f * numImages * numModules] = scaleTargets * targets[i * B_X + f * numImages * numModules] + scaleOutputs * prod[i][f]; } } } } else { // Note: reversing order of these loops saves 2 registers, but costs time #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { targets[i * B_X + f * numImages * numModules] = scaleOutputs * prod[i][f]; } } } } } /* * images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModules, numFilterColors, filterPixels, numFilters) otherwise * * targets: (numFilters, numModulesY, numModulesX, numImages) * */ template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int colorCache, bool scale, bool checkImgBounds> __global__ void filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex(cudaTextureObject_t images, cudaTextureObject_t filters, float* targets, const int numImages, const int numFilters, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int numModulesY, const int numModulesX, const int imgStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs, const bool conv/*, const bool noloads*/) { __shared__ float shFilters[colorCache][B_Y * filtersPerThread]; // pre-load 1 pixel from B_Y*filtersPerThread filters __shared__ float shImages[colorCache][B_X * imgsPerThread]; // pre-load 1 pixel from B_X*imgsPerThread images const int imgPixels = imgSizeY * imgSizeX; const int filterPixels = filterSize * filterSize; const int numFilterColors = numImgColors / numGroups; const int blocksPerModule = numFilters / (B_Y*filtersPerThread); const int moduleIdx = blockIdx.y / blocksPerModule; const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule); const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numModules = numModulesX * numModulesY; const int blockColorIdx = numFilterColors * blockGroupIdx; // Another fun insanity: the % B_X makes things faster, even thought threadIdx.x is // in the range 0..31. It appears that this allows the compiler to optimize? const int tx = threadIdx.x % B_X; const int ty = threadIdx.y % B_Y; const int tidx = ty * B_X + threadIdx.x; const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride; const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride; const int shFilterLoadY = tidx / (B_Y * filtersPerThread); const int shFilterLoadX = tidx % (B_Y * filtersPerThread); const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x; const int imgOffset = (blockColorIdx + threadIdx.y) * imgPixels * imgStride + myImgIdx; // images += (blockColorIdx + threadIdx.y) * imgPixels * imgStride + myImgIdx; const int filterOffset = blockFilterIdx + shFilterLoadY * numFilters * filterPixels + shFilterLoadX + (conv ? 
0 : moduleIdx * numFilterColors * filterPixels * numFilters); // filters +=blockFilterIdx // + shFilterLoadY * numFilters * filterPixels + shFilterLoadX; // if (!conv) { // filters += moduleIdx * numFilterColors * filterPixels * numFilters; // } targets += moduleIdx * numImages + (blockFilterIdx + threadIdx.y * filtersPerThread) * numImages * numModules + myImgIdx; float prod[imgsPerThread][filtersPerThread]; // float fCache[filtersPerThread]; #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] = 0; } } // NOTE: these max/min functions increase register usage as compared to my macros const int imgStartX = max(0, imgLoadModPosX); const int imgStartY = max(0, imgLoadModPosY); const int imgEndX = min(imgLoadModPosX + filterSize, imgSizeX); const int imgEndY = min(imgLoadModPosY + filterSize, imgSizeY); // __shared__ int imgPos[] int fPidx, iPidx; float imPreload[imgsPerThread]; // [4] float fPreload[colorCache*filtersPerThread/B_X]; // [2] // float fCache[filtersPerThread]; filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgStartY, imgStartX, fPidx, iPidx); #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { imPreload[i] = tex1Dfetch<float>(images, imgOffset + imgStride * iPidx + i * B_X); } else { imPreload[i] = 0; } } if (/*B_X % filtersPerThread == 0 ||*/ shFilterLoadY < B_X/filtersPerThread) { // This if statement reduces reg usage.. #pragma unroll for (int c = 0; c < colorCache; c += B_X/filtersPerThread) { fPreload[c*filtersPerThread/B_X] = tex1Dfetch<float>(filters, filterOffset + (c * filterPixels + fPidx) * numFilters); } } for (int imgY = imgStartY; imgY < imgEndY; ++imgY) { // const int filterPxY = imgY - imgLoadModPosY; for (int imgX = imgStartX; imgX < imgEndX; ++imgX) { // const int filterPxX = imgX - imgLoadModPosX; // const int p = filterPxY * filterSize + filterPxX; // const int pixIdx = imgY * imgSizeX + imgX;// Pixel index in img // setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgY, imgX, &p, &pixIdx); // float* m = &images[imgStride * pixIdx]; const bool lastPixel = imgY == imgEndY - 1 && imgX == imgEndX - 1; int imgYNext = imgY; int imgXNext = imgX; int fPidxNext, iPidxNext; if (!lastPixel) { imgYNext = imgY + (imgX + 1 == imgEndX); imgXNext = imgX + 1 == imgEndX ? imgStartX : imgX + 1; } filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgYNext, imgXNext, fPidxNext, iPidxNext); for (int oc = 0; oc < numFilterColors; oc += colorCache) { // oc stands for outer color (loop) // const float* ff = &filters[numFilters * ((oc + colorCache) * filterPixels + fPidx)]; // const float* mm = &images[imgStride * ((oc + colorCache) * imgPixels + iPidx)]; int imgOffset2 = imgOffset + imgStride * ((oc + colorCache) * imgPixels + iPidx); int filterOffset2 = filterOffset + numFilters * ((oc + colorCache) * filterPixels + fPidx); if (oc == numFilterColors - colorCache) { filterOffset2 = filterOffset + fPidxNext * numFilters; imgOffset2 = imgOffset + iPidxNext * imgStride; fPidx = fPidxNext; iPidx = iPidxNext; } #pragma unroll for (int c = 0; c < colorCache; c += B_X/filtersPerThread) { shFilters[c + shFilterLoadY][shFilterLoadX] = fPreload[c*filtersPerThread/B_X]; } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { // NOTE: bank conflicts here! 
shImages[ty][tx * imgsPerThread + i] = imPreload[i]; } imPreload[0] = (checkImgBounds && myImgIdx + 0 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 0 * B_X); imPreload[1] = (checkImgBounds && myImgIdx + 1 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 1 * B_X); imPreload[2] = (checkImgBounds && myImgIdx + 2 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 2 * B_X); __syncthreads(); #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[0][threadIdx.x * imgsPerThread + i] * shFilters[0][threadIdx.y * filtersPerThread + f]; } } fPreload[0] = tex1Dfetch<float>(filters, filterOffset2 + 0); #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[1][threadIdx.x * imgsPerThread + i] * shFilters[1][threadIdx.y * filtersPerThread + f]; } } fPreload[1] = tex1Dfetch<float>(filters, filterOffset2 + (B_X/filtersPerThread * filterPixels) * numFilters); #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[2][threadIdx.x * imgsPerThread + i] * shFilters[2][threadIdx.y * filtersPerThread + f]; } } imPreload[3] = (checkImgBounds && myImgIdx + 3 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 3 * B_X); #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[3][threadIdx.x * imgsPerThread + i] * shFilters[3][threadIdx.y * filtersPerThread + f]; } } __syncthreads(); } } } if (scale) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { targets[i * B_X + f * numImages * numModules] = scaleTargets * targets[i * B_X + f * numImages * numModules] + scaleOutputs * prod[i][f]; } } } } else { // Note: reversing order of these loops saves 2 registers, but costs time #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { targets[i * B_X + f * numImages * numModules] = scaleOutputs * prod[i][f]; } } } } } /* * Block size B_YxB_X. Each block applies B_Y * filtersPerThread filters to B_X * imgsPerThread images. * threadIdx.x determines image * threadIdx.y determines filter * * blockIdx.x determines image batch of B_X * imgsPerThread * blockIdx.y determines filter batch of module and B_Y * filtersPerThread * * images: (numColors, imgSizeY, imgSizeX, numImages) with stride given * filters: (numColors, filterPixels, numFilters) if conv * (numModules, numColors, filterPixels, numFilters) otherwise * * targets: (numFilters, numModulesY, numModulesX, numImages) * * * Number of filters per module should be divisible by B_Y * filtersPerThread * checkImgBounds indicates whether number of images is divisible by B_X * imgsPerThread * * The imgSize here is the size of the actual image without the padding. 
* */ template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int numColors, int pixelCache, bool scale, bool checkImgBounds> __global__ void filterActs_YxX_color(float* images, float* filters, float* targets, const int numImages, const int numFilters, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int numModulesY, const int numModulesX, const int imgStride, const float scaleTargets, const float scaleOutputs, const bool conv) { __shared__ float shFilters[pixelCache*numColors][B_Y * filtersPerThread]; // pre-load pixelCache pixels from B_Y*filtersPerThread filters __shared__ float shImages[pixelCache*numColors][B_X * imgsPerThread]; // pre-load pixelCache pixels from B_X*imgsPerThread images const int imgPixels = imgSizeY * imgSizeX; const int filterPixels = filterSize * filterSize; const int blocksPerModule = numFilters / (B_Y*filtersPerThread); const int moduleIdx = blockIdx.y / blocksPerModule; const int blockFilterIdx = blockIdx.y % blocksPerModule; const int tidx = threadIdx.y * B_X + threadIdx.x; const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride; const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride; const int numModules = numModulesY * numModulesX; const int shFilterLoadY = tidx / (B_Y * filtersPerThread); const int shFilterLoadX = tidx % (B_Y * filtersPerThread); const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x; images += myImgIdx; filters += filtersPerThread * B_Y * blockFilterIdx + shFilterLoadY * numFilters + shFilterLoadX; if (!conv) { filters += moduleIdx * numColors * filterPixels * numFilters; } targets += moduleIdx * numImages + (blockFilterIdx * B_Y * filtersPerThread + threadIdx.y*filtersPerThread) * numImages * numModulesY * numModulesX + myImgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for(int f = 0; f < filtersPerThread; f++) { #pragma unroll for(int g = 0; g < imgsPerThread; g++) { prod[f][g] = 0; } } //float* shImgLoad = &shImages[0][threadIdx.x]; for (int p = 0; p < filterPixels; p += pixelCache) { /* * Load pixelCache pixels from B_Y*filtersPerThread filters * This condition covers the case when B_X is not divisible by filtersPerThread. * In this case, not all of the threads will participate in the loading operation. * This ensures that in each loop iteration, an integer number of rows of shFilters * are filled, which makes indexing simple. */ if (B_X % filtersPerThread == 0 || shFilterLoadY < B_X/filtersPerThread) { #pragma unroll for (int p2 = 0; p2 < pixelCache; p2 += B_X/filtersPerThread) { const bool omit = pixelCache % (B_X / filtersPerThread) == 0; const int preloadPx = shFilterLoadY + p2; if (omit || preloadPx < pixelCache) { if (p + preloadPx < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { shFilters[shFilterLoadY + p2 + c * pixelCache][shFilterLoadX] = filters[(c * filterPixels + p + p2) * numFilters]; } } else { #pragma unroll for (int c = 0; c < numColors; c++) { shFilters[shFilterLoadY + p2 + c * pixelCache][shFilterLoadX] = 0; } } } } } /* * Load pixelCache pixels from B_X*imgsPerThread images. */ #pragma unroll for (int ly = 0; ly < pixelCache; ly += B_Y) { const int preloadPx = ly + threadIdx.y; const int pixIdx = p + preloadPx; const bool omit = pixelCache % B_Y == 0; // Compile-time condition /* * Don't load any image pixels corresponding to filter pixels that don't exist. 
*/ if (pixIdx < filterPixels && (omit || preloadPx < pixelCache)) { const int x = imgLoadModPosX + pixIdx % filterSize; const int y = imgLoadModPosY + pixIdx / filterSize; if (y >= 0 && y < imgSizeY && x >= 0 && x < imgSizeX) { float* m = &images[imgStride * (y * imgSizeX + x)]; #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { shImages[preloadPx + c * pixelCache][threadIdx.x * imgsPerThread + i] = m[c * imgStride * imgPixels + i * B_X]; } else { shImages[preloadPx + c * pixelCache][threadIdx.x * imgsPerThread + i] = 0; } } } } else { // Padding #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[preloadPx + c * pixelCache][threadIdx.x * imgsPerThread + i] = 0; } } } } } __syncthreads(); #pragma unroll for (int i = 0; i < pixelCache*numColors; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { #pragma unroll for(int g = 0; g < imgsPerThread; g++) { prod[f][g] += shImages[i][g + threadIdx.x * imgsPerThread] * shFilters[i][threadIdx.y * filtersPerThread + f]; } } } __syncthreads(); } if (scale) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int g = 0; g < imgsPerThread; g++) { if (!checkImgBounds || myImgIdx + g * B_X < numImages) { targets[g * B_X + f * numImages * numModules] = scaleTargets * targets[g * B_X + f * numImages * numModules] + scaleOutputs * prod[f][g]; } } } } else { #pragma unroll for (int g = 0; g < imgsPerThread; g++) { if (!checkImgBounds || myImgIdx + g * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[g * B_X + f * numImages * numModules] = scaleOutputs * prod[f][g]; } } } } } /* * Block size B_YxB_X. Each block applies B_Y * filtersPerThread filters to B_X * imgsPerThread images. * threadIdx.x determines image * threadIdx.y determines filter * * blockIdx.x determines image batch of B_X * imgsPerThread * blockIdx.y determines filter batch of B_Y * filtersPerThread * * images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModules, numFilterColors, filterPixels, numFilters) otherwise * * targets: (numFilters, numModulesY, numModulesX, numImages) * * B_Y one of 4, 8, 16 * B_X one of 16, 32 * imgsPerThread one of 1, 2, 4 * filtersPerThread one of 1, 2, 4, 8 * colorCache: how many colors to put into shmem * * numFilters should be divisible by B_Y * filtersPerThread * numImages be divisible by B_X * imgsPerThread * numFilterColors should be divisible by colorCache. * numImgColors must be even. * numFilters must be divisible by numGroups. * no restrictions on pixelCache * The imgSize here is the size of the actual image without the padding. * As always, try to make B_X * imgsPerThread == B_Y * filtersPerThread for maximum efficiency. 
* */ template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int colorCache, bool scale, bool checkImgBounds> __global__ void filterActs_YxX_sparse2(float* images, float* filters, float* targets, const int numImages, const int numFilters, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int numModulesY, const int numModulesX, const int imgStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs, const bool conv) { __shared__ float shFilters[colorCache][B_Y * filtersPerThread]; // pre-load 1 pixel from B_Y*filtersPerThread filters __shared__ float shImages[colorCache][B_X * imgsPerThread]; // pre-load 1 pixel from B_X*imgsPerThread images const int imgPixels = imgSizeY * imgSizeX; const int filterPixels = filterSize * filterSize; const int numFilterColors = numImgColors / numGroups; const int blocksPerModule = numFilters / (B_Y*filtersPerThread); const int moduleIdx = blockIdx.y / blocksPerModule; const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule); const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numModules = numModulesX * numModulesY; const int blockColorIdx = numFilterColors * blockGroupIdx; const int tidx = threadIdx.y * B_X + threadIdx.x; const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride; const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride; const int shFilterLoadY = tidx / (B_Y * filtersPerThread); const int shFilterLoadX = tidx % (B_Y * filtersPerThread); const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x; images += (blockColorIdx + threadIdx.y) * imgPixels * imgStride + myImgIdx; filters +=blockFilterIdx + shFilterLoadY * numFilters * filterPixels + shFilterLoadX; if (!conv) { filters += moduleIdx * numFilterColors * filterPixels * numFilters; } targets += moduleIdx * numImages + (blockFilterIdx + threadIdx.y) * numImages * numModules + myImgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for(int f = 0; f < filtersPerThread; f++) { #pragma unroll for(int g = 0; g < imgsPerThread; g++) { prod[f][g] = 0; } } const int imgStartX = MAX(0, imgLoadModPosX); const int imgStartY = MAX(0, imgLoadModPosY); const int imgEndX = MIN(imgLoadModPosX + filterSize, imgSizeX); const int imgEndY = MIN(imgLoadModPosY + filterSize, imgSizeY); // __shared__ int imgPos[] for (int imgY = imgStartY; imgY < imgEndY; ++imgY) { const int filterPxY = imgY - imgLoadModPosY; for (int imgX = imgStartX; imgX < imgEndX; ++imgX) { const int filterPxX = imgX - imgLoadModPosX; const int p = filterPxY * filterSize + filterPxX; for (int oc = 0; oc < numFilterColors; oc += colorCache) { // oc stands for outer color (loop) /* * Load a pixel from B_Y*filtersPerThread filters * This condition covers the case when B_X is not divisible by filtersPerThread. * In this case, not all of the threads will participate in the loading operation. * This ensures that in each loop iteration, an integer number of rows of shFilters * are filled, which makes indexing simple. * nvcc is behaving in a completely insane way: removing this condition under * template parameters that guarantee it to be true actually slows down * the computation. 
* */ if (/*B_X % filtersPerThread == 0 ||*/ shFilterLoadY < B_X/filtersPerThread) { #pragma unroll for (int c = 0; c < colorCache; c += B_X/filtersPerThread) { if (colorCache % (B_X/filtersPerThread) == 0 || c + shFilterLoadY < colorCache) { shFilters[c + shFilterLoadY][shFilterLoadX] = filters[((oc+c) * filterPixels + p) * numFilters]; } } } /* * Load a pixel from B_X*imgsPerThread images. */ const int pixIdx = imgY * imgSizeX + imgX;// Pixel index in img float* m = &images[imgStride * (oc * imgPixels + pixIdx)]; #pragma unroll for (int c = 0; c < colorCache; c += B_Y) { if (colorCache % B_Y == 0 || threadIdx.y + c < colorCache) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { shImages[c + threadIdx.y][threadIdx.x + i * B_X] = m[c * imgStride * imgPixels + i * B_X]; } else { shImages[c + threadIdx.y][threadIdx.x + i * B_X] = 0; } } } } __syncthreads(); for (int c = 0; c < colorCache; c++) { #pragma unroll for(int g = 0; g < imgsPerThread; g++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[f][g] += shImages[c][g * B_X + threadIdx.x] * shFilters[c][threadIdx.y + f * B_Y]; } } } __syncthreads(); } } } if (scale) { #pragma unroll for (int g = 0; g < imgsPerThread; g++) { if (!checkImgBounds || myImgIdx + g * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[g * B_X + f * B_Y * numImages * numModules] = scaleTargets * targets[g * B_X + f * B_Y * numImages * numModules] + scaleOutputs * prod[f][g]; } } } } else { // Note: reversing order of these loops saves 2 registers, but costs time #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int g = 0; g < imgsPerThread; g++) { if (!checkImgBounds || myImgIdx + g * B_X < numImages) { targets[g * B_X + f * B_Y * numImages * numModules] = scaleOutputs * prod[f][g]; } } } } } /* * images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModules, numFilterColors, filterPixels, numFilters) otherwise * * targets: (numFilters, numModules, numImages) * * Note: all of these convolution routines are optimized for the case when * the number of images (i.e. the minibatch size) is a multiple of 128. * Other batch sizes will work, but but I made no attempt whatsoever * to make them work fast. */ void _filterActs(THCState* state, THCudaTensor* images, THCudaTensor* filters, THCudaTensor* targets, int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride, int numImgColors, int numGroups, float scaleTargets, float scaleOutput, bool conv) { int numFilterColors = numImgColors / numGroups; int numFilters = filters->size[1]; int numModules = numModulesY * numModulesX; int numImages = images->size[1]; int imgPixels = images->size[0]/numImgColors; int imgSizeX = imgPixels / imgSizeY; int filterModuleMult = conv ? 
1 : numModules; THAssert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 4 == 0))); THAssert(numGroups == 1 || numFilterColors % 4 == 0); THAssert(numFilters % (16 * numGroups) == 0); THAssert(numImgColors % numGroups == 0); //images.printShape("images"); // printf("rows: %d, pixels: %d, colors: %d\n", images->size[0], imgPixels, numImgColors); //images.printShape("images"); THAssert(images->size[0] == imgPixels * numImgColors); THAssert(imgSizeY * imgSizeX == imgPixels); int numFiltersPerGroup = numFilters / numGroups; int imgStride = images->stride[0]; // images does not need to be a contiguous matrix int filterPixels = filters->size[0] / (filterModuleMult * numFilterColors); int filterSize = int(sqrt(filterPixels)); THAssert(filterSize * filterSize == filterPixels); THAssert(filters->size[0] == filterModuleMult * numFilterColors * filterPixels); // These routines don't handle the case when only part of the image is visited in the convolution THAssert(paddingStart <= 0); THAssert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX); THAssert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY); THAssert(moduleStride <= filterSize); THAssert(THCudaTensor_isContiguous(state, filters)); THAssert(THCudaTensor_isContiguous(state, targets)); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1; int filtersPerThread, threadsY = 4; if (numImgColors <= 3) { // Special kernels written for colors = 3, filters = 64 and colors = 3, filters = 48 cases. // The remaining cases use the old routines. // TODO: Modernize the remaining cases if you care about them. filtersPerThread = numFiltersPerGroup % 64 == 0 ? 16 : numFiltersPerGroup % 48 == 0 ? 12 : numFiltersPerGroup % 32 == 0 ? 8 : 4; } else { filtersPerThread = numFiltersPerGroup % 64 == 0 ? 16 : numFiltersPerGroup % 32 == 0 ? 8 : 4; threadsY = numFiltersPerGroup % 128 == 0 && numFilterColors % 8 == 0 && imgsPerThread != 4 ? 8 : 4; } int threadsX = 32; dim3 threads(threadsX, threadsY); dim3 blocks = dim3(DIVUP(numImages, threads.x * imgsPerThread), (numModules * numFilters) / (threads.y * filtersPerThread)); bool checkImgBounds = numImages % (threads.x*imgsPerThread) != 0; bool scale = scaleTargets != 0; /* printf("numFilterColors: %d\n" "numFilters: %d\n" "numModules: %d\n" "numImages: %d\n" "imgPixels: %d\n" "imgSizeX: %d\n" "filterModuleMult: %d\n" "imgStride: %d\n" "filterPixels: %d\n" "filterSize: %d\n" "checkImgBounds: %d\n" "scale: %d\n", numFilterColors, numFilters, numModules, numImages, imgPixels, imgSizeX, filterModuleMult, imgStride, filterPixels, filterSize, checkImgBounds, scale); */ if (scaleTargets == 0) { THCudaTensor_resize2d(state, targets, numFilters * numModules, numImages); } else { THAssert(targets->size[0] == numFilters * numModules); THAssert(targets->size[1] == numImages); } // Auto-generated calling code... // NOTE: The calling code is set up such that if checkImgBounds is true, then imgsPerThread = 1. // In principle it doesn't have to be this way, and you may want to optimize for that case. 
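    /*
     * Worked example of the launch geometry chosen above (illustrative numbers, not taken from
     * any particular caller): suppose numImgColors = 3, numGroups = 1, numImages = 128,
     * numFilters = 64 and numModules = 36. Then imgsPerThread = 4 (128 % 128 == 0),
     * filtersPerThread = 16 (64 % 64 == 0), threads = (32, 4), and
     *
     *     blocks.x = DIVUP(128, 32 * 4)   = 1
     *     blocks.y = (36 * 64) / (4 * 16) = 36
     *
     * checkImgBounds is false because 128 is a multiple of 32 * imgsPerThread. When numImages is
     * not a multiple of 32, imgsPerThread is 1 and the checkImgBounds = true instantiations are
     * selected instead, as the NOTE above explains.
     */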
if (scale == false) { if (checkImgBounds == false) { if (numFilterColors % 8 == 0) { if (numImages % 128 == 0) { if (numFiltersPerGroup % 128 == 0) { if (THCudaTensor_nElement(state, images) * 4 < TEXTURE_SIZE_MAX) { cudaTextureObject_t texImages = THCudaTensor_getTextureObject(state, images); cudaTextureObject_t texFilters = THCudaTensor_getTextureObject(state, filters); cudaFuncSetCacheConfig(filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, false, false >, cudaFuncCachePreferL1); filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, false, false > <<<blocks, threads, 0>>>(texImages, texFilters, THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); checkCudaErrors(cudaDestroyTextureObject(texImages)); checkCudaErrors(cudaDestroyTextureObject(texFilters)); } else { cudaFuncSetCacheConfig(filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4 < 4, 32, 4, 16, 4, false, false >, cudaFuncCachePreferL1); filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4 < 4, 32, 4, 16, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } else if (numFiltersPerGroup % 64 == 0) { if (THCudaTensor_nElement(state, images) * 4 < TEXTURE_SIZE_MAX) { cudaTextureObject_t texImages = THCudaTensor_getTextureObject(state, images); cudaTextureObject_t texFilters = THCudaTensor_getTextureObject(state, filters); cudaFuncSetCacheConfig(filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, false, false >, cudaFuncCachePreferL1); filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, false, false > <<<blocks, threads, 0>>>(texImages, texFilters, THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); checkCudaErrors(cudaDestroyTextureObject(texImages)); checkCudaErrors(cudaDestroyTextureObject(texFilters)); } else { cudaFuncSetCacheConfig(filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4 < 4, 32, 4, 16, 4, false, false >, cudaFuncCachePreferL1); filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4 < 4, 32, 4, 16, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 8, 8, false, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 4, 8, 8, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 
4, 4, 8, false, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 4, 4, 8, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } else if (numImages % 64 == 0) { if (numFiltersPerGroup % 128 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 8, 32, 2, 16, 8, false, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 8, 32, 2, 16, 8, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 16, 8, false, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 2, 16, 8, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 8, 8, false, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 2, 8, 8, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 4, 8, false, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 2, 4, 8, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } else if (numImages % 32 == 0) { if (numFiltersPerGroup % 128 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 8, 32, 1, 16, 8, false, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 8, 32, 1, 16, 8, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 8, false, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 1, 16, 8, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, 
scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 8, 8, false, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 1, 8, 8, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 4, 8, false, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 1, 4, 8, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } } else if (numFilterColors % 4 == 0) { if (numImages % 128 == 0) { if (numFiltersPerGroup % 128 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 16, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 4, 16, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 16, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 4, 16, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 8, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 4, 8, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 4, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 4, 4, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } else if (numImages % 64 == 0) { if (numFiltersPerGroup % 128 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 16, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 2, 16, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), 
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 16, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 2, 16, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 8, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 2, 8, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 4, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 2, 4, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } else if (numImages % 32 == 0) { if (numFiltersPerGroup % 128 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 8, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 1, 8, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 4, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 1, 4, 4, false, false > <<<blocks, threads, 
0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } } else if (numFilterColors == 3) { if (numImages % 128 == 0) { if (numFiltersPerGroup % 64 == 0) { cudaTextureObject_t texImages = THCudaTensor_getTextureObject(state, images); cudaTextureObject_t texFilters = THCudaTensor_getTextureObject(state, filters); cudaFuncSetCacheConfig(filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_16_px_4_cc_3_tex < 4, 32, 4, 16, 3, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_16_px_4_cc_3_tex < 4, 32, 4, 16, 3, 4, false, false > <<<blocks, threads, 0>>>(texImages, texFilters, THCudaTensor_data(state, targets),numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); checkCudaErrors(cudaDestroyTextureObject(texImages)); checkCudaErrors(cudaDestroyTextureObject(texFilters)); } else if (numFiltersPerGroup % 48 == 0) { cudaTextureObject_t texImages = THCudaTensor_getTextureObject(state, images); cudaTextureObject_t texFilters = THCudaTensor_getTextureObject(state, filters); cudaFuncSetCacheConfig(filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_12_px_4_cc_3_tex < 4, 32, 4, 12, 3, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_12_px_4_cc_3_tex < 4, 32, 4, 12, 3, 4, false, false > <<<blocks, threads, 0>>>(texImages, texFilters, THCudaTensor_data(state, targets),numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); checkCudaErrors(cudaDestroyTextureObject(texImages)); checkCudaErrors(cudaDestroyTextureObject(texFilters)); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 8, 3, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 8, 3, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 4, 3, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 4, 3, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else if (numImages % 64 == 0) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 16, 3, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 2, 16, 3, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 12, 
3, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 2, 12, 3, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 8, 3, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 2, 8, 3, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 4, 3, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 2, 4, 3, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else if (numImages % 32 == 0) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 3, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 16, 3, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 3, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 12, 3, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 3, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 8, 3, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 3, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 4, 3, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 16, 2, 4, false, 
false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 16, 2, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 12, 2, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 12, 2, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 8, 2, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 8, 2, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 4, 2, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 4, 2, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else if (numImages % 64 == 0) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 16, 2, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 2, 16, 2, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 12, 2, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 2, 12, 2, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 8, 2, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 2, 8, 2, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 4, 2, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 2, 4, 2, 
4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else if (numImages % 32 == 0) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 2, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 16, 2, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 2, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 12, 2, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 2, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 8, 2, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 2, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 4, 2, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } else if (numFilterColors == 1) { if (numImages % 128 == 0) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 16, 1, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 16, 1, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 12, 1, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 12, 1, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 8, 1, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 8, 1, 4, false, 
false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 4, 1, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 4, 1, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else if (numImages % 64 == 0) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 16, 1, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 2, 16, 1, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 12, 1, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 2, 12, 1, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 8, 1, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 2, 8, 1, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 4, 1, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 2, 4, 1, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else if (numImages % 32 == 0) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 1, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 16, 1, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 1, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 12, 1, 4, false, false > <<<blocks, threads, 
0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 1, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 8, 1, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 1, 4, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 4, 1, 4, false, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } } else if (checkImgBounds == true) { if (numFilterColors % 8 == 0) { if (numImages % 1 == 0) { if (numFiltersPerGroup % 128 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 8, 32, 1, 16, 8, false, true >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 8, 32, 1, 16, 8, false, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 8, false, true >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 1, 16, 8, false, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 8, 8, false, true >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 1, 8, 8, false, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 4, 8, false, true >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 1, 4, 8, false, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } } else if (numFilterColors % 4 == 0) { if (numImages % 1 == 0) { if (numFiltersPerGroup % 128 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 
32, 1, 16, 4, false, true >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, false, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, false, true >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, false, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 8, 4, false, true >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 1, 8, 4, false, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 4, 4, false, true >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 1, 4, 4, false, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } } else if (numFilterColors == 3) { if (numImages % 1 == 0) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 3, 4, false, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 16, 3, 4, false, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 3, 4, false, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 12, 3, 4, false, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 3, 4, false, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 8, 3, 4, false, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { 
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 3, 4, false, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 4, 3, 4, false, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } else if (numFilterColors == 2) { if (numImages % 1 == 0) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 2, 4, false, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 16, 2, 4, false, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 2, 4, false, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 12, 2, 4, false, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 2, 4, false, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 8, 2, 4, false, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 2, 4, false, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 4, 2, 4, false, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } else if (numFilterColors == 1) { if (numImages % 1 == 0) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 1, 4, false, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 16, 1, 4, false, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 1, 4, false, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 12, 1, 4, false, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { 
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 1, 4, false, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 8, 1, 4, false, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 1, 4, false, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 4, 1, 4, false, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } } } else if (scale == true) { if (checkImgBounds == false) { if (numFilterColors % 8 == 0) { if (numImages % 128 == 0) { if (numFiltersPerGroup % 128 == 0) { if (THCudaTensor_nElement(state, images) * 4 < TEXTURE_SIZE_MAX) { cudaTextureObject_t texImages = THCudaTensor_getTextureObject(state, images); cudaTextureObject_t texFilters = THCudaTensor_getTextureObject(state, filters); cudaFuncSetCacheConfig(filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, true, false >, cudaFuncCachePreferL1); filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, true, false > <<<blocks, threads, 0>>>(texImages, texFilters, THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); checkCudaErrors(cudaDestroyTextureObject(texImages)); checkCudaErrors(cudaDestroyTextureObject(texFilters)); } else { cudaFuncSetCacheConfig(filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4 < 4, 32, 4, 16, 4, true, false >, cudaFuncCachePreferL1); filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4 < 4, 32, 4, 16, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } else if (numFiltersPerGroup % 64 == 0) { if (THCudaTensor_nElement(state, images) * 4 < TEXTURE_SIZE_MAX) { cudaTextureObject_t texImages = THCudaTensor_getTextureObject(state, images); cudaTextureObject_t texFilters = THCudaTensor_getTextureObject(state, filters); cudaFuncSetCacheConfig(filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, true, false >, cudaFuncCachePreferL1); filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, true, false > <<<blocks, threads, 0>>>(texImages, texFilters, THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); checkCudaErrors(cudaDestroyTextureObject(texImages)); checkCudaErrors(cudaDestroyTextureObject(texFilters)); } else { cudaFuncSetCacheConfig(filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4 < 4, 32, 4, 16, 4, true, false >, cudaFuncCachePreferL1); filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4 < 4, 32, 4, 
16, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 8, 8, true, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 4, 8, 8, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 4, 8, true, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 4, 4, 8, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } else if (numImages % 64 == 0) { if (numFiltersPerGroup % 128 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 8, 32, 2, 16, 8, true, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 8, 32, 2, 16, 8, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 16, 8, true, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 2, 16, 8, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 8, 8, true, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 2, 8, 8, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 4, 8, true, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 2, 4, 8, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } else if (numImages % 32 == 0) { if (numFiltersPerGroup % 128 == 0) { 
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 8, 32, 1, 16, 8, true, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 8, 32, 1, 16, 8, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 8, true, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 1, 16, 8, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 8, 8, true, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 1, 8, 8, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 4, 8, true, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 1, 4, 8, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } } else if (numFilterColors % 4 == 0) { if (numImages % 128 == 0) { if (numFiltersPerGroup % 128 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 16, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 4, 16, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 16, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 4, 16, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 8, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 4, 8, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, 
imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 4, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 4, 4, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } else if (numImages % 64 == 0) { if (numFiltersPerGroup % 128 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 16, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 2, 16, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 16, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 2, 16, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 8, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 2, 8, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 4, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 2, 4, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } else if (numImages % 32 == 0) { if (numFiltersPerGroup % 128 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), 
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 8, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 1, 8, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 4, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 1, 4, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } } else if (numFilterColors == 3) { if (numImages % 128 == 0) { if (numFiltersPerGroup % 64 == 0) { cudaTextureObject_t texImages = THCudaTensor_getTextureObject(state, images); cudaTextureObject_t texFilters = THCudaTensor_getTextureObject(state, filters); cudaFuncSetCacheConfig(filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_16_px_4_cc_3_tex < 4, 32, 4, 16, 3, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_16_px_4_cc_3_tex < 4, 32, 4, 16, 3, 4, true, false > <<<blocks, threads, 0>>>(texImages, texFilters, THCudaTensor_data(state, targets),numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); checkCudaErrors(cudaDestroyTextureObject(texImages)); checkCudaErrors(cudaDestroyTextureObject(texFilters)); } else if (numFiltersPerGroup % 48 == 0) { cudaTextureObject_t texImages = THCudaTensor_getTextureObject(state, images); cudaTextureObject_t texFilters = THCudaTensor_getTextureObject(state, filters); cudaFuncSetCacheConfig(filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_12_px_4_cc_3_tex < 4, 32, 4, 12, 3, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_12_px_4_cc_3_tex < 4, 32, 4, 12, 3, 4, true, false > <<<blocks, threads, 0>>>(texImages, texFilters, THCudaTensor_data(state, targets),numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); checkCudaErrors(cudaDestroyTextureObject(texImages)); checkCudaErrors(cudaDestroyTextureObject(texFilters)); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 8, 3, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 8, 3, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 4, 3, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 4, 3, 4, true, false > 
<<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else if (numImages % 64 == 0) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 16, 3, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 2, 16, 3, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 12, 3, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 2, 12, 3, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 8, 3, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 2, 8, 3, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 4, 3, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 2, 4, 3, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else if (numImages % 32 == 0) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 3, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 16, 3, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 3, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 12, 3, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 3, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 8, 3, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), 
THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 3, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 4, 3, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 16, 2, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 16, 2, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 12, 2, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 12, 2, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 8, 2, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 8, 2, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 4, 2, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 4, 2, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else if (numImages % 64 == 0) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 16, 2, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 2, 16, 2, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 12, 2, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 2, 12, 2, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, 
filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 8, 2, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 2, 8, 2, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 4, 2, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 2, 4, 2, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else if (numImages % 32 == 0) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 2, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 16, 2, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 2, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 12, 2, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 2, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 8, 2, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 2, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 4, 2, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } else if (numFilterColors == 1) { if (numImages % 128 == 0) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 16, 1, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 16, 1, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), 
THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 12, 1, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 12, 1, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 8, 1, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 8, 1, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 4, 1, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 4, 1, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else if (numImages % 64 == 0) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 16, 1, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 2, 16, 1, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 12, 1, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 2, 12, 1, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 8, 1, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 2, 8, 1, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 4, 1, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 2, 4, 1, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, 
paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else if (numImages % 32 == 0) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 1, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 16, 1, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 1, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 12, 1, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 1, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 8, 1, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 1, 4, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 4, 1, 4, true, false > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } } else if (checkImgBounds == true) { if (numFilterColors % 8 == 0) { if (numImages % 1 == 0) { if (numFiltersPerGroup % 128 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 8, 32, 1, 16, 8, true, true >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 8, 32, 1, 16, 8, true, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 8, true, true >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 1, 16, 8, true, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 8, 8, true, true >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 1, 8, 8, true, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, 
numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 4, 8, true, true >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 1, 4, 8, true, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } } else if (numFilterColors % 4 == 0) { if (numImages % 1 == 0) { if (numFiltersPerGroup % 128 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, true, true >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, true, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, true, true >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, true, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 8, 4, true, true >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 1, 8, 4, true, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 4, 4, true, true >, cudaFuncCachePreferShared); filterActs_YxX_sparse2 < 4, 32, 1, 4, 4, true, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } } else if (numFilterColors == 3) { if (numImages % 1 == 0) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 3, 4, true, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 16, 3, 4, true, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 3, 4, true, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 12, 3, 4, true, true > <<<blocks, 
threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 3, 4, true, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 8, 3, 4, true, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 3, 4, true, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 4, 3, 4, true, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } else if (numFilterColors == 2) { if (numImages % 1 == 0) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 2, 4, true, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 16, 2, 4, true, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 2, 4, true, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 12, 2, 4, true, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 2, 4, true, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 8, 2, 4, true, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 2, 4, true, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 4, 2, 4, true, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } else if (numFilterColors == 1) { if (numImages % 1 == 0) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 1, 4, true, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 16, 1, 4, true, true > <<<blocks, threads, 
0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 1, 4, true, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 12, 1, 4, true, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 1, 4, true, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 8, 1, 4, true, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else if (numFiltersPerGroup % 1 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 1, 4, true, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 1, 4, 1, 4, true, true > <<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } } } getLastCudaError("filterActs: kernel execution failed"); } void convFilterActs(THCState* state, THCudaTensor* images, THCudaTensor* filters, THCudaTensor* targets, int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride, int numImgColors, int numGroups) { convFilterActsSt(state, images, filters, targets, imgSizeY, numModulesY, numModulesX, paddingStart, moduleStride, numImgColors, numGroups, 0, 1); } void convFilterActsSt(THCState* state, THCudaTensor* images, THCudaTensor* filters, THCudaTensor* targets, int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride, int numImgColors, int numGroups, float scaleTargets, float scaleOutput) { _filterActs(state, images, filters, targets, imgSizeY, numModulesY, numModulesX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput, true); } void localFilterActs(THCState* state, THCudaTensor* images, THCudaTensor* filters, THCudaTensor* targets, int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride, int numImgColors, int numGroups) { localFilterActsSt(state, images, filters, targets, imgSizeY, numModulesY, numModulesX, paddingStart, moduleStride, numImgColors, numGroups, 0, 1); } void localFilterActsSt(THCState* state, THCudaTensor* images, THCudaTensor* filters, THCudaTensor* targets, int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride, int numImgColors, int numGroups, float scaleTargets, float scaleOutput) { _filterActs(state, images, filters, targets, imgSizeY, numModulesY, numModulesX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput, false); }
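/*
 * Editor's note -- illustrative sketch, not part of the original cuda-convnet /
 * cutorch sources. The long if/else ladder above exists because imgsPerThread,
 * filtersPerThread and colorCache are template parameters of
 * filterActs_YxX_sparse2 / filterActs_YxX_color, so they must be compile-time
 * constants; the runtime divisibility tests (numImages % 128/64/32,
 * numFiltersPerGroup % 128/64/32/1, ...) only select which pre-instantiated
 * kernel to launch. The minimal sketch below shows the same dispatch pattern on
 * a trivial kernel; every name in it (scaleKernel, dispatchScale) is
 * hypothetical and introduced only for this sketch. It reuses the
 * getLastCudaError helper already used in _filterActs above.
 */
#include <cuda_runtime.h>

template <int IMGS_PER_THREAD, bool CHECK_BOUNDS>
__global__ void scaleKernel(const float* src, float* dst, float s, int n) {
    // Each thread handles IMGS_PER_THREAD consecutive elements; the bounds
    // check is compiled away entirely when CHECK_BOUNDS is false.
    int base = (blockIdx.x * blockDim.x + threadIdx.x) * IMGS_PER_THREAD;
    #pragma unroll
    for (int i = 0; i < IMGS_PER_THREAD; ++i) {
        int idx = base + i;
        if (!CHECK_BOUNDS || idx < n) {
            dst[idx] = s * src[idx];
        }
    }
}

// src and dst are assumed to be device pointers.
static void dispatchScale(const float* src, float* dst, float s, int n) {
    const int threads = 128;
    // Runtime divisibility picks the compile-time instantiation, exactly like
    // the numImages % 128 / % 64 / % 32 ladder in _filterActs above.
    if (n % (threads * 4) == 0) {
        int blocks = n / (threads * 4);
        scaleKernel<4, false><<<blocks, threads>>>(src, dst, s, n);
    } else {
        int blocks = (n + threads * 4 - 1) / (threads * 4);
        scaleKernel<4, true><<<blocks, threads>>>(src, dst, s, n);
    }
    getLastCudaError("dispatchScale: kernel execution failed");
}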
c676b40b8f5e0f1eaf97ae96e78dc58e102c9a05.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <assert.h> #include <stdio.h> #include "box3d1r-64x16-3-256_kernel.hu" #define BENCH_DIM 3 #define BENCH_FPP 53 #define BENCH_RAD 1 #include "common.h" double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop) { double start_time = sb_time(), end_time = 0.0; int dimsize = compsize + BENCH_RAD * 2; SB_TYPE (*A)[dimsize][dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize][dimsize])A1; if (scop) { if (dimsize >= 3 && timestep >= 1) { #define cudaCheckReturn(ret) \ do { \ hipError_t cudaCheckReturn_e = (ret); \ if (cudaCheckReturn_e != hipSuccess) { \ fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \ fflush(stderr); \ } \ assert(cudaCheckReturn_e == hipSuccess); \ } while(0) #define cudaCheckKernel() \ do { \ cudaCheckReturn(hipGetLastError()); \ } while(0) float *dev_A; cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float))); { cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyHostToDevice)); #ifdef STENCILBENCH hipDeviceSynchronize(); SB_START_INSTRUMENTS; #endif } { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __c3Len = (dimsize - 1 - 1); const AN5D_TYPE __c3Pad = (1); #define __c3 c3 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __halo3 = 1; AN5D_TYPE c0; AN5D_TYPE __side0LenMax; { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 10; const AN5D_TYPE __side3Len = 58; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 
1 : 0; __side0LenMax = __side0Len; for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1) { hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2)) { if (__c0Len % __side0LenMax == 0) { { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 12; const AN5D_TYPE __side3Len = 60; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 14; const AN5D_TYPE __side3Len = 62; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 1) { { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 12; const AN5D_TYPE __side3Len = 60; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 14; const AN5D_TYPE __side3Len = 62; const AN5D_TYPE 
__OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 14; const AN5D_TYPE __side3Len = 62; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 2) { { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 14; const AN5D_TYPE __side3Len = 62; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 14; const AN5D_TYPE __side3Len = 62; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 
* __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } } else if (__c0Len % __side0LenMax) { if (__c0Len % __side0LenMax == 1) { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 14; const AN5D_TYPE __side3Len = 62; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 2) { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 12; const AN5D_TYPE __side3Len = 60; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } } cudaCheckKernel(); { #ifdef STENCILBENCH hipDeviceSynchronize(); SB_STOP_INSTRUMENTS; #endif cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyDeviceToHost)); } cudaCheckReturn(hipFree(dev_A)); } } else { for (int t = 0; t < timestep; t++) #pragma omp parallel for for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++) for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++) for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++) A[(t+1)%2][i][j][k] = 0.0375f*A[t%2][i-1][j][k] + 0.0371f*A[t%2][i-1][j-1][k-1] + 0.0372f*A[t%2][i-1][j-1][k] + 0.0373f*A[t%2][i-1][j-1][k+1] + 0.0374f*A[t%2][i-1][j][k-1] + 0.0376f*A[t%2][i-1][j][k+1] + 0.0377f*A[t%2][i-1][j+1][k-1] + 0.0378f*A[t%2][i-1][j+1][k] + 0.0379f*A[t%2][i-1][j+1][k+1] + 0.0355f*A[t%2][i][j][k] + 0.0351f*A[t%2][i][j-1][k-1] + 0.0352f*A[t%2][i][j-1][k] + 0.0353f*A[t%2][i][j-1][k+1] + 0.0354f*A[t%2][i][j][k-1] + 0.0356f*A[t%2][i][j][k+1] + 0.0357f*A[t%2][i][j+1][k-1] + 0.0358f*A[t%2][i][j+1][k] + 0.0359f*A[t%2][i][j+1][k+1] + 
0.0365f*A[t%2][i+1][j][k] + 0.0361f*A[t%2][i+1][j-1][k-1] + 0.0362f*A[t%2][i+1][j-1][k] + 0.0363f*A[t%2][i+1][j-1][k+1] + 0.0364f*A[t%2][i+1][j][k-1] + 0.0366f*A[t%2][i+1][j][k+1] + 0.0367f*A[t%2][i+1][j+1][k-1] + 0.0368f*A[t%2][i+1][j+1][k] + 0.0369f*A[t%2][i+1][j+1][k+1]; } return (((end_time != 0.0) ? end_time : sb_time()) - start_time); }
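/*
 * Editor's note (not emitted by hipify): the hipLaunchKernelGGL() calls above
 * are the HIP spelling of the CUDA triple-chevron launches found in the .cu
 * twin of this file below. The two forms are equivalent; for example
 *
 *     kernel0_1<<<k0_dimGrid, k0_dimBlock>>>(dev_A, dimsize, timestep, c0);
 *
 * is translated by hipify into
 *
 *     hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0,
 *                        dev_A, dimsize, timestep, c0);
 *
 * where the two extra 0 arguments are the dynamic shared-memory size in bytes
 * and the hipStream_t to launch on (the default stream here). The parentheses
 * around the kernel name are a harmless hipify artifact.
 */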
c676b40b8f5e0f1eaf97ae96e78dc58e102c9a05.cu
#include <assert.h> #include <stdio.h> #include "box3d1r-64x16-3-256_kernel.hu" #define BENCH_DIM 3 #define BENCH_FPP 53 #define BENCH_RAD 1 #include "common.h" double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop) { double start_time = sb_time(), end_time = 0.0; int dimsize = compsize + BENCH_RAD * 2; SB_TYPE (*A)[dimsize][dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize][dimsize])A1; if (scop) { if (dimsize >= 3 && timestep >= 1) { #define cudaCheckReturn(ret) \ do { \ cudaError_t cudaCheckReturn_e = (ret); \ if (cudaCheckReturn_e != cudaSuccess) { \ fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \ fflush(stderr); \ } \ assert(cudaCheckReturn_e == cudaSuccess); \ } while(0) #define cudaCheckKernel() \ do { \ cudaCheckReturn(cudaGetLastError()); \ } while(0) float *dev_A; cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float))); { cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyHostToDevice)); #ifdef STENCILBENCH cudaDeviceSynchronize(); SB_START_INSTRUMENTS; #endif } { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __c3Len = (dimsize - 1 - 1); const AN5D_TYPE __c3Pad = (1); #define __c3 c3 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __halo3 = 1; AN5D_TYPE c0; AN5D_TYPE __side0LenMax; { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 10; const AN5D_TYPE __side3Len = 58; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 
1 : 0; __side0LenMax = __side0Len; for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1) { kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2)) { if (__c0Len % __side0LenMax == 0) { { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 12; const AN5D_TYPE __side3Len = 60; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 14; const AN5D_TYPE __side3Len = 62; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 1) { { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 12; const AN5D_TYPE __side3Len = 60; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 14; const AN5D_TYPE __side3Len = 62; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const 
AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 14; const AN5D_TYPE __side3Len = 62; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 2) { { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 14; const AN5D_TYPE __side3Len = 62; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 14; const AN5D_TYPE __side3Len = 62; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 
1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } } else if (__c0Len % __side0LenMax) { if (__c0Len % __side0LenMax == 1) { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 14; const AN5D_TYPE __side3Len = 62; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 2) { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 12; const AN5D_TYPE __side3Len = 60; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } } cudaCheckKernel(); { #ifdef STENCILBENCH cudaDeviceSynchronize(); SB_STOP_INSTRUMENTS; #endif cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyDeviceToHost)); } cudaCheckReturn(cudaFree(dev_A)); } } else { for (int t = 0; t < timestep; t++) #pragma omp parallel for for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++) for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++) for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++) A[(t+1)%2][i][j][k] = 0.0375f*A[t%2][i-1][j][k] + 0.0371f*A[t%2][i-1][j-1][k-1] + 0.0372f*A[t%2][i-1][j-1][k] + 0.0373f*A[t%2][i-1][j-1][k+1] + 0.0374f*A[t%2][i-1][j][k-1] + 0.0376f*A[t%2][i-1][j][k+1] + 0.0377f*A[t%2][i-1][j+1][k-1] + 0.0378f*A[t%2][i-1][j+1][k] + 0.0379f*A[t%2][i-1][j+1][k+1] + 0.0355f*A[t%2][i][j][k] + 0.0351f*A[t%2][i][j-1][k-1] + 0.0352f*A[t%2][i][j-1][k] + 0.0353f*A[t%2][i][j-1][k+1] + 0.0354f*A[t%2][i][j][k-1] + 0.0356f*A[t%2][i][j][k+1] + 0.0357f*A[t%2][i][j+1][k-1] + 0.0358f*A[t%2][i][j+1][k] + 0.0359f*A[t%2][i][j+1][k+1] + 0.0365f*A[t%2][i+1][j][k] + 0.0361f*A[t%2][i+1][j-1][k-1] + 0.0362f*A[t%2][i+1][j-1][k] + 0.0363f*A[t%2][i+1][j-1][k+1] + 0.0364f*A[t%2][i+1][j][k-1] + 0.0366f*A[t%2][i+1][j][k+1] + 0.0367f*A[t%2][i+1][j+1][k-1] + 0.0368f*A[t%2][i+1][j+1][k] + 0.0369f*A[t%2][i+1][j+1][k+1]; } return (((end_time != 0.0) ? end_time : sb_time()) - start_time); }
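/* The AN5D-generated launch blocks above all follow one sizing recipe: pick a
   tile side per spatial dimension, take a ceiling division of each extent by
   its side, multiply the per-dimension tile counts into a single 1-D grid, and
   flatten the thread block to 1-D as well (1 * __side2LenOl * __side3LenOl).
   The sketch below reproduces only that sizing logic around a hypothetical
   kernel; the kernel body, the domain size and the tile sides chosen here are
   illustrative assumptions, not values taken from the generator. */
#include <cstdio>
#include <cuda_runtime.h>

__global__ void stencil3dSketch(const float *in, float *out, int nx, int ny, int nz)
{
    // Flattened 1-D indexing; a real AN5D kernel recovers its 3-D tile
    // coordinates from blockIdx.x and runs the temporally blocked stencil.
    long long idx = (long long)blockIdx.x * blockDim.x + threadIdx.x;
    long long total = (long long)nx * ny * nz;
    for (long long i = idx; i < total; i += (long long)gridDim.x * blockDim.x)
        out[i] = in[i];                      // placeholder body (grid-stride copy)
}

static unsigned ceilDiv(unsigned len, unsigned side)
{
    return (len + side - 1) / side;          // same expression as (__cLen + __sideLen - 1) / __sideLen
}

int main()
{
    const unsigned nx = 256, ny = 256, nz = 256;         // assumed domain size
    const unsigned side1 = 256, side2 = 14, side3 = 62;  // tile sides, mirroring the __sideNLen constants
    const unsigned halo = 1;
    const unsigned side2Ol = side2 + 2 * halo, side3Ol = side3 + 2 * halo;

    dim3 block(1 * side2Ol * side3Ol, 1, 1);             // __blockSize = 1 * __side2LenOl * __side3LenOl
    dim3 grid(ceilDiv(nx, side1) * ceilDiv(ny, side2) * ceilDiv(nz, side3), 1, 1);

    size_t bytes = (size_t)nx * ny * nz * sizeof(float);
    float *dIn = nullptr, *dOut = nullptr;
    cudaMalloc(&dIn, bytes);
    cudaMalloc(&dOut, bytes);

    stencil3dSketch<<<grid, block>>>(dIn, dOut, nx, ny, nz); // hipify rewrites this launch to hipLaunchKernelGGL(...)
    cudaDeviceSynchronize();

    printf("grid.x = %u, block.x = %u\n", grid.x, block.x);
    cudaFree(dIn);
    cudaFree(dOut);
    return 0;
}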
53f5cdc870cb632690c1f7f8779e792c1bbb1e4f.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "prefixsum_combine.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *in = NULL; hipMalloc(&in, XSIZE*YSIZE); int in_length = 1; float *out = NULL; hipMalloc(&out, XSIZE*YSIZE); int out_length = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( prefixsum_combine), dim3(gridBlock),dim3(threadBlock), 0, 0, in,in_length,out,out_length); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( prefixsum_combine), dim3(gridBlock),dim3(threadBlock), 0, 0, in,in_length,out,out_length); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( prefixsum_combine), dim3(gridBlock),dim3(threadBlock), 0, 0, in,in_length,out,out_length); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
53f5cdc870cb632690c1f7f8779e792c1bbb1e4f.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "prefixsum_combine.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *in = NULL; cudaMalloc(&in, XSIZE*YSIZE); int in_length = 1; float *out = NULL; cudaMalloc(&out, XSIZE*YSIZE); int out_length = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); prefixsum_combine<<<gridBlock,threadBlock>>>(in,in_length,out,out_length); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { prefixsum_combine<<<gridBlock,threadBlock>>>(in,in_length,out,out_length); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { prefixsum_combine<<<gridBlock,threadBlock>>>(in,in_length,out,out_length); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
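/* The pair above is a generated micro-benchmark: it warms the kernel up with a
   few untimed launches and then times 1000 back-to-back launches with
   std::chrono.  Because launches are asynchronous and there is no
   cudaDeviceSynchronize() before the second steady_clock::now(), the interval
   mostly measures enqueue overhead; note also that the harness passes
   XSIZE*YSIZE to cudaMalloc without a sizeof factor.  The sketch below keeps
   the same warm-up / timed-loop structure but synchronizes before stopping the
   clock; the kernel and sizes are stand-ins, not the original prefixsum_combine. */
#include <chrono>
#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummyKernel(float *data, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] = data[i] * 0.5f + 1.0f;   // stand-in workload
}

int main()
{
    using namespace std::chrono;
    const int n = 1 << 20;
    float *d = nullptr;
    cudaMalloc(&d, (size_t)n * sizeof(float));    // allocate in bytes

    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);

    // Untimed warm-up, as in the harness above.
    for (int i = 0; i < 10; ++i)
        dummyKernel<<<grid, block>>>(d, n);
    cudaDeviceSynchronize();

    // Timed loop: synchronize before reading the clock so the measurement
    // covers kernel execution rather than just launch enqueueing.
    auto start = steady_clock::now();
    for (int i = 0; i < 1000; ++i)
        dummyKernel<<<grid, block>>>(d, n);
    cudaDeviceSynchronize();
    auto end = steady_clock::now();

    auto usecs = duration_cast<duration<float, std::micro>>(end - start);
    printf("[%f us, (%u,%u), %d]\n", usecs.count(), block.x, block.y, n);

    cudaFree(d);
    return 0;
}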
2b9389ab3bbfe725d7e1cd1a211a0c63e8b88da1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "MReLU.hpp" #include <json.hpp> typedef TRTInfer::halfloat halfloat; template<typename _T> __global__ void MReLUKernel(_T* input, _T* output, _T bias, int edge); template<> __global__ void MReLUKernel(float* input, float* output, float bias, int edge) { KERNEL_POSITION; float x = input[position]; float a = x > 0 ? x : 0; output[position] = a + bias; } template<> __global__ void MReLUKernel(halfloat* input, halfloat* output, halfloat bias, int edge) { KERNEL_POSITION; halfloat x = input[position]; halfloat _zero = 0.0f; x = x > _zero ? x : _zero; output[position] = x + bias; } void MReLUConfig::init(){ INFO("init MReLU config: %s", info_.c_str()); INFO("MReLU weights = %d[%s]", this->weights_.size(), this->weights_[0]->shapeString()); Json::Value value; if(Json::Reader().parse(info_, value)){ INFO("MReLU kernel_size: %d", value["kernel_size"].asInt()); INFO("MReLU eps: %g", value["eps"].asFloat()); INFO("MReLU other: %s", value["other"].asCString()); } } nvinfer1::Dims MReLU::outputDims(int index, const nvinfer1::Dims* inputDims, int nbInputDims) { return inputDims[0]; } std::shared_ptr<LayerConfig> MReLU::config(const std::string& layerName) { auto cfg = std::shared_ptr<LayerConfig>(new MReLUConfig()); //halffloat cfg->supportDataType_ = {nvinfer1::DataType::kHALF, nvinfer1::DataType::kFLOAT}; //cfg->supportDataType_ = {nvinfer1::DataType::kHALF}; return cfg; } int MReLU::enqueue(const std::vector<GTensor>& inputs, std::vector<GTensor>& outputs, const std::vector<GTensor>& weights, void* workspace, hipStream_t stream) { int count = inputs[0].count(); auto grid = gridDims(count); auto block = blockDims(count); float bias = *this->config_->weights_[0]->cpu<float>(); if (config_->configDataType_ == TRTInfer::DataType::dtFloat) { hipLaunchKernelGGL(( MReLUKernel) , dim3(grid), dim3(block) , 0, 0, inputs[0].ptr<float>(), outputs[0].ptr<float>(), bias, count); } else if (config_->configDataType_ == TRTInfer::DataType::dtHalfloat) { hipLaunchKernelGGL(( MReLUKernel) , dim3(grid), dim3(block), 0, 0, inputs[0].ptr<halfloat>(), outputs[0].ptr<halfloat>(), halfloat(bias), count); } return 0; } RegisterPlugin(MReLU);
2b9389ab3bbfe725d7e1cd1a211a0c63e8b88da1.cu
#include "MReLU.hpp" #include <json.hpp> typedef TRTInfer::halfloat halfloat; template<typename _T> __global__ void MReLUKernel(_T* input, _T* output, _T bias, int edge); template<> __global__ void MReLUKernel(float* input, float* output, float bias, int edge) { KERNEL_POSITION; float x = input[position]; float a = x > 0 ? x : 0; output[position] = a + bias; } template<> __global__ void MReLUKernel(halfloat* input, halfloat* output, halfloat bias, int edge) { KERNEL_POSITION; halfloat x = input[position]; halfloat _zero = 0.0f; x = x > _zero ? x : _zero; output[position] = x + bias; } void MReLUConfig::init(){ INFO("init MReLU config: %s", info_.c_str()); INFO("MReLU weights = %d[%s]", this->weights_.size(), this->weights_[0]->shapeString()); Json::Value value; if(Json::Reader().parse(info_, value)){ INFO("MReLU kernel_size: %d", value["kernel_size"].asInt()); INFO("MReLU eps: %g", value["eps"].asFloat()); INFO("MReLU other: %s", value["other"].asCString()); } } nvinfer1::Dims MReLU::outputDims(int index, const nvinfer1::Dims* inputDims, int nbInputDims) { return inputDims[0]; } std::shared_ptr<LayerConfig> MReLU::config(const std::string& layerName) { auto cfg = std::shared_ptr<LayerConfig>(new MReLUConfig()); //定义我们这个插件支持half和float格式 cfg->supportDataType_ = {nvinfer1::DataType::kHALF, nvinfer1::DataType::kFLOAT}; //cfg->supportDataType_ = {nvinfer1::DataType::kHALF}; return cfg; } int MReLU::enqueue(const std::vector<GTensor>& inputs, std::vector<GTensor>& outputs, const std::vector<GTensor>& weights, void* workspace, cudaStream_t stream) { int count = inputs[0].count(); auto grid = gridDims(count); auto block = blockDims(count); float bias = *this->config_->weights_[0]->cpu<float>(); if (config_->configDataType_ == TRTInfer::DataType::dtFloat) { MReLUKernel <<<grid, block >>> (inputs[0].ptr<float>(), outputs[0].ptr<float>(), bias, count); } else if (config_->configDataType_ == TRTInfer::DataType::dtHalfloat) { MReLUKernel <<<grid, block>>> (inputs[0].ptr<halfloat>(), outputs[0].ptr<halfloat>(), halfloat(bias), count); } return 0; } RegisterPlugin(MReLU);
b61bae2925d071ddb41d36beb6893685fa24e97a.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2012 Jeffrey Blanchard, Erik Opavsky, and Emircan Uysaler * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "SMOSheader.cu" #define NUMBEROFALGORITHMS 2 char* namesOfMultiselectTimingFunctions[NUMBEROFALGORITHMS] = {"Sort and Choose Multiselect", "Bucket Multiselect"}; using namespace std; namespace CompareMultiselect { /* This function compares bucketMultiselect with the other algorithms given in the defined range of kVals and array size. */ template<typename T> void compareMultiselectAlgorithms(uint size, uint* kVals, uint numKs, uint numTests , uint *algorithmsToTest, uint generateType, uint kGenerateType, char* fileNamecsv , T* data = NULL) { // allocate space for operations T *h_vec, *h_vec_copy; float timeArray[NUMBEROFALGORITHMS][numTests]; T * resultsArray[NUMBEROFALGORITHMS][numTests]; float totalTimesPerAlgorithm[NUMBEROFALGORITHMS]; uint winnerArray[numTests]; uint timesWon[NUMBEROFALGORITHMS]; uint i,j,m,x; int runOrder[NUMBEROFALGORITHMS]; unsigned long long seed; //, seed2; results_t<T> *temp; ofstream fileCsv; timeval t1; //, t2; typedef results_t<T>* (*ptrToTimingFunction)(T*, uint, uint *, uint); typedef void (*ptrToGeneratingFunction)(T*, uint, hiprandGenerator_t); //these are the functions that can be called ptrToTimingFunction arrayOfTimingFunctions[NUMBEROFALGORITHMS] = {&timeSortAndChooseMultiselect<T>, &timeBucketMultiselect<T>}; ptrToGeneratingFunction *arrayOfGenerators; char** namesOfGeneratingFunctions; // this is the array of names of functions that generate problems of this type, // ie float, double, or uint namesOfGeneratingFunctions = returnNamesOfGenerators<T>(); arrayOfGenerators = (ptrToGeneratingFunction *) returnGenFunctions<T>(); printf("Files will be written to %s\n", fileNamecsv); fileCsv.open(fileNamecsv, ios_base::app); //zero out the totals and times won bzero(totalTimesPerAlgorithm, NUMBEROFALGORITHMS * sizeof(uint)); bzero(timesWon, NUMBEROFALGORITHMS * sizeof(uint)); //allocate space for h_vec, and h_vec_copy h_vec = (T *) malloc(size * sizeof(T)); h_vec_copy = (T *) malloc(size * sizeof(T)); //create the random generators. 
hiprandGenerator_t generator; srand(unsigned(time(NULL))); printf("The distribution is: %s\n", namesOfGeneratingFunctions[generateType]); printf("The k distribution is: %s\n", namesOfKGenerators[kGenerateType]); /***********************************************/ /*********** START RUNNING TESTS ************ /***********************************************/ for(i = 0; i < numTests; i++) { //hipDeviceReset(); gettimeofday(&t1, NULL); seed = t1.tv_usec * t1.tv_sec; for(m = 0; m < NUMBEROFALGORITHMS;m++) runOrder[m] = m; std::random_shuffle(runOrder, runOrder + NUMBEROFALGORITHMS); fileCsv << size << "," << numKs << "," << namesOfGeneratingFunctions[generateType] << "," << namesOfKGenerators[kGenerateType] << ","; hiprandCreateGenerator(&generator, HIPRAND_RNG_PSEUDO_DEFAULT); hiprandSetPseudoRandomGeneratorSeed(generator,seed); printf("Running test %u of %u for size: %u and numK: %u\n", i + 1, numTests, size, numKs); //generate the random vector using the specified distribution if(data == NULL) arrayOfGenerators[generateType](h_vec, size, generator); else h_vec = data; //copy the vector to h_vec_copy, which will be used to restore it later memcpy(h_vec_copy, h_vec, size * sizeof(T)); /* *************************************************** ****** In this file, the kDistribution is always set to UNIFORM (kGenerateType = 1) ****** so this regeneration of the order statistics is not needed. ****** It is saved here in case one wants to run these tests for a different kDistribution *************************************************** // if the kdistribution is random, we need to generate new a kList for each new random problem instance. if ( (kGenerateType != 1) && (i>0) ){ gettimeofday(&t2, NULL); seed2 = t2.tv_usec * t2.tv_sec; hiprandGenerator_t generator2; srand(unsigned(time(NULL))); hiprandCreateGenerator(&generator2, HIPRAND_RNG_PSEUDO_DEFAULT); hiprandSetPseudoRandomGeneratorSeed(generator2,seed2); arrayOfKDistributionGenerators[kGenerateType](kVals, numKs, size, generator2); } */ winnerArray[i] = 0; float currentWinningTime = INFINITY; //run the various timing functions for(x = 0; x < NUMBEROFALGORITHMS; x++){ j = runOrder[x]; if(algorithmsToTest[j]){ //run timing function j printf("TESTING: %u\n", j); temp = arrayOfTimingFunctions[j](h_vec_copy, size, kVals, numKs); //record the time result timeArray[j][i] = temp->time; //record the value returned resultsArray[j][i] = temp->vals; //update the current "winner" if necessary if(timeArray[j][i] < currentWinningTime){ currentWinningTime = temp->time; winnerArray[i] = j; } //perform clean up free(temp); memcpy(h_vec_copy, h_vec, size * sizeof(T)); } } hiprandDestroyGenerator(generator); for(x = 0; x < NUMBEROFALGORITHMS; x++) if(algorithmsToTest[x]) fileCsv << namesOfMultiselectTimingFunctions[x] << "," << timeArray[x][i] << ","; // check for errors, and output information to recreate problem uint flag = 0; for(m = 1; m < NUMBEROFALGORITHMS;m++) if(algorithmsToTest[m]) for (j = 0; j < numKs; j++) { if(resultsArray[m][i][j] != resultsArray[0][i][j]) { flag++; fileCsv << "\nERROR ON TEST " << i << " of " << numTests << " tests!!!!!\n"; fileCsv << "vector size = " << size << "\nvector seed = " << seed << "\n"; fileCsv << "numKs = " << numKs << "\n"; fileCsv << "wrong k = " << kVals[j] << " kIndex = " << j << " wrong result = " << resultsArray[m][i][j] << " correct result = " << resultsArray[0][i][j] << "\n"; std::cout <<namesOfMultiselectTimingFunctions[m] << " did not return the correct answer on test " << i + 1 << " at k[" << j << "]. 
It got "<< resultsArray[m][i][j]; std::cout << " instead of " << resultsArray[0][i][j] << ".\n" ; std::cout << "RESULT:\t"; PrintFunctions::printBinary(resultsArray[m][i][j]); std::cout << "Right:\t"; PrintFunctions::printBinary(resultsArray[0][i][j]); } } fileCsv << flag << "\n"; } //calculate the total time each algorithm took for(i = 0; i < numTests; i++) for(j = 0; j < NUMBEROFALGORITHMS;j++) if(algorithmsToTest[j]) totalTimesPerAlgorithm[j] += timeArray[j][i]; //count the number of times each algorithm won. for(i = 0; i < numTests;i++) timesWon[winnerArray[i]]++; printf("\n\n"); //print out the average times for(i = 0; i < NUMBEROFALGORITHMS; i++) if(algorithmsToTest[i]) printf("%-20s averaged: %f ms\n", namesOfMultiselectTimingFunctions[i], totalTimesPerAlgorithm[i] / numTests); for(i = 0; i < NUMBEROFALGORITHMS; i++) if(algorithmsToTest[i]) printf("%s won %u times\n", namesOfMultiselectTimingFunctions[i], timesWon[i]); // free results for(i = 0; i < numTests; i++) for(m = 0; m < NUMBEROFALGORITHMS; m++) if(algorithmsToTest[m]) free(resultsArray[m][i]); //free h_vec and h_vec_copy if(data == NULL) free(h_vec); free(h_vec_copy); //close the file fileCsv.close(); } /* This function generates the array of kVals to work on and acts as a wrapper for comparison. */ template<typename T> void runTests (uint generateType, char* fileName, uint startPower, uint stopPower , uint timesToTestEachK, uint kDistribution, uint startK, uint stopK, uint kJump) { uint algorithmsToRun[NUMBEROFALGORITHMS]= {1, 1}; uint size; // uint i; uint arrayOfKs[stopK+1]; /* ***************************** **** In this file, the kDistribution is not random. **** The number of order statistics (numKs) is fixed at 101. **** We only need to generate the kDistribuion one time for each size. 
***************************** */ unsigned long long seed; timeval t1; gettimeofday(&t1, NULL); seed = t1.tv_usec * t1.tv_sec; hiprandGenerator_t generator; srand(unsigned(time(NULL))); hiprandCreateGenerator(&generator, HIPRAND_RNG_PSEUDO_DEFAULT); hiprandSetPseudoRandomGeneratorSeed(generator,seed); // double the array size to the next powers of 2 for(size = (1 << startPower); size <= (1 << stopPower); size *= 2) { hipDeviceReset(); arrayOfKDistributionGenerators[kDistribution](arrayOfKs, stopK, size, generator); compareMultiselectAlgorithms<T>(size, arrayOfKs, stopK, timesToTestEachK, algorithmsToRun, generateType, kDistribution, fileName); } // end for(size) hiprandDestroyGenerator(generator); } // end runTests } // end namespace CompareMultiselect int main (int argc, char *argv[]) { using namespace CompareMultiselect; char *fileName, *hostName, *typeString; fileName = (char*) malloc(128 * sizeof(char)); typeString = (char*) malloc(10 * sizeof(char)); hostName = (char*) malloc(20 * sizeof(char)); gethostname(hostName, 20); #ifdef GPUNUMBER hipSetDevice(GPUNUMBER); #endif time_t rawtime; struct tm * timeinfo; time ( &rawtime ); timeinfo = localtime ( &rawtime ); char * humanTime = asctime(timeinfo); humanTime[strlen(humanTime)-1] = '\0'; uint testCount, type,distributionType,startPower,stopPower,kDistribution,startK ,stopK,jumpK; uint vecDistr[4]; vecDistr[0]=0; // Uniform vecDistr[1]=1; // Normal vecDistr[2]=3; // Half Normal vecDistr[3]=9; // Cauchy kDistribution=1; // Uniformly Spaced startPower=20; stopPower=29; startK=101; // This gives the 0,1,2,...,98,99,100 percentiles jumpK=1; stopK=101; testCount=25; for(int j=0; j<4; j++){ distributionType = vecDistr[j]; for(type=0; type<3; type++){ switch(type){ case 0: typeString = "float"; snprintf(fileName, 128, "%s %s k-dist:%s 2^%d to 2^%d (%d:%d:%d) %d-tests on %s at %s", typeString, getDistributionOptions(type, distributionType), getKDistributionOptions(kDistribution), startPower, stopPower, startK, jumpK, stopK, testCount, hostName, humanTime); printf("File Name: %s \n", fileName); runTests<float>(distributionType,fileName,startPower,stopPower,testCount, kDistribution,startK,stopK,jumpK); break; case 1: typeString = "double"; if (distributionType<2){ snprintf(fileName, 128, "%s %s k-dist:%s 2^%d to 2^%d (%d:%d:%d) %d-tests on %s at %s", typeString, getDistributionOptions(type, distributionType), getKDistributionOptions(kDistribution), startPower, stopPower, startK, jumpK, stopK, testCount, hostName, humanTime); printf("File Name: %s \n", fileName); runTests<double>(distributionType,fileName,startPower,stopPower,testCount, kDistribution,startK,stopK,jumpK); } // end if(distributionType) break; case 2: typeString = "uint"; if (distributionType<1){ snprintf(fileName, 128, "%s %s k-dist:%s 2^%d to 2^%d (%d:%d:%d) %d-tests on %s at %s", typeString, getDistributionOptions(type, distributionType), getKDistributionOptions(kDistribution), startPower, stopPower, startK, jumpK, stopK, testCount, hostName, humanTime); printf("File Name: %s \n", fileName); runTests<uint>(distributionType,fileName,startPower,stopPower,testCount, kDistribution,startK,stopK,jumpK); } // end if(distributionType) break; default: printf("You entered and invalid option, now exiting\n"); break; } // end switch(type) } // end for(type) } // end for (int j) free (fileName); return 0; }
b61bae2925d071ddb41d36beb6893685fa24e97a.cu
/* Copyright 2012 Jeffrey Blanchard, Erik Opavsky, and Emircan Uysaler * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "SMOSheader.cu" #define NUMBEROFALGORITHMS 2 char* namesOfMultiselectTimingFunctions[NUMBEROFALGORITHMS] = {"Sort and Choose Multiselect", "Bucket Multiselect"}; using namespace std; namespace CompareMultiselect { /* This function compares bucketMultiselect with the other algorithms given in the defined range of kVals and array size. */ template<typename T> void compareMultiselectAlgorithms(uint size, uint* kVals, uint numKs, uint numTests , uint *algorithmsToTest, uint generateType, uint kGenerateType, char* fileNamecsv , T* data = NULL) { // allocate space for operations T *h_vec, *h_vec_copy; float timeArray[NUMBEROFALGORITHMS][numTests]; T * resultsArray[NUMBEROFALGORITHMS][numTests]; float totalTimesPerAlgorithm[NUMBEROFALGORITHMS]; uint winnerArray[numTests]; uint timesWon[NUMBEROFALGORITHMS]; uint i,j,m,x; int runOrder[NUMBEROFALGORITHMS]; unsigned long long seed; //, seed2; results_t<T> *temp; ofstream fileCsv; timeval t1; //, t2; typedef results_t<T>* (*ptrToTimingFunction)(T*, uint, uint *, uint); typedef void (*ptrToGeneratingFunction)(T*, uint, curandGenerator_t); //these are the functions that can be called ptrToTimingFunction arrayOfTimingFunctions[NUMBEROFALGORITHMS] = {&timeSortAndChooseMultiselect<T>, &timeBucketMultiselect<T>}; ptrToGeneratingFunction *arrayOfGenerators; char** namesOfGeneratingFunctions; // this is the array of names of functions that generate problems of this type, // ie float, double, or uint namesOfGeneratingFunctions = returnNamesOfGenerators<T>(); arrayOfGenerators = (ptrToGeneratingFunction *) returnGenFunctions<T>(); printf("Files will be written to %s\n", fileNamecsv); fileCsv.open(fileNamecsv, ios_base::app); //zero out the totals and times won bzero(totalTimesPerAlgorithm, NUMBEROFALGORITHMS * sizeof(uint)); bzero(timesWon, NUMBEROFALGORITHMS * sizeof(uint)); //allocate space for h_vec, and h_vec_copy h_vec = (T *) malloc(size * sizeof(T)); h_vec_copy = (T *) malloc(size * sizeof(T)); //create the random generators. 
curandGenerator_t generator; srand(unsigned(time(NULL))); printf("The distribution is: %s\n", namesOfGeneratingFunctions[generateType]); printf("The k distribution is: %s\n", namesOfKGenerators[kGenerateType]); /***********************************************/ /*********** START RUNNING TESTS ************ /***********************************************/ for(i = 0; i < numTests; i++) { //cudaDeviceReset(); gettimeofday(&t1, NULL); seed = t1.tv_usec * t1.tv_sec; for(m = 0; m < NUMBEROFALGORITHMS;m++) runOrder[m] = m; std::random_shuffle(runOrder, runOrder + NUMBEROFALGORITHMS); fileCsv << size << "," << numKs << "," << namesOfGeneratingFunctions[generateType] << "," << namesOfKGenerators[kGenerateType] << ","; curandCreateGenerator(&generator, CURAND_RNG_PSEUDO_DEFAULT); curandSetPseudoRandomGeneratorSeed(generator,seed); printf("Running test %u of %u for size: %u and numK: %u\n", i + 1, numTests, size, numKs); //generate the random vector using the specified distribution if(data == NULL) arrayOfGenerators[generateType](h_vec, size, generator); else h_vec = data; //copy the vector to h_vec_copy, which will be used to restore it later memcpy(h_vec_copy, h_vec, size * sizeof(T)); /* *************************************************** ****** In this file, the kDistribution is always set to UNIFORM (kGenerateType = 1) ****** so this regeneration of the order statistics is not needed. ****** It is saved here in case one wants to run these tests for a different kDistribution *************************************************** // if the kdistribution is random, we need to generate new a kList for each new random problem instance. if ( (kGenerateType != 1) && (i>0) ){ gettimeofday(&t2, NULL); seed2 = t2.tv_usec * t2.tv_sec; curandGenerator_t generator2; srand(unsigned(time(NULL))); curandCreateGenerator(&generator2, CURAND_RNG_PSEUDO_DEFAULT); curandSetPseudoRandomGeneratorSeed(generator2,seed2); arrayOfKDistributionGenerators[kGenerateType](kVals, numKs, size, generator2); } */ winnerArray[i] = 0; float currentWinningTime = INFINITY; //run the various timing functions for(x = 0; x < NUMBEROFALGORITHMS; x++){ j = runOrder[x]; if(algorithmsToTest[j]){ //run timing function j printf("TESTING: %u\n", j); temp = arrayOfTimingFunctions[j](h_vec_copy, size, kVals, numKs); //record the time result timeArray[j][i] = temp->time; //record the value returned resultsArray[j][i] = temp->vals; //update the current "winner" if necessary if(timeArray[j][i] < currentWinningTime){ currentWinningTime = temp->time; winnerArray[i] = j; } //perform clean up free(temp); memcpy(h_vec_copy, h_vec, size * sizeof(T)); } } curandDestroyGenerator(generator); for(x = 0; x < NUMBEROFALGORITHMS; x++) if(algorithmsToTest[x]) fileCsv << namesOfMultiselectTimingFunctions[x] << "," << timeArray[x][i] << ","; // check for errors, and output information to recreate problem uint flag = 0; for(m = 1; m < NUMBEROFALGORITHMS;m++) if(algorithmsToTest[m]) for (j = 0; j < numKs; j++) { if(resultsArray[m][i][j] != resultsArray[0][i][j]) { flag++; fileCsv << "\nERROR ON TEST " << i << " of " << numTests << " tests!!!!!\n"; fileCsv << "vector size = " << size << "\nvector seed = " << seed << "\n"; fileCsv << "numKs = " << numKs << "\n"; fileCsv << "wrong k = " << kVals[j] << " kIndex = " << j << " wrong result = " << resultsArray[m][i][j] << " correct result = " << resultsArray[0][i][j] << "\n"; std::cout <<namesOfMultiselectTimingFunctions[m] << " did not return the correct answer on test " << i + 1 << " at k[" << j << "]. 
It got "<< resultsArray[m][i][j]; std::cout << " instead of " << resultsArray[0][i][j] << ".\n" ; std::cout << "RESULT:\t"; PrintFunctions::printBinary(resultsArray[m][i][j]); std::cout << "Right:\t"; PrintFunctions::printBinary(resultsArray[0][i][j]); } } fileCsv << flag << "\n"; } //calculate the total time each algorithm took for(i = 0; i < numTests; i++) for(j = 0; j < NUMBEROFALGORITHMS;j++) if(algorithmsToTest[j]) totalTimesPerAlgorithm[j] += timeArray[j][i]; //count the number of times each algorithm won. for(i = 0; i < numTests;i++) timesWon[winnerArray[i]]++; printf("\n\n"); //print out the average times for(i = 0; i < NUMBEROFALGORITHMS; i++) if(algorithmsToTest[i]) printf("%-20s averaged: %f ms\n", namesOfMultiselectTimingFunctions[i], totalTimesPerAlgorithm[i] / numTests); for(i = 0; i < NUMBEROFALGORITHMS; i++) if(algorithmsToTest[i]) printf("%s won %u times\n", namesOfMultiselectTimingFunctions[i], timesWon[i]); // free results for(i = 0; i < numTests; i++) for(m = 0; m < NUMBEROFALGORITHMS; m++) if(algorithmsToTest[m]) free(resultsArray[m][i]); //free h_vec and h_vec_copy if(data == NULL) free(h_vec); free(h_vec_copy); //close the file fileCsv.close(); } /* This function generates the array of kVals to work on and acts as a wrapper for comparison. */ template<typename T> void runTests (uint generateType, char* fileName, uint startPower, uint stopPower , uint timesToTestEachK, uint kDistribution, uint startK, uint stopK, uint kJump) { uint algorithmsToRun[NUMBEROFALGORITHMS]= {1, 1}; uint size; // uint i; uint arrayOfKs[stopK+1]; /* ***************************** **** In this file, the kDistribution is not random. **** The number of order statistics (numKs) is fixed at 101. **** We only need to generate the kDistribuion one time for each size. 
***************************** */ unsigned long long seed; timeval t1; gettimeofday(&t1, NULL); seed = t1.tv_usec * t1.tv_sec; curandGenerator_t generator; srand(unsigned(time(NULL))); curandCreateGenerator(&generator, CURAND_RNG_PSEUDO_DEFAULT); curandSetPseudoRandomGeneratorSeed(generator,seed); // double the array size to the next powers of 2 for(size = (1 << startPower); size <= (1 << stopPower); size *= 2) { cudaDeviceReset(); arrayOfKDistributionGenerators[kDistribution](arrayOfKs, stopK, size, generator); compareMultiselectAlgorithms<T>(size, arrayOfKs, stopK, timesToTestEachK, algorithmsToRun, generateType, kDistribution, fileName); } // end for(size) curandDestroyGenerator(generator); } // end runTests } // end namespace CompareMultiselect int main (int argc, char *argv[]) { using namespace CompareMultiselect; char *fileName, *hostName, *typeString; fileName = (char*) malloc(128 * sizeof(char)); typeString = (char*) malloc(10 * sizeof(char)); hostName = (char*) malloc(20 * sizeof(char)); gethostname(hostName, 20); #ifdef GPUNUMBER cudaSetDevice(GPUNUMBER); #endif time_t rawtime; struct tm * timeinfo; time ( &rawtime ); timeinfo = localtime ( &rawtime ); char * humanTime = asctime(timeinfo); humanTime[strlen(humanTime)-1] = '\0'; uint testCount, type,distributionType,startPower,stopPower,kDistribution,startK ,stopK,jumpK; uint vecDistr[4]; vecDistr[0]=0; // Uniform vecDistr[1]=1; // Normal vecDistr[2]=3; // Half Normal vecDistr[3]=9; // Cauchy kDistribution=1; // Uniformly Spaced startPower=20; stopPower=29; startK=101; // This gives the 0,1,2,...,98,99,100 percentiles jumpK=1; stopK=101; testCount=25; for(int j=0; j<4; j++){ distributionType = vecDistr[j]; for(type=0; type<3; type++){ switch(type){ case 0: typeString = "float"; snprintf(fileName, 128, "%s %s k-dist:%s 2^%d to 2^%d (%d:%d:%d) %d-tests on %s at %s", typeString, getDistributionOptions(type, distributionType), getKDistributionOptions(kDistribution), startPower, stopPower, startK, jumpK, stopK, testCount, hostName, humanTime); printf("File Name: %s \n", fileName); runTests<float>(distributionType,fileName,startPower,stopPower,testCount, kDistribution,startK,stopK,jumpK); break; case 1: typeString = "double"; if (distributionType<2){ snprintf(fileName, 128, "%s %s k-dist:%s 2^%d to 2^%d (%d:%d:%d) %d-tests on %s at %s", typeString, getDistributionOptions(type, distributionType), getKDistributionOptions(kDistribution), startPower, stopPower, startK, jumpK, stopK, testCount, hostName, humanTime); printf("File Name: %s \n", fileName); runTests<double>(distributionType,fileName,startPower,stopPower,testCount, kDistribution,startK,stopK,jumpK); } // end if(distributionType) break; case 2: typeString = "uint"; if (distributionType<1){ snprintf(fileName, 128, "%s %s k-dist:%s 2^%d to 2^%d (%d:%d:%d) %d-tests on %s at %s", typeString, getDistributionOptions(type, distributionType), getKDistributionOptions(kDistribution), startPower, stopPower, startK, jumpK, stopK, testCount, hostName, humanTime); printf("File Name: %s \n", fileName); runTests<uint>(distributionType,fileName,startPower,stopPower,testCount, kDistribution,startK,stopK,jumpK); } // end if(distributionType) break; default: printf("You entered and invalid option, now exiting\n"); break; } // end switch(type) } // end for(type) } // end for (int j) free (fileName); return 0; }
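/* Both versions of the multiselect driver above follow the same host-side RNG
   lifecycle: create a curand/hiprand host-API generator, seed it from the wall
   clock, hand it to the vector and k-distribution generators, and destroy it
   when the test is done.  The sketch below isolates that lifecycle and simply
   fills a device buffer with uniform floats; the buffer size and the direct
   use of curandGenerateUniform are illustrative assumptions (the driver
   delegates generation to its own distribution functions). */
#include <cstdio>
#include <ctime>
#include <cuda_runtime.h>
#include <curand.h>

int main()
{
    const size_t n = 1 << 20;
    float *dVec = nullptr;
    cudaMalloc(&dVec, n * sizeof(float));

    // Create and seed the generator (hipified names: hiprandCreateGenerator,
    // HIPRAND_RNG_PSEUDO_DEFAULT, hiprandSetPseudoRandomGeneratorSeed).
    curandGenerator_t gen;
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
    curandSetPseudoRandomGeneratorSeed(gen, (unsigned long long)time(NULL));

    // Fill the device buffer with uniform floats in (0, 1].
    curandGenerateUniform(gen, dVec, n);
    cudaDeviceSynchronize();

    float h[4];
    cudaMemcpy(h, dVec, sizeof(h), cudaMemcpyDeviceToHost);
    printf("first samples: %f %f %f %f\n", h[0], h[1], h[2], h[3]);

    curandDestroyGenerator(gen);
    cudaFree(dVec);
    return 0;
}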
25297fb05916cfaded9c9e9954eea72b6026b37b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2011 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ #include "common.h" /////////////////////////////////////////////////////////////////////////////// /// texture references /////////////////////////////////////////////////////////////////////////////// /// source image texture<float, 2, hipReadModeElementType> texSource; /// tracked image texture<float, 2, hipReadModeElementType> texTarget; /////////////////////////////////////////////////////////////////////////////// /// \brief compute image derivatives /// /// CUDA kernel, relies heavily on texture unit /// \param[in] width image width /// \param[in] height image height /// \param[in] stride image stride /// \param[out] Ix x derivative /// \param[out] Iy y derivative /// \param[out] Iz temporal derivative /////////////////////////////////////////////////////////////////////////////// __global__ void ComputeDerivativesKernel(int width, int height, int stride, float *Ix, float *Iy, float *Iz) { const int ix = threadIdx.x + blockIdx.x * blockDim.x; const int iy = threadIdx.y + blockIdx.y * blockDim.y; const int pos = ix + iy * stride; if (ix >= width || iy >= height) return; float dx = 1.0f / (float)width; float dy = 1.0f / (float)height; float x = ((float)ix + 0.5f) * dx; float y = ((float)iy + 0.5f) * dy; float t0, t1; // x derivative t0 = tex2D(texSource, x - 2.0f * dx, y); t0 -= tex2D(texSource, x - 1.0f * dx, y) * 8.0f; t0 += tex2D(texSource, x + 1.0f * dx, y) * 8.0f; t0 -= tex2D(texSource, x + 2.0f * dx, y); t0 /= 12.0f; t1 = tex2D(texTarget, x - 2.0f * dx, y); t1 -= tex2D(texTarget, x - 1.0f * dx, y) * 8.0f; t1 += tex2D(texTarget, x + 1.0f * dx, y) * 8.0f; t1 -= tex2D(texTarget, x + 2.0f * dx, y); t1 /= 12.0f; Ix[pos] = (t0 + t1) * 0.5f; // t derivative Iz[pos] = tex2D(texTarget, x, y) - tex2D(texSource, x, y); // y derivative t0 = tex2D(texSource, x, y - 2.0f * dy); t0 -= tex2D(texSource, x, y - 1.0f * dy) * 8.0f; t0 += tex2D(texSource, x, y + 1.0f * dy) * 8.0f; t0 -= tex2D(texSource, x, y + 2.0f * dy); t0 /= 12.0f; t1 = tex2D(texTarget, x, y - 2.0f * dy); t1 -= tex2D(texTarget, x, y - 1.0f * dy) * 8.0f; t1 += tex2D(texTarget, x, y + 1.0f * dy) * 8.0f; t1 -= tex2D(texTarget, x, y + 2.0f * dy); t1 /= 12.0f; Iy[pos] = (t0 + t1) * 0.5f; } /////////////////////////////////////////////////////////////////////////////// /// \brief compute image derivatives /// /// \param[in] I0 source image /// \param[in] I1 tracked image /// \param[in] w image width /// \param[in] h image height /// \param[in] s image stride /// \param[out] Ix x derivative /// \param[out] Iy y derivative /// \param[out] Iz temporal derivative /////////////////////////////////////////////////////////////////////////////// static void ComputeDerivatives(const float *I0, const float *I1, int w, int h, int s, float *Ix, float *Iy, float *Iz) { dim3 threads(32, 6); dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y)); // mirror if a coordinate value is 
out-of-range texSource.addressMode[0] = hipAddressModeMirror; texSource.addressMode[1] = hipAddressModeMirror; texSource.filterMode = hipFilterModeLinear; texSource.normalized = true; texTarget.addressMode[0] = hipAddressModeMirror; texTarget.addressMode[1] = hipAddressModeMirror; texTarget.filterMode = hipFilterModeLinear; texTarget.normalized = true; hipChannelFormatDesc desc = hipCreateChannelDesc<float>(); hipBindTexture2D(0, texSource, I0, w, h, s * sizeof(float)); hipBindTexture2D(0, texTarget, I1, w, h, s * sizeof(float)); hipLaunchKernelGGL(( ComputeDerivativesKernel), dim3(blocks), dim3(threads), 0, 0, w, h, s, Ix, Iy, Iz); }
25297fb05916cfaded9c9e9954eea72b6026b37b.cu
/* * Copyright 1993-2011 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ #include "common.h" /////////////////////////////////////////////////////////////////////////////// /// texture references /////////////////////////////////////////////////////////////////////////////// /// source image texture<float, 2, cudaReadModeElementType> texSource; /// tracked image texture<float, 2, cudaReadModeElementType> texTarget; /////////////////////////////////////////////////////////////////////////////// /// \brief compute image derivatives /// /// CUDA kernel, relies heavily on texture unit /// \param[in] width image width /// \param[in] height image height /// \param[in] stride image stride /// \param[out] Ix x derivative /// \param[out] Iy y derivative /// \param[out] Iz temporal derivative /////////////////////////////////////////////////////////////////////////////// __global__ void ComputeDerivativesKernel(int width, int height, int stride, float *Ix, float *Iy, float *Iz) { const int ix = threadIdx.x + blockIdx.x * blockDim.x; const int iy = threadIdx.y + blockIdx.y * blockDim.y; const int pos = ix + iy * stride; if (ix >= width || iy >= height) return; float dx = 1.0f / (float)width; float dy = 1.0f / (float)height; float x = ((float)ix + 0.5f) * dx; float y = ((float)iy + 0.5f) * dy; float t0, t1; // x derivative t0 = tex2D(texSource, x - 2.0f * dx, y); t0 -= tex2D(texSource, x - 1.0f * dx, y) * 8.0f; t0 += tex2D(texSource, x + 1.0f * dx, y) * 8.0f; t0 -= tex2D(texSource, x + 2.0f * dx, y); t0 /= 12.0f; t1 = tex2D(texTarget, x - 2.0f * dx, y); t1 -= tex2D(texTarget, x - 1.0f * dx, y) * 8.0f; t1 += tex2D(texTarget, x + 1.0f * dx, y) * 8.0f; t1 -= tex2D(texTarget, x + 2.0f * dx, y); t1 /= 12.0f; Ix[pos] = (t0 + t1) * 0.5f; // t derivative Iz[pos] = tex2D(texTarget, x, y) - tex2D(texSource, x, y); // y derivative t0 = tex2D(texSource, x, y - 2.0f * dy); t0 -= tex2D(texSource, x, y - 1.0f * dy) * 8.0f; t0 += tex2D(texSource, x, y + 1.0f * dy) * 8.0f; t0 -= tex2D(texSource, x, y + 2.0f * dy); t0 /= 12.0f; t1 = tex2D(texTarget, x, y - 2.0f * dy); t1 -= tex2D(texTarget, x, y - 1.0f * dy) * 8.0f; t1 += tex2D(texTarget, x, y + 1.0f * dy) * 8.0f; t1 -= tex2D(texTarget, x, y + 2.0f * dy); t1 /= 12.0f; Iy[pos] = (t0 + t1) * 0.5f; } /////////////////////////////////////////////////////////////////////////////// /// \brief compute image derivatives /// /// \param[in] I0 source image /// \param[in] I1 tracked image /// \param[in] w image width /// \param[in] h image height /// \param[in] s image stride /// \param[out] Ix x derivative /// \param[out] Iy y derivative /// \param[out] Iz temporal derivative /////////////////////////////////////////////////////////////////////////////// static void ComputeDerivatives(const float *I0, const float *I1, int w, int h, int s, float *Ix, float *Iy, float *Iz) { dim3 threads(32, 6); dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y)); // mirror if a coordinate value is out-of-range texSource.addressMode[0] = cudaAddressModeMirror; texSource.addressMode[1] = 
cudaAddressModeMirror; texSource.filterMode = cudaFilterModeLinear; texSource.normalized = true; texTarget.addressMode[0] = cudaAddressModeMirror; texTarget.addressMode[1] = cudaAddressModeMirror; texTarget.filterMode = cudaFilterModeLinear; texTarget.normalized = true; cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>(); cudaBindTexture2D(0, texSource, I0, w, h, s * sizeof(float)); cudaBindTexture2D(0, texTarget, I1, w, h, s * sizeof(float)); ComputeDerivativesKernel<<<blocks, threads>>>(w, h, s, Ix, Iy, Iz); }
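/* The derivative kernel above uses the legacy texture *reference* API
   (texture<float, 2, ...> globals plus cudaBindTexture2D), which hipify maps
   one-to-one but which is deprecated in recent CUDA toolkits and dropped in
   CUDA 12.  The sketch below shows the equivalent setup with the texture
   *object* API: same mirror addressing, linear filtering and normalized
   coordinates, but the texture is an explicit kernel argument.  The sizes and
   the trivial kernel body are illustrative assumptions. */
#include <cstdio>
#include <cuda_runtime.h>

__global__ void sampleKernel(cudaTextureObject_t tex, float *out, int w, int h)
{
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    int iy = blockIdx.y * blockDim.y + threadIdx.y;
    if (ix >= w || iy >= h) return;
    float x = (ix + 0.5f) / w;            // normalized coordinates, as in ComputeDerivativesKernel
    float y = (iy + 0.5f) / h;
    out[ix + iy * w] = tex2D<float>(tex, x, y);
}

int main()
{
    const int w = 256, h = 256;
    size_t pitch = 0;
    float *dImg = nullptr, *dOut = nullptr;
    cudaMallocPitch(&dImg, &pitch, w * sizeof(float), h);
    cudaMalloc(&dOut, w * h * sizeof(float));

    cudaResourceDesc resDesc = {};
    resDesc.resType = cudaResourceTypePitch2D;
    resDesc.res.pitch2D.devPtr = dImg;
    resDesc.res.pitch2D.desc = cudaCreateChannelDesc<float>();
    resDesc.res.pitch2D.width = w;
    resDesc.res.pitch2D.height = h;
    resDesc.res.pitch2D.pitchInBytes = pitch;

    cudaTextureDesc texDesc = {};
    texDesc.addressMode[0] = cudaAddressModeMirror;   // mirror out-of-range coordinates
    texDesc.addressMode[1] = cudaAddressModeMirror;
    texDesc.filterMode = cudaFilterModeLinear;        // bilinear interpolation
    texDesc.readMode = cudaReadModeElementType;
    texDesc.normalizedCoords = 1;

    cudaTextureObject_t tex = 0;
    cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL);

    dim3 threads(32, 6);
    dim3 blocks((w + threads.x - 1) / threads.x, (h + threads.y - 1) / threads.y);
    sampleKernel<<<blocks, threads>>>(tex, dOut, w, h);
    cudaDeviceSynchronize();
    printf("sampled %dx%d image through a texture object\n", w, h);

    cudaDestroyTextureObject(tex);
    cudaFree(dImg);
    cudaFree(dOut);
    return 0;
}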
68e5d448e2ff36f94629f52a3e371daf15d0bf83.hip
// !!! This is a file automatically generated by hipify!!! #include <drivers/gauss_newton_driver.h> #include <solvers/sampled_trust_cg.h> #include <solvers/params.h> #include <core/errors.h> #include <device/cuda_utils.h> #include <device/gen_random.h> #include <device/device_defines.h> #include <device/handles.h> #include <functions/dev_initializations.h> #include <utilities/print_utils.h> #include <limits.h> #include <stdlib.h> #include <stdio.h> #include <float.h> void readVecFromFileGN( real *dev, real *host ) { int rows = readVector( host, INT_MAX, "./weights.txt", 0, NULL); copy_host_device( host, dev, rows * sizeof(real), hipMemcpyHostToDevice, ERROR_MEMCPY_HOST_DEVICE ); fprintf( stderr, "Finished reading Vec (%d) from file \n", rows ); for (int i = 0; i < 10; i ++) fprintf( stderr, "%6.10f \n", host[i] ); } void initGNTrustRegionParams( TRUST_REGION_PARAMS *params, int n ) { //sampled_tr_cg.m file. params->delta = 1200; params->maxDelta = 12000; params->eta1 = 0.8; params->eta2 = 1e-4; params->gamma1 = 2; params->gamma2 = 1.2; params->maxProps = ULONG_MAX; params->maxMatVecs = 1e15; params->maxEpochs = 2000; params->maxIters = 250; //defaults from curves_autoencoder.m params->alpha = 0.01; // SGD Momentum params->hs = floor( 0.1 * n ); // Hessian sample size //no regularization params->lambda = 0; //loop variants here. params->curIteration = 0; //SolverType params->hessianType = GAUSS_NEWTON; } void testGaussNewton (NN_MODEL *model, DEVICE_DATASET *data, SCRATCH_AREA *scratch ) { TRUST_REGION_PARAMS trParams; //begin here fprintf( stderr, "Initiating the Trust Region Test now..... \n\n\n"); initGNTrustRegionParams( &trParams, data->trainSizeX ); fprintf( stderr, "... Done parms initialization \n\n"); /* //set the weights to 0 here. #ifdef DEBUG_FIXED cuda_memset( data->weights, 0, sizeof(real) * model->pSize, ERROR_MEMSET ); #endif getRandomVector( model->pSize, NULL, scratch->nextDevPtr, RAND_NORMAL ); copy_device( data->weights, scratch->nextDevPtr, sizeof(real) * model->pSize, ERROR_MEMCPY_DEVICE_DEVICE ); #ifdef DEBUG_FIXED readVecFromFileTR( data->weights, scratch->nextHostPtr ); #endif real scale = 0.25; cublasCheckError( hipblasDscal( cublasHandle, model->pSize, &scale, data->weights, 1 )); */ /* int b = model->pSize; int numBlocks = b / BLOCK_SIZE + (( b % BLOCK_SIZE == 0) ? 0 : 1 ); kerInitOneVector <<< numBlocks, BLOCK_SIZE>>> ( data->weights, b ); hipDeviceSynchronize (); cudaCheckError (); */ cuda_memset( data->weights, 0, sizeof(real) * model->pSize, ERROR_MEMSET ); subsampledTrustRegionCG( model, data, &trParams, scratch ); fprintf( stderr, ".... Done testing of subsampledTrustRegion \n\n\n" ); }
68e5d448e2ff36f94629f52a3e371daf15d0bf83.cu
#include <drivers/gauss_newton_driver.h> #include <solvers/sampled_trust_cg.h> #include <solvers/params.h> #include <core/errors.h> #include <device/cuda_utils.h> #include <device/gen_random.h> #include <device/device_defines.h> #include <device/handles.h> #include <functions/dev_initializations.h> #include <utilities/print_utils.h> #include <limits.h> #include <stdlib.h> #include <stdio.h> #include <float.h> void readVecFromFileGN( real *dev, real *host ) { int rows = readVector( host, INT_MAX, "./weights.txt", 0, NULL); copy_host_device( host, dev, rows * sizeof(real), cudaMemcpyHostToDevice, ERROR_MEMCPY_HOST_DEVICE ); fprintf( stderr, "Finished reading Vec (%d) from file \n", rows ); for (int i = 0; i < 10; i ++) fprintf( stderr, "%6.10f \n", host[i] ); } void initGNTrustRegionParams( TRUST_REGION_PARAMS *params, int n ) { //sampled_tr_cg.m file. params->delta = 1200; params->maxDelta = 12000; params->eta1 = 0.8; params->eta2 = 1e-4; params->gamma1 = 2; params->gamma2 = 1.2; params->maxProps = ULONG_MAX; params->maxMatVecs = 1e15; params->maxEpochs = 2000; params->maxIters = 250; //defaults from curves_autoencoder.m params->alpha = 0.01; // SGD Momentum params->hs = floor( 0.1 * n ); // Hessian sample size //no regularization params->lambda = 0; //loop variants here. params->curIteration = 0; //SolverType params->hessianType = GAUSS_NEWTON; } void testGaussNewton (NN_MODEL *model, DEVICE_DATASET *data, SCRATCH_AREA *scratch ) { TRUST_REGION_PARAMS trParams; //begin here fprintf( stderr, "Initiating the Trust Region Test now..... \n\n\n"); initGNTrustRegionParams( &trParams, data->trainSizeX ); fprintf( stderr, "... Done parms initialization \n\n"); /* //set the weights to 0 here. #ifdef DEBUG_FIXED cuda_memset( data->weights, 0, sizeof(real) * model->pSize, ERROR_MEMSET ); #endif getRandomVector( model->pSize, NULL, scratch->nextDevPtr, RAND_NORMAL ); copy_device( data->weights, scratch->nextDevPtr, sizeof(real) * model->pSize, ERROR_MEMCPY_DEVICE_DEVICE ); #ifdef DEBUG_FIXED readVecFromFileTR( data->weights, scratch->nextHostPtr ); #endif real scale = 0.25; cublasCheckError( cublasDscal( cublasHandle, model->pSize, &scale, data->weights, 1 )); */ /* int b = model->pSize; int numBlocks = b / BLOCK_SIZE + (( b % BLOCK_SIZE == 0) ? 0 : 1 ); kerInitOneVector <<< numBlocks, BLOCK_SIZE>>> ( data->weights, b ); cudaThreadSynchronize (); cudaCheckError (); */ cuda_memset( data->weights, 0, sizeof(real) * model->pSize, ERROR_MEMSET ); subsampledTrustRegionCG( model, data, &trParams, scratch ); fprintf( stderr, ".... Done testing of subsampledTrustRegion \n\n\n" ); }
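/* In the commented-out block of the driver above, the grid size is computed as
   b / BLOCK_SIZE plus one extra block for the remainder, a ones-vector kernel
   is launched, and the host then waits with cudaThreadSynchronize() -- an API
   that is deprecated and that hipify rewrites to hipDeviceSynchronize().  The
   sketch below shows the same initialization pattern with the current
   cudaDeviceSynchronize(); kerInitOneVector's real signature is not shown in
   the pair, so the kernel here is an assumed stand-in. */
#include <cstdio>
#include <cuda_runtime.h>

#define BLOCK_SIZE 256

__global__ void kerInitOneVectorSketch(double *vec, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) vec[i] = 1.0;      // fill the parameter vector with ones
}

int main()
{
    const int pSize = 100000;     // stand-in for model->pSize
    double *dWeights = nullptr;
    cudaMalloc(&dWeights, pSize * sizeof(double));

    // Zero the weights first, as the driver does through its cuda_memset wrapper.
    cudaMemset(dWeights, 0, pSize * sizeof(double));

    // Same ceil-division as in the driver: one extra block when pSize is not
    // a multiple of BLOCK_SIZE.
    int numBlocks = pSize / BLOCK_SIZE + ((pSize % BLOCK_SIZE == 0) ? 0 : 1);
    kerInitOneVectorSketch<<<numBlocks, BLOCK_SIZE>>>(dWeights, pSize);

    cudaDeviceSynchronize();      // replaces the deprecated cudaThreadSynchronize()
    printf("initialized %d weights in %d blocks\n", pSize, numBlocks);

    cudaFree(dWeights);
    return 0;
}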
4c0155dfa7d92f297d2885b9ce9b93bcb263131f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #define CHECK(call) \ { \ const hipError_t error = call; \ if (error != hipSuccess) \ { \ fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \ fprintf(stderr, "code: %d, reason: %s\n", error, \ hipGetErrorString(error)); \ exit(1); \ } \ } struct GpuTimer { hipEvent_t start; hipEvent_t stop; GpuTimer() { hipEventCreate(&start); hipEventCreate(&stop); } ~GpuTimer() { hipEventDestroy(start); hipEventDestroy(stop); } void Start() { hipEventRecord(start, 0); hipEventSynchronize(start); } void Stop() { hipEventRecord(stop, 0); } float Elapsed() { float elapsed; hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); return elapsed; } }; /* Scan within each block's data (work-inefficient), write results to "out", and write each block's sum to "blkSums" if "blkSums" is not NULL. */ __global__ void scanBlkKernel(int * in, int n, int * out, int * blkSums) { // TODO extern __shared__ int s_data[]; int i = blockIdx.x * blockDim.x + threadIdx.x; s_data[threadIdx.x] = (i < n) ? in[i] : 0; __syncthreads(); for (int stride = 1; stride < blockDim.x; stride *= 2) { int temp = 0; if (threadIdx.x >= stride) { temp = s_data[threadIdx.x - stride]; } __syncthreads(); if (threadIdx.x >= stride) { s_data[threadIdx.x] += temp; } __syncthreads(); } out[i] = s_data[threadIdx.x]; if (blkSums != NULL && threadIdx.x == 0) { blkSums[blockIdx.x] = s_data[blockDim.x - 1]; } } // TODO: You can define necessary functions here __global__ void scanSumKernel(int *in, int *blkSums, int n) { if (blockIdx.x >= 1) { int i = blockIdx.x * blockDim.x + threadIdx.x; in[i] += blkSums[blockIdx.x - 1]; } } void scan(int * in, int n, int * out, bool useDevice = false, dim3 blkSize = dim3(1)) { GpuTimer timer; timer.Start(); if (useDevice == false) { printf("\nScan by host\n"); out[0] = in[0]; for (int i = 1; i < n; i++) { out[i] = out[i - 1] + in[i]; } } else // Use device { printf("\nScan by device\n"); // TODO // Khoi tao kich thuoc grid va block dim3 gridSize((n - 1) / blkSize.x + 1); int smem_size = blkSize.x * sizeof(int); // Khoi tao bien blkSums int *blkSums = (int*)malloc(gridSize.x * sizeof(int)); // Cap phat bo nho int *d_in; CHECK(hipMalloc(&d_in, n * sizeof(int))); int *d_out; CHECK(hipMalloc(&d_out, n * sizeof(int))); int *d_blkSums; CHECK(hipMalloc(&d_blkSums, gridSize.x * sizeof(int))); // Sao chep du lieu tu host sang device CHECK(hipMemcpy(d_in, in, n * sizeof(int), hipMemcpyHostToDevice)); // Goi ham kernel scan scanBlkKernel << <gridSize, blkSize, smem_size >> > (d_in, n, d_out, d_blkSums); CHECK(hipGetLastError()); // Chep du lieu tu device sang host CHECK(hipMemcpy(blkSums, d_blkSums, gridSize.x * sizeof(int), hipMemcpyDeviceToHost)); // Goi ham scan tai host cho mang blkSums int *scan_blkSums = (int*)malloc(gridSize.x * sizeof(int)); scan_blkSums[0] = blkSums[0]; for (int i = 1; i < gridSize.x; i++) { scan_blkSums[i] = scan_blkSums[i - 1] + blkSums[i]; } // Chep du lieu tu host sang device CHECK(hipMemcpy(d_blkSums, scan_blkSums, gridSize.x * sizeof(int), hipMemcpyHostToDevice)); // Goi ham kernel de tinh tong scanSumKernel << <gridSize, blkSize >> > (d_out, d_blkSums, n); CHECK(hipGetLastError()); // Chep du lieu tu device sang host CHECK(hipMemcpy(out, d_out, n * sizeof(int), hipMemcpyDeviceToHost)); // Giai phong du lieu CHECK(hipFree(d_in)); CHECK(hipFree(d_out)); CHECK(hipFree(d_blkSums)); } timer.Stop(); printf("Processing time: %.3f ms\n", timer.Elapsed()); } void printDeviceInfo() { 
hipDeviceProp_t devProv; CHECK(hipGetDeviceProperties(&devProv, 0)); printf("**********GPU info**********\n"); printf("Name: %s\n", devProv.name); printf("Compute capability: %d.%d\n", devProv.major, devProv.minor); printf("Num SMs: %d\n", devProv.multiProcessorCount); printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor); printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize); printf("GMEM: %zu byte\n", devProv.totalGlobalMem); printf("SMEM per SM: %zu byte\n", devProv.sharedMemPerMultiprocessor); printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock); printf("****************************\n"); } void checkCorrectness(int * out, int * correctOut, int n) { for (int i = 0; i < n; i++) { if (out[i] != correctOut[i]) { printf("INCORRECT :(\n"); return; } } printf("CORRECT :)\n"); } int main(int argc, char ** argv) { // PRINT OUT DEVICE INFO printDeviceInfo(); // SET UP INPUT SIZE int n = (1 << 24) + 1; printf("\nInput size: %d\n", n); // ALLOCATE MEMORIES size_t bytes = n * sizeof(int); int * in = (int *)malloc(bytes); int * out = (int *)malloc(bytes); // Device result int * correctOut = (int *)malloc(bytes); // Host result // SET UP INPUT DATA for (int i = 0; i < n; i++) in[i] = (int)(rand() & 0xFF) - 127; // random int in [-127, 128] // DETERMINE BLOCK SIZE dim3 blockSize(512); if (argc == 2) { blockSize.x = atoi(argv[1]); } // SCAN BY HOST scan(in, n, correctOut); // SCAN BY DEVICE scan(in, n, out, true, blockSize); checkCorrectness(out, correctOut, n); // FREE MEMORIES free(in); free(out); free(correctOut); return EXIT_SUCCESS; }
4c0155dfa7d92f297d2885b9ce9b93bcb263131f.cu
#include <stdio.h> #define CHECK(call) \ { \ const cudaError_t error = call; \ if (error != cudaSuccess) \ { \ fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \ fprintf(stderr, "code: %d, reason: %s\n", error, \ cudaGetErrorString(error)); \ exit(1); \ } \ } struct GpuTimer { cudaEvent_t start; cudaEvent_t stop; GpuTimer() { cudaEventCreate(&start); cudaEventCreate(&stop); } ~GpuTimer() { cudaEventDestroy(start); cudaEventDestroy(stop); } void Start() { cudaEventRecord(start, 0); cudaEventSynchronize(start); } void Stop() { cudaEventRecord(stop, 0); } float Elapsed() { float elapsed; cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); return elapsed; } }; /* Scan within each block's data (work-inefficient), write results to "out", and write each block's sum to "blkSums" if "blkSums" is not NULL. */ __global__ void scanBlkKernel(int * in, int n, int * out, int * blkSums) { // TODO extern __shared__ int s_data[]; int i = blockIdx.x * blockDim.x + threadIdx.x; s_data[threadIdx.x] = (i < n) ? in[i] : 0; __syncthreads(); for (int stride = 1; stride < blockDim.x; stride *= 2) { int temp = 0; if (threadIdx.x >= stride) { temp = s_data[threadIdx.x - stride]; } __syncthreads(); if (threadIdx.x >= stride) { s_data[threadIdx.x] += temp; } __syncthreads(); } out[i] = s_data[threadIdx.x]; if (blkSums != NULL && threadIdx.x == 0) { blkSums[blockIdx.x] = s_data[blockDim.x - 1]; } } // TODO: You can define necessary functions here __global__ void scanSumKernel(int *in, int *blkSums, int n) { if (blockIdx.x >= 1) { int i = blockIdx.x * blockDim.x + threadIdx.x; in[i] += blkSums[blockIdx.x - 1]; } } void scan(int * in, int n, int * out, bool useDevice = false, dim3 blkSize = dim3(1)) { GpuTimer timer; timer.Start(); if (useDevice == false) { printf("\nScan by host\n"); out[0] = in[0]; for (int i = 1; i < n; i++) { out[i] = out[i - 1] + in[i]; } } else // Use device { printf("\nScan by device\n"); // TODO // Khoi tao kich thuoc grid va block dim3 gridSize((n - 1) / blkSize.x + 1); int smem_size = blkSize.x * sizeof(int); // Khoi tao bien blkSums int *blkSums = (int*)malloc(gridSize.x * sizeof(int)); // Cap phat bo nho int *d_in; CHECK(cudaMalloc(&d_in, n * sizeof(int))); int *d_out; CHECK(cudaMalloc(&d_out, n * sizeof(int))); int *d_blkSums; CHECK(cudaMalloc(&d_blkSums, gridSize.x * sizeof(int))); // Sao chep du lieu tu host sang device CHECK(cudaMemcpy(d_in, in, n * sizeof(int), cudaMemcpyHostToDevice)); // Goi ham kernel scan scanBlkKernel << <gridSize, blkSize, smem_size >> > (d_in, n, d_out, d_blkSums); CHECK(cudaGetLastError()); // Chep du lieu tu device sang host CHECK(cudaMemcpy(blkSums, d_blkSums, gridSize.x * sizeof(int), cudaMemcpyDeviceToHost)); // Goi ham scan tai host cho mang blkSums int *scan_blkSums = (int*)malloc(gridSize.x * sizeof(int)); scan_blkSums[0] = blkSums[0]; for (int i = 1; i < gridSize.x; i++) { scan_blkSums[i] = scan_blkSums[i - 1] + blkSums[i]; } // Chep du lieu tu host sang device CHECK(cudaMemcpy(d_blkSums, scan_blkSums, gridSize.x * sizeof(int), cudaMemcpyHostToDevice)); // Goi ham kernel de tinh tong scanSumKernel << <gridSize, blkSize >> > (d_out, d_blkSums, n); CHECK(cudaGetLastError()); // Chep du lieu tu device sang host CHECK(cudaMemcpy(out, d_out, n * sizeof(int), cudaMemcpyDeviceToHost)); // Giai phong du lieu CHECK(cudaFree(d_in)); CHECK(cudaFree(d_out)); CHECK(cudaFree(d_blkSums)); } timer.Stop(); printf("Processing time: %.3f ms\n", timer.Elapsed()); } void printDeviceInfo() { cudaDeviceProp devProv; 
CHECK(cudaGetDeviceProperties(&devProv, 0)); printf("**********GPU info**********\n"); printf("Name: %s\n", devProv.name); printf("Compute capability: %d.%d\n", devProv.major, devProv.minor); printf("Num SMs: %d\n", devProv.multiProcessorCount); printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor); printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize); printf("GMEM: %zu byte\n", devProv.totalGlobalMem); printf("SMEM per SM: %zu byte\n", devProv.sharedMemPerMultiprocessor); printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock); printf("****************************\n"); } void checkCorrectness(int * out, int * correctOut, int n) { for (int i = 0; i < n; i++) { if (out[i] != correctOut[i]) { printf("INCORRECT :(\n"); return; } } printf("CORRECT :)\n"); } int main(int argc, char ** argv) { // PRINT OUT DEVICE INFO printDeviceInfo(); // SET UP INPUT SIZE int n = (1 << 24) + 1; printf("\nInput size: %d\n", n); // ALLOCATE MEMORIES size_t bytes = n * sizeof(int); int * in = (int *)malloc(bytes); int * out = (int *)malloc(bytes); // Device result int * correctOut = (int *)malloc(bytes); // Host result // SET UP INPUT DATA for (int i = 0; i < n; i++) in[i] = (int)(rand() & 0xFF) - 127; // random int in [-127, 128] // DETERMINE BLOCK SIZE dim3 blockSize(512); if (argc == 2) { blockSize.x = atoi(argv[1]); } // SCAN BY HOST scan(in, n, correctOut); // SCAN BY DEVICE scan(in, n, out, true, blockSize); checkCorrectness(out, correctOut, n); // FREE MEMORIES free(in); free(out); free(correctOut); return EXIT_SUCCESS; }
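The device path in the scan file above follows the usual two-level scheme: a per-block inclusive scan, a scan of the block sums, then a fix-up kernel that adds each preceding block's total. A minimal CPU sketch of the same three-phase recombination, handy as an independent reference when validating the GPU output; the function name and layout are illustrative, not part of the original file:

#include <stdlib.h>

/* Host-only reference of the blocked inclusive scan used above. */
void blockedInclusiveScan(const int *in, int n, int *out, int blk)
{
    int nBlk = (n - 1) / blk + 1;
    int *blkSums = (int *)malloc(nBlk * sizeof(int));
    for (int b = 0; b < nBlk; b++) {            /* phase 1: inclusive scan inside each block */
        int end = (b + 1) * blk < n ? (b + 1) * blk : n;
        int sum = 0;
        for (int i = b * blk; i < end; i++) { sum += in[i]; out[i] = sum; }
        blkSums[b] = sum;
    }
    for (int b = 1; b < nBlk; b++)              /* phase 2: scan the per-block sums */
        blkSums[b] += blkSums[b - 1];
    for (int b = 1; b < nBlk; b++) {            /* phase 3: add the total of all earlier blocks */
        int end = (b + 1) * blk < n ? (b + 1) * blk : n;
        for (int i = b * blk; i < end; i++) out[i] += blkSums[b - 1];
    }
    free(blkSums);
}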
8fa2d9ef3e0b4a4c73dd656ef563ee3c05194dce.hip
// !!! This is a file automatically generated by hipify!!! #include <cmath> #include <cfloat> #include <cstdlib> #include <iostream> #include <chrono> #include <hip/hip_runtime.h> #include <cusolverDn.h> #include "data_struct.h" // helper for CUDA error handling #define CUDA_SAFE_CALL( call ) { gpuAssert((call), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { std::cerr << "GPUassert: " << hipGetErrorString(code) << " " << file << " " << line << std::endl; if (abort) { exit(code); } } } // grid dim x * block dim x >= pixelNum // creating the mean image (meanImage) of all (imageNum many) images (images) which have pixelNum number of pixels __global__ void getMeanImage( const double* images, double* meanImage, std::size_t imageNum, std::size_t pixelNum ); // grid dim x * block dim x >= pixelNum // subtracting an image (meanImage) from a (imageNum large) set of images (images), each having pixelNum many pixels __global__ void subtractMean( double* images, const double* meanImage, std::size_t imageNum, std::size_t pixelNum ); // grid dim x >= imageNum // grid dim y * block dim x >= pixelNum // calculating the lower triangle of A*A^t. // Since A*A^t is symmetric, the lower triangle perfectly specifies it. Needed for eigenvalue calculation. // A is the set of images (subtracted the mean image from each) which contains imageNum images, each having pixelNum pixels. // S is the lower triangle output. __global__ void getLowerAAt( const double* A, double* S, std::size_t imageNum, std::size_t pixelNum ); // grid dim = 1, block dim = 1 (used to avoid copying back to host) // calculates the necessary number of principal components based ont he user input __global__ void getSufficientComponentNum(const double* eigenvalues, std::size_t* componentNum, std::size_t eigenRows, double epsilon); // this reorders the eigenvalues in descending order too // grid dim x >= componentNum // grid dim y * block dim x >= pixelNum // for speeding up eigen decomposition we used this trick: https://en.wikipedia.org/wiki/Eigenface#Computing_the_eigenvectors // this function restores the original eigenvectors from the one we got after applying the trick __global__ void restoreEigenvectors( const double* meanSubtractedImages , const double* reducedEigenvectors , double* restoredEigenvectors , std::size_t imageNum , std::size_t pixelNum , std::size_t componentNum ); // grid dim x * block dim x >= componentNum // normalizing each eigenvector __global__ void normalizeEigenvector(double* restoredEigenvectors, std::size_t pixelNum, std::size_t componentNum); // grid dim x >= componentNum // grid dim y * block dim x >= imageNum // calculating the weights of each used principal component in each of the training images (minus the mean of them) __global__ void getWeights( const double* restoredEigenvectors , const double* meanSubtractedImages , double* weights , std::size_t imageNum , std::size_t pixelNum , std::size_t componentNum ); // grid dim x >= componentNum // grid dim y * block dim x >= testImageNum // calculating weights of the used principal components in each of the test images (minus the training mean) __global__ void getTestWeights( const double* restoredEigenvectors , const double* meanImage , const double* testImages , double* testWeights , std::size_t testImageNum , std::size_t pixelNum , std::size_t componentNum ); // grid dim x >= imageNum // grid dim y * block dim x >= testImageNum // calculating the distances between each test 
image weight vector and training image weight vector __global__ void getDistances( const double* trainingWeights , const double* testWeights , double* distances , std::size_t trainImageNum , std::size_t testImageNum , std::size_t componentNum ); // grid dim x * block dim x >= testImageNum // selecting the minimum distance from the test/training distances // ultimately gives us the "closest" training image to each test image __global__ void getMinDistanceIdx( const double* distances , unsigned int* minDistanceImages , std::size_t trainImageNum , std::size_t testImageNum ); // device function for calculating the l2 norm on a 'size' large array __device__ double getL2Norm(const double* array, std::size_t size); // checks whether any CUDA capable device is availale void checkDeviceAvailability(); // gets max thread per block number on the first CUDA capable device int getMaxtThreadsPerBlock(); // helper for cuBlas error handling void checkCuSolverResult(cusolverStatus_t solverStatus); /// images should be in row major order /// componentNumIndicator is either 0 - all of them are used /// in (0,1) range - explanatory power based component selection /// or larger - rounded to intiger and that many are used extern "C" void gpuAssistedClassification( Data* training , Data* test , double componentNumIndicator ) { auto startTraining = std::chrono::high_resolution_clock::now(); std::size_t imageNum = training->size; std::size_t pixelNum = training->width * training->height; std::size_t testImageNum = test->size; std::size_t testPixelNum = test->width * test->height; checkDeviceAvailability(); int maxThreadsPerBlock = getMaxtThreadsPerBlock(); // preprocessing training images double** A = training->images; std::size_t imageBitSize = pixelNum * sizeof(double); std::size_t trainingSetBitSize = imageNum * imageBitSize; //pushing training images to GPU double* gpuA = nullptr; CUDA_SAFE_CALL( hipMalloc((void **)&gpuA, trainingSetBitSize) ); for(std::size_t i = 0; i < imageNum; ++i) { CUDA_SAFE_CALL( hipMemcpy(&(gpuA[i * pixelNum]), A[i], imageBitSize, hipMemcpyHostToDevice) ); } double* gpuMeanImage = nullptr; CUDA_SAFE_CALL( hipMalloc((void **)&gpuMeanImage, imageBitSize) ); int gridDim = ::ceil(static_cast<double>(pixelNum) / maxThreadsPerBlock); hipLaunchKernelGGL(( getMeanImage), dim3(gridDim), dim3(maxThreadsPerBlock), 0, 0, gpuA, gpuMeanImage, imageNum, pixelNum); CUDA_SAFE_CALL( hipPeekAtLastError() ); hipLaunchKernelGGL(( subtractMean), dim3(gridDim), dim3(maxThreadsPerBlock) , 0, 0, gpuA, gpuMeanImage, imageNum, pixelNum); CUDA_SAFE_CALL( hipPeekAtLastError() ); // calculating eigenvectors std::size_t imageNumSquareSize = imageNum * imageNum * sizeof(double); //pushing training images to GPU double* gpuS = nullptr; CUDA_SAFE_CALL( hipMalloc((void **)&gpuS, imageNumSquareSize) ); int gridDimY = ::ceil(static_cast<double>(imageNum) / maxThreadsPerBlock); hipLaunchKernelGGL(( getLowerAAt), dim3(dim3(imageNum, gridDimY)), dim3(maxThreadsPerBlock) , 0, 0, gpuA, gpuS, imageNum, pixelNum); CUDA_SAFE_CALL( hipPeekAtLastError() ); // calculating eigenvectors hipsolverDnHandle_t solver; cusolverStatus_t solverStatus; double *gpuEigenvalues; int *devInfo; double *gpuWorkspace; int workspaceSize = 0; solverStatus = hipsolverDnCreate(&solver); checkCuSolverResult(solverStatus); CUDA_SAFE_CALL( hipMalloc ((void**)&gpuEigenvalues, imageNum * sizeof(double)) ); CUDA_SAFE_CALL( hipMalloc ((void**)&devInfo ,sizeof(int)) ); hipsolverEigMode_t eigenvalueFlag = HIPSOLVER_EIG_MODE_VECTOR; //eigenvalues are calculated 
too hipblasFillMode_t fillMode = HIPBLAS_FILL_MODE_LOWER; // only the lower triangle has to store information // compute buffer size and prepare workspace solverStatus = hipsolverDnDsyevd_bufferSize( solver , eigenvalueFlag , fillMode , imageNum , gpuS , imageNum , gpuEigenvalues , &workspaceSize ); checkCuSolverResult(solverStatus); CUDA_SAFE_CALL( hipMalloc((void**)&gpuWorkspace, workspaceSize*sizeof(double)) ); // eigenvectors are stored in the same container (gpuS is overwritten) // WARNING contrary to OpenCV, eigenvalues (and corresponding eigenvectors) are in ascending order solverStatus = hipsolverDnDsyevd( solver , eigenvalueFlag , fillMode , imageNum , gpuS , imageNum , gpuEigenvalues , gpuWorkspace , workspaceSize , devInfo ); checkCuSolverResult(solverStatus); solverStatus = hipsolverDnDestroy(solver); checkCuSolverResult(solverStatus); double* gpuEigenvectors = gpuS; // only renaming // deciding on the number of used principal components std::size_t componentNum; if(componentNumIndicator < 1.0) { std::size_t* gpuComponentNum = nullptr; CUDA_SAFE_CALL( hipMalloc((void **)&gpuComponentNum, sizeof(std::size_t)) ); double varianceThreshold = componentNumIndicator; hipLaunchKernelGGL(( getSufficientComponentNum), dim3(1),dim3(1), 0, 0, gpuEigenvalues, gpuComponentNum, imageNum, varianceThreshold); CUDA_SAFE_CALL( hipPeekAtLastError() ); CUDA_SAFE_CALL( hipMemcpy(&componentNum, gpuComponentNum, sizeof(std::size_t), hipMemcpyDeviceToHost) ); CUDA_SAFE_CALL( hipFree(gpuComponentNum) ); } else { componentNum = round(componentNumIndicator); } // restoring the eigenvectors to the needed form CUDA_SAFE_CALL( hipFree(gpuEigenvalues) ); double* gpuRestoredEigenvectors = nullptr; CUDA_SAFE_CALL( hipMalloc((void **)&gpuRestoredEigenvectors, componentNum * (pixelNum) * sizeof(double)) ); gridDimY = ::ceil(static_cast<double>(pixelNum) / maxThreadsPerBlock); hipLaunchKernelGGL(( restoreEigenvectors), dim3(dim3(componentNum, gridDimY)), dim3(maxThreadsPerBlock) , 0, 0, gpuA , gpuEigenvectors , gpuRestoredEigenvectors , imageNum , pixelNum , componentNum ); CUDA_SAFE_CALL( hipPeekAtLastError() ); CUDA_SAFE_CALL( hipFree(gpuEigenvectors) ); gridDim = gridDimY; hipLaunchKernelGGL(( normalizeEigenvector), dim3(gridDim), dim3(maxThreadsPerBlock), 0, 0, gpuRestoredEigenvectors , pixelNum , componentNum ); CUDA_SAFE_CALL( hipPeekAtLastError() ); // calculating training weights double* gpuW = nullptr; CUDA_SAFE_CALL( hipMalloc((void **)&gpuW, componentNum * imageNum * sizeof(double)) ); gridDimY = ::ceil(static_cast<double>(imageNum) / maxThreadsPerBlock); hipLaunchKernelGGL(( getWeights), dim3(dim3(componentNum, gridDimY)), dim3(maxThreadsPerBlock), 0, 0, gpuRestoredEigenvectors , gpuA , gpuW , imageNum , pixelNum , componentNum ); CUDA_SAFE_CALL( hipPeekAtLastError() ); CUDA_SAFE_CALL( hipFree(gpuA) ); auto finishTraining = std::chrono::high_resolution_clock::now(); auto startClassifying = std::chrono::high_resolution_clock::now(); // classification std::size_t testSetBitSize = testImageNum * imageBitSize; // test images are the same size as training images //pushing test images to GPU double* gpuTestImages = nullptr; CUDA_SAFE_CALL( hipMalloc((void **)&gpuTestImages, testSetBitSize) ); for(std::size_t i = 0; i < testImageNum; ++i) { CUDA_SAFE_CALL( hipMemcpy(&(gpuTestImages[i * testPixelNum]), (test->images)[i], imageBitSize, hipMemcpyHostToDevice) ); } double* gpuTestWeights = nullptr; CUDA_SAFE_CALL( hipMalloc((void **)&gpuTestWeights, componentNum * testImageNum * sizeof(double)) ); gridDimY = 
::ceil(static_cast<double>(testImageNum) / maxThreadsPerBlock); hipLaunchKernelGGL(( getTestWeights), dim3(dim3(componentNum, gridDimY)), dim3(maxThreadsPerBlock), 0, 0, gpuRestoredEigenvectors , gpuMeanImage , gpuTestImages , gpuTestWeights , testImageNum , testPixelNum , componentNum ); CUDA_SAFE_CALL( hipPeekAtLastError() ); CUDA_SAFE_CALL( hipFree(gpuRestoredEigenvectors) ); CUDA_SAFE_CALL( hipFree(gpuMeanImage) ); // calculating closest training image double* gpuDistances = nullptr; CUDA_SAFE_CALL( hipMalloc((void **)&gpuDistances, imageNum * testImageNum * sizeof(double)) ); gridDimY = ::ceil(static_cast<double>(testImageNum) / maxThreadsPerBlock); hipLaunchKernelGGL(( getDistances), dim3(dim3(imageNum, gridDimY)), dim3(maxThreadsPerBlock), 0, 0, gpuW , gpuTestWeights , gpuDistances , imageNum , testImageNum , componentNum ); CUDA_SAFE_CALL( hipPeekAtLastError() ); unsigned int* gpuMinDistanceIdxs = nullptr; CUDA_SAFE_CALL( hipMalloc((void **)&gpuMinDistanceIdxs, testImageNum * sizeof(unsigned int)) ); gridDim = ::ceil(static_cast<double>(testImageNum) / maxThreadsPerBlock); hipLaunchKernelGGL(( getMinDistanceIdx), dim3(gridDim), dim3(maxThreadsPerBlock), 0, 0, gpuDistances , gpuMinDistanceIdxs , imageNum , testImageNum ); CUDA_SAFE_CALL( hipPeekAtLastError() ); //translating closest training image to a label hipMemcpy(test->labels, gpuMinDistanceIdxs, testImageNum * sizeof(unsigned int), hipMemcpyDeviceToHost); for(std::size_t i = 0; i < testImageNum; ++i) { test->labels[i] = training->labels[test->labels[i]]; } CUDA_SAFE_CALL( hipDeviceReset() ); auto finishClassifying = std::chrono::high_resolution_clock::now(); std::cout << static_cast<std::chrono::duration<double>>(finishTraining - startTraining ).count() << "\t"; std::cout << static_cast<std::chrono::duration<double>>(finishClassifying - startClassifying).count() << "\t"; } void checkDeviceAvailability() { int deviceCount = 0; CUDA_SAFE_CALL( hipGetDeviceCount(&deviceCount) ); if(0 == deviceCount) { std::cout << "No CUDA capable devices were found." 
<< std::endl; exit(2); } } int getMaxtThreadsPerBlock() { struct hipDeviceProp_t properties; hipGetDeviceProperties(&properties, 0); // assuming there is only one device int maxThreadsPerBlock = properties.maxThreadsPerBlock; return maxThreadsPerBlock; } __global__ void getMeanImage( const double* images, double* meanImage, std::size_t imageNum, std::size_t pixelNum ) { std::size_t col = blockIdx.x * blockDim.x + threadIdx.x; if(col >= pixelNum) { return; } meanImage[col] = 0.0; for(std::size_t row = 0; row < imageNum; ++row) { meanImage[col] += images[row*pixelNum + col]; } meanImage[col] /= imageNum; } __global__ void subtractMean( double* images, const double* meanImage, std::size_t imageNum, std::size_t pixelNum ) { std::size_t col = blockIdx.x * blockDim.x + threadIdx.x; if(col >= pixelNum) { return; } for(std::size_t row = 0; row < imageNum; ++row) { images[row*pixelNum + col] -= meanImage[col]; if(images[row*pixelNum + col] < 0.0) { images[row*pixelNum + col] = 0.0; } } } __global__ void getLowerAAt( const double* A, double* S, std::size_t imageNum, std::size_t pixelNum ) { std::size_t row = blockIdx.x; std::size_t col = blockIdx.y * blockDim.x + threadIdx.x; if(row >= imageNum || col >= imageNum) { return; } S[row * imageNum + col] = 0.0; for(std::size_t i = 0; i < pixelNum; ++i) { S[row * imageNum + col] += A[row * pixelNum + i] * A[col * pixelNum + i]; } } __global__ void getSufficientComponentNum(const double* eigenvalues, std::size_t* componentNum, std::size_t eigenRows, double epsilon) { double variance = 0; for(std::size_t i = 0; i < eigenRows; ++i) { variance += eigenvalues[i]; } variance *= eigenRows; (*componentNum) = 1; double subVariance = eigenRows * eigenvalues[eigenRows - 1]; double explanatoryScore = subVariance / variance; for(; (*componentNum) < eigenRows && explanatoryScore <= epsilon; (*componentNum) += 1) { subVariance += eigenRows * eigenvalues[eigenRows - (*componentNum) - 1]; explanatoryScore = subVariance / variance; } } __global__ void restoreEigenvectors( const double* meanSubtractedImages , const double* reducedEigenvectors , double* restoredEigenvectors , std::size_t imageNum , std::size_t pixelNum , std::size_t componentNum ) { std::size_t row = blockIdx.x; std::size_t col = blockIdx.y * blockDim.x + threadIdx.x; if(col >= pixelNum || row >= componentNum) { return; } restoredEigenvectors[row * pixelNum + col] = 0.0; for(std::size_t i = 0; i < imageNum; ++i) { restoredEigenvectors[row * pixelNum + col] += reducedEigenvectors[(imageNum - row - 1) * imageNum + i] * meanSubtractedImages[i * pixelNum + col]; } } __global__ void normalizeEigenvector(double* restoredEigenvectors, std::size_t pixelNum, std::size_t componentNum) { std::size_t row = blockIdx.x * blockDim.x + threadIdx.x; if(row >= componentNum) { return; } double norm = getL2Norm(&(restoredEigenvectors[row * pixelNum]), pixelNum); for (int i = 0; i < pixelNum; ++i) { restoredEigenvectors[row * pixelNum + i] /= norm; } } __global__ void getWeights( const double* restoredEigenvectors , const double* meanSubtractedImages , double* weights , std::size_t imageNum , std::size_t pixelNum , std::size_t componentNum ) { std::size_t row = blockIdx.x; std::size_t col = blockIdx.y * blockDim.x + threadIdx.x; if(col >= imageNum || row >= componentNum) { return; } weights[row * imageNum + col] = 0.0; for(std::size_t i = 0; i < pixelNum; ++i) { weights[row * imageNum + col] += restoredEigenvectors[row * pixelNum + i] * meanSubtractedImages[col * pixelNum + i]; } } __global__ void getTestWeights( const double* 
restoredEigenvectors , const double* meanImage , const double* testImages , double* testWeights , std::size_t testImageNum , std::size_t pixelNum , std::size_t componentNum ) { std::size_t row = blockIdx.x; std::size_t col = blockIdx.y * blockDim.x + threadIdx.x; if(col >= testImageNum || row >= componentNum) { return; } testWeights[row * testImageNum + col] = 0.0; for(std::size_t i = 0; i < pixelNum; ++i) { double testImagePixelValue = testImages[col * pixelNum + i] - meanImage[i]; if(testImagePixelValue < 0.0) { testImagePixelValue = 0.0; } testWeights[row * testImageNum + col] += restoredEigenvectors[row * pixelNum + i] * (testImagePixelValue); } } __global__ void getDistances( const double* trainingWeights , const double* testWeights , double* distances , std::size_t trainImageNum , std::size_t testImageNum , std::size_t componentNum ) { std::size_t row = blockIdx.x; std::size_t col = blockIdx.y * blockDim.x + threadIdx.x; if(col >= testImageNum || row >= trainImageNum) { return; } distances[row * testImageNum + col] = 0.0; for(std::size_t i = 0; i < componentNum; ++i) { distances[row * testImageNum + col] += fabs(trainingWeights[i * trainImageNum + row] - testWeights[i * testImageNum + col]); } } __global__ void getMinDistanceIdx( const double* distances , unsigned int* minDistanceImages , std::size_t trainImageNum , std::size_t testImageNum ) { std::size_t col = blockIdx.x * blockDim.x + threadIdx.x; if(col >= testImageNum) { return; } double minDistance = DBL_MAX; unsigned int minDistanceImageIdx = 0; for(unsigned int i = 0; i < trainImageNum; ++i) { if(distances[i * testImageNum + col] < minDistance) { minDistance = distances[i * testImageNum + col]; minDistanceImageIdx = i; } } minDistanceImages[col] = minDistanceImageIdx; } __device__ double getL2Norm(const double* array, std::size_t size) { double norm = 0; for (int i = 0; i < size; ++i) { norm += ::pow(array[i], 2); } norm = std::sqrt(norm); return norm; } void checkCuSolverResult(cusolverStatus_t solverStatus) { if(CUSOLVER_STATUS_SUCCESS != solverStatus) { std::cout << "Error during eigen calculation. Error code: " << solverStatus << std::endl; exit(3); } }
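A short derivation of the eigen trick the file above relies on (the Wikipedia link in its comments): with A the imageNum × pixelNum matrix of mean-subtracted images, one image per row, getLowerAAt builds S = A Aᵀ, which is only imageNum × imageNum, instead of the pixelNum × pixelNum scatter matrix Aᵀ A. If S v = (A Aᵀ) v = λ v, then (Aᵀ A)(Aᵀ v) = Aᵀ (A Aᵀ v) = λ (Aᵀ v), so Aᵀ v is an eigenvector of Aᵀ A with the same eigenvalue. restoreEigenvectors computes exactly this product (row r of its output is Σᵢ vᵢ · A[i, :]), normalizeEigenvector rescales it to unit length, and the (imageNum - row - 1) indexing flips cuSOLVER's ascending eigenvalue order so that component 0 corresponds to the largest eigenvalue.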
8fa2d9ef3e0b4a4c73dd656ef563ee3c05194dce.cu
#include <cmath> #include <cfloat> #include <cstdlib> #include <iostream> #include <chrono> #include <cuda_runtime.h> #include <cusolverDn.h> #include "data_struct.h" // helper for CUDA error handling #define CUDA_SAFE_CALL( call ) { gpuAssert((call), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { std::cerr << "GPUassert: " << cudaGetErrorString(code) << " " << file << " " << line << std::endl; if (abort) { exit(code); } } } // grid dim x * block dim x >= pixelNum // creating the mean image (meanImage) of all (imageNum many) images (images) which have pixelNum number of pixels __global__ void getMeanImage( const double* images, double* meanImage, std::size_t imageNum, std::size_t pixelNum ); // grid dim x * block dim x >= pixelNum // subtracting an image (meanImage) from a (imageNum large) set of images (images), each having pixelNum many pixels __global__ void subtractMean( double* images, const double* meanImage, std::size_t imageNum, std::size_t pixelNum ); // grid dim x >= imageNum // grid dim y * block dim x >= pixelNum // calculating the lower triangle of A*A^t. // Since A*A^t is symmetric, the lower triangle perfectly specifies it. Needed for eigenvalue calculation. // A is the set of images (subtracted the mean image from each) which contains imageNum images, each having pixelNum pixels. // S is the lower triangle output. __global__ void getLowerAAt( const double* A, double* S, std::size_t imageNum, std::size_t pixelNum ); // grid dim = 1, block dim = 1 (used to avoid copying back to host) // calculates the necessary number of principal components based ont he user input __global__ void getSufficientComponentNum(const double* eigenvalues, std::size_t* componentNum, std::size_t eigenRows, double epsilon); // this reorders the eigenvalues in descending order too // grid dim x >= componentNum // grid dim y * block dim x >= pixelNum // for speeding up eigen decomposition we used this trick: https://en.wikipedia.org/wiki/Eigenface#Computing_the_eigenvectors // this function restores the original eigenvectors from the one we got after applying the trick __global__ void restoreEigenvectors( const double* meanSubtractedImages , const double* reducedEigenvectors , double* restoredEigenvectors , std::size_t imageNum , std::size_t pixelNum , std::size_t componentNum ); // grid dim x * block dim x >= componentNum // normalizing each eigenvector __global__ void normalizeEigenvector(double* restoredEigenvectors, std::size_t pixelNum, std::size_t componentNum); // grid dim x >= componentNum // grid dim y * block dim x >= imageNum // calculating the weights of each used principal component in each of the training images (minus the mean of them) __global__ void getWeights( const double* restoredEigenvectors , const double* meanSubtractedImages , double* weights , std::size_t imageNum , std::size_t pixelNum , std::size_t componentNum ); // grid dim x >= componentNum // grid dim y * block dim x >= testImageNum // calculating weights of the used principal components in each of the test images (minus the training mean) __global__ void getTestWeights( const double* restoredEigenvectors , const double* meanImage , const double* testImages , double* testWeights , std::size_t testImageNum , std::size_t pixelNum , std::size_t componentNum ); // grid dim x >= imageNum // grid dim y * block dim x >= testImageNum // calculating the distances between each test image weight vector and training image weight vector __global__ 
void getDistances( const double* trainingWeights , const double* testWeights , double* distances , std::size_t trainImageNum , std::size_t testImageNum , std::size_t componentNum ); // grid dim x * block dim x >= testImageNum // selecting the minimum distance from the test/training distances // ultimately gives us the "closest" training image to each test image __global__ void getMinDistanceIdx( const double* distances , unsigned int* minDistanceImages , std::size_t trainImageNum , std::size_t testImageNum ); // device function for calculating the l2 norm on a 'size' large array __device__ double getL2Norm(const double* array, std::size_t size); // checks whether any CUDA capable device is availale void checkDeviceAvailability(); // gets max thread per block number on the first CUDA capable device int getMaxtThreadsPerBlock(); // helper for cuBlas error handling void checkCuSolverResult(cusolverStatus_t solverStatus); /// images should be in row major order /// componentNumIndicator is either 0 - all of them are used /// in (0,1) range - explanatory power based component selection /// or larger - rounded to intiger and that many are used extern "C" void gpuAssistedClassification( Data* training , Data* test , double componentNumIndicator ) { auto startTraining = std::chrono::high_resolution_clock::now(); std::size_t imageNum = training->size; std::size_t pixelNum = training->width * training->height; std::size_t testImageNum = test->size; std::size_t testPixelNum = test->width * test->height; checkDeviceAvailability(); int maxThreadsPerBlock = getMaxtThreadsPerBlock(); // preprocessing training images double** A = training->images; std::size_t imageBitSize = pixelNum * sizeof(double); std::size_t trainingSetBitSize = imageNum * imageBitSize; //pushing training images to GPU double* gpuA = nullptr; CUDA_SAFE_CALL( cudaMalloc((void **)&gpuA, trainingSetBitSize) ); for(std::size_t i = 0; i < imageNum; ++i) { CUDA_SAFE_CALL( cudaMemcpy(&(gpuA[i * pixelNum]), A[i], imageBitSize, cudaMemcpyHostToDevice) ); } double* gpuMeanImage = nullptr; CUDA_SAFE_CALL( cudaMalloc((void **)&gpuMeanImage, imageBitSize) ); int gridDim = std::ceil(static_cast<double>(pixelNum) / maxThreadsPerBlock); getMeanImage<<< gridDim, maxThreadsPerBlock>>>(gpuA, gpuMeanImage, imageNum, pixelNum); CUDA_SAFE_CALL( cudaPeekAtLastError() ); subtractMean<<< gridDim, maxThreadsPerBlock >>>(gpuA, gpuMeanImage, imageNum, pixelNum); CUDA_SAFE_CALL( cudaPeekAtLastError() ); // calculating eigenvectors std::size_t imageNumSquareSize = imageNum * imageNum * sizeof(double); //pushing training images to GPU double* gpuS = nullptr; CUDA_SAFE_CALL( cudaMalloc((void **)&gpuS, imageNumSquareSize) ); int gridDimY = std::ceil(static_cast<double>(imageNum) / maxThreadsPerBlock); getLowerAAt<<< dim3(imageNum, gridDimY), maxThreadsPerBlock >>>(gpuA, gpuS, imageNum, pixelNum); CUDA_SAFE_CALL( cudaPeekAtLastError() ); // calculating eigenvectors cusolverDnHandle_t solver; cusolverStatus_t solverStatus; double *gpuEigenvalues; int *devInfo; double *gpuWorkspace; int workspaceSize = 0; solverStatus = cusolverDnCreate(&solver); checkCuSolverResult(solverStatus); CUDA_SAFE_CALL( cudaMalloc ((void**)&gpuEigenvalues, imageNum * sizeof(double)) ); CUDA_SAFE_CALL( cudaMalloc ((void**)&devInfo ,sizeof(int)) ); cusolverEigMode_t eigenvalueFlag = CUSOLVER_EIG_MODE_VECTOR; //eigenvalues are calculated too cublasFillMode_t fillMode = CUBLAS_FILL_MODE_LOWER; // only the lower triangle has to store information // compute buffer size and prepare workspace 
solverStatus = cusolverDnDsyevd_bufferSize( solver , eigenvalueFlag , fillMode , imageNum , gpuS , imageNum , gpuEigenvalues , &workspaceSize ); checkCuSolverResult(solverStatus); CUDA_SAFE_CALL( cudaMalloc((void**)&gpuWorkspace, workspaceSize*sizeof(double)) ); // eigenvectors are stored in the same container (gpuS is overwritten) // WARNING contrary to OpenCV, eigenvalues (and corresponding eigenvectors) are in ascending order solverStatus = cusolverDnDsyevd( solver , eigenvalueFlag , fillMode , imageNum , gpuS , imageNum , gpuEigenvalues , gpuWorkspace , workspaceSize , devInfo ); checkCuSolverResult(solverStatus); solverStatus = cusolverDnDestroy(solver); checkCuSolverResult(solverStatus); double* gpuEigenvectors = gpuS; // only renaming // deciding on the number of used principal components std::size_t componentNum; if(componentNumIndicator < 1.0) { std::size_t* gpuComponentNum = nullptr; CUDA_SAFE_CALL( cudaMalloc((void **)&gpuComponentNum, sizeof(std::size_t)) ); double varianceThreshold = componentNumIndicator; getSufficientComponentNum<<<1,1>>>(gpuEigenvalues, gpuComponentNum, imageNum, varianceThreshold); CUDA_SAFE_CALL( cudaPeekAtLastError() ); CUDA_SAFE_CALL( cudaMemcpy(&componentNum, gpuComponentNum, sizeof(std::size_t), cudaMemcpyDeviceToHost) ); CUDA_SAFE_CALL( cudaFree(gpuComponentNum) ); } else { componentNum = round(componentNumIndicator); } // restoring the eigenvectors to the needed form CUDA_SAFE_CALL( cudaFree(gpuEigenvalues) ); double* gpuRestoredEigenvectors = nullptr; CUDA_SAFE_CALL( cudaMalloc((void **)&gpuRestoredEigenvectors, componentNum * (pixelNum) * sizeof(double)) ); gridDimY = std::ceil(static_cast<double>(pixelNum) / maxThreadsPerBlock); restoreEigenvectors<<< dim3(componentNum, gridDimY), maxThreadsPerBlock >>>( gpuA , gpuEigenvectors , gpuRestoredEigenvectors , imageNum , pixelNum , componentNum ); CUDA_SAFE_CALL( cudaPeekAtLastError() ); CUDA_SAFE_CALL( cudaFree(gpuEigenvectors) ); gridDim = gridDimY; normalizeEigenvector<<<gridDim, maxThreadsPerBlock>>>( gpuRestoredEigenvectors , pixelNum , componentNum ); CUDA_SAFE_CALL( cudaPeekAtLastError() ); // calculating training weights double* gpuW = nullptr; CUDA_SAFE_CALL( cudaMalloc((void **)&gpuW, componentNum * imageNum * sizeof(double)) ); gridDimY = std::ceil(static_cast<double>(imageNum) / maxThreadsPerBlock); getWeights<<<dim3(componentNum, gridDimY), maxThreadsPerBlock>>>( gpuRestoredEigenvectors , gpuA , gpuW , imageNum , pixelNum , componentNum ); CUDA_SAFE_CALL( cudaPeekAtLastError() ); CUDA_SAFE_CALL( cudaFree(gpuA) ); auto finishTraining = std::chrono::high_resolution_clock::now(); auto startClassifying = std::chrono::high_resolution_clock::now(); // classification std::size_t testSetBitSize = testImageNum * imageBitSize; // test images are the same size as training images //pushing test images to GPU double* gpuTestImages = nullptr; CUDA_SAFE_CALL( cudaMalloc((void **)&gpuTestImages, testSetBitSize) ); for(std::size_t i = 0; i < testImageNum; ++i) { CUDA_SAFE_CALL( cudaMemcpy(&(gpuTestImages[i * testPixelNum]), (test->images)[i], imageBitSize, cudaMemcpyHostToDevice) ); } double* gpuTestWeights = nullptr; CUDA_SAFE_CALL( cudaMalloc((void **)&gpuTestWeights, componentNum * testImageNum * sizeof(double)) ); gridDimY = std::ceil(static_cast<double>(testImageNum) / maxThreadsPerBlock); getTestWeights<<<dim3(componentNum, gridDimY), maxThreadsPerBlock>>>( gpuRestoredEigenvectors , gpuMeanImage , gpuTestImages , gpuTestWeights , testImageNum , testPixelNum , componentNum ); CUDA_SAFE_CALL( 
cudaPeekAtLastError() ); CUDA_SAFE_CALL( cudaFree(gpuRestoredEigenvectors) ); CUDA_SAFE_CALL( cudaFree(gpuMeanImage) ); // calculating closest training image double* gpuDistances = nullptr; CUDA_SAFE_CALL( cudaMalloc((void **)&gpuDistances, imageNum * testImageNum * sizeof(double)) ); gridDimY = std::ceil(static_cast<double>(testImageNum) / maxThreadsPerBlock); getDistances<<<dim3(imageNum, gridDimY), maxThreadsPerBlock>>>(gpuW , gpuTestWeights , gpuDistances , imageNum , testImageNum , componentNum ); CUDA_SAFE_CALL( cudaPeekAtLastError() ); unsigned int* gpuMinDistanceIdxs = nullptr; CUDA_SAFE_CALL( cudaMalloc((void **)&gpuMinDistanceIdxs, testImageNum * sizeof(unsigned int)) ); gridDim = std::ceil(static_cast<double>(testImageNum) / maxThreadsPerBlock); getMinDistanceIdx<<<gridDim, maxThreadsPerBlock>>>( gpuDistances , gpuMinDistanceIdxs , imageNum , testImageNum ); CUDA_SAFE_CALL( cudaPeekAtLastError() ); //translating closest training image to a label cudaMemcpy(test->labels, gpuMinDistanceIdxs, testImageNum * sizeof(unsigned int), cudaMemcpyDeviceToHost); for(std::size_t i = 0; i < testImageNum; ++i) { test->labels[i] = training->labels[test->labels[i]]; } CUDA_SAFE_CALL( cudaDeviceReset() ); auto finishClassifying = std::chrono::high_resolution_clock::now(); std::cout << static_cast<std::chrono::duration<double>>(finishTraining - startTraining ).count() << "\t"; std::cout << static_cast<std::chrono::duration<double>>(finishClassifying - startClassifying).count() << "\t"; } void checkDeviceAvailability() { int deviceCount = 0; CUDA_SAFE_CALL( cudaGetDeviceCount(&deviceCount) ); if(0 == deviceCount) { std::cout << "No CUDA capable devices were found." << std::endl; exit(2); } } int getMaxtThreadsPerBlock() { struct cudaDeviceProp properties; cudaGetDeviceProperties(&properties, 0); // assuming there is only one device int maxThreadsPerBlock = properties.maxThreadsPerBlock; return maxThreadsPerBlock; } __global__ void getMeanImage( const double* images, double* meanImage, std::size_t imageNum, std::size_t pixelNum ) { std::size_t col = blockIdx.x * blockDim.x + threadIdx.x; if(col >= pixelNum) { return; } meanImage[col] = 0.0; for(std::size_t row = 0; row < imageNum; ++row) { meanImage[col] += images[row*pixelNum + col]; } meanImage[col] /= imageNum; } __global__ void subtractMean( double* images, const double* meanImage, std::size_t imageNum, std::size_t pixelNum ) { std::size_t col = blockIdx.x * blockDim.x + threadIdx.x; if(col >= pixelNum) { return; } for(std::size_t row = 0; row < imageNum; ++row) { images[row*pixelNum + col] -= meanImage[col]; if(images[row*pixelNum + col] < 0.0) { images[row*pixelNum + col] = 0.0; } } } __global__ void getLowerAAt( const double* A, double* S, std::size_t imageNum, std::size_t pixelNum ) { std::size_t row = blockIdx.x; std::size_t col = blockIdx.y * blockDim.x + threadIdx.x; if(row >= imageNum || col >= imageNum) { return; } S[row * imageNum + col] = 0.0; for(std::size_t i = 0; i < pixelNum; ++i) { S[row * imageNum + col] += A[row * pixelNum + i] * A[col * pixelNum + i]; } } __global__ void getSufficientComponentNum(const double* eigenvalues, std::size_t* componentNum, std::size_t eigenRows, double epsilon) { double variance = 0; for(std::size_t i = 0; i < eigenRows; ++i) { variance += eigenvalues[i]; } variance *= eigenRows; (*componentNum) = 1; double subVariance = eigenRows * eigenvalues[eigenRows - 1]; double explanatoryScore = subVariance / variance; for(; (*componentNum) < eigenRows && explanatoryScore <= epsilon; (*componentNum) += 1) { 
subVariance += eigenRows * eigenvalues[eigenRows - (*componentNum) - 1]; explanatoryScore = subVariance / variance; } } __global__ void restoreEigenvectors( const double* meanSubtractedImages , const double* reducedEigenvectors , double* restoredEigenvectors , std::size_t imageNum , std::size_t pixelNum , std::size_t componentNum ) { std::size_t row = blockIdx.x; std::size_t col = blockIdx.y * blockDim.x + threadIdx.x; if(col >= pixelNum || row >= componentNum) { return; } restoredEigenvectors[row * pixelNum + col] = 0.0; for(std::size_t i = 0; i < imageNum; ++i) { restoredEigenvectors[row * pixelNum + col] += reducedEigenvectors[(imageNum - row - 1) * imageNum + i] * meanSubtractedImages[i * pixelNum + col]; } } __global__ void normalizeEigenvector(double* restoredEigenvectors, std::size_t pixelNum, std::size_t componentNum) { std::size_t row = blockIdx.x * blockDim.x + threadIdx.x; if(row >= componentNum) { return; } double norm = getL2Norm(&(restoredEigenvectors[row * pixelNum]), pixelNum); for (int i = 0; i < pixelNum; ++i) { restoredEigenvectors[row * pixelNum + i] /= norm; } } __global__ void getWeights( const double* restoredEigenvectors , const double* meanSubtractedImages , double* weights , std::size_t imageNum , std::size_t pixelNum , std::size_t componentNum ) { std::size_t row = blockIdx.x; std::size_t col = blockIdx.y * blockDim.x + threadIdx.x; if(col >= imageNum || row >= componentNum) { return; } weights[row * imageNum + col] = 0.0; for(std::size_t i = 0; i < pixelNum; ++i) { weights[row * imageNum + col] += restoredEigenvectors[row * pixelNum + i] * meanSubtractedImages[col * pixelNum + i]; } } __global__ void getTestWeights( const double* restoredEigenvectors , const double* meanImage , const double* testImages , double* testWeights , std::size_t testImageNum , std::size_t pixelNum , std::size_t componentNum ) { std::size_t row = blockIdx.x; std::size_t col = blockIdx.y * blockDim.x + threadIdx.x; if(col >= testImageNum || row >= componentNum) { return; } testWeights[row * testImageNum + col] = 0.0; for(std::size_t i = 0; i < pixelNum; ++i) { double testImagePixelValue = testImages[col * pixelNum + i] - meanImage[i]; if(testImagePixelValue < 0.0) { testImagePixelValue = 0.0; } testWeights[row * testImageNum + col] += restoredEigenvectors[row * pixelNum + i] * (testImagePixelValue); } } __global__ void getDistances( const double* trainingWeights , const double* testWeights , double* distances , std::size_t trainImageNum , std::size_t testImageNum , std::size_t componentNum ) { std::size_t row = blockIdx.x; std::size_t col = blockIdx.y * blockDim.x + threadIdx.x; if(col >= testImageNum || row >= trainImageNum) { return; } distances[row * testImageNum + col] = 0.0; for(std::size_t i = 0; i < componentNum; ++i) { distances[row * testImageNum + col] += fabs(trainingWeights[i * trainImageNum + row] - testWeights[i * testImageNum + col]); } } __global__ void getMinDistanceIdx( const double* distances , unsigned int* minDistanceImages , std::size_t trainImageNum , std::size_t testImageNum ) { std::size_t col = blockIdx.x * blockDim.x + threadIdx.x; if(col >= testImageNum) { return; } double minDistance = DBL_MAX; unsigned int minDistanceImageIdx = 0; for(unsigned int i = 0; i < trainImageNum; ++i) { if(distances[i * testImageNum + col] < minDistance) { minDistance = distances[i * testImageNum + col]; minDistanceImageIdx = i; } } minDistanceImages[col] = minDistanceImageIdx; } __device__ double getL2Norm(const double* array, std::size_t size) { double norm = 0; for (int i = 0; i 
< size; ++i) { norm += std::pow(array[i], 2); } norm = std::sqrt(norm); return norm; } void checkCuSolverResult(cusolverStatus_t solverStatus) { if(CUSOLVER_STATUS_SUCCESS != solverStatus) { std::cout << "Error during eigen calculation. Error code: " << solverStatus << std::endl; exit(3); } }
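data_struct.h is not part of this dump, so the following caller sketch is hypothetical: the Data layout is inferred from how gpuAssistedClassification uses its fields (size, width, height, images, labels) and may not match the real header; the componentNumIndicator values come from the comments in the file above.

#include <cstddef>

// Assumed layout, reconstructed from usage only.
struct Data {
    std::size_t size;            // number of images
    std::size_t width, height;   // image dimensions in pixels
    double **images;             // size pointers, each to width*height doubles, row-major
    unsigned int *labels;        // one label per image; filled in for `test` by the call
};

extern "C" void gpuAssistedClassification(Data *training, Data *test, double componentNumIndicator);

void classifyExample(Data *training, Data *test)
{
    // keep enough principal components to explain 95% of the variance ...
    gpuAssistedClassification(training, test, 0.95);
    // ... or pass e.g. 32.0 to use exactly 32 components (values >= 1 are rounded).
}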
2d1452dd4cb8a606834b42001580e1e3c422c767.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <thrust\reduce.h> #include <thrust\device_ptr.h> #include "Header.h" #include "helper_cuda.h" #include "timeutility.h" __device__ int insect(int* beg1, int* end1, int* beg2, int* end2) { int ret = 0; int node1 = *beg1; int node2 = *beg2; while (beg1 != end1 && beg2 != end2){ //if (node1 == node2){ // ret++; //} //if (node1 <= node2){ // node1 = *++beg1; //} //if(node1 >= node2){ // node2 = *++beg2; //} //vari //if (node1 == node2){ // ret++; // node1 = (*++beg1); // node2 = (*++beg2); //} //else if (node1 < node2){ // node1 = (*++beg1); //} //else{ // node2 = (*++beg2); //} if (node1 < node2){ node1 = *++beg1; } else if (node1 > node2){ node2 = *++beg2; } else{ node1 = *++beg1; node2 = *++beg2; ret++; } } return ret; } __device__ int inse(int* less, size_t llen, int* more, size_t mlen) { int ret = 0; for (size_t i = 0; i < llen; i++){ int beg = 0; int end = mlen; while (beg < end){ if (more[beg + (end - beg) / 2] == less[i]){ ret++; break; } else if (more[beg + (end - beg) / 2] < less[i]){ beg = beg + (end - beg) / 2 + 1; } else { end = beg + (end - beg) / 2; } } } return ret; } __global__ void kernel_counting(int* index, int* head, size_t head_len, int* tail, int* result, int block_idx_offset = 0) { int idx = blockDim.x * (blockIdx.x + block_idx_offset) + threadIdx.x; //int step = gridDim.x * blockDim.x; int h, t; //for (; idx < head_len; idx += step){ if (idx >= head_len)return; h = head[idx]; t = tail[idx]; //ori //result[idx] += insect(head + index[h], head + index[h + 1], head + index[t], head + index[t + 1]);//0.199s //faster result[idx] = insect(head + index[h], head + index[h + 1], head + index[t], head + idx);//0.160s //} //if (index[h + 1] - index[h] > idx - index[t]){// 2 branch ->0.237s // result[idx] = inse(head + index[t], idx - index[t], head + index[h], index[h + 1] - index[h]);//0.319s on asitter //} //else{ //result[idx] = inse(head + index[h], index[h + 1] - index[h], head + index[t], idx - index[t]);//0.189s on asitter //} } void multikernel(int* dev_index, size_t index_len, int* dev_head, size_t head_len, int* dev_tail, int* dev_result, int block, int thread) { const int kernel_per_stream = 60; const int max_stream = 16; hipStream_t stream[max_stream];//max concurrent kernl in compute capability 3.0 for (size_t i = 0; i < max_stream; i++){ checkCudaErrors(hipStreamCreate(&stream[i])); } block = (block - 1) / max_stream + 1; //add block = (block - 1) / kernel_per_stream + 1; //checkCudaErrors(hipFuncSetCacheConfig(kernel_counting, hipFuncCachePreferL1)); //for (size_t i = 0; i < max_stream; i++){ // kernel_counting<<<block, thread, 0, stream[i]>>>(dev_index, dev_head, head_len, dev_tail, dev_result, i * block); //} //for (size_t i = 0; i < max_stream; i++){ // for (size_t j = 0; j < kernel_per_stream; j++){ // kernel_counting << <block, thread, 0, stream[i] >> >(dev_index, dev_head, head_len, dev_tail, dev_result, (i * kernel_per_stream + j) * block); // } // //checkCudaErrors(hipGetLastError()); //} // for (size_t j = 0; j < kernel_per_stream; j++){ for (size_t i = 0; i < max_stream; i++){ kernel_counting << <block, thread, 0, stream[i] >> >(dev_index, dev_head, head_len, dev_tail, dev_result, (i * kernel_per_stream + j) * block); } //checkCudaErrors(hipGetLastError()); } checkCudaErrors(hipDeviceSynchronize()); for (size_t i = 0; i < max_stream; i++){ checkCudaErrors(hipStreamDestroy(stream[i])); } } int cuda_count_triangle(int* 
dev_index, size_t index_len, int* dev_head, size_t head_len, int* dev_tail, int* dev_result) { int ret = 0; int thread = 256;//;256 int block = (head_len - 1) / thread + 1; CpuTime ct; ct.startTimer(); hipEvent_t start, stop; checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); checkCudaErrors(hipEventRecord(start, 0)); checkCudaErrors(hipFuncSetCacheConfig(kernel_counting, hipFuncCachePreferL1)); kernel_counting << <block, thread >> >(dev_index, dev_head, head_len, dev_tail, dev_result); //multikernel(dev_index, index_len, dev_head, head_len, dev_tail, dev_result, block, thread); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipEventRecord(stop, 0)); checkCudaErrors(hipEventSynchronize(stop)); float time = 0; checkCudaErrors(hipEventElapsedTime(&time, start, stop)); std::cout << "start to reduce" << std::endl; ret = thrust::reduce(thrust::device_ptr<int>(dev_result), thrust::device_ptr<int>(dev_result + head_len)); std::cout << "time profiled by cudaEvent: " << time / 1000 << "s" << std::endl; ct.stopAndPrint("!!!cuda_count_triangle!!!"); checkCudaErrors(hipEventDestroy(start)); checkCudaErrors(hipEventDestroy(stop)); return ret; } int para_cuda_forward(std::vector<int>& index, std::vector<int>& head, std::vector<int>& tail) { int ret = 0; int* dev_index; int* dev_head; int* dev_tail; int* result; hipStream_t stream1; //hipStream_t stream2; CpuTime ct; ct.startTimer(); checkCudaErrors(hipStreamCreate(&stream1)); //checkCudaErrors(hipStreamCreate(&stream2)); checkCudaErrors(hipMalloc((void**)&result, sizeof(int) * (head.size()))); checkCudaErrors(hipMemsetAsync(result, 0, sizeof(int) * (head.size()), stream1)); checkCudaErrors(hipMalloc((void**)&dev_index, sizeof(int) * index.size())); checkCudaErrors(hipMemcpyAsync(dev_index, index.data(), sizeof(int) * index.size(), hipMemcpyHostToDevice, stream1)); checkCudaErrors(hipMalloc((void**)&dev_head, sizeof(int) * head.size())); checkCudaErrors(hipMemcpyAsync(dev_head, head.data(), sizeof(int) * head.size(), hipMemcpyHostToDevice, stream1)); checkCudaErrors(hipMalloc((void**)&dev_tail, sizeof(int) * tail.size())); checkCudaErrors(hipMemcpyAsync(dev_tail, tail.data(), sizeof(int) * tail.size(), hipMemcpyHostToDevice, stream1)); checkCudaErrors(hipStreamDestroy(stream1)); checkCudaErrors(hipDeviceSynchronize()); ct.stopAndPrint("@@@copy to device done"); ret = cuda_count_triangle(dev_index, index.size(), dev_head, head.size(), dev_tail, result); return ret; } void preprocess(std::ifstream& ifs, std::vector<int>& edge_tail_start_index, std::vector<int>& edge_head) { AdjList adjl; { OriEdgeList oel; CpuTime ct; ct.startTimer(); read_edges(ifs, oel); ct.stopAndPrint("read file"); ct.startTimer(); make_adj_list_cuda(oel, adjl); ct.stopAndPrint("build adj list"); //copyEdgeListToDev(oel, dev_edge_list); //copyAdjListToDev(adjl, dev_adj_list);//4.39s in kernel 7.969s in userspace for amazon ungraph, it's too slow //std::cout << "on device " << dev[0] << " " << dev[1] << std::endl; //std::cout << "on host " << oel.edges[0].first << " " << oel.edges[0].second << std::endl; //thrust::device_vector<thrust::device_vector<int>> dev_vec(adjl.adj_list.size()); //for (size_t i = 0; i < adjl.adj_list.size(); i++){ // dev_vec[i] = adjl.adj_list[i]; //} //std::cout << "dev_vec size: " << dev_vec.size() << std::endl; } int edge_count = 0; edge_tail_start_index.resize(adjl.adj_list.size() + 1); edge_head.reserve(adjl.edge_num); for (size_t i = 0; i < adjl.adj_list.size(); i++){ edge_tail_start_index[i] = edge_count; edge_count += 
adjl.adj_list[i].size(); std::copy(adjl.adj_list[i].begin(), adjl.adj_list[i].end(), std::back_inserter(edge_head)); } edge_tail_start_index.back() = edge_count; } int cuda_forward(std::ifstream& ifs) { CpuTime ct; ct.startTimer(); int ret = 0; std::vector<int> edge_tail_start_index; std::vector<int> edge_head; preprocess(ifs, edge_tail_start_index, edge_head); std::vector<int> tail; tail.resize(edge_head.size()); for (size_t i = 0; i < edge_tail_start_index.size() - 1; i++){ std::fill(tail.begin() + edge_tail_start_index[i], tail.begin() + edge_tail_start_index[i + 1], i); } //preprocessOnCuda(ifs); ct.stopAndPrint("time to preproc"); ret = para_cuda_forward(edge_tail_start_index, edge_head, tail); checkCudaErrors(hipDeviceReset()); return ret; }
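A CPU cross-check for the triangle count, sketched under two assumptions about the adjacency that preprocess()/make_adj_list_cuda builds (that code is not shown here): each undirected edge is stored in exactly one endpoint's list according to a fixed total order (the forward algorithm's orientation), and every list is sorted. Under those assumptions the sum of |adj(u) ∩ adj(v)| over all stored edges (u, v) counts each triangle exactly once, which is the full-intersection form of kernel_counting (the commented-out "ori" line). Names below are illustrative.

#include <vector>
#include <algorithm>
#include <iterator>
#include <cstddef>

// index: CSR offsets (nodeNum + 1 entries), head: concatenated, sorted neighbour lists.
long long countTrianglesCsr(const std::vector<int>& index, const std::vector<int>& head)
{
    long long total = 0;
    for (std::size_t u = 0; u + 1 < index.size(); ++u) {
        for (int e = index[u]; e < index[u + 1]; ++e) {
            int v = head[e];
            std::vector<int> common;
            std::set_intersection(head.begin() + index[u], head.begin() + index[u + 1],
                                  head.begin() + index[v], head.begin() + index[v + 1],
                                  std::back_inserter(common));
            total += static_cast<long long>(common.size());
        }
    }
    return total;
}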
2d1452dd4cb8a606834b42001580e1e3c422c767.cu
#include <cuda_runtime.h> #include <device_launch_parameters.h> #include <thrust\reduce.h> #include <thrust\device_ptr.h> #include "Header.h" #include "helper_cuda.h" #include "timeutility.h" __device__ int insect(int* beg1, int* end1, int* beg2, int* end2) { int ret = 0; int node1 = *beg1; int node2 = *beg2; while (beg1 != end1 && beg2 != end2){ //if (node1 == node2){ // ret++; //} //if (node1 <= node2){ // node1 = *++beg1; //} //if(node1 >= node2){ // node2 = *++beg2; //} //vari //if (node1 == node2){ // ret++; // node1 = (*++beg1); // node2 = (*++beg2); //} //else if (node1 < node2){ // node1 = (*++beg1); //} //else{ // node2 = (*++beg2); //} if (node1 < node2){ node1 = *++beg1; } else if (node1 > node2){ node2 = *++beg2; } else{ node1 = *++beg1; node2 = *++beg2; ret++; } } return ret; } __device__ int inse(int* less, size_t llen, int* more, size_t mlen) { int ret = 0; for (size_t i = 0; i < llen; i++){ int beg = 0; int end = mlen; while (beg < end){ if (more[beg + (end - beg) / 2] == less[i]){ ret++; break; } else if (more[beg + (end - beg) / 2] < less[i]){ beg = beg + (end - beg) / 2 + 1; } else { end = beg + (end - beg) / 2; } } } return ret; } __global__ void kernel_counting(int* index, int* head, size_t head_len, int* tail, int* result, int block_idx_offset = 0) { int idx = blockDim.x * (blockIdx.x + block_idx_offset) + threadIdx.x; //int step = gridDim.x * blockDim.x; int h, t; //for (; idx < head_len; idx += step){ if (idx >= head_len)return; h = head[idx]; t = tail[idx]; //ori //result[idx] += insect(head + index[h], head + index[h + 1], head + index[t], head + index[t + 1]);//0.199s //faster result[idx] = insect(head + index[h], head + index[h + 1], head + index[t], head + idx);//0.160s //} //if (index[h + 1] - index[h] > idx - index[t]){// 2 branch ->0.237s // result[idx] = inse(head + index[t], idx - index[t], head + index[h], index[h + 1] - index[h]);//0.319s on asitter //} //else{ //result[idx] = inse(head + index[h], index[h + 1] - index[h], head + index[t], idx - index[t]);//0.189s on asitter //} } void multikernel(int* dev_index, size_t index_len, int* dev_head, size_t head_len, int* dev_tail, int* dev_result, int block, int thread) { const int kernel_per_stream = 60; const int max_stream = 16; cudaStream_t stream[max_stream];//max concurrent kernl in compute capability 3.0 for (size_t i = 0; i < max_stream; i++){ checkCudaErrors(cudaStreamCreate(&stream[i])); } block = (block - 1) / max_stream + 1; //add block = (block - 1) / kernel_per_stream + 1; //checkCudaErrors(cudaFuncSetCacheConfig(kernel_counting, cudaFuncCachePreferL1)); //for (size_t i = 0; i < max_stream; i++){ // kernel_counting<<<block, thread, 0, stream[i]>>>(dev_index, dev_head, head_len, dev_tail, dev_result, i * block); //} //for (size_t i = 0; i < max_stream; i++){ // for (size_t j = 0; j < kernel_per_stream; j++){ // kernel_counting << <block, thread, 0, stream[i] >> >(dev_index, dev_head, head_len, dev_tail, dev_result, (i * kernel_per_stream + j) * block); // } // //checkCudaErrors(cudaGetLastError()); //} // for (size_t j = 0; j < kernel_per_stream; j++){ for (size_t i = 0; i < max_stream; i++){ kernel_counting << <block, thread, 0, stream[i] >> >(dev_index, dev_head, head_len, dev_tail, dev_result, (i * kernel_per_stream + j) * block); } //checkCudaErrors(cudaGetLastError()); } checkCudaErrors(cudaDeviceSynchronize()); for (size_t i = 0; i < max_stream; i++){ checkCudaErrors(cudaStreamDestroy(stream[i])); } } int cuda_count_triangle(int* dev_index, size_t index_len, int* dev_head, size_t head_len, 
int* dev_tail, int* dev_result) { int ret = 0; int thread = 256;//;256 int block = (head_len - 1) / thread + 1; CpuTime ct; ct.startTimer(); cudaEvent_t start, stop; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); checkCudaErrors(cudaEventRecord(start, 0)); checkCudaErrors(cudaFuncSetCacheConfig(kernel_counting, cudaFuncCachePreferL1)); kernel_counting << <block, thread >> >(dev_index, dev_head, head_len, dev_tail, dev_result); //multikernel(dev_index, index_len, dev_head, head_len, dev_tail, dev_result, block, thread); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaEventRecord(stop, 0)); checkCudaErrors(cudaEventSynchronize(stop)); float time = 0; checkCudaErrors(cudaEventElapsedTime(&time, start, stop)); std::cout << "start to reduce" << std::endl; ret = thrust::reduce(thrust::device_ptr<int>(dev_result), thrust::device_ptr<int>(dev_result + head_len)); std::cout << "time profiled by cudaEvent: " << time / 1000 << "s" << std::endl; ct.stopAndPrint("!!!cuda_count_triangle!!!"); checkCudaErrors(cudaEventDestroy(start)); checkCudaErrors(cudaEventDestroy(stop)); return ret; } int para_cuda_forward(std::vector<int>& index, std::vector<int>& head, std::vector<int>& tail) { int ret = 0; int* dev_index; int* dev_head; int* dev_tail; int* result; cudaStream_t stream1; //cudaStream_t stream2; CpuTime ct; ct.startTimer(); checkCudaErrors(cudaStreamCreate(&stream1)); //checkCudaErrors(cudaStreamCreate(&stream2)); checkCudaErrors(cudaMalloc((void**)&result, sizeof(int) * (head.size()))); checkCudaErrors(cudaMemsetAsync(result, 0, sizeof(int) * (head.size()), stream1)); checkCudaErrors(cudaMalloc((void**)&dev_index, sizeof(int) * index.size())); checkCudaErrors(cudaMemcpyAsync(dev_index, index.data(), sizeof(int) * index.size(), cudaMemcpyHostToDevice, stream1)); checkCudaErrors(cudaMalloc((void**)&dev_head, sizeof(int) * head.size())); checkCudaErrors(cudaMemcpyAsync(dev_head, head.data(), sizeof(int) * head.size(), cudaMemcpyHostToDevice, stream1)); checkCudaErrors(cudaMalloc((void**)&dev_tail, sizeof(int) * tail.size())); checkCudaErrors(cudaMemcpyAsync(dev_tail, tail.data(), sizeof(int) * tail.size(), cudaMemcpyHostToDevice, stream1)); checkCudaErrors(cudaStreamDestroy(stream1)); checkCudaErrors(cudaDeviceSynchronize()); ct.stopAndPrint("@@@copy to device done"); ret = cuda_count_triangle(dev_index, index.size(), dev_head, head.size(), dev_tail, result); return ret; } void preprocess(std::ifstream& ifs, std::vector<int>& edge_tail_start_index, std::vector<int>& edge_head) { AdjList adjl; { OriEdgeList oel; CpuTime ct; ct.startTimer(); read_edges(ifs, oel); ct.stopAndPrint("read file"); ct.startTimer(); make_adj_list_cuda(oel, adjl); ct.stopAndPrint("build adj list"); //copyEdgeListToDev(oel, dev_edge_list); //copyAdjListToDev(adjl, dev_adj_list);//4.39s in kernel 7.969s in userspace for amazon ungraph, it's too slow //std::cout << "on device " << dev[0] << " " << dev[1] << std::endl; //std::cout << "on host " << oel.edges[0].first << " " << oel.edges[0].second << std::endl; //thrust::device_vector<thrust::device_vector<int>> dev_vec(adjl.adj_list.size()); //for (size_t i = 0; i < adjl.adj_list.size(); i++){ // dev_vec[i] = adjl.adj_list[i]; //} //std::cout << "dev_vec size: " << dev_vec.size() << std::endl; } int edge_count = 0; edge_tail_start_index.resize(adjl.adj_list.size() + 1); edge_head.reserve(adjl.edge_num); for (size_t i = 0; i < adjl.adj_list.size(); i++){ edge_tail_start_index[i] = edge_count; edge_count += adjl.adj_list[i].size(); 
std::copy(adjl.adj_list[i].begin(), adjl.adj_list[i].end(), std::back_inserter(edge_head)); } edge_tail_start_index.back() = edge_count; } int cuda_forward(std::ifstream& ifs) { CpuTime ct; ct.startTimer(); int ret = 0; std::vector<int> edge_tail_start_index; std::vector<int> edge_head; preprocess(ifs, edge_tail_start_index, edge_head); std::vector<int> tail; tail.resize(edge_head.size()); for (size_t i = 0; i < edge_tail_start_index.size() - 1; i++){ std::fill(tail.begin() + edge_tail_start_index[i], tail.begin() + edge_tail_start_index[i + 1], i); } //preprocessOnCuda(ifs); ct.stopAndPrint("time to preproc"); ret = para_cuda_forward(edge_tail_start_index, edge_head, tail); checkCudaErrors(cudaDeviceReset()); return ret; }
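The multikernel() helper above (present in both versions of the file, but commented out at its call site in cuda_count_triangle) splits one large 1-D launch across several streams by passing a starting block as block_idx_offset. A reduced sketch of that pattern, one launch per stream and with the chunk size clamped; it reuses kernel_counting as declared above, the wrapper name is ours, and error checking is elided:

#include <cuda_runtime.h>
#include <vector>
#include <algorithm>

void launchInChunks(int* dev_index, int* dev_head, size_t head_len,
                    int* dev_tail, int* dev_result,
                    int threads, int nStreams)
{
    int totalBlocks = static_cast<int>((head_len - 1) / threads + 1);
    int blocksPerChunk = (totalBlocks - 1) / nStreams + 1;

    std::vector<cudaStream_t> streams(nStreams);
    for (int s = 0; s < nStreams; ++s) cudaStreamCreate(&streams[s]);

    for (int s = 0; s < nStreams; ++s) {
        int offset = s * blocksPerChunk;
        if (offset >= totalBlocks) break;                      // nothing left for this stream
        int blocks = std::min(blocksPerChunk, totalBlocks - offset);
        // block_idx_offset shifts blockIdx.x so each chunk covers a distinct edge range;
        // the idx >= head_len guard inside kernel_counting handles the ragged tail.
        kernel_counting<<<blocks, threads, 0, streams[s]>>>(
            dev_index, dev_head, head_len, dev_tail, dev_result, offset);
    }
    cudaDeviceSynchronize();
    for (int s = 0; s < nStreams; ++s) cudaStreamDestroy(streams[s]);
}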
cd3e6292aaf0344188658e4c3361dc5a6fc24780.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2011-2013 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "local_contrast_subtractive_2d_layer_hessian_cuda.h" #include "../local_contrast_subtractive_layer.h" #include "../nn_types.h" #include "util_cuda.h" __global__ void local_contrast_subtractive_2d_blur_horizontal_hess_kernel( const float * __restrict input, float * __restrict output, const unsigned int * __restrict affected_feature_map_list, const float * __restrict weights, int input_feature_map_count, int affected_feature_map_count, int window_width, int width, int height, int entry_count) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int z = blockIdx.z * blockDim.z + threadIdx.z; int entry_id = z / affected_feature_map_count; bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count); if (in_bounds) { int affected_feature_map_index = z - (entry_id * affected_feature_map_count); int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index]; const float * current_input = input + (int)(((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x); const float * current_input_low = current_input; const float * current_input_high = current_input; const float * current_weights = weights; float res = *current_input * *current_weights; #pragma unroll 4 for(int i = 1; i < window_width; ++i) { current_weights++; if (i < x + 1) current_input_low--; if (i > x + 1) current_input_low++; if (i < width - x) current_input_high++; if (i > width - x) current_input_high--; res += (*current_input_low + *current_input_high) * *current_weights; } output[(z * height + y) * width + x] = res; } } template<int WINDOW_WIDTH> __global__ void local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel( const float * __restrict input, float * __restrict output, const unsigned int * __restrict affected_feature_map_list, const float * __restrict weights, int input_feature_map_count, int affected_feature_map_count, int width, int height, int entry_count) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int z = blockIdx.z * blockDim.z + threadIdx.z; int entry_id = z / affected_feature_map_count; bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count); if (in_bounds) { int affected_feature_map_index = z - (entry_id * affected_feature_map_count); int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index]; const float * current_input = input + (int)(((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x); const float * current_input_low = current_input; const float * current_input_high = current_input; const float * current_weights = weights; float res = *current_input * *current_weights; #pragma unroll for(int i = 1; i < WINDOW_WIDTH; ++i) { current_weights++; if (i < x + 1) current_input_low--; if (i > x + 1) 
current_input_low++; if (i < width - x) current_input_high++; if (i > width - x) current_input_high--; res += (*current_input_low + *current_input_high) * *current_weights; } output[(z * height + y) * width + x] = res; } } __global__ void local_contrast_subtractive_2d_blur_vertical_and_subtract_hess_kernel( const float * __restrict input, const float * __restrict original_input, float * __restrict output, const unsigned int * __restrict affected_feature_map_list, const float * __restrict weights, int input_feature_map_count, int affected_feature_map_count, int window_height, int width, int height, int entry_count) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int z = blockIdx.z * blockDim.z + threadIdx.z; int entry_id = z / affected_feature_map_count; bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count); if (in_bounds) { int affected_feature_map_index = z - (entry_id * affected_feature_map_count); int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index]; const float * current_input = input + (int)((z * height + y) * width + x); const float * current_input_low = current_input; const float * current_input_high = current_input; const float * current_weights = weights; float res = *current_input * *current_weights; #pragma unroll 4 for(int i = 1; i < window_height; ++i) { current_weights++; if (i < y + 1) current_input_low -= width; if (i > y + 1) current_input_low += width; if (i < height - y) current_input_high += width; if (i > height - y) current_input_high -= width; res += (*current_input_low + *current_input_high) * *current_weights; } int offset = ((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x; output[offset] = original_input[offset] - res; } } template<int WINDOW_HEIGHT> __global__ void local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel( const float * __restrict input, const float * __restrict original_input, float * __restrict output, const unsigned int * __restrict affected_feature_map_list, const float * __restrict weights, int input_feature_map_count, int affected_feature_map_count, int width, int height, int entry_count) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int z = blockIdx.z * blockDim.z + threadIdx.z; int entry_id = z / affected_feature_map_count; bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count); if (in_bounds) { int affected_feature_map_index = z - (entry_id * affected_feature_map_count); int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index]; const float * current_input = input + (int)((z * height + y) * width + x); const float * current_input_low = current_input; const float * current_input_high = current_input; const float * current_weights = weights; float res = *current_input * *current_weights; #pragma unroll for(int i = 1; i < WINDOW_HEIGHT; ++i) { current_weights++; if (i < y + 1) current_input_low -= width; if (i > y + 1) current_input_low += width; if (i < height - y) current_input_high += width; if (i > height - y) current_input_high -= width; res += (*current_input_low + *current_input_high) * *current_weights; } int offset = ((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x; output[offset] = original_input[offset] - res; } } __global__ void local_contrast_subtractive_2d_copy_unaffected_hess_kernel( const float * __restrict original_input, float * 
__restrict output, const unsigned int * __restrict unaffected_feature_map_list, int input_feature_map_count, int unaffected_feature_map_count, int elem_count_per_fature_map, int entry_count) { int elem_id = blockIdx.x * blockDim.x + threadIdx.x; int unaffected_feature_map_index = blockIdx.y * blockDim.y + threadIdx.y; int entry_id = blockIdx.z * blockDim.z + threadIdx.z; bool in_bounds = (elem_id < elem_count_per_fature_map) && (unaffected_feature_map_index < unaffected_feature_map_count) && (entry_id < entry_count); if (in_bounds) { int unaffected_feature_map_id = unaffected_feature_map_list[unaffected_feature_map_index]; int offset = (entry_id * input_feature_map_count + unaffected_feature_map_id) * elem_count_per_fature_map + elem_id; output[offset] = original_input[offset]; } } __global__ void local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_hess_kernel( const float * __restrict input, float * __restrict output, const unsigned int * __restrict affected_feature_map_list, const float * __restrict weights_squared, float central_mult, int input_feature_map_count, int affected_feature_map_count, int window_height, int width, int height, int entry_count) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int z = blockIdx.z * blockDim.z + threadIdx.z; int entry_id = z / affected_feature_map_count; bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count); if (in_bounds) { int affected_feature_map_index = z - (entry_id * affected_feature_map_count); int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index]; const float * current_input = input + (int)((z * height + y) * width + x); const float * current_input_low = current_input; const float * current_input_high = current_input; const float * current_weights = weights_squared; float res = *current_input * *current_weights; #pragma unroll 4 for(int i = 1; i < window_height; ++i) { current_weights++; if (i < y + 1) current_input_low -= width; if (i > y + 1) current_input_low += width; if (i < height - y) current_input_high += width; if (i > height - y) current_input_high -= width; res += (*current_input_low + *current_input_high) * *current_weights; } int offset = ((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x; output[offset] = output[offset] * central_mult + res; } } template<int WINDOW_HEIGHT> __global__ void local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel( const float * __restrict input, float * __restrict output, const unsigned int * __restrict affected_feature_map_list, const float * __restrict weights_squared, float central_mult, int input_feature_map_count, int affected_feature_map_count, int width, int height, int entry_count) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int z = blockIdx.z * blockDim.z + threadIdx.z; int entry_id = z / affected_feature_map_count; bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count); if (in_bounds) { int affected_feature_map_index = z - (entry_id * affected_feature_map_count); int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index]; const float * current_input = input + (int)((z * height + y) * width + x); const float * current_input_low = current_input; const float * current_input_high = current_input; const float * current_weights = weights_squared; float res = *current_input * *current_weights; #pragma unroll for(int 
i = 1; i < WINDOW_HEIGHT; ++i) { current_weights++; if (i < y + 1) current_input_low -= width; if (i > y + 1) current_input_low += width; if (i < height - y) current_input_high += width; if (i > height - y) current_input_high -= width; res += (*current_input_low + *current_input_high) * *current_weights; } int offset = ((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x; output[offset] = output[offset] * central_mult + res; } } namespace nnforge { namespace cuda { local_contrast_subtractive_2d_layer_hessian_cuda::local_contrast_subtractive_2d_layer_hessian_cuda() { } local_contrast_subtractive_2d_layer_hessian_cuda::~local_contrast_subtractive_2d_layer_hessian_cuda() { } void local_contrast_subtractive_2d_layer_hessian_cuda::enqueue_test( hipStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, unsigned int entry_count) { std::pair<dim3, dim3> kernel_1st_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access( *cuda_config, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], affected_feature_map_count * entry_count); switch(half_window_sizes[0]) { case 1: hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<1>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 2: hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<2>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 3: hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<3>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 4: hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<4>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 5: hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<5>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, 
affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 6: hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<6>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 7: hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<7>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 8: hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<8>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 9: hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<9>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 10: hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<10>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; default: hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_hess_kernel), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, half_window_sizes[0], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; } std::pair<dim3, dim3> kernel_2nd_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access( *cuda_config, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], affected_feature_map_count * entry_count); switch(half_window_sizes[1]) { case 1: hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<1>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], 
input_configuration_specific.dimension_sizes[1], entry_count); break; case 2: hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<2>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 3: hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<3>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 4: hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<4>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 5: hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<5>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 6: hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<6>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 7: hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<7>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 8: hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<8>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 9: hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<9>), 
dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 10: hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<10>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; default: hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_hess_kernel), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, half_window_sizes[1], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; } if (unaffected_feature_map_count > 0) { std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access( *cuda_config, input_elem_count_per_feature_map, unaffected_feature_map_count, entry_count); hipLaunchKernelGGL(( local_contrast_subtractive_2d_copy_unaffected_hess_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_neurons_buffer, *output_neurons_buffer, *schema_data[5], input_configuration_specific.feature_map_count, unaffected_feature_map_count, input_elem_count_per_feature_map, entry_count); } } void local_contrast_subtractive_2d_layer_hessian_cuda::enqueue_backprop( hipStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data, const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_errors_buffer, cuda_linear_buffer_device_smart_ptr input_errors_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, unsigned int entry_count) { std::pair<dim3, dim3> kernel_1st_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access( *cuda_config, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], affected_feature_map_count * entry_count); switch(half_window_sizes[0]) { case 1: hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<1>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 2: hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<2>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], 
input_configuration_specific.dimension_sizes[1], entry_count); break; case 3: hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<3>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 4: hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<4>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 5: hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<5>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 6: hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<6>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 7: hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<7>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 8: hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<8>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 9: hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<9>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 10: hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<10>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, 
input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; default: hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_hess_kernel), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, half_window_sizes[0], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; } std::pair<dim3, dim3> kernel_2nd_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access( *cuda_config, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], affected_feature_map_count * entry_count); switch(half_window_sizes[1]) { case 1: hipLaunchKernelGGL(( local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<1>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 2: hipLaunchKernelGGL(( local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<2>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 3: hipLaunchKernelGGL(( local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<3>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 4: hipLaunchKernelGGL(( local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<4>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 5: hipLaunchKernelGGL(( local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<5>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 6: hipLaunchKernelGGL(( local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<6>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *output_errors_buffer, *schema_data[0], 
*schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 7: hipLaunchKernelGGL(( local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<7>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 8: hipLaunchKernelGGL(( local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<8>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 9: hipLaunchKernelGGL(( local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<9>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 10: hipLaunchKernelGGL(( local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<10>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; default: hipLaunchKernelGGL(( local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_hess_kernel), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, half_window_sizes[1], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; } } void local_contrast_subtractive_2d_layer_hessian_cuda::hessian_configured() { nnforge_shared_ptr<const local_contrast_subtractive_layer> layer_derived = nnforge_dynamic_pointer_cast<const local_contrast_subtractive_layer>(layer_schema); affected_feature_map_count = static_cast<int>(layer_derived->feature_maps_affected.size()); unaffected_feature_map_count = static_cast<int>(layer_derived->feature_maps_unaffected.size()); for(std::vector<unsigned int>::const_iterator it = layer_derived->window_sizes.begin(); it != layer_derived->window_sizes.end(); ++it) half_window_sizes.push_back(static_cast<int>((*it + 1) >> 1)); central_mult = 1.0F - (2.0F * layer_derived->window_weights_list[0][0] * layer_derived->window_weights_list[1][0]); } std::vector<size_t> local_contrast_subtractive_2d_layer_hessian_cuda::get_sizes_of_additional_buffers_per_entry() const { 
			std::vector<size_t> res;

			res.push_back(input_elem_count_per_feature_map * affected_feature_map_count * sizeof(float));

			return res;
		}

		bool local_contrast_subtractive_2d_layer_hessian_cuda::is_in_place_backprop() const
		{
			return true;
		}
	}
}
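Before the paired CUDA source, a minimal host-side sketch of the forward computation that the kernels above perform may help: a separable blur whose 1-D taps are applied symmetrically to both neighbours, with mirrored borders, followed by subtraction of the blurred signal from the original input. The mirror() helper, the single shared weights vector and the values in main() are illustrative assumptions, not part of nnForge; in the actual layer the horizontal and vertical forward passes take their weights from schema_data[1] and schema_data[2] (the backprop path uses schema_data[3] and schema_data[4]), and the sketch assumes the half window does not exceed the image width or height, matching the kernels' pointer walk.

// Minimal reference sketch (not part of nnForge) of the blur-and-subtract
// that the horizontal and vertical kernels above perform for one feature map.
// mirror(), blur_and_subtract() and the values in main() are illustrative.
#include <cstdio>
#include <vector>

// Reflect an out-of-range coordinate the way the kernels walk their
// low/high pointers: the border sample is used twice, then the walk
// turns around (..., in[1], in[0], in[0], in[1], ...).
static int mirror(int p, int size)
{
	if (p < 0)
		return -p - 1;
	if (p >= size)
		return 2 * size - 1 - p;
	return p;
}

// weights[0] is the centre tap; weights[i] is applied to both the i-th
// neighbour to the left/top and to the right/bottom, so the effective
// 1-D filter is symmetric. half_window = weights.size().
static std::vector<float> blur_and_subtract(
	const std::vector<float>& input,
	int width,
	int height,
	const std::vector<float>& weights)
{
	int half_window = static_cast<int>(weights.size());
	std::vector<float> blurred_h(input.size());
	std::vector<float> output(input.size());

	// Pass 1: horizontal blur into a scratch buffer
	// (the *_blur_horizontal_* kernels).
	for(int y = 0; y < height; ++y)
		for(int x = 0; x < width; ++x)
		{
			float res = input[y * width + x] * weights[0];
			for(int i = 1; i < half_window; ++i)
				res += (input[y * width + mirror(x - i, width)]
					+ input[y * width + mirror(x + i, width)]) * weights[i];
			blurred_h[y * width + x] = res;
		}

	// Pass 2: vertical blur of the scratch buffer, then subtract the blurred
	// value from the original input (the *_vertical_and_subtract_* kernels).
	for(int y = 0; y < height; ++y)
		for(int x = 0; x < width; ++x)
		{
			float res = blurred_h[y * width + x] * weights[0];
			for(int i = 1; i < half_window; ++i)
				res += (blurred_h[mirror(y - i, height) * width + x]
					+ blurred_h[mirror(y + i, height) * width + x]) * weights[i];
			output[y * width + x] = input[y * width + x] - res;
		}

	return output;
}

int main()
{
	// Toy 4x3 map with a half window of 2; the weights are made-up values.
	std::vector<float> in = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 };
	std::vector<float> w = { 0.5f, 0.25f };
	std::vector<float> out = blur_and_subtract(in, 4, 3, w);
	for(float v : out)
		std::printf("%f\n", v);
	return 0;
}

The switch over half_window_sizes in enqueue_test and enqueue_backprop exists so that, for half windows of 1 through 10, the tap loop runs in a fully unrolled template instantiation (#pragma unroll with a compile-time bound), while the generic kernels with #pragma unroll 4 cover any larger window.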
cd3e6292aaf0344188658e4c3361dc5a6fc24780.cu
/* * Copyright 2011-2013 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "local_contrast_subtractive_2d_layer_hessian_cuda.h" #include "../local_contrast_subtractive_layer.h" #include "../nn_types.h" #include "util_cuda.h" __global__ void local_contrast_subtractive_2d_blur_horizontal_hess_kernel( const float * __restrict input, float * __restrict output, const unsigned int * __restrict affected_feature_map_list, const float * __restrict weights, int input_feature_map_count, int affected_feature_map_count, int window_width, int width, int height, int entry_count) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int z = blockIdx.z * blockDim.z + threadIdx.z; int entry_id = z / affected_feature_map_count; bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count); if (in_bounds) { int affected_feature_map_index = z - (entry_id * affected_feature_map_count); int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index]; const float * current_input = input + (int)(((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x); const float * current_input_low = current_input; const float * current_input_high = current_input; const float * current_weights = weights; float res = *current_input * *current_weights; #pragma unroll 4 for(int i = 1; i < window_width; ++i) { current_weights++; if (i < x + 1) current_input_low--; if (i > x + 1) current_input_low++; if (i < width - x) current_input_high++; if (i > width - x) current_input_high--; res += (*current_input_low + *current_input_high) * *current_weights; } output[(z * height + y) * width + x] = res; } } template<int WINDOW_WIDTH> __global__ void local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel( const float * __restrict input, float * __restrict output, const unsigned int * __restrict affected_feature_map_list, const float * __restrict weights, int input_feature_map_count, int affected_feature_map_count, int width, int height, int entry_count) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int z = blockIdx.z * blockDim.z + threadIdx.z; int entry_id = z / affected_feature_map_count; bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count); if (in_bounds) { int affected_feature_map_index = z - (entry_id * affected_feature_map_count); int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index]; const float * current_input = input + (int)(((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x); const float * current_input_low = current_input; const float * current_input_high = current_input; const float * current_weights = weights; float res = *current_input * *current_weights; #pragma unroll for(int i = 1; i < WINDOW_WIDTH; ++i) { current_weights++; if (i < x + 1) current_input_low--; if (i > x + 1) current_input_low++; if (i < width - x) current_input_high++; if (i > width - x) 
current_input_high--; res += (*current_input_low + *current_input_high) * *current_weights; } output[(z * height + y) * width + x] = res; } } __global__ void local_contrast_subtractive_2d_blur_vertical_and_subtract_hess_kernel( const float * __restrict input, const float * __restrict original_input, float * __restrict output, const unsigned int * __restrict affected_feature_map_list, const float * __restrict weights, int input_feature_map_count, int affected_feature_map_count, int window_height, int width, int height, int entry_count) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int z = blockIdx.z * blockDim.z + threadIdx.z; int entry_id = z / affected_feature_map_count; bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count); if (in_bounds) { int affected_feature_map_index = z - (entry_id * affected_feature_map_count); int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index]; const float * current_input = input + (int)((z * height + y) * width + x); const float * current_input_low = current_input; const float * current_input_high = current_input; const float * current_weights = weights; float res = *current_input * *current_weights; #pragma unroll 4 for(int i = 1; i < window_height; ++i) { current_weights++; if (i < y + 1) current_input_low -= width; if (i > y + 1) current_input_low += width; if (i < height - y) current_input_high += width; if (i > height - y) current_input_high -= width; res += (*current_input_low + *current_input_high) * *current_weights; } int offset = ((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x; output[offset] = original_input[offset] - res; } } template<int WINDOW_HEIGHT> __global__ void local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel( const float * __restrict input, const float * __restrict original_input, float * __restrict output, const unsigned int * __restrict affected_feature_map_list, const float * __restrict weights, int input_feature_map_count, int affected_feature_map_count, int width, int height, int entry_count) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int z = blockIdx.z * blockDim.z + threadIdx.z; int entry_id = z / affected_feature_map_count; bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count); if (in_bounds) { int affected_feature_map_index = z - (entry_id * affected_feature_map_count); int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index]; const float * current_input = input + (int)((z * height + y) * width + x); const float * current_input_low = current_input; const float * current_input_high = current_input; const float * current_weights = weights; float res = *current_input * *current_weights; #pragma unroll for(int i = 1; i < WINDOW_HEIGHT; ++i) { current_weights++; if (i < y + 1) current_input_low -= width; if (i > y + 1) current_input_low += width; if (i < height - y) current_input_high += width; if (i > height - y) current_input_high -= width; res += (*current_input_low + *current_input_high) * *current_weights; } int offset = ((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x; output[offset] = original_input[offset] - res; } } __global__ void local_contrast_subtractive_2d_copy_unaffected_hess_kernel( const float * __restrict original_input, float * __restrict output, const unsigned int * __restrict unaffected_feature_map_list, int 
input_feature_map_count, int unaffected_feature_map_count, int elem_count_per_fature_map, int entry_count) { int elem_id = blockIdx.x * blockDim.x + threadIdx.x; int unaffected_feature_map_index = blockIdx.y * blockDim.y + threadIdx.y; int entry_id = blockIdx.z * blockDim.z + threadIdx.z; bool in_bounds = (elem_id < elem_count_per_fature_map) && (unaffected_feature_map_index < unaffected_feature_map_count) && (entry_id < entry_count); if (in_bounds) { int unaffected_feature_map_id = unaffected_feature_map_list[unaffected_feature_map_index]; int offset = (entry_id * input_feature_map_count + unaffected_feature_map_id) * elem_count_per_fature_map + elem_id; output[offset] = original_input[offset]; } } __global__ void local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_hess_kernel( const float * __restrict input, float * __restrict output, const unsigned int * __restrict affected_feature_map_list, const float * __restrict weights_squared, float central_mult, int input_feature_map_count, int affected_feature_map_count, int window_height, int width, int height, int entry_count) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int z = blockIdx.z * blockDim.z + threadIdx.z; int entry_id = z / affected_feature_map_count; bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count); if (in_bounds) { int affected_feature_map_index = z - (entry_id * affected_feature_map_count); int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index]; const float * current_input = input + (int)((z * height + y) * width + x); const float * current_input_low = current_input; const float * current_input_high = current_input; const float * current_weights = weights_squared; float res = *current_input * *current_weights; #pragma unroll 4 for(int i = 1; i < window_height; ++i) { current_weights++; if (i < y + 1) current_input_low -= width; if (i > y + 1) current_input_low += width; if (i < height - y) current_input_high += width; if (i > height - y) current_input_high -= width; res += (*current_input_low + *current_input_high) * *current_weights; } int offset = ((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x; output[offset] = output[offset] * central_mult + res; } } template<int WINDOW_HEIGHT> __global__ void local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel( const float * __restrict input, float * __restrict output, const unsigned int * __restrict affected_feature_map_list, const float * __restrict weights_squared, float central_mult, int input_feature_map_count, int affected_feature_map_count, int width, int height, int entry_count) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int z = blockIdx.z * blockDim.z + threadIdx.z; int entry_id = z / affected_feature_map_count; bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count); if (in_bounds) { int affected_feature_map_index = z - (entry_id * affected_feature_map_count); int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index]; const float * current_input = input + (int)((z * height + y) * width + x); const float * current_input_low = current_input; const float * current_input_high = current_input; const float * current_weights = weights_squared; float res = *current_input * *current_weights; #pragma unroll for(int i = 1; i < WINDOW_HEIGHT; ++i) { current_weights++; if (i < y + 1) current_input_low 
-= width; if (i > y + 1) current_input_low += width; if (i < height - y) current_input_high += width; if (i > height - y) current_input_high -= width; res += (*current_input_low + *current_input_high) * *current_weights; } int offset = ((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x; output[offset] = output[offset] * central_mult + res; } } namespace nnforge { namespace cuda { local_contrast_subtractive_2d_layer_hessian_cuda::local_contrast_subtractive_2d_layer_hessian_cuda() { } local_contrast_subtractive_2d_layer_hessian_cuda::~local_contrast_subtractive_2d_layer_hessian_cuda() { } void local_contrast_subtractive_2d_layer_hessian_cuda::enqueue_test( cudaStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, unsigned int entry_count) { std::pair<dim3, dim3> kernel_1st_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access( *cuda_config, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], affected_feature_map_count * entry_count); switch(half_window_sizes[0]) { case 1: local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<1><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 2: local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<2><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 3: local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<3><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 4: local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<4><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 5: local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<5><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 6: local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<6><<<kernel_1st_dims.first, 
kernel_1st_dims.second, 0, stream_id>>>(*input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 7: local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<7><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 8: local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<8><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 9: local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<9><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 10: local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<10><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; default: local_contrast_subtractive_2d_blur_horizontal_hess_kernel<<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>( *input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, half_window_sizes[0], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; } std::pair<dim3, dim3> kernel_2nd_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access( *cuda_config, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], affected_feature_map_count * entry_count); switch(half_window_sizes[1]) { case 1: local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<1><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 2: local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<2><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], 
input_configuration_specific.dimension_sizes[1], entry_count); break; case 3: local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<3><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 4: local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<4><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 5: local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<5><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 6: local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<6><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 7: local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<7><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 8: local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<8><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 9: local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<9><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 10: local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<10><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, 
affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; default: local_contrast_subtractive_2d_blur_vertical_and_subtract_hess_kernel<<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>( *additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, half_window_sizes[1], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; } if (unaffected_feature_map_count > 0) { std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access( *cuda_config, input_elem_count_per_feature_map, unaffected_feature_map_count, entry_count); local_contrast_subtractive_2d_copy_unaffected_hess_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *input_neurons_buffer, *output_neurons_buffer, *schema_data[5], input_configuration_specific.feature_map_count, unaffected_feature_map_count, input_elem_count_per_feature_map, entry_count); } } void local_contrast_subtractive_2d_layer_hessian_cuda::enqueue_backprop( cudaStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data, const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_errors_buffer, cuda_linear_buffer_device_smart_ptr input_errors_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, unsigned int entry_count) { std::pair<dim3, dim3> kernel_1st_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access( *cuda_config, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], affected_feature_map_count * entry_count); switch(half_window_sizes[0]) { case 1: local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<1><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 2: local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<2><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 3: local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<3><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 4: local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<4><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], 
input_configuration_specific.dimension_sizes[1], entry_count); break; case 5: local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<5><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 6: local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<6><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 7: local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<7><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 8: local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<8><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 9: local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<9><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 10: local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<10><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; default: local_contrast_subtractive_2d_blur_horizontal_hess_kernel<<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>( *output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, half_window_sizes[0], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; } std::pair<dim3, dim3> kernel_2nd_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access( *cuda_config, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], affected_feature_map_count * entry_count); switch(half_window_sizes[1]) { case 1: local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<1><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, 
input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 2: local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<2><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 3: local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<3><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 4: local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<4><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 5: local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<5><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 6: local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<6><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 7: local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<7><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 8: local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<8><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 9: local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<9><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, 
stream_id>>>(*additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; case 10: local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<10><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; default: local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_hess_kernel<<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>( *additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, half_window_sizes[1], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count); break; } } void local_contrast_subtractive_2d_layer_hessian_cuda::hessian_configured() { nnforge_shared_ptr<const local_contrast_subtractive_layer> layer_derived = nnforge_dynamic_pointer_cast<const local_contrast_subtractive_layer>(layer_schema); affected_feature_map_count = static_cast<int>(layer_derived->feature_maps_affected.size()); unaffected_feature_map_count = static_cast<int>(layer_derived->feature_maps_unaffected.size()); for(std::vector<unsigned int>::const_iterator it = layer_derived->window_sizes.begin(); it != layer_derived->window_sizes.end(); ++it) half_window_sizes.push_back(static_cast<int>((*it + 1) >> 1)); central_mult = 1.0F - (2.0F * layer_derived->window_weights_list[0][0] * layer_derived->window_weights_list[1][0]); } std::vector<size_t> local_contrast_subtractive_2d_layer_hessian_cuda::get_sizes_of_additional_buffers_per_entry() const { std::vector<size_t> res; res.push_back(input_elem_count_per_feature_map * affected_feature_map_count * sizeof(float)); return res; } bool local_contrast_subtractive_2d_layer_hessian_cuda::is_in_place_backprop() const { return true; } } }
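The kernel dispatch above follows one idiom: a switch over the runtime half-window size picks a fully unrolled template instantiation (<1> through <10>), and larger windows fall through to a non-templated kernel that loops at run time. A minimal sketch of that idiom in isolation, using hypothetical kernel names (blur_exact_kernel, blur_generic_kernel, launch_blur) rather than the nnForge ones:

#include <cuda_runtime.h>

// Stand-ins for the *_exact_hess_kernel<N> / *_hess_kernel pairs above.
template<int HALF_WINDOW>
__global__ void blur_exact_kernel(const float* in, float* out, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    float sum = 0.0F;
    #pragma unroll
    for (int k = -HALF_WINDOW; k <= HALF_WINDOW; ++k)   // trip count known at compile time
        sum += in[min(max(i + k, 0), n - 1)];
    out[i] = sum / (2 * HALF_WINDOW + 1);
}

__global__ void blur_generic_kernel(const float* in, float* out, int n, int half_window)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    float sum = 0.0F;
    for (int k = -half_window; k <= half_window; ++k)   // runtime trip count, no unrolling
        sum += in[min(max(i + k, 0), n - 1)];
    out[i] = sum / (2 * half_window + 1);
}

// Runtime value mapped onto a compile-time template parameter, mirroring
// the switch(half_window_sizes[d]) blocks in the layer code above.
void launch_blur(const float* in, float* out, int n, int half_window, cudaStream_t stream)
{
    dim3 grid((n + 255) / 256), block(256);
    switch (half_window)
    {
    case 1: blur_exact_kernel<1><<<grid, block, 0, stream>>>(in, out, n); break;
    case 2: blur_exact_kernel<2><<<grid, block, 0, stream>>>(in, out, n); break;
    case 3: blur_exact_kernel<3><<<grid, block, 0, stream>>>(in, out, n); break;
    // ... cases 4..10 follow the same pattern in the real layer ...
    default: blur_generic_kernel<<<grid, block, 0, stream>>>(in, out, n, half_window); break;
    }
}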
2ebd54d200bc19d5c5d312ffc63d3693e4161d88.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "__dds.cu"
#include <chrono>
#include <iostream>

using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int nrows = 1;
            int nnz = 1;
            double *A = NULL;
            hipMalloc(&A, XSIZE*YSIZE);
            double *B = NULL;
            hipMalloc(&B, XSIZE*YSIZE);
            int *Cir = NULL;
            hipMalloc(&Cir, XSIZE*YSIZE);
            int *Cic = NULL;
            hipMalloc(&Cic, XSIZE*YSIZE);
            double *P = NULL;
            hipMalloc(&P, XSIZE*YSIZE);
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( __dds), dim3(gridBlock), dim3(threadBlock), 0, 0, nrows, nnz, A, B, Cir, Cic, P);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( __dds), dim3(gridBlock), dim3(threadBlock), 0, 0, nrows, nnz, A, B, Cir, Cic, P);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( __dds), dim3(gridBlock), dim3(threadBlock), 0, 0, nrows, nnz, A, B, Cir, Cic, P);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
2ebd54d200bc19d5c5d312ffc63d3693e4161d88.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "__dds.cu"
#include <chrono>
#include <iostream>

using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int nrows = 1;
            int nnz = 1;
            double *A = NULL;
            cudaMalloc(&A, XSIZE*YSIZE);
            double *B = NULL;
            cudaMalloc(&B, XSIZE*YSIZE);
            int *Cir = NULL;
            cudaMalloc(&Cir, XSIZE*YSIZE);
            int *Cic = NULL;
            cudaMalloc(&Cic, XSIZE*YSIZE);
            double *P = NULL;
            cudaMalloc(&P, XSIZE*YSIZE);
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            __dds<<<gridBlock, threadBlock>>>(nrows, nnz, A, B, Cir, Cic, P);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                __dds<<<gridBlock, threadBlock>>>(nrows, nnz, A, B, Cir, Cic, P);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                __dds<<<gridBlock, threadBlock>>>(nrows, nnz, A, B, Cir, Cic, P);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
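Both versions of this harness pass XSIZE*YSIZE directly to hipMalloc/cudaMalloc as a byte count even though A, B and P hold double and Cir/Cic hold int, ignore every return code, never release the buffers between configurations, and read the stop clock without a final device synchronize, so the reported figure appears to reflect launch and queueing overhead more than complete kernel execution. A minimal sketch of element-sized, checked and released allocation for one configuration, CUDA side only; run_one_config and CHECK_CUDA are illustrative names, not part of the harness:

#include <cstdio>
#include <cuda_runtime.h>

#define CHECK_CUDA(call)                                                   \
    do {                                                                   \
        cudaError_t err_ = (call);                                         \
        if (err_ != cudaSuccess) {                                         \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                    \
                    cudaGetErrorString(err_), __FILE__, __LINE__);         \
            return 1;                                                      \
        }                                                                  \
    } while (0)

// Buffers sized in elements, checked, and freed per configuration.
// XSIZE/YSIZE play the same role as in the harness above.
int run_one_config(int XSIZE, int YSIZE)
{
    double *A = NULL, *B = NULL, *P = NULL;
    int *Cir = NULL, *Cic = NULL;
    size_t n = (size_t)XSIZE * YSIZE;

    CHECK_CUDA(cudaMalloc(&A,   n * sizeof(double)));
    CHECK_CUDA(cudaMalloc(&B,   n * sizeof(double)));
    CHECK_CUDA(cudaMalloc(&Cir, n * sizeof(int)));
    CHECK_CUDA(cudaMalloc(&Cic, n * sizeof(int)));
    CHECK_CUDA(cudaMalloc(&P,   n * sizeof(double)));

    // ... launch and time the kernel here, then synchronize before reading the clock ...
    CHECK_CUDA(cudaDeviceSynchronize());

    cudaFree(A); cudaFree(B); cudaFree(Cir); cudaFree(Cic); cudaFree(P);
    return 0;
}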
a9100be43551c67fa086263e1d12005faf427615.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> //#include "mat_mult_from_doc.h" #include "mat_mult_cublas.h" #include "mat_mult_gpu.h" #include "mat_mult_seq.h" #include "mat_mult_shared.h" void Usage(int retVal, char *argv0) { fprintf(retVal == 0? stdout : stderr, "USAGE: %s <matrix A input file> <matrix B input file> " "<alpha> <S,G,C> <output matrix file> [o]\n", argv0); exit(retVal); } int GetInputs(int argc, char *argv[], Matrix *a, Matrix *b, float *alpha, char *which, int *useOldFormat) { int trans = MATRIX_FILE_NO_TRANSPOSE; *which = argv[4][0]; if (*which == 'C') trans = MATRIX_FILE_TRANSPOSE; if (argc < 6) RET_ERROR("must have at least 5 cmd line args"); *useOldFormat = 0; if (argc > 6 && strncmp(argv[6], "o", 2) == 0) *useOldFormat = 1; if (*useOldFormat) { if (MatrixFromFile(argv[1], a, trans) != SUCCESS) RET_ERROR("could not read matrix A"); if (MatrixFromFile(argv[2], b, trans) != SUCCESS) RET_ERROR("could not read matrix B"); } else { if (MatrixFromCOOFile(argv[1], a, trans) != SUCCESS) RET_ERROR("could not read matrix A"); if (MatrixFromCOOFile(argv[2], b, trans) != SUCCESS) RET_ERROR("could not read matrix B"); } *alpha = strtof(argv[3], (char**)NULL); return SUCCESS; } int main(int argc, char *argv[]) { Matrix A, B, C; Matrix *out; float alpha; char which; int useOldFormat; int trans = MATRIX_FILE_NO_TRANSPOSE; if (GetInputs(argc, argv, &A, &B, &alpha, &which, &useOldFormat) != SUCCESS) Usage(1, argv[0]); if (which == 'C') { printf("Using CUBLAS implementation\n"); out = &B; trans = MATRIX_FILE_TRANSPOSE; if (MatMultCublas(A, B, alpha) != SUCCESS) RET_ERROR("MatMultCublas failed"); } else { if (which == 'G') { printf("Using GPU implementation\n"); out = &B; if (MatMultGPU(A, B, alpha) != SUCCESS) RET_ERROR("MatMultGPU failed"); } else { out = &C; C.height = A.height; C.width = B.width; C.stride = C.width; hipError_t r; r = hipHostMalloc(&C.els, C.width * C.height * sizeof(C.els[0])); if (r != hipSuccess) if (! (C.els = (float*) malloc(C.width*C.height*sizeof(C.els[0])))) RET_ERROR("couldn't allocate host mem for C matrix"); printf("Using sequential implementation\n"); if (MatMultSeq(&A, &B, &C, alpha) != SUCCESS) RET_ERROR("MatMultSeq failed"); } } if (useOldFormat) { if (MatrixToFile(argv[5], out, trans) != SUCCESS) RET_ERROR("could not write result matrix to %s", argv[5]); } else { if (MatrixToCOOFile(argv[5], out, trans) != SUCCESS) RET_ERROR("could not write result matrix to %s", argv[5]); } return 0; }
a9100be43551c67fa086263e1d12005faf427615.cu
#include <stdio.h> //#include "mat_mult_from_doc.h" #include "mat_mult_cublas.h" #include "mat_mult_gpu.h" #include "mat_mult_seq.h" #include "mat_mult_shared.h" void Usage(int retVal, char *argv0) { fprintf(retVal == 0? stdout : stderr, "USAGE: %s <matrix A input file> <matrix B input file> " "<alpha> <S,G,C> <output matrix file> [o]\n", argv0); exit(retVal); } int GetInputs(int argc, char *argv[], Matrix *a, Matrix *b, float *alpha, char *which, int *useOldFormat) { int trans = MATRIX_FILE_NO_TRANSPOSE; *which = argv[4][0]; if (*which == 'C') trans = MATRIX_FILE_TRANSPOSE; if (argc < 6) RET_ERROR("must have at least 5 cmd line args"); *useOldFormat = 0; if (argc > 6 && strncmp(argv[6], "o", 2) == 0) *useOldFormat = 1; if (*useOldFormat) { if (MatrixFromFile(argv[1], a, trans) != SUCCESS) RET_ERROR("could not read matrix A"); if (MatrixFromFile(argv[2], b, trans) != SUCCESS) RET_ERROR("could not read matrix B"); } else { if (MatrixFromCOOFile(argv[1], a, trans) != SUCCESS) RET_ERROR("could not read matrix A"); if (MatrixFromCOOFile(argv[2], b, trans) != SUCCESS) RET_ERROR("could not read matrix B"); } *alpha = strtof(argv[3], (char**)NULL); return SUCCESS; } int main(int argc, char *argv[]) { Matrix A, B, C; Matrix *out; float alpha; char which; int useOldFormat; int trans = MATRIX_FILE_NO_TRANSPOSE; if (GetInputs(argc, argv, &A, &B, &alpha, &which, &useOldFormat) != SUCCESS) Usage(1, argv[0]); if (which == 'C') { printf("Using CUBLAS implementation\n"); out = &B; trans = MATRIX_FILE_TRANSPOSE; if (MatMultCublas(A, B, alpha) != SUCCESS) RET_ERROR("MatMultCublas failed"); } else { if (which == 'G') { printf("Using GPU implementation\n"); out = &B; if (MatMultGPU(A, B, alpha) != SUCCESS) RET_ERROR("MatMultGPU failed"); } else { out = &C; C.height = A.height; C.width = B.width; C.stride = C.width; cudaError_t r; r = cudaMallocHost(&C.els, C.width * C.height * sizeof(C.els[0])); if (r != cudaSuccess) if (! (C.els = (float*) malloc(C.width*C.height*sizeof(C.els[0])))) RET_ERROR("couldn't allocate host mem for C matrix"); printf("Using sequential implementation\n"); if (MatMultSeq(&A, &B, &C, alpha) != SUCCESS) RET_ERROR("MatMultSeq failed"); } } if (useOldFormat) { if (MatrixToFile(argv[5], out, trans) != SUCCESS) RET_ERROR("could not write result matrix to %s", argv[5]); } else { if (MatrixToCOOFile(argv[5], out, trans) != SUCCESS) RET_ERROR("could not write result matrix to %s", argv[5]); } return 0; }
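The sequential path in this driver requests pinned host memory with cudaMallocHost and silently falls back to plain malloc, but nothing records which allocator actually provided C.els, so the matching release call (cudaFreeHost versus free) cannot be chosen later. A small sketch of the same fallback with that bookkeeping added; HostBuffer, host_buffer_alloc and host_buffer_free are illustrative names, not part of this driver:

#include <cstdlib>
#include <cuda_runtime.h>

// Pinned-host allocation with a pageable fallback, remembering which
// allocator succeeded so the matching release call can be used later.
struct HostBuffer {
    float *els;
    int pinned;   // 1 if cudaMallocHost succeeded, 0 if plain malloc was used
};

static int host_buffer_alloc(HostBuffer *buf, size_t count)
{
    buf->pinned = (cudaMallocHost(&buf->els, count * sizeof(float)) == cudaSuccess);
    if (!buf->pinned)
        buf->els = (float *)malloc(count * sizeof(float));
    return buf->els != NULL;
}

static void host_buffer_free(HostBuffer *buf)
{
    if (buf->els == NULL)
        return;
    if (buf->pinned)
        cudaFreeHost(buf->els);
    else
        free(buf->els);
    buf->els = NULL;
}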
b5b149d115adba3f64157e0818e24f844540dce9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <bits/stdc++.h> #include <algorithm> #include <mpi.h> #include <thrust/extrema.h> #include <thrust/device_vector.h> #define CSC(call) \ do { \ hipError_t res = call; \ if (res != hipSuccess) { \ fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \ __FILE__, __LINE__, hipGetErrorString(res)); \ exit(0); \ } \ } while(0) // // 3d 1d #define _i(i, j, k) (((k) + 1) * (ny + 2) * (nx + 2) + ((j) + 1) * (nx + 2) + (i) + 1) #define _ixy(i, j) ((j) * nx + (i)) #define _ixz(i, k) ((k) * nx + (i)) #define _iyz(j, k) ((k) * ny + (j)) // () #define _ib(i, j, k) ((k) * nby * nbx + (j) * nbx + (i)) #define _ibz(id) ((id) / nby / nbx) #define _iby(id) (((id) % (nby * nbx)) / nbx) #define _ibx(id) ((id) % nbx) __global__ void kernel_copy_yz(double *plane_yz, double *data, int nx, int ny, int nz, int i, int dir, int bc) { int idy = blockIdx.y * blockDim.y + threadIdx.y; int idx = blockIdx.x * blockDim.x + threadIdx.x; int offsety = blockDim.y * gridDim.y; int offsetx = blockDim.x * gridDim.x; int j, k; if (dir) { for (k = idy; k < nz; k += offsety) for (j = idx; j < ny; j += offsetx) plane_yz[_iyz(j, k)] = data[_i(i, j, k)]; } else { if (plane_yz) { for (k = idy; k < nz; k += offsety) for (j = idx; j < ny; j += offsetx) data[_i(i, j, k)] = plane_yz[_iyz(j, k)]; } else { for (k = idy; k < nz; k += offsety) for (j = idx; j < ny; j += offsetx) data[_i(i, j, k)] = bc; } } } __global__ void kernel_copy_xz(double *plane_xz, double *data, int nx, int ny, int nz, int j, int dir, int bc) { int idy = blockIdx.y * blockDim.y + threadIdx.y; int idx = blockIdx.x * blockDim.x + threadIdx.x; int offsety = blockDim.y * gridDim.y; int offsetx = blockDim.x * gridDim.x; int i, k; if (dir) { for (k = idy; k < nz; k += offsety) for (i = idx; i < nx; i += offsetx) plane_xz[_ixz(i, k)] = data[_i(i, j, k)]; } else { if (plane_xz) { for (k = idy; k < nz; k += offsety) for (i = idx; i < nx; i += offsetx) data[_i(i, j, k)] = plane_xz[_ixz(i, k)]; } else { for (k = idy; k < nz; k += offsety) for (i = idx; i < nx; i += offsetx) data[_i(i, j, k)] = bc; } } } __global__ void kernel_copy_xy(double *plane_xy, double *data, int nx, int ny, int nz, int k, int dir, int bc) { int idy = blockIdx.y * blockDim.y + threadIdx.y; int idx = blockIdx.x * blockDim.x + threadIdx.x; int offsety = blockDim.y * gridDim.y; int offsetx = blockDim.x * gridDim.x; int i, j; if (dir) { for (j = idy; j < ny; j += offsety) for (i = idx; i < nx; i += offsetx) plane_xy[_ixy(i, j)] = data[_i(i, j, k)]; } else { if (plane_xy) { for (j = idy; j < ny; j += offsety) for (i = idx; i < nx; i += offsetx) data[_i(i, j, k)] = plane_xy[_ixy(i, j)]; } else { for (j = idy; j < ny; j += offsety) for (i = idx; i < nx; i += offsetx) data[_i(i, j, k)] = bc; } } } __global__ void kernel(double *next, double *data, int nx, int ny, int nz, double hx, double hy, double hz) { int idz = blockIdx.z * blockDim.z + threadIdx.z; int idy = blockIdx.y * blockDim.y + threadIdx.y; int idx = blockIdx.x * blockDim.x + threadIdx.x; int offsetz = blockDim.z * gridDim.z; int offsety = blockDim.y * gridDim.y; int offsetx = blockDim.x * gridDim.x; int i, j, k; for (i = idx; i < nx; i += offsetx) for (j = idy; j < ny; j += offsety) for (k = idz; k < nz; k += offsetz) { next[_i(i, j, k)] = 0.5 * ((data[_i(i + 1, j, k)] + data[_i(i - 1, j, k)]) / (hx * hx) + (data[_i(i, j + 1, k)] + data[_i(i, j - 1, k)]) / (hy * hy) + (data[_i(i, j, k + 1)] + data[_i(i, j, k - 1)]) / (hz * hz)) / (1.0 / (hx * hx) + 1.0 / (hy * 
hy) + 1.0 / (hz * hz)); } } __global__ void kernel_error(double *next, double *data, int nx, int ny, int nz) { int idz = blockIdx.z * blockDim.z + threadIdx.z; int idy = blockIdx.y * blockDim.y + threadIdx.y; int idx = blockIdx.x * blockDim.x + threadIdx.x; int offsetz = blockDim.z * gridDim.z; int offsety = blockDim.y * gridDim.y; int offsetx = blockDim.x * gridDim.x; int i, j, k; for (i = idx - 1; i < nx + 1; i += offsetx) for (j = idy - 1; j < ny + 1; j += offsety) for (k = idz - 1; k < nz + 1; k += offsetz) { data[_i(i, j, k)] = ((i != -1 && j != -1 && k != -1 && i != nx && j != ny && k != nz)) * fabs(next[_i(i, j, k)] - data[_i(i, j, k)]); } } int main(int argc, char *argv[]) { std::ios_base::sync_with_stdio(false); std::cin.tie(NULL); std::cout.tie(NULL); int id, ib, jb, kb, nbx, nby, nbz, nx, ny, nz, it = 0; int i, j, k; int numproc, proc_name_len; char proc_name[MPI_MAX_PROCESSOR_NAME]; double lx, ly, lz, hx, hy, hz, bc_down, bc_up, bc_left, bc_right, bc_front, bc_back, eps, u_0; double *data, *temp, *next, *buff; double *dev_data, *dev_next, *dev_buff; char file_name[1024]; MPI_Status status; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &numproc); MPI_Comm_rank(MPI_COMM_WORLD, &id); MPI_Get_processor_name(proc_name, &proc_name_len); if (id == 0) { // std::cin >> nbx >> nby >> nbz; // () std::cin >> nx >> ny >> nz; // std::cin >> file_name; std::cin >> eps; std::cin >> lx >> ly >> lz; std::cin >> bc_down >> bc_up >> bc_left >> bc_right >> bc_front >> bc_back; std::cin >> u_0; std::cerr << nbx << " " << nby << " " << nbz << std::endl; std::cerr << nx << " " << ny << " " << nz << std::endl; std::cerr << file_name << std::endl; std::cerr << eps << std::endl; std::cerr << lx << " " << ly << " " << lz << " " << std::endl; std::cerr << bc_down << " " << bc_up << " " << bc_left << " " << bc_right << " " << bc_front << " " << bc_back << std::endl; std::cerr << u_0 << std::endl; } MPI_Bcast(&nx, 1, MPI_INT, 0, MPI_COMM_WORLD); // MPI_Bcast(&ny, 1, MPI_INT, 0, MPI_COMM_WORLD); // MPI_Bcast(&nz, 1, MPI_INT, 0, MPI_COMM_WORLD); // MPI_Bcast(&nbx, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&nby, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&nbz, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&lx, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&ly, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&lz, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&bc_down, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&bc_up, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&bc_left, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&bc_right, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&bc_front, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&bc_back, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&eps, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&u_0, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(file_name, 1024, MPI_CHAR, 0, MPI_COMM_WORLD); ib = _ibx(id); // 3- jb = _iby(id); kb = _ibz(id); hx = lx / (nx * nbx); hy = ly / (ny * nby); hz = lz / (nz * nbz); int _size_b = (nx + 2) * (ny + 2) * (nz + 2); int _size_plane = (::max(nx, ::max(ny, nz)) * ::max(nx, ::max(ny, nz)) + 2); data = (double *) malloc(sizeof(double) * _size_b); // double *_data = (double *) malloc(sizeof(double) * _size_b); next = (double *) malloc(sizeof(double) * _size_b); buff = (double *) malloc(sizeof(double) * _size_plane); CSC(hipMalloc(&dev_data, sizeof(double) * _size_b)); CSC(hipMalloc(&dev_next, sizeof(double) * _size_b)); CSC(hipMalloc(&dev_buff, sizeof(double) * _size_plane)); for (i = 0; i < nx; i++) // for (j = 0; j < ny; j++) for (k = 0; k < nz; 
k++) data[_i(i, j, k)] = u_0; CSC(hipMemcpy(dev_data, data, sizeof(double) * _size_b, hipMemcpyHostToDevice)); dim3 blocks(32, 32); dim3 threads(32, 32); for (; true; it++) { // diff = 0; // if (ib + 1 < nbx) { hipLaunchKernelGGL(( kernel_copy_yz), dim3(blocks), dim3(threads), 0, 0, dev_buff, dev_data, nx, ny, nz, nx - 1, true, 0.0); CSC(hipGetLastError()); CSC(hipMemcpy(buff, dev_buff, sizeof(double) * _size_plane, hipMemcpyDeviceToHost)); // for (k = 0; k < nz; ++k) // for (j = 0; j < ny; j++) { // std::cerr << buff[_iyz(j, k)] << " " << data[_i(nx - 1, j, k)] << "\n"; // buff[_iyz(j, k)] = data[_i(nx - 1, j, k)]; // } MPI_Send(buff, ny * nz, MPI_DOUBLE, _ib(ib + 1, jb, kb), id, MPI_COMM_WORLD); } if (ib > 0) { MPI_Recv(buff, ny * nz, MPI_DOUBLE, _ib(ib - 1, jb, kb), _ib(ib - 1, jb, kb), MPI_COMM_WORLD, &status); CSC(hipMemcpy(dev_buff, buff, sizeof(double) * _size_plane, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( kernel_copy_yz), dim3(blocks), dim3(threads), 0, 0, dev_buff, dev_data, nx, ny, nz, -1, false, 0.0); // CSC(hipMemcpy(_data, dev_data, sizeof(double) * _size_b, hipMemcpyDeviceToHost)); // for (k = 0; k < nz; ++k) // for (j = 0; j < ny; j++) { // std::cerr << _data[_i(-1, j, k)] << " " << buff[_iyz(j, k)] << "\n"; // data[_i(-1, j, k)] = buff[_iyz(j, k)]; // } } else { hipLaunchKernelGGL(( kernel_copy_yz), dim3(blocks), dim3(threads), 0, 0, NULL, dev_data, nx, ny, nz, -1, false, bc_left); // CSC(hipMemcpy(_data, dev_data, sizeof(double) * _size_b, hipMemcpyDeviceToHost)); // for (k = 0; k < nz; ++k) // for (j = 0; j < ny; j++) { // if(_data[_i(-1, j, k)] != bc_left) // std::cerr << "bc_left: " << _data[_i(-1, j, k)] << " " << bc_left << "\n"; // data[_i(-1, j, k)] = bc_left; // } } if (jb + 1 < nby) { hipLaunchKernelGGL(( kernel_copy_xz), dim3(blocks), dim3(threads), 0, 0, dev_buff, dev_data, nx, ny, nz, ny - 1, true, 0.0); CSC(hipGetLastError()); CSC(hipMemcpy(buff, dev_buff, sizeof(double) * _size_plane, hipMemcpyDeviceToHost)); // for (k = 0; k < nz; ++k) // for (i = 0; i < nx; i++) { // std::cerr << buff[_ixz(i, k)] << " " << data[_i(i, ny - 1, k)] << "\n"; // buff[_ixz(i, k)] = data[_i(i, ny - 1, k)]; // } MPI_Send(buff, nz * nx, MPI_DOUBLE, _ib(ib, jb + 1, kb), id, MPI_COMM_WORLD); } if (jb > 0) { MPI_Recv(buff, nx * nz, MPI_DOUBLE, _ib(ib, jb - 1, kb), _ib(ib, jb - 1, kb), MPI_COMM_WORLD, &status); CSC(hipMemcpy(dev_buff, buff, sizeof(double) * _size_plane, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( kernel_copy_xz), dim3(blocks), dim3(threads), 0, 0, dev_buff, dev_data, nx, ny, nz, -1, false, 0.0); // CSC(hipMemcpy(_data, dev_data, sizeof(double) * _size_b, hipMemcpyDeviceToHost)); // for (k = 0; k < nz; ++k) // for (i = 0; i < nx; i++) { // std::cerr << _data[_i(i, -1, k)] << " " << buff[_ixz(i, k)] << "\n"; // data[_i(i, -1, k)] = buff[_ixz(i, k)]; // } } else { hipLaunchKernelGGL(( kernel_copy_xz), dim3(blocks), dim3(threads), 0, 0, NULL, dev_data, nx, ny, nz, -1, false, bc_front); // CSC(hipMemcpy(_data, dev_data, sizeof(double) * _size_b, hipMemcpyDeviceToHost)); // for (k = 0; k < nz; ++k) // for (i = 0; i < nx; i++) { // if(_data[_i(i, -1, k)] != bc_front) // std::cerr << "bc_front: " << _data[_i(i, -1, k)] << " " << bc_front << "\n"; // data[_i(i, -1, k)] = bc_front; // } } if (kb + 1 < nbz) { hipLaunchKernelGGL(( kernel_copy_xy), dim3(blocks), dim3(threads), 0, 0, dev_buff, dev_data, nx, ny, nz, nz - 1, true, 0.0); CSC(hipGetLastError()); CSC(hipMemcpy(buff, dev_buff, sizeof(double) * _size_plane, hipMemcpyDeviceToHost)); // for (j = 0; j < ny; ++j) // for 
(i = 0; i < nx; i++) { // if(buff[_ixy(i, j)] != data[_i(i, j, nz - 1)]) // std::cerr << buff[_ixy(i, j)] << " " << data[_i(i, j, nz - 1)] << "\n"; // buff[_ixy(i, j)] = data[_i(i, j, nz - 1)]; // } MPI_Send(buff, ny * nx, MPI_DOUBLE, _ib(ib, jb, kb + 1), id, MPI_COMM_WORLD); } if (kb > 0) { MPI_Recv(buff, nx * ny, MPI_DOUBLE, _ib(ib, jb, kb - 1), _ib(ib, jb, kb - 1), MPI_COMM_WORLD, &status); CSC(hipMemcpy(dev_buff, buff, sizeof(double) * _size_plane, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( kernel_copy_xy), dim3(blocks), dim3(threads), 0, 0, dev_buff, dev_data, nx, ny, nz, -1, false, 0.0); // CSC(hipMemcpy(_data, dev_data, sizeof(double) * _size_b, hipMemcpyDeviceToHost)); // for (j = 0; j < ny; ++j) // for (i = 0; i < nx; i++) { // if(_data[_i(i, j, -1)] != buff[_ixy(i, j)]) { // std::cerr << _data[_i(i, j, -1)] << " " << buff[_ixy(i, j)] << "\n"; // } // data[_i(i, j, -1)] = buff[_ixy(i, j)]; // } } else { hipLaunchKernelGGL(( kernel_copy_xy), dim3(blocks), dim3(threads), 0, 0, NULL, dev_data, nx, ny, nz, -1, false, bc_down); // CSC(hipMemcpy(_data, dev_data, sizeof(double) * _size_b, hipMemcpyDeviceToHost)); // for (j = 0; j < ny; ++j) // for (i = 0; i < nx; ++i) { // if(_data[_i(i, j, -1)] != bc_down) { // std::cerr << _data[_i(i, j, -1)] << " " << bc_down << "\n"; // } // data[_i(i, j, -1)] = bc_down; // } } if (ib > 0) { hipLaunchKernelGGL(( kernel_copy_yz), dim3(blocks), dim3(threads), 0, 0, dev_buff, dev_data, nx, ny, nz, 0, true, 0.0); CSC(hipGetLastError()); CSC(hipMemcpy(buff, dev_buff, sizeof(double) * _size_plane, hipMemcpyDeviceToHost)); // for (k = 0; k < nz; ++k) // for (j = 0; j < ny; j++) { // if(buff[_iyz(j, k)] != data[_i(0, j, k)]) // std::cerr << buff[_iyz(j, k)] << " " << data[_i(0, j, k)] << "\n"; // buff[_iyz(j, k)] = data[_i(0, j, k)]; // } MPI_Send(buff, nz * ny, MPI_DOUBLE, _ib(ib - 1, jb, kb), id, MPI_COMM_WORLD); } if (ib + 1 < nbx) { MPI_Recv(buff, ny * nz, MPI_DOUBLE, _ib(ib + 1, jb, kb), _ib(ib + 1, jb, kb), MPI_COMM_WORLD, &status); CSC(hipMemcpy(dev_buff, buff, sizeof(double) * _size_plane, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( kernel_copy_yz), dim3(blocks), dim3(threads), 0, 0, dev_buff, dev_data, nx, ny, nz, nx, false, 0.0); // CSC(hipMemcpy(_data, dev_data, sizeof(double) * _size_b, hipMemcpyDeviceToHost)); // for (k = 0; k < nz; ++k) // for (j = 0; j < ny; j++) { // if (_data[_i(nx, j, k)] != buff[_iyz(j, k)]) { // std::cerr << _data[_i(nx, j, k)] << " " << buff[_iyz(j, k)] << "\n"; // } // data[_i(nx, j, k)] = buff[_iyz(j, k)]; // } } else { hipLaunchKernelGGL(( kernel_copy_yz), dim3(blocks), dim3(threads), 0, 0, NULL, dev_data, nx, ny, nz, nx, false, bc_right); // CSC(hipMemcpy(_data, dev_data, sizeof(double) * _size_b, hipMemcpyDeviceToHost)); // for (k = 0; k < nz; ++k) // for (j = 0; j < ny; ++j) { // if (_data[_i(nx, j, k)] != bc_right) { // std::cerr << _data[_i(nx, j, k)] << " " << bc_right << "\n"; // } // data[_i(nx, j, k)] = bc_right; // } } if (jb > 0) { hipLaunchKernelGGL(( kernel_copy_xz), dim3(blocks), dim3(threads), 0, 0, dev_buff, dev_data, nx, ny, nz, 0, true, 0.0); CSC(hipGetLastError()); CSC(hipMemcpy(buff, dev_buff, sizeof(double) * _size_plane, hipMemcpyDeviceToHost)); // for (k = 0; k < nz; ++k) // for (i = 0; i < nx; i++) { // if (buff[_ixz(i, k)] != data[_i(i, 0, k)]) { // std::cerr << buff[_ixz(i, k)] << " " << data[_i(i, 0, k)] << "\n"; // } // buff[_ixz(i, k)] = data[_i(i, 0, k)]; // } MPI_Send(buff, nz * nx, MPI_DOUBLE, _ib(ib, jb - 1, kb), id, MPI_COMM_WORLD); } if (jb + 1 < nby) { MPI_Recv(buff, nx * nz, 
MPI_DOUBLE, _ib(ib, jb + 1, kb), _ib(ib, jb + 1, kb), MPI_COMM_WORLD, &status); CSC(hipMemcpy(dev_buff, buff, sizeof(double) * _size_plane, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( kernel_copy_xz), dim3(blocks), dim3(threads), 0, 0, dev_buff, dev_data, nx, ny, nz, ny, false, 0.0); // CSC(hipMemcpy(_data, dev_data, sizeof(double) * _size_b, hipMemcpyDeviceToHost)); // for (k = 0; k < nz; k++) // for (i = 0; i < nx; i++) { // if (_data[_i(i, ny, k)] != buff[_ixz(i, k)]) { // std::cerr << _data[_i(i, ny, k)] << " " << buff[_ixz(i, k)] << "\n"; // } // data[_i(i, ny, k)] = buff[_ixz(i, k)]; // } } else { hipLaunchKernelGGL(( kernel_copy_xz), dim3(blocks), dim3(threads), 0, 0, NULL, dev_data, nx, ny, nz, ny, false, bc_back); // CSC(hipMemcpy(_data, dev_data, sizeof(double) * _size_b, hipMemcpyDeviceToHost)); // for (k = 0; k < nz; k++) // for (i = 0; i < nx; i++) { // if (_data[_i(i, ny, k)] != bc_back) { // std::cerr << _data[_i(i, ny, k)] << " " << bc_back << "\n"; // } // data[_i(i, ny, k)] = bc_back; // } } if (kb > 0) { hipLaunchKernelGGL(( kernel_copy_xy), dim3(blocks), dim3(threads), 0, 0, dev_buff, dev_data, nx, ny, nz, 0, true, 0.0); CSC(hipGetLastError()); CSC(hipMemcpy(buff, dev_buff, sizeof(double) * _size_plane, hipMemcpyDeviceToHost)); // for (j = 0; j < ny; ++j) // for (i = 0; i < nx; i++) { // if(buff[_ixy(i, j)] != data[_i(i, j, 0)]) { // std::cerr << buff[_ixy(i, j)] << " " << data[_i(i, j, 0)] << "\n"; // } // buff[_ixy(i, j)] = data[_i(i, j, 0)]; // } MPI_Send(buff, ny * nx, MPI_DOUBLE, _ib(ib, jb, kb - 1), id, MPI_COMM_WORLD); } if (kb + 1 < nbz) { MPI_Recv(buff, nx * ny, MPI_DOUBLE, _ib(ib, jb, kb + 1), _ib(ib, jb, kb + 1), MPI_COMM_WORLD, &status); CSC(hipMemcpy(dev_buff, buff, sizeof(double) * _size_plane, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( kernel_copy_xy), dim3(blocks), dim3(threads), 0, 0, dev_buff, dev_data, nx, ny, nz, nz, false, 0.0); // CSC(hipMemcpy(_data, dev_data, sizeof(double) * _size_b, hipMemcpyDeviceToHost)); // for (j = 0; j < ny; ++j) // for (i = 0; i < nx; i++) { // if(_data[_i(i, j, nz)] != buff[_ixy(i, j)]) { // std::cerr << _data[_i(i, j, nz)] << " " << buff[_ixy(i, j)] << "\n"; // } // data[_i(i, j, nz)] = buff[_ixy(i, j)]; // } } else { hipLaunchKernelGGL(( kernel_copy_xy), dim3(blocks), dim3(threads), 0, 0, NULL, dev_data, nx, ny, nz, nz, false, bc_up); // CSC(hipMemcpy(_data, dev_data, sizeof(double) * _size_b, hipMemcpyDeviceToHost)); // for (j = 0; j < ny; ++j) // for (i = 0; i < nx; ++i) { // if(_data[_i(i, j, nz)] != bc_up) { // std::cerr << "bc_up: " << _data[_i(i, j, nz)] << " " << bc_up << "\n"; // } // data[_i(i, j, nz)] = bc_up; // } } // // CSC(hipMemcpy(dev_data, data, sizeof(double) * _size_b, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( kernel), dim3(dim3(8, 8, 8)), dim3(dim3(32, 4, 4)), 0, 0, dev_next, dev_data, nx, ny, nz, hx, hy, hz); CSC(hipGetLastError()); hipLaunchKernelGGL(( kernel_error), dim3(dim3(8, 8, 8)), dim3(dim3(32, 4, 4)), 0, 0, dev_next, dev_data, nx, ny, nz); CSC(hipGetLastError()); double error = 0.0; thrust::device_ptr<double> p_arr = thrust::device_pointer_cast(dev_data); thrust::device_ptr<double> res = thrust::max_element(p_arr, p_arr + _size_b); error = *res; temp = dev_data; dev_data = dev_next; dev_next = temp; // CSC(hipMemcpy(data, dev_data, sizeof(double) * _size_b, hipMemcpyDeviceToHost)); // for (i = 0; i < nx; i++) // for (j = 0; j < ny; j++) // for (k = 0; k < nz; k++) { // next[_i(i, j, k)] = 0.5 * ((data[_i(i + 1, j, k)] + data[_i(i - 1, j, k)]) / (hx * hx) + // (data[_i(i, j + 1, 
k)] + data[_i(i, j - 1, k)]) / (hy * hy) + // (data[_i(i, j, k + 1)] + data[_i(i, j, k - 1)]) / (hz * hz)) / // (1.0 / (hx * hx) + 1.0 / (hy * hy) + 1.0 / (hz * hz)); // diff = ::max(diff, fabs(next[_i(i, j, k)] - data[_i(i, j, k)])); // } // // // temp = next; // next = data; // data = temp; double *diffs = (double *) malloc(sizeof(double) * nbx * nby * nbz); MPI_Allgather(&error, 1, MPI_DOUBLE, diffs, 1, MPI_DOUBLE, MPI_COMM_WORLD); double gather_diff = 0; for (k = 0; k < nbx * nby * nbz; ++k) { gather_diff = ::max(gather_diff, diffs[k]); } if (gather_diff < eps) { break; } // std::cerr << gather_diff << " "; // // diffs = (double *) malloc(sizeof(double) * nbx * nby * nbz); // MPI_Allgather(&diff, 1, MPI_DOUBLE, diffs, 1, MPI_DOUBLE, // MPI_COMM_WORLD); // gather_diff = 0; // for (k = 0; k < nbx * nby * nbz; ++k) { // gather_diff = ::max(gather_diff, diffs[k]); // } // std::cerr << gather_diff << "\n"; } CSC(hipMemcpy(data, dev_data, sizeof(double) * _size_b, hipMemcpyDeviceToHost)); CSC(hipFree(dev_data)); CSC(hipFree(dev_next)); CSC(hipFree(dev_buff)); int n_size = 14; char *out_buff = (char *) malloc(sizeof(char) * nx * ny * nz * n_size); memset(out_buff, ' ', nx * ny * nz * n_size * sizeof(char)); for (k = 0; k < nz; k++) { for (j = 0; j < ny; j++) for (i = 0; i < nx; i++) sprintf(out_buff + ((k * nx * ny) + j * nx + i) * n_size, "%.6e", data[_i(i, j, k)]); } // if (id == 1) for (i = 0; i < nx * ny * nz * n_size; i++) { if (out_buff[i] == '\0') out_buff[i] = ' '; // fprintf(stderr, "%c", out_buff[i]); } // fprintf(stderr, "\n"); MPI_Datatype cell; MPI_Type_contiguous(n_size, MPI_CHAR, &cell); MPI_Type_commit(&cell); MPI_Datatype subarray; int subarray_starts[3] = {0, 0, 0}; int subarray_subsizes[3] = {nx, ny, nz}; int subarray_bigsizes[3] = {nx, ny, nz}; MPI_Type_create_subarray(3, subarray_bigsizes, subarray_subsizes, subarray_starts, MPI_ORDER_FORTRAN, cell, &subarray); // memtype MPI_Type_commit(&subarray); MPI_Datatype bigarray; int bigarray_starts[3] = {ib * nx, jb * ny, kb * nz}; int bigarray_subsizes[3] = {nx, ny, nz}; int bigarray_bigsizes[3] = {nx * nbx, ny * nby, nz * nbz}; MPI_Type_create_subarray(3, bigarray_bigsizes, bigarray_subsizes, bigarray_starts, MPI_ORDER_FORTRAN, cell, &bigarray); // memtype MPI_Type_commit(&bigarray); MPI_File fp; MPI_File_delete(file_name, MPI_INFO_NULL); MPI_File_open(MPI_COMM_WORLD, file_name, MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fp); MPI_File_set_view(fp, 0, MPI_CHAR, bigarray, "native", MPI_INFO_NULL); MPI_File_write_all(fp, out_buff, 1, subarray, MPI_STATUS_IGNORE); MPI_File_close(&fp); // if (id != 0) { // for (k = 0; k < nz; ++k) // for (j = 0; j < ny; j++) { // for (i = 0; i < nx; i++) // buff[i] = data[_i(i, j, k)]; // MPI_Send(buff, nx, MPI_DOUBLE, 0, id, MPI_COMM_WORLD); // } // } else { // FILE *fd; // fd = fopen(file_name.c_str(), "w"); // for (kb = 0; kb < nbz; kb++) // for (k = 0; k < nz; ++k) // for (jb = 0; jb < nby; jb++) // for (j = 0; j < ny; j++) // for (ib = 0; ib < nbx; ib++) { // if (_ib(ib, jb, kb) == 0) // for (i = 0; i < nx; i++) // buff[i] = data[_i(i, j, k)]; // else // MPI_Recv(buff, nx, MPI_DOUBLE, _ib(ib, jb, kb), _ib(ib, jb, kb), MPI_COMM_WORLD, // &status); // for (i = 0; i < nx; i++) { //// printf("%.2f ", buff[i]); // fprintf(fd, "%.7e ", buff[i]); // } // } // fclose(fd); // } MPI_Finalize(); // std::cerr << "it: " << it << "\n"; free(buff); free(data); free(next); free(out_buff); return 0; }
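The convergence test in this solver allocates a diffs array on every iteration, gathers each rank's residual with MPI_Allgather, reduces it in a host loop, and never frees the array. Since only the global maximum is needed, a single MPI_Allreduce with MPI_MAX yields the same value without the per-iteration buffer; a sketch, with global_residual as an illustrative helper name and error being the per-rank value already computed above via thrust::max_element:

#include <mpi.h>

// Global convergence test: each rank contributes its local maximum
// residual and every rank receives the global maximum directly.
static double global_residual(double error)
{
    double gather_diff = 0.0;
    MPI_Allreduce(&error, &gather_diff, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
    return gather_diff;
}

Under that assumption, if (global_residual(error) < eps) break; would replace the gather-and-scan block inside the iteration loop.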
b5b149d115adba3f64157e0818e24f844540dce9.cu
#include <bits/stdc++.h> #include <algorithm> #include <mpi.h> #include <thrust/extrema.h> #include <thrust/device_vector.h> #define CSC(call) \ do { \ cudaError_t res = call; \ if (res != cudaSuccess) { \ fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \ __FILE__, __LINE__, cudaGetErrorString(res)); \ exit(0); \ } \ } while(0) // Индексация внутри блока // из 3d табличного представления в 1d одиночный массив #define _i(i, j, k) (((k) + 1) * (ny + 2) * (nx + 2) + ((j) + 1) * (nx + 2) + (i) + 1) #define _ixy(i, j) ((j) * nx + (i)) #define _ixz(i, k) ((k) * nx + (i)) #define _iyz(j, k) ((k) * ny + (j)) // Индексация по блокам (процессам) #define _ib(i, j, k) ((k) * nby * nbx + (j) * nbx + (i)) #define _ibz(id) ((id) / nby / nbx) #define _iby(id) (((id) % (nby * nbx)) / nbx) #define _ibx(id) ((id) % nbx) __global__ void kernel_copy_yz(double *plane_yz, double *data, int nx, int ny, int nz, int i, int dir, int bc) { int idy = blockIdx.y * blockDim.y + threadIdx.y; int idx = blockIdx.x * blockDim.x + threadIdx.x; int offsety = blockDim.y * gridDim.y; int offsetx = blockDim.x * gridDim.x; int j, k; if (dir) { for (k = idy; k < nz; k += offsety) for (j = idx; j < ny; j += offsetx) plane_yz[_iyz(j, k)] = data[_i(i, j, k)]; } else { if (plane_yz) { for (k = idy; k < nz; k += offsety) for (j = idx; j < ny; j += offsetx) data[_i(i, j, k)] = plane_yz[_iyz(j, k)]; } else { for (k = idy; k < nz; k += offsety) for (j = idx; j < ny; j += offsetx) data[_i(i, j, k)] = bc; } } } __global__ void kernel_copy_xz(double *plane_xz, double *data, int nx, int ny, int nz, int j, int dir, int bc) { int idy = blockIdx.y * blockDim.y + threadIdx.y; int idx = blockIdx.x * blockDim.x + threadIdx.x; int offsety = blockDim.y * gridDim.y; int offsetx = blockDim.x * gridDim.x; int i, k; if (dir) { for (k = idy; k < nz; k += offsety) for (i = idx; i < nx; i += offsetx) plane_xz[_ixz(i, k)] = data[_i(i, j, k)]; } else { if (plane_xz) { for (k = idy; k < nz; k += offsety) for (i = idx; i < nx; i += offsetx) data[_i(i, j, k)] = plane_xz[_ixz(i, k)]; } else { for (k = idy; k < nz; k += offsety) for (i = idx; i < nx; i += offsetx) data[_i(i, j, k)] = bc; } } } __global__ void kernel_copy_xy(double *plane_xy, double *data, int nx, int ny, int nz, int k, int dir, int bc) { int idy = blockIdx.y * blockDim.y + threadIdx.y; int idx = blockIdx.x * blockDim.x + threadIdx.x; int offsety = blockDim.y * gridDim.y; int offsetx = blockDim.x * gridDim.x; int i, j; if (dir) { for (j = idy; j < ny; j += offsety) for (i = idx; i < nx; i += offsetx) plane_xy[_ixy(i, j)] = data[_i(i, j, k)]; } else { if (plane_xy) { for (j = idy; j < ny; j += offsety) for (i = idx; i < nx; i += offsetx) data[_i(i, j, k)] = plane_xy[_ixy(i, j)]; } else { for (j = idy; j < ny; j += offsety) for (i = idx; i < nx; i += offsetx) data[_i(i, j, k)] = bc; } } } __global__ void kernel(double *next, double *data, int nx, int ny, int nz, double hx, double hy, double hz) { int idz = blockIdx.z * blockDim.z + threadIdx.z; int idy = blockIdx.y * blockDim.y + threadIdx.y; int idx = blockIdx.x * blockDim.x + threadIdx.x; int offsetz = blockDim.z * gridDim.z; int offsety = blockDim.y * gridDim.y; int offsetx = blockDim.x * gridDim.x; int i, j, k; for (i = idx; i < nx; i += offsetx) for (j = idy; j < ny; j += offsety) for (k = idz; k < nz; k += offsetz) { next[_i(i, j, k)] = 0.5 * ((data[_i(i + 1, j, k)] + data[_i(i - 1, j, k)]) / (hx * hx) + (data[_i(i, j + 1, k)] + data[_i(i, j - 1, k)]) / (hy * hy) + (data[_i(i, j, k + 1)] + data[_i(i, j, k - 1)]) / (hz * hz)) / (1.0 / (hx * hx) 
+ 1.0 / (hy * hy) + 1.0 / (hz * hz)); } } __global__ void kernel_error(double *next, double *data, int nx, int ny, int nz) { int idz = blockIdx.z * blockDim.z + threadIdx.z; int idy = blockIdx.y * blockDim.y + threadIdx.y; int idx = blockIdx.x * blockDim.x + threadIdx.x; int offsetz = blockDim.z * gridDim.z; int offsety = blockDim.y * gridDim.y; int offsetx = blockDim.x * gridDim.x; int i, j, k; for (i = idx - 1; i < nx + 1; i += offsetx) for (j = idy - 1; j < ny + 1; j += offsety) for (k = idz - 1; k < nz + 1; k += offsetz) { data[_i(i, j, k)] = ((i != -1 && j != -1 && k != -1 && i != nx && j != ny && k != nz)) * fabs(next[_i(i, j, k)] - data[_i(i, j, k)]); } } int main(int argc, char *argv[]) { std::ios_base::sync_with_stdio(false); std::cin.tie(NULL); std::cout.tie(NULL); int id, ib, jb, kb, nbx, nby, nbz, nx, ny, nz, it = 0; int i, j, k; int numproc, proc_name_len; char proc_name[MPI_MAX_PROCESSOR_NAME]; double lx, ly, lz, hx, hy, hz, bc_down, bc_up, bc_left, bc_right, bc_front, bc_back, eps, u_0; double *data, *temp, *next, *buff; double *dev_data, *dev_next, *dev_buff; char file_name[1024]; MPI_Status status; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &numproc); MPI_Comm_rank(MPI_COMM_WORLD, &id); MPI_Get_processor_name(proc_name, &proc_name_len); if (id == 0) { // Инициализация параметров расчета std::cin >> nbx >> nby >> nbz; // Размер сетки блоков (процессов) std::cin >> nx >> ny >> nz; // Размер блока std::cin >> file_name; std::cin >> eps; std::cin >> lx >> ly >> lz; std::cin >> bc_down >> bc_up >> bc_left >> bc_right >> bc_front >> bc_back; std::cin >> u_0; std::cerr << nbx << " " << nby << " " << nbz << std::endl; std::cerr << nx << " " << ny << " " << nz << std::endl; std::cerr << file_name << std::endl; std::cerr << eps << std::endl; std::cerr << lx << " " << ly << " " << lz << " " << std::endl; std::cerr << bc_down << " " << bc_up << " " << bc_left << " " << bc_right << " " << bc_front << " " << bc_back << std::endl; std::cerr << u_0 << std::endl; } MPI_Bcast(&nx, 1, MPI_INT, 0, MPI_COMM_WORLD); // Передача параметров расчета всем процессам MPI_Bcast(&ny, 1, MPI_INT, 0, MPI_COMM_WORLD); // Передача параметров расчета всем процессам MPI_Bcast(&nz, 1, MPI_INT, 0, MPI_COMM_WORLD); // Передача параметров расчета всем процессам MPI_Bcast(&nbx, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&nby, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&nbz, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&lx, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&ly, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&lz, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&bc_down, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&bc_up, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&bc_left, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&bc_right, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&bc_front, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&bc_back, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&eps, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&u_0, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(file_name, 1024, MPI_CHAR, 0, MPI_COMM_WORLD); ib = _ibx(id); // Переход к 3-мерной индексации процессов jb = _iby(id); kb = _ibz(id); hx = lx / (nx * nbx); hy = ly / (ny * nby); hz = lz / (nz * nbz); int _size_b = (nx + 2) * (ny + 2) * (nz + 2); int _size_plane = (std::max(nx, std::max(ny, nz)) * std::max(nx, std::max(ny, nz)) + 2); data = (double *) malloc(sizeof(double) * _size_b); // double *_data = (double *) malloc(sizeof(double) * _size_b); next = (double *) malloc(sizeof(double) * _size_b); buff = (double *) 
malloc(sizeof(double) * _size_plane); CSC(cudaMalloc(&dev_data, sizeof(double) * _size_b)); CSC(cudaMalloc(&dev_next, sizeof(double) * _size_b)); CSC(cudaMalloc(&dev_buff, sizeof(double) * _size_plane)); for (i = 0; i < nx; i++) // Инициализация блока for (j = 0; j < ny; j++) for (k = 0; k < nz; k++) data[_i(i, j, k)] = u_0; CSC(cudaMemcpy(dev_data, data, sizeof(double) * _size_b, cudaMemcpyHostToDevice)); dim3 blocks(32, 32); dim3 threads(32, 32); for (; true; it++) { // diff = 0; // Отправка данных и прием в перемешку if (ib + 1 < nbx) { kernel_copy_yz<<<blocks, threads>>>(dev_buff, dev_data, nx, ny, nz, nx - 1, true, 0.0); CSC(cudaGetLastError()); CSC(cudaMemcpy(buff, dev_buff, sizeof(double) * _size_plane, cudaMemcpyDeviceToHost)); // for (k = 0; k < nz; ++k) // for (j = 0; j < ny; j++) { // std::cerr << buff[_iyz(j, k)] << " " << data[_i(nx - 1, j, k)] << "\n"; // buff[_iyz(j, k)] = data[_i(nx - 1, j, k)]; // } MPI_Send(buff, ny * nz, MPI_DOUBLE, _ib(ib + 1, jb, kb), id, MPI_COMM_WORLD); } if (ib > 0) { MPI_Recv(buff, ny * nz, MPI_DOUBLE, _ib(ib - 1, jb, kb), _ib(ib - 1, jb, kb), MPI_COMM_WORLD, &status); CSC(cudaMemcpy(dev_buff, buff, sizeof(double) * _size_plane, cudaMemcpyHostToDevice)); kernel_copy_yz<<<blocks, threads>>>(dev_buff, dev_data, nx, ny, nz, -1, false, 0.0); // CSC(cudaMemcpy(_data, dev_data, sizeof(double) * _size_b, cudaMemcpyDeviceToHost)); // for (k = 0; k < nz; ++k) // for (j = 0; j < ny; j++) { // std::cerr << _data[_i(-1, j, k)] << " " << buff[_iyz(j, k)] << "\n"; // data[_i(-1, j, k)] = buff[_iyz(j, k)]; // } } else { kernel_copy_yz<<<blocks, threads>>>(NULL, dev_data, nx, ny, nz, -1, false, bc_left); // CSC(cudaMemcpy(_data, dev_data, sizeof(double) * _size_b, cudaMemcpyDeviceToHost)); // for (k = 0; k < nz; ++k) // for (j = 0; j < ny; j++) { // if(_data[_i(-1, j, k)] != bc_left) // std::cerr << "bc_left: " << _data[_i(-1, j, k)] << " " << bc_left << "\n"; // data[_i(-1, j, k)] = bc_left; // } } if (jb + 1 < nby) { kernel_copy_xz<<<blocks, threads>>>(dev_buff, dev_data, nx, ny, nz, ny - 1, true, 0.0); CSC(cudaGetLastError()); CSC(cudaMemcpy(buff, dev_buff, sizeof(double) * _size_plane, cudaMemcpyDeviceToHost)); // for (k = 0; k < nz; ++k) // for (i = 0; i < nx; i++) { // std::cerr << buff[_ixz(i, k)] << " " << data[_i(i, ny - 1, k)] << "\n"; // buff[_ixz(i, k)] = data[_i(i, ny - 1, k)]; // } MPI_Send(buff, nz * nx, MPI_DOUBLE, _ib(ib, jb + 1, kb), id, MPI_COMM_WORLD); } if (jb > 0) { MPI_Recv(buff, nx * nz, MPI_DOUBLE, _ib(ib, jb - 1, kb), _ib(ib, jb - 1, kb), MPI_COMM_WORLD, &status); CSC(cudaMemcpy(dev_buff, buff, sizeof(double) * _size_plane, cudaMemcpyHostToDevice)); kernel_copy_xz<<<blocks, threads>>>(dev_buff, dev_data, nx, ny, nz, -1, false, 0.0); // CSC(cudaMemcpy(_data, dev_data, sizeof(double) * _size_b, cudaMemcpyDeviceToHost)); // for (k = 0; k < nz; ++k) // for (i = 0; i < nx; i++) { // std::cerr << _data[_i(i, -1, k)] << " " << buff[_ixz(i, k)] << "\n"; // data[_i(i, -1, k)] = buff[_ixz(i, k)]; // } } else { kernel_copy_xz<<<blocks, threads>>>(NULL, dev_data, nx, ny, nz, -1, false, bc_front); // CSC(cudaMemcpy(_data, dev_data, sizeof(double) * _size_b, cudaMemcpyDeviceToHost)); // for (k = 0; k < nz; ++k) // for (i = 0; i < nx; i++) { // if(_data[_i(i, -1, k)] != bc_front) // std::cerr << "bc_front: " << _data[_i(i, -1, k)] << " " << bc_front << "\n"; // data[_i(i, -1, k)] = bc_front; // } } if (kb + 1 < nbz) { kernel_copy_xy<<<blocks, threads>>>(dev_buff, dev_data, nx, ny, nz, nz - 1, true, 0.0); CSC(cudaGetLastError()); CSC(cudaMemcpy(buff, 
dev_buff, sizeof(double) * _size_plane, cudaMemcpyDeviceToHost)); // for (j = 0; j < ny; ++j) // for (i = 0; i < nx; i++) { // if(buff[_ixy(i, j)] != data[_i(i, j, nz - 1)]) // std::cerr << buff[_ixy(i, j)] << " " << data[_i(i, j, nz - 1)] << "\n"; // buff[_ixy(i, j)] = data[_i(i, j, nz - 1)]; // } MPI_Send(buff, ny * nx, MPI_DOUBLE, _ib(ib, jb, kb + 1), id, MPI_COMM_WORLD); } if (kb > 0) { MPI_Recv(buff, nx * ny, MPI_DOUBLE, _ib(ib, jb, kb - 1), _ib(ib, jb, kb - 1), MPI_COMM_WORLD, &status); CSC(cudaMemcpy(dev_buff, buff, sizeof(double) * _size_plane, cudaMemcpyHostToDevice)); kernel_copy_xy<<<blocks, threads>>>(dev_buff, dev_data, nx, ny, nz, -1, false, 0.0); // CSC(cudaMemcpy(_data, dev_data, sizeof(double) * _size_b, cudaMemcpyDeviceToHost)); // for (j = 0; j < ny; ++j) // for (i = 0; i < nx; i++) { // if(_data[_i(i, j, -1)] != buff[_ixy(i, j)]) { // std::cerr << _data[_i(i, j, -1)] << " " << buff[_ixy(i, j)] << "\n"; // } // data[_i(i, j, -1)] = buff[_ixy(i, j)]; // } } else { kernel_copy_xy<<<blocks, threads>>>(NULL, dev_data, nx, ny, nz, -1, false, bc_down); // CSC(cudaMemcpy(_data, dev_data, sizeof(double) * _size_b, cudaMemcpyDeviceToHost)); // for (j = 0; j < ny; ++j) // for (i = 0; i < nx; ++i) { // if(_data[_i(i, j, -1)] != bc_down) { // std::cerr << _data[_i(i, j, -1)] << " " << bc_down << "\n"; // } // data[_i(i, j, -1)] = bc_down; // } } if (ib > 0) { kernel_copy_yz<<<blocks, threads>>>(dev_buff, dev_data, nx, ny, nz, 0, true, 0.0); CSC(cudaGetLastError()); CSC(cudaMemcpy(buff, dev_buff, sizeof(double) * _size_plane, cudaMemcpyDeviceToHost)); // for (k = 0; k < nz; ++k) // for (j = 0; j < ny; j++) { // if(buff[_iyz(j, k)] != data[_i(0, j, k)]) // std::cerr << buff[_iyz(j, k)] << " " << data[_i(0, j, k)] << "\n"; // buff[_iyz(j, k)] = data[_i(0, j, k)]; // } MPI_Send(buff, nz * ny, MPI_DOUBLE, _ib(ib - 1, jb, kb), id, MPI_COMM_WORLD); } if (ib + 1 < nbx) { MPI_Recv(buff, ny * nz, MPI_DOUBLE, _ib(ib + 1, jb, kb), _ib(ib + 1, jb, kb), MPI_COMM_WORLD, &status); CSC(cudaMemcpy(dev_buff, buff, sizeof(double) * _size_plane, cudaMemcpyHostToDevice)); kernel_copy_yz<<<blocks, threads>>>(dev_buff, dev_data, nx, ny, nz, nx, false, 0.0); // CSC(cudaMemcpy(_data, dev_data, sizeof(double) * _size_b, cudaMemcpyDeviceToHost)); // for (k = 0; k < nz; ++k) // for (j = 0; j < ny; j++) { // if (_data[_i(nx, j, k)] != buff[_iyz(j, k)]) { // std::cerr << _data[_i(nx, j, k)] << " " << buff[_iyz(j, k)] << "\n"; // } // data[_i(nx, j, k)] = buff[_iyz(j, k)]; // } } else { kernel_copy_yz<<<blocks, threads>>>(NULL, dev_data, nx, ny, nz, nx, false, bc_right); // CSC(cudaMemcpy(_data, dev_data, sizeof(double) * _size_b, cudaMemcpyDeviceToHost)); // for (k = 0; k < nz; ++k) // for (j = 0; j < ny; ++j) { // if (_data[_i(nx, j, k)] != bc_right) { // std::cerr << _data[_i(nx, j, k)] << " " << bc_right << "\n"; // } // data[_i(nx, j, k)] = bc_right; // } } if (jb > 0) { kernel_copy_xz<<<blocks, threads>>>(dev_buff, dev_data, nx, ny, nz, 0, true, 0.0); CSC(cudaGetLastError()); CSC(cudaMemcpy(buff, dev_buff, sizeof(double) * _size_plane, cudaMemcpyDeviceToHost)); // for (k = 0; k < nz; ++k) // for (i = 0; i < nx; i++) { // if (buff[_ixz(i, k)] != data[_i(i, 0, k)]) { // std::cerr << buff[_ixz(i, k)] << " " << data[_i(i, 0, k)] << "\n"; // } // buff[_ixz(i, k)] = data[_i(i, 0, k)]; // } MPI_Send(buff, nz * nx, MPI_DOUBLE, _ib(ib, jb - 1, kb), id, MPI_COMM_WORLD); } if (jb + 1 < nby) { MPI_Recv(buff, nx * nz, MPI_DOUBLE, _ib(ib, jb + 1, kb), _ib(ib, jb + 1, kb), MPI_COMM_WORLD, &status); CSC(cudaMemcpy(dev_buff, 
buff, sizeof(double) * _size_plane, cudaMemcpyHostToDevice)); kernel_copy_xz<<<blocks, threads>>>(dev_buff, dev_data, nx, ny, nz, ny, false, 0.0); // CSC(cudaMemcpy(_data, dev_data, sizeof(double) * _size_b, cudaMemcpyDeviceToHost)); // for (k = 0; k < nz; k++) // for (i = 0; i < nx; i++) { // if (_data[_i(i, ny, k)] != buff[_ixz(i, k)]) { // std::cerr << _data[_i(i, ny, k)] << " " << buff[_ixz(i, k)] << "\n"; // } // data[_i(i, ny, k)] = buff[_ixz(i, k)]; // } } else { kernel_copy_xz<<<blocks, threads>>>(NULL, dev_data, nx, ny, nz, ny, false, bc_back); // CSC(cudaMemcpy(_data, dev_data, sizeof(double) * _size_b, cudaMemcpyDeviceToHost)); // for (k = 0; k < nz; k++) // for (i = 0; i < nx; i++) { // if (_data[_i(i, ny, k)] != bc_back) { // std::cerr << _data[_i(i, ny, k)] << " " << bc_back << "\n"; // } // data[_i(i, ny, k)] = bc_back; // } } if (kb > 0) { kernel_copy_xy<<<blocks, threads>>>(dev_buff, dev_data, nx, ny, nz, 0, true, 0.0); CSC(cudaGetLastError()); CSC(cudaMemcpy(buff, dev_buff, sizeof(double) * _size_plane, cudaMemcpyDeviceToHost)); // for (j = 0; j < ny; ++j) // for (i = 0; i < nx; i++) { // if(buff[_ixy(i, j)] != data[_i(i, j, 0)]) { // std::cerr << buff[_ixy(i, j)] << " " << data[_i(i, j, 0)] << "\n"; // } // buff[_ixy(i, j)] = data[_i(i, j, 0)]; // } MPI_Send(buff, ny * nx, MPI_DOUBLE, _ib(ib, jb, kb - 1), id, MPI_COMM_WORLD); } if (kb + 1 < nbz) { MPI_Recv(buff, nx * ny, MPI_DOUBLE, _ib(ib, jb, kb + 1), _ib(ib, jb, kb + 1), MPI_COMM_WORLD, &status); CSC(cudaMemcpy(dev_buff, buff, sizeof(double) * _size_plane, cudaMemcpyHostToDevice)); kernel_copy_xy<<<blocks, threads>>>(dev_buff, dev_data, nx, ny, nz, nz, false, 0.0); // CSC(cudaMemcpy(_data, dev_data, sizeof(double) * _size_b, cudaMemcpyDeviceToHost)); // for (j = 0; j < ny; ++j) // for (i = 0; i < nx; i++) { // if(_data[_i(i, j, nz)] != buff[_ixy(i, j)]) { // std::cerr << _data[_i(i, j, nz)] << " " << buff[_ixy(i, j)] << "\n"; // } // data[_i(i, j, nz)] = buff[_ixy(i, j)]; // } } else { kernel_copy_xy<<<blocks, threads>>>(NULL, dev_data, nx, ny, nz, nz, false, bc_up); // CSC(cudaMemcpy(_data, dev_data, sizeof(double) * _size_b, cudaMemcpyDeviceToHost)); // for (j = 0; j < ny; ++j) // for (i = 0; i < nx; ++i) { // if(_data[_i(i, j, nz)] != bc_up) { // std::cerr << "bc_up: " << _data[_i(i, j, nz)] << " " << bc_up << "\n"; // } // data[_i(i, j, nz)] = bc_up; // } } // Перевычисление значений температуры // CSC(cudaMemcpy(dev_data, data, sizeof(double) * _size_b, cudaMemcpyHostToDevice)); kernel<<<dim3(8, 8, 8), dim3(32, 4, 4)>>>(dev_next, dev_data, nx, ny, nz, hx, hy, hz); CSC(cudaGetLastError()); kernel_error<<<dim3(8, 8, 8), dim3(32, 4, 4)>>>(dev_next, dev_data, nx, ny, nz); CSC(cudaGetLastError()); double error = 0.0; thrust::device_ptr<double> p_arr = thrust::device_pointer_cast(dev_data); thrust::device_ptr<double> res = thrust::max_element(p_arr, p_arr + _size_b); error = *res; temp = dev_data; dev_data = dev_next; dev_next = temp; // CSC(cudaMemcpy(data, dev_data, sizeof(double) * _size_b, cudaMemcpyDeviceToHost)); // for (i = 0; i < nx; i++) // for (j = 0; j < ny; j++) // for (k = 0; k < nz; k++) { // next[_i(i, j, k)] = 0.5 * ((data[_i(i + 1, j, k)] + data[_i(i - 1, j, k)]) / (hx * hx) + // (data[_i(i, j + 1, k)] + data[_i(i, j - 1, k)]) / (hy * hy) + // (data[_i(i, j, k + 1)] + data[_i(i, j, k - 1)]) / (hz * hz)) / // (1.0 / (hx * hx) + 1.0 / (hy * hy) + 1.0 / (hz * hz)); // diff = std::max(diff, fabs(next[_i(i, j, k)] - data[_i(i, j, k)])); // } // // // temp = next; // next = data; // data = temp; double 
*diffs = (double *) malloc(sizeof(double) * nbx * nby * nbz); MPI_Allgather(&error, 1, MPI_DOUBLE, diffs, 1, MPI_DOUBLE, MPI_COMM_WORLD); double gather_diff = 0; for (k = 0; k < nbx * nby * nbz; ++k) { gather_diff = std::max(gather_diff, diffs[k]); } if (gather_diff < eps) { break; } // std::cerr << gather_diff << " "; // // diffs = (double *) malloc(sizeof(double) * nbx * nby * nbz); // MPI_Allgather(&diff, 1, MPI_DOUBLE, diffs, 1, MPI_DOUBLE, // MPI_COMM_WORLD); // gather_diff = 0; // for (k = 0; k < nbx * nby * nbz; ++k) { // gather_diff = std::max(gather_diff, diffs[k]); // } // std::cerr << gather_diff << "\n"; } CSC(cudaMemcpy(data, dev_data, sizeof(double) * _size_b, cudaMemcpyDeviceToHost)); CSC(cudaFree(dev_data)); CSC(cudaFree(dev_next)); CSC(cudaFree(dev_buff)); int n_size = 14; char *out_buff = (char *) malloc(sizeof(char) * nx * ny * nz * n_size); memset(out_buff, ' ', nx * ny * nz * n_size * sizeof(char)); for (k = 0; k < nz; k++) { for (j = 0; j < ny; j++) for (i = 0; i < nx; i++) sprintf(out_buff + ((k * nx * ny) + j * nx + i) * n_size, "%.6e", data[_i(i, j, k)]); } // if (id == 1) for (i = 0; i < nx * ny * nz * n_size; i++) { if (out_buff[i] == '\0') out_buff[i] = ' '; // fprintf(stderr, "%c", out_buff[i]); } // fprintf(stderr, "\n"); MPI_Datatype cell; MPI_Type_contiguous(n_size, MPI_CHAR, &cell); MPI_Type_commit(&cell); MPI_Datatype subarray; int subarray_starts[3] = {0, 0, 0}; int subarray_subsizes[3] = {nx, ny, nz}; int subarray_bigsizes[3] = {nx, ny, nz}; MPI_Type_create_subarray(3, subarray_bigsizes, subarray_subsizes, subarray_starts, MPI_ORDER_FORTRAN, cell, &subarray); // memtype MPI_Type_commit(&subarray); MPI_Datatype bigarray; int bigarray_starts[3] = {ib * nx, jb * ny, kb * nz}; int bigarray_subsizes[3] = {nx, ny, nz}; int bigarray_bigsizes[3] = {nx * nbx, ny * nby, nz * nbz}; MPI_Type_create_subarray(3, bigarray_bigsizes, bigarray_subsizes, bigarray_starts, MPI_ORDER_FORTRAN, cell, &bigarray); // memtype MPI_Type_commit(&bigarray); MPI_File fp; MPI_File_delete(file_name, MPI_INFO_NULL); MPI_File_open(MPI_COMM_WORLD, file_name, MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fp); MPI_File_set_view(fp, 0, MPI_CHAR, bigarray, "native", MPI_INFO_NULL); MPI_File_write_all(fp, out_buff, 1, subarray, MPI_STATUS_IGNORE); MPI_File_close(&fp); // if (id != 0) { // for (k = 0; k < nz; ++k) // for (j = 0; j < ny; j++) { // for (i = 0; i < nx; i++) // buff[i] = data[_i(i, j, k)]; // MPI_Send(buff, nx, MPI_DOUBLE, 0, id, MPI_COMM_WORLD); // } // } else { // FILE *fd; // fd = fopen(file_name.c_str(), "w"); // for (kb = 0; kb < nbz; kb++) // for (k = 0; k < nz; ++k) // for (jb = 0; jb < nby; jb++) // for (j = 0; j < ny; j++) // for (ib = 0; ib < nbx; ib++) { // if (_ib(ib, jb, kb) == 0) // for (i = 0; i < nx; i++) // buff[i] = data[_i(i, j, k)]; // else // MPI_Recv(buff, nx, MPI_DOUBLE, _ib(ib, jb, kb), _ib(ib, jb, kb), MPI_COMM_WORLD, // &status); // for (i = 0; i < nx; i++) { //// printf("%.2f ", buff[i]); // fprintf(fd, "%.7e ", buff[i]); // } // } // fclose(fd); // } MPI_Finalize(); // std::cerr << "it: " << it << "\n"; free(buff); free(data); free(next); free(out_buff); return 0; }
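The solver above reduces the per-rank residual on the device and then combines it across MPI ranks (here via MPI_Allgather plus a host-side max loop). Below is a minimal, self-contained sketch of that reduction pattern, not the original code: the abs_diff functor and the global_max_diff name are illustrative, and MPI_Allreduce with MPI_MAX is used as a compact equivalent of the Allgather-and-loop approach.

#include <mpi.h>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/extrema.h>
#include <cmath>

// Element-wise |a - b|, usable on both host and device.
struct abs_diff {
    __host__ __device__ double operator()(double a, double b) const {
        return fabs(a - b);
    }
};

// Returns the global maximum of |next[i] - data[i]| over all MPI ranks.
// Assumes MPI_Init has already been called by the caller.
double global_max_diff(const thrust::device_vector<double>& next,
                       const thrust::device_vector<double>& data) {
    thrust::device_vector<double> diff(next.size());
    thrust::transform(next.begin(), next.end(), data.begin(),
                      diff.begin(), abs_diff());
    double local = *thrust::max_element(diff.begin(), diff.end());
    double global = 0.0;
    MPI_Allreduce(&local, &global, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
    return global;
}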
475723c0967599747ebfdaa9e926b9d01e416859.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! ******************* BEGIN Caffe Copyright Notice and Disclaimer ***************** * * COPYRIGHT * * All contributions by the University of California: * Copyright (c) 2014-2017 The Regents of the University of California (Regents) * All rights reserved. * * All other contributions: * Copyright (c) 2014-2017, the respective contributors * All rights reserved. * * Caffe uses a shared copyright model: each contributor holds copyright over * their contributions to Caffe. The project versioning records all such * contribution and copyright details. If a contributor wants to further mark * their specific copyright on a particular contribution, they should indicate * their copyright solely in the commit message of the change when it is * committed. * * LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, *this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE *FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL *DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR *SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER *CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, *OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE *OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * CONTRIBUTION AGREEMENT * * By contributing to the BVLC/caffe repository through pull-request, comment, * or otherwise, the contributor releases their content to the * license and copyright terms herein. * ***************** END Caffe Copyright Notice and Disclaimer ********************* * * Copyright (c) 2018 Microsoft * Licensed under The MIT License [see LICENSE for details] * \file modulated_deformable_im2col.cuh * \brief Function definitions of converting an image to * column matrix based on kernel, padding, dilation, and offset. * These functions are mainly used in deformable convolution operators. 
* \ref: https://arxiv.org/abs/1703.06211 * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng */ // modified from // https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu // modified from // https://github.com/open-mmlab/mmdetection/blob/master/mmdet/ops/dcn/src/deform_conv_cuda.cpp #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h> #include <torch/library.h> #include <THH/THHAtomics.cuh> #include "cuda_helpers.h" namespace vision { namespace ops { namespace { const int kMaxParallelImgs = 32; inline unsigned int GET_THREADS() { #ifdef __HIP_PLATFORM_HCC__ return 256; #endif if (at::cuda::getCurrentDeviceProperties()->major >= 6) { return 1024; } return 512; } inline unsigned int GET_BLOCKS( const unsigned int THREADS, const unsigned int N) { unsigned int kMaxGridNum = at::cuda::getCurrentDeviceProperties()->maxGridSize[0]; return ::min(kMaxGridNum, (N + THREADS - 1) / THREADS); } template <typename scalar_t> __device__ scalar_t bilinear_interpolate( const scalar_t* in, int height, int width, scalar_t h, scalar_t w) { if (h <= -1 || height <= h || w <= -1 || width <= w) { return 0; } int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; scalar_t lh = h - h_low; scalar_t lw = w - w_low; scalar_t hh = 1 - lh, hw = 1 - lw; scalar_t v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = in[h_low * width + w_low]; scalar_t v2 = 0; if (h_low >= 0 && w_high <= width - 1) v2 = in[h_low * width + w_high]; scalar_t v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = in[h_high * width + w_low]; scalar_t v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = in[h_high * width + w_high]; scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename scalar_t> __global__ void deformable_im2col_kernel( int n, const scalar_t* input_ptr, const scalar_t* offset_ptr, const scalar_t* mask_ptr, int height, int width, int weight_h, int weight_w, int pad_h, int pad_w, int stride_h, int stride_w, int dilation_h, int dilation_w, int batch_sz, int n_in_channels, int n_offset_grps, int out_h, int out_w, bool use_mask, scalar_t* columns_ptr) { CUDA_1D_KERNEL_LOOP(index, n) { const int out_x = index % out_w; const int out_y = (index / out_w) % out_h; const int out_b = (index / (out_w * out_h)) % batch_sz; const int in_c = index / (out_w * out_h * batch_sz); const int out_c = in_c * weight_h * weight_w; int c_per_offset_grp = n_in_channels / n_offset_grps; const int grp_idx = in_c / c_per_offset_grp; columns_ptr += (out_c * (batch_sz * out_h * out_w) + out_b * (out_h * out_w) + out_y * out_w + out_x); input_ptr += (out_b * (n_in_channels * height * width) + in_c * (height * width)); offset_ptr += (out_b * n_offset_grps + grp_idx) * 2 * weight_h * weight_w * out_h * out_w; if (use_mask) { mask_ptr += (out_b * n_offset_grps + grp_idx) * weight_h * weight_w * out_h * out_w; } for (int i = 0; i < weight_h; ++i) { for (int j = 0; j < weight_w; ++j) { const int mask_idx = i * weight_w + j; const int offset_idx = 2 * mask_idx; scalar_t mask_value = 1; if (use_mask) { mask_value = mask_ptr[mask_idx * (out_h * out_w) + out_y * out_w + out_x]; } const scalar_t offset_h = offset_ptr[offset_idx * (out_h * out_w) + out_y * out_w + out_x]; const scalar_t offset_w = offset_ptr [(offset_idx + 1) * (out_h * out_w) + out_y * out_w + out_x]; const scalar_t y 
= (out_y * stride_h - pad_h) + i * dilation_h + offset_h; const scalar_t x = (out_x * stride_w - pad_w) + j * dilation_w + offset_w; *columns_ptr = mask_value * bilinear_interpolate(input_ptr, height, width, y, x); columns_ptr += batch_sz * out_h * out_w; } } } } void deformable_im2col( const at::Tensor& input, const at::Tensor& data_offset, const at::Tensor& data_mask, int n_in_channels, int height, int width, int weight_h, int weight_w, int pad_h, int pad_w, int stride_h, int stride_w, int dilation_h, int dilation_w, int out_h, int out_w, int parallel_imgs, int deformable_group, bool use_mask, at::Tensor data_col) { int num_kernels = n_in_channels * out_h * out_w * parallel_imgs; const unsigned int threads = GET_THREADS(); const unsigned int blocks = GET_BLOCKS(threads, num_kernels); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "deformable_im2col", ([&] { hipLaunchKernelGGL(( deformable_im2col_kernel), dim3(blocks), dim3(threads), 0, 0, num_kernels, input.data_ptr<scalar_t>(), data_offset.data_ptr<scalar_t>(), data_mask.data_ptr<scalar_t>(), height, width, weight_h, weight_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, parallel_imgs, n_in_channels, deformable_group, out_h, out_w, use_mask, data_col.data_ptr<scalar_t>()); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in deformable_im2col: %s\n", hipGetErrorString(err)); } } int get_greatest_divisor_below_bound(int n, int bound) { for (int k = bound; k > 1; --k) { if (n % k == 0) { return k; } } return 1; } template <typename scalar_t> __global__ void deformable_col2im_kernel( int n, const scalar_t* col, const scalar_t* offset_ptr, const scalar_t* mask_ptr, int channels, int height, int width, int kernel_h, int kernel_w, int pad_h, int pad_w, int stride_h, int stride_w, int dilation_h, int dilation_w, int batch_sz, int n_offset_grps, int out_h, int out_w, bool use_mask, scalar_t* grad_im) { CUDA_1D_KERNEL_LOOP(index, n) { const int out_x = index % out_w; const int out_y = (index / out_w) % out_h; const int b = (index / (out_w * out_h)) % batch_sz; const int j = (index / (out_w * out_h * batch_sz)) % kernel_w; const int i = (index / (out_w * out_h * batch_sz * kernel_w)) % kernel_h; const int c = index / (out_w * out_h * batch_sz * kernel_w * kernel_h); int c_per_offset_grp = channels / n_offset_grps; const int offset_grp = c / c_per_offset_grp; offset_ptr += (b * n_offset_grps + offset_grp) * 2 * kernel_h * kernel_w * out_h * out_w; if (use_mask) { mask_ptr += (b * n_offset_grps + offset_grp) * kernel_h * kernel_w * out_h * out_w; } const int mask_idx = i * kernel_w + j; const int offset_idx = 2 * mask_idx; const int offset_h_ptr = ((offset_idx)*out_h + out_y) * out_w + out_x; const int offset_w_ptr = ((offset_idx + 1) * out_h + out_y) * out_w + out_x; const scalar_t offset_h = offset_ptr[offset_h_ptr]; const scalar_t offset_w = offset_ptr[offset_w_ptr]; scalar_t mask_value = 1; if (use_mask) { mask_value = mask_ptr[(mask_idx * out_h + out_y) * out_w + out_x]; } const scalar_t y = (out_y * stride_h - pad_h) + i * dilation_h + offset_h; const scalar_t x = (out_x * stride_w - pad_w) + j * dilation_w + offset_w; for (int dy = -1; dy <= 1; dy++) { for (int dx = -1; dx <= 1; dx++) { int yp = int(y) + dy; int xp = int(x) + dx; if (0 <= yp && yp < height && 0 <= xp && xp < width && std::abs(y - yp) < 1 && std::abs(x - xp) < 1) { int grad_pos = ((b * channels + c) * height + yp) * width + xp; scalar_t weight = (1 - std::abs(y - yp)) * (1 - std::abs(x - xp)); atomicAdd(grad_im + grad_pos, 
mask_value * weight * col[index]); } } } } } void compute_grad_input( const at::Tensor& columns, const at::Tensor& offset, const at::Tensor& mask, int channels, int height, int width, int weight_h, int weight_w, int pad_h, int pad_w, int stride_h, int stride_w, int dilation_h, int dilation_w, int parallel_imgs, int n_offset_grps, bool use_mask, at::Tensor grad_im) { int out_h = (height + 2 * pad_h - (dilation_h * (weight_h - 1) + 1)) / stride_h + 1; int out_w = (width + 2 * pad_w - (dilation_w * (weight_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * weight_h * weight_w * out_h * out_w * parallel_imgs; const unsigned int threads = GET_THREADS(); const unsigned int blocks = GET_BLOCKS(threads, num_kernels); AT_DISPATCH_FLOATING_TYPES_AND_HALF( columns.scalar_type(), "compute_grad_input", ([&] { hipLaunchKernelGGL(( deformable_col2im_kernel), dim3(blocks), dim3(threads), 0, 0, num_kernels, columns.data_ptr<scalar_t>(), offset.data_ptr<scalar_t>(), mask.data_ptr<scalar_t>(), channels, height, width, weight_h, weight_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, parallel_imgs, n_offset_grps, out_h, out_w, use_mask, grad_im.data_ptr<scalar_t>()); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in compute_grad_input: %s\n", hipGetErrorString(err)); } } template <typename scalar_t> __device__ scalar_t get_coordinate_weight( const scalar_t* im_data, int height, int width, scalar_t y, scalar_t x, bool is_y_direction) { int y_l = floor(y); int x_l = floor(x); int y_h = y_l + 1; int x_h = x_l + 1; bool valid_y_l = 0 <= y_l && y_l < height; bool valid_y_h = 0 <= y_h && y_h < height; bool valid_x_l = 0 <= x_l && x_l < width; bool valid_x_h = 0 <= x_h && x_h < width; scalar_t zero = 0; scalar_t v_yx = (valid_y_l && valid_x_l) ? im_data[y_l * width + x_l] : zero; scalar_t v_yX = (valid_y_l && valid_x_h) ? im_data[y_l * width + x_h] : zero; scalar_t v_Yx = (valid_y_h && valid_x_l) ? im_data[y_h * width + x_l] : zero; scalar_t v_YX = (valid_y_h && valid_x_h) ? 
im_data[y_h * width + x_h] : zero; if (is_y_direction) { scalar_t dx = x - x_l; return dx * (v_YX - v_yX) + (1 - dx) * (v_Yx - v_yx); } else { scalar_t dy = y - y_l; return dy * (v_YX - v_Yx) + (1 - dy) * (v_yX - v_yx); } } template <typename scalar_t> __global__ void deformable_col2im_coord_kernel( int n, const scalar_t* col_ptr, const scalar_t* im_ptr, const scalar_t* offset_ptr, const scalar_t* mask_ptr, int channels, int height, int width, int weight_h, int weight_w, int pad_h, int pad_w, int stride_h, int stride_w, int dilation_h, int dilation_w, int batch_sz, int offset_channels, int n_offset_grps, int out_h, int out_w, const bool use_mask, scalar_t* grad_offset, scalar_t* grad_mask) { CUDA_1D_KERNEL_LOOP(index, n) { scalar_t grad_offset_val = 0; scalar_t grad_mask_val = 0; int w = index % out_w; int h = (index / out_w) % out_h; int w_w = (index / (out_w * out_h * 2)) % weight_w; int w_h = (index / (out_w * out_h * 2 * weight_w)) % weight_h; int c = (index / (out_w * out_h)) % offset_channels; int b = index / (out_w * out_h * offset_channels); const int offset_grp = c / (2 * weight_h * weight_w); const int col_step = weight_h * weight_w; int c_per_offset_grp = channels / n_offset_grps; col_ptr += offset_grp * c_per_offset_grp * weight_h * weight_w * batch_sz * out_w * out_h; im_ptr += (b * n_offset_grps + offset_grp) * c_per_offset_grp * height * width; offset_ptr += (b * n_offset_grps + offset_grp) * 2 * weight_h * weight_w * out_h * out_w; if (use_mask) { mask_ptr += (b * n_offset_grps + offset_grp) * weight_h * weight_w * out_h * out_w; } const int offset_c = c - offset_grp * 2 * weight_h * weight_w; const bool is_y_direction = offset_c % 2 == 0; const int c_bound = c_per_offset_grp * weight_h * weight_w; for (int col_c = (offset_c / 2); col_c < c_bound; col_c += col_step) { const int col_pos = (((col_c * batch_sz + b) * out_h) + h) * out_w + w; int out_x = col_pos % out_w; int out_y = (col_pos / out_w) % out_h; int j = (col_pos / (out_w * out_h * batch_sz)) % weight_w; int i = (col_pos / (out_w * out_h * batch_sz * weight_w)) % weight_h; const int mask_idx = i * weight_w + j; const int offset_h_ptr = (((2 * mask_idx) * out_h + out_y) * out_w + out_x); const int offset_w_ptr = (((2 * mask_idx + 1) * out_h + out_y) * out_w + out_x); const scalar_t offset_h = offset_ptr[offset_h_ptr]; const scalar_t offset_w = offset_ptr[offset_w_ptr]; scalar_t mask_value = 1; if (use_mask) { mask_value = mask_ptr[(mask_idx * out_h + out_y) * out_w + out_x]; } scalar_t y = (out_y * stride_h - pad_h) + i * dilation_h + offset_h; scalar_t x = (out_x * stride_w - pad_w) + j * dilation_w + offset_w; const scalar_t weight = get_coordinate_weight(im_ptr, height, width, y, x, is_y_direction); grad_offset_val += mask_value * weight * col_ptr[col_pos]; if (use_mask && is_y_direction) { grad_mask_val += col_ptr[col_pos] * bilinear_interpolate(im_ptr, height, width, y, x); } im_ptr += height * width; } grad_offset[index] = grad_offset_val; if (use_mask && is_y_direction) { const int idx = ((((b * n_offset_grps + offset_grp) * weight_h + w_h) * weight_w + w_w) * out_h + h) * out_w + w; grad_mask[idx] = grad_mask_val; } } } void compute_grad_offset_and_mask( const at::Tensor& columns, const at::Tensor& input, const at::Tensor& offset, const at::Tensor& mask, int channels, int height, int width, int weight_h, int weight_w, int pad_h, int pad_w, int stride_h, int stride_w, int dilation_h, int dilation_w, int parallel_imgs, int n_offset_grps, bool use_mask, at::Tensor grad_offset, at::Tensor grad_mask) { int out_h 
= (height + 2 * pad_h - (dilation_h * (weight_h - 1) + 1)) / stride_h + 1; int out_w = (width + 2 * pad_w - (dilation_w * (weight_w - 1) + 1)) / stride_w + 1; int num_kernels = out_h * out_w * 2 * weight_h * weight_w * n_offset_grps * parallel_imgs; const unsigned int threads = GET_THREADS(); const unsigned int blocks = GET_BLOCKS(threads, num_kernels); AT_DISPATCH_FLOATING_TYPES_AND_HALF( columns.scalar_type(), "compute_grad_offset_and_mask", ([&] { hipLaunchKernelGGL(( deformable_col2im_coord_kernel), dim3(blocks), dim3(threads), 0, 0, num_kernels, columns.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), offset.data_ptr<scalar_t>(), mask.data_ptr<scalar_t>(), channels, height, width, weight_h, weight_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, parallel_imgs, 2 * weight_h * weight_w * n_offset_grps, n_offset_grps, out_h, out_w, use_mask, grad_offset.data_ptr<scalar_t>(), grad_mask.data_ptr<scalar_t>()); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf( "error in compute_grad_offset_and_mask: %s\n", hipGetErrorString(err)); } } std::tuple<at::Tensor, at::Tensor, at::Tensor> backward_gradient_inputs( at::Tensor input, at::Tensor weight, at::Tensor offset, at::Tensor mask, at::Tensor grad_out, int stride_h, int stride_w, int pad_h, int pad_w, int dilation_h, int dilation_w, int n_weight_grps, int n_offset_grps, int n_parallel_imgs, bool use_mask) { at::DeviceGuard guard(input.device()); int batch_sz = input.size(0); long n_in_channels = input.size(1); long in_h = input.size(2); long in_w = input.size(3); n_parallel_imgs = ::min(batch_sz, n_parallel_imgs); long n_out_channels = weight.size(0); int weight_h = weight.size(2); int weight_w = weight.size(3); long out_w = (in_w + 2 * pad_w - (dilation_w * (weight_w - 1) + 1)) / stride_w + 1; long out_h = (in_h + 2 * pad_h - (dilation_h * (weight_h - 1) + 1)) / stride_h + 1; auto grad_input = at::zeros_like(input); auto grad_offset = at::zeros_like(offset); auto grad_mask = at::zeros_like(mask); if (batch_sz == 0) { return std::make_tuple(grad_input, grad_offset, grad_mask); } auto columns = at::empty( {n_in_channels * weight_w * weight_h, n_parallel_imgs * out_h * out_w}, input.options()); // Separate into blocks grad_input = grad_input.reshape( {batch_sz / n_parallel_imgs, n_parallel_imgs, n_in_channels, in_h, in_w}); input = input.reshape( {batch_sz / n_parallel_imgs, n_parallel_imgs, n_in_channels, in_h, in_w}); grad_offset = grad_offset.reshape( {batch_sz / n_parallel_imgs, n_parallel_imgs, n_offset_grps * 2 * weight_h * weight_w, out_h, out_w}); offset = offset.reshape( {batch_sz / n_parallel_imgs, n_parallel_imgs, n_offset_grps * 2 * weight_h * weight_w, out_h, out_w}); if (use_mask) { grad_mask = grad_mask.reshape( {batch_sz / n_parallel_imgs, n_parallel_imgs, n_offset_grps * weight_h * weight_w, out_h, out_w}); mask = mask.reshape( {batch_sz / n_parallel_imgs, n_parallel_imgs, n_offset_grps * weight_h * weight_w, out_h, out_w}); } grad_out = grad_out .reshape( {batch_sz / n_parallel_imgs, n_parallel_imgs, n_weight_grps, n_out_channels / n_weight_grps, out_h, out_w}) .permute({0, 2, 3, 1, 4, 5}); weight = weight.reshape( {n_weight_grps, weight.size(0) / n_weight_grps, weight.size(1), weight.size(2), weight.size(3)}); columns = columns.view( {n_weight_grps, columns.size(0) / n_weight_grps, columns.size(1)}); for (int elt = 0; elt < batch_sz / n_parallel_imgs; elt++) { columns.zero_(); // Separate into weight groups for (int g = 0; g < n_weight_grps; g++) { columns[g] = columns[g].addmm_( 
weight[g].flatten(1).transpose(0, 1), grad_out[elt][g].flatten(1)); } compute_grad_offset_and_mask( columns, input[elt], offset[elt], mask[elt], n_in_channels, in_h, in_w, weight_h, weight_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, n_parallel_imgs, n_offset_grps, use_mask, grad_offset[elt], grad_mask[elt]); compute_grad_input( columns, offset[elt], mask[elt], n_in_channels, in_h, in_w, weight_h, weight_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, n_parallel_imgs, n_offset_grps, use_mask, grad_input[elt]); } grad_input = grad_input.view({batch_sz, n_in_channels, in_h, in_w}); grad_offset = grad_offset.view( {batch_sz, n_offset_grps * 2 * weight_h * weight_w, out_h, out_w}); if (use_mask) { grad_mask = grad_mask.view( {batch_sz, n_offset_grps * weight_h * weight_w, out_h, out_w}); } return std::make_tuple(grad_input, grad_offset, grad_mask); } at::Tensor backward_gradient_parameters( at::Tensor input, const at::Tensor& weight, at::Tensor offset, at::Tensor mask, const at::Tensor& grad_out, int stride_h, int stride_w, int pad_h, int pad_w, int dilation_h, int dilation_w, int n_weight_grps, int n_offset_grps, int n_parallel_imgs, bool use_mask) { at::DeviceGuard guard(input.device()); int batch_sz = input.size(0); long n_in_channels = input.size(1); long in_h = input.size(2); long in_w = input.size(3); n_parallel_imgs = ::min(batch_sz, n_parallel_imgs); long n_out_channels = weight.size(0); int weight_h = weight.size(2); int weight_w = weight.size(3); long out_h = grad_out.size(2); long out_w = grad_out.size(3); auto grad_weight = at::zeros_like(weight); if (batch_sz == 0) { return grad_weight; } at::Tensor grad_out_buf = grad_out .reshape( {batch_sz / n_parallel_imgs, n_parallel_imgs, n_weight_grps, n_out_channels / n_weight_grps, out_h, out_w}) .permute({0, 2, 3, 1, 4, 5}) .contiguous(); input = input.reshape( {batch_sz / n_parallel_imgs, n_parallel_imgs, n_in_channels, in_h, in_w}); offset = offset.reshape( {batch_sz / n_parallel_imgs, n_parallel_imgs, n_offset_grps * 2 * weight_h * weight_w, out_h, out_w}); if (use_mask) { mask = mask.reshape( {batch_sz / n_parallel_imgs, n_parallel_imgs, n_offset_grps * weight_h * weight_w, out_h, out_w}); } grad_weight = grad_weight.reshape( {n_weight_grps, grad_weight.size(0) / n_weight_grps, grad_weight.size(1), grad_weight.size(2), grad_weight.size(3)}); auto columns = at::empty( {n_weight_grps, n_in_channels * weight_w * weight_h / n_weight_grps, n_parallel_imgs * out_h * out_w}, input.options()); for (int elt = 0; elt < batch_sz / n_parallel_imgs; elt++) { deformable_im2col( input[elt], offset[elt], mask[elt], n_in_channels, in_h, in_w, weight_h, weight_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, out_h, out_w, n_parallel_imgs, n_offset_grps, use_mask, columns); for (int g = 0; g < n_weight_grps; g++) { grad_weight[g] = grad_weight[g] .flatten(1) .addmm_( grad_out_buf[elt][g].flatten(1), columns[g].transpose(1, 0)) .view_as(grad_weight[g]); } } grad_weight = grad_weight.view( {grad_weight.size(0) * grad_weight.size(1), grad_weight.size(2), grad_weight.size(3), grad_weight.size(4)}); return grad_weight; } at::Tensor deform_conv2d_forward_kernel( const at::Tensor& input, const at::Tensor& weight, const at::Tensor& offset, const at::Tensor& mask, const at::Tensor& bias, int64_t stride_h, int64_t stride_w, int64_t pad_h, int64_t pad_w, int64_t dilation_h, int64_t dilation_w, int64_t n_weight_grps, int64_t n_offset_grps, bool use_mask) { at::Tensor input_c = input.contiguous(); at::Tensor offset_c = 
offset.contiguous(); at::Tensor weight_c = weight.contiguous(); at::Tensor mask_c = mask.contiguous(); at::Tensor bias_c = bias.contiguous(); TORCH_CHECK(input_c.ndimension() == 4); TORCH_CHECK(offset_c.ndimension() == 4); TORCH_CHECK(!use_mask || mask_c.ndimension() == 4); TORCH_CHECK(weight_c.ndimension() == 4); TORCH_CHECK(input_c.is_cuda(), "input must be a CUDA tensor"); at::DeviceGuard guard(input_c.device()); int batch_sz = input_c.size(0); int in_channels = input_c.size(1); int in_h = input_c.size(2); int in_w = input_c.size(3); int n_parallel_imgs = get_greatest_divisor_below_bound(batch_sz, kMaxParallelImgs); int out_channels = weight_c.size(0); int weight_h = weight_c.size(2); int weight_w = weight_c.size(3); int ker_h = dilation_h * (weight_h - 1) + 1; int ker_w = dilation_w * (weight_w - 1) + 1; int out_h = ((in_h + 2 * pad_h - ker_h) / stride_h) + 1; int out_w = ((in_w + 2 * pad_w - ker_w) / stride_w) + 1; TORCH_CHECK( weight_h > 0 && weight_w > 0, "weight_h: ", weight_h, " weight_w: ", weight_w); TORCH_CHECK( stride_h > 0 && stride_w > 0, "stride_h: ", stride_h, " stride_w: ", stride_w); TORCH_CHECK(pad_h >= 0 && pad_w >= 0, "pad_h: ", pad_h, " pad_w: ", pad_w); TORCH_CHECK( dilation_h > 0 && dilation_w > 0, "dilation_h: ", dilation_h, " dilation_w: ", dilation_w); TORCH_CHECK(weight_c.size(1) * n_weight_grps == input_c.size(1)); TORCH_CHECK(weight_c.size(0) % n_weight_grps == 0); TORCH_CHECK( (offset_c.size(1) == n_offset_grps * 2 * weight_h * weight_w), "offset.shape[1] is not valid: got: ", offset_c.size(1), " expected: ", n_offset_grps * 2 * weight_h * weight_w); TORCH_CHECK( (!use_mask || mask_c.size(1) == n_offset_grps * weight_h * weight_w), "mask.shape[1] is not valid: got: ", mask_c.size(1), " expected: ", n_offset_grps * weight_h * weight_w); TORCH_CHECK(input_c.size(1) % n_offset_grps == 0); TORCH_CHECK( (offset_c.size(0) == input_c.size(0)), "invalid batch size of offset"); TORCH_CHECK( (offset_c.size(2) == out_h && offset_c.size(3) == out_w), "offset output dims: (", offset_c.size(2), ", ", offset_c.size(3), ") - ", "computed output dims: (", out_h, ", ", out_w, ")"); TORCH_CHECK( (mask_c.size(0) == input_c.size(0)), "invalid batch size of mask"); TORCH_CHECK( (!use_mask || (mask_c.size(2) == out_h && mask_c.size(3) == out_w)), "mask output dims: (", mask_c.size(2), ", ", mask_c.size(3), ") - ", "computed output dims: (", out_h, ", ", out_w, ")"); TORCH_CHECK( out_h > 0 && out_w > 0, "Calculated output size too small - out_h: ", out_h, " out_w: ", out_w); auto out = at::zeros({batch_sz, out_channels, out_h, out_w}, input_c.options()); if (batch_sz == 0) { return out; } // Separate batches into blocks out = out.view( {batch_sz / n_parallel_imgs, n_parallel_imgs, out_channels, out_h, out_w}); input_c = input_c.view( {batch_sz / n_parallel_imgs, n_parallel_imgs, in_channels, in_h, in_w}); offset_c = offset_c.view( {batch_sz / n_parallel_imgs, n_parallel_imgs, n_offset_grps * 2 * weight_h * weight_w, out_h, out_w}); if (use_mask) { mask_c = mask_c.view( {batch_sz / n_parallel_imgs, n_parallel_imgs, n_offset_grps * weight_h * weight_w, out_h, out_w}); } at::Tensor out_buf = at::zeros( {batch_sz / n_parallel_imgs, out_channels, n_parallel_imgs * out_h, out_w}, out.options()); // Separate channels into convolution groups out_buf = out_buf.view( {out_buf.size(0), n_weight_grps, out_buf.size(1) / n_weight_grps, out_buf.size(2), out_buf.size(3)}); weight_c = weight_c.view( {n_weight_grps, weight_c.size(0) / n_weight_grps, weight_c.size(1), weight_c.size(2), 
weight_c.size(3)}); // Sample points and perform convolution auto columns = at::zeros( {in_channels * weight_h * weight_w, n_parallel_imgs * out_h * out_w}, input_c.options()); for (int b = 0; b < batch_sz / n_parallel_imgs; b++) { deformable_im2col( input_c[b], offset_c[b], mask_c[b], in_channels, in_h, in_w, weight_h, weight_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, out_h, out_w, n_parallel_imgs, n_offset_grps, use_mask, columns); columns = columns.view( {n_weight_grps, columns.size(0) / n_weight_grps, columns.size(1)}); for (int g = 0; g < n_weight_grps; g++) { out_buf[b][g] = out_buf[b][g] .flatten(1) .addmm_(weight_c[g].flatten(1), columns[g]) .view_as(out_buf[b][g]); } columns = columns.view({columns.size(0) * columns.size(1), columns.size(2)}); } out_buf = out_buf.view( {batch_sz / n_parallel_imgs, out_channels, n_parallel_imgs, out_h, out_w}); out_buf.transpose_(1, 2); out.copy_(out_buf); out = out.view({batch_sz, out_channels, out_h, out_w}); return out + bias_c.view({1, out_channels, 1, 1}); } std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor> deform_conv2d_backward_kernel( const at::Tensor& grad_out, const at::Tensor& input, const at::Tensor& weight, const at::Tensor& offset, const at::Tensor& mask, const at::Tensor& bias, int64_t stride_h, int64_t stride_w, int64_t pad_h, int64_t pad_w, int64_t dilation_h, int64_t dilation_w, int64_t n_weight_grps, int64_t n_offset_grps, bool use_mask) { at::Tensor grad_out_c = grad_out.contiguous(); at::Tensor input_c = input.contiguous(); at::Tensor weight_c = weight.contiguous(); at::Tensor offset_c = offset.contiguous(); at::Tensor mask_c = mask.contiguous(); at::Tensor bias_c = bias.contiguous(); const int batch_sz = input_c.size(0); const int n_parallel_imgs = get_greatest_divisor_below_bound(batch_sz, kMaxParallelImgs); auto grad_input_and_offset_and_mask = backward_gradient_inputs( input_c, weight_c, offset_c, mask_c, grad_out_c, stride_h, stride_w, pad_h, pad_w, dilation_h, dilation_w, n_weight_grps, n_offset_grps, n_parallel_imgs, use_mask); auto grad_input = std::get<0>(grad_input_and_offset_and_mask); auto grad_offset = std::get<1>(grad_input_and_offset_and_mask); auto grad_mask = std::get<2>(grad_input_and_offset_and_mask); auto grad_weight = backward_gradient_parameters( input_c, weight_c, offset_c, mask_c, grad_out_c, stride_h, stride_w, pad_h, pad_w, dilation_h, dilation_w, n_weight_grps, n_offset_grps, n_parallel_imgs, use_mask); auto value = grad_out_c.sum({0, 2, 3}); auto grad_bias = at::ones_like(bias_c) * value; return std::make_tuple( grad_input, grad_weight, grad_offset, grad_mask, grad_bias); } } // namespace TORCH_LIBRARY_IMPL(torchvision, CUDA, m) { m.impl( TORCH_SELECTIVE_NAME("torchvision::deform_conv2d"), TORCH_FN(deform_conv2d_forward_kernel)); m.impl( TORCH_SELECTIVE_NAME("torchvision::_deform_conv2d_backward"), TORCH_FN(deform_conv2d_backward_kernel)); } } // namespace ops } // namespace vision
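The kernels in the file above are launched over a flat 1-D index space and iterate with torchvision's CUDA_1D_KERNEL_LOOP grid-stride macro, with GET_THREADS/GET_BLOCKS choosing the launch configuration. The standalone sketch below illustrates that same launch-sizing and grid-stride pattern on a trivial saxpy kernel; the kernel itself, the 512-thread block size and the managed-memory setup are assumptions for illustration, not part of the torchvision sources.

#include <cuda_runtime.h>
#include <cstdio>

// Grid-stride loop: each thread processes i, i + blockDim*gridDim, ... so any
// grid size covers the whole range (the pattern CUDA_1D_KERNEL_LOOP expands to).
__global__ void saxpy_kernel(int n, float a, const float* x, float* y) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x) {
        y[i] = a * x[i] + y[i];
    }
}

int main() {
    const int n = 1 << 20;
    float *x, *y;
    cudaMallocManaged(&x, n * sizeof(float));
    cudaMallocManaged(&y, n * sizeof(float));
    for (int i = 0; i < n; ++i) { x[i] = 1.0f; y[i] = 2.0f; }

    const unsigned int threads = 512;                        // cf. GET_THREADS()
    const unsigned int blocks = (n + threads - 1) / threads; // cf. GET_BLOCKS()
    saxpy_kernel<<<blocks, threads>>>(n, 3.0f, x, y);
    cudaDeviceSynchronize();
    printf("y[0] = %f\n", y[0]); // expected: 5.0
    cudaFree(x);
    cudaFree(y);
    return 0;
}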
475723c0967599747ebfdaa9e926b9d01e416859.cu
/*! ******************* BEGIN Caffe Copyright Notice and Disclaimer ***************** * * COPYRIGHT * * All contributions by the University of California: * Copyright (c) 2014-2017 The Regents of the University of California (Regents) * All rights reserved. * * All other contributions: * Copyright (c) 2014-2017, the respective contributors * All rights reserved. * * Caffe uses a shared copyright model: each contributor holds copyright over * their contributions to Caffe. The project versioning records all such * contribution and copyright details. If a contributor wants to further mark * their specific copyright on a particular contribution, they should indicate * their copyright solely in the commit message of the change when it is * committed. * * LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, *this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE *FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL *DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR *SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER *CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, *OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE *OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * CONTRIBUTION AGREEMENT * * By contributing to the BVLC/caffe repository through pull-request, comment, * or otherwise, the contributor releases their content to the * license and copyright terms herein. * ***************** END Caffe Copyright Notice and Disclaimer ********************* * * Copyright (c) 2018 Microsoft * Licensed under The MIT License [see LICENSE for details] * \file modulated_deformable_im2col.cuh * \brief Function definitions of converting an image to * column matrix based on kernel, padding, dilation, and offset. * These functions are mainly used in deformable convolution operators. 
* \ref: https://arxiv.org/abs/1703.06211 * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng */ // modified from // https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu // modified from // https://github.com/open-mmlab/mmdetection/blob/master/mmdet/ops/dcn/src/deform_conv_cuda.cpp #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <c10/cuda/CUDAGuard.h> #include <torch/library.h> #include <THC/THCAtomics.cuh> #include "cuda_helpers.h" namespace vision { namespace ops { namespace { const int kMaxParallelImgs = 32; inline unsigned int GET_THREADS() { #ifdef __HIP_PLATFORM_HCC__ return 256; #endif if (at::cuda::getCurrentDeviceProperties()->major >= 6) { return 1024; } return 512; } inline unsigned int GET_BLOCKS( const unsigned int THREADS, const unsigned int N) { unsigned int kMaxGridNum = at::cuda::getCurrentDeviceProperties()->maxGridSize[0]; return std::min(kMaxGridNum, (N + THREADS - 1) / THREADS); } template <typename scalar_t> __device__ scalar_t bilinear_interpolate( const scalar_t* in, int height, int width, scalar_t h, scalar_t w) { if (h <= -1 || height <= h || w <= -1 || width <= w) { return 0; } int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; scalar_t lh = h - h_low; scalar_t lw = w - w_low; scalar_t hh = 1 - lh, hw = 1 - lw; scalar_t v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = in[h_low * width + w_low]; scalar_t v2 = 0; if (h_low >= 0 && w_high <= width - 1) v2 = in[h_low * width + w_high]; scalar_t v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = in[h_high * width + w_low]; scalar_t v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = in[h_high * width + w_high]; scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename scalar_t> __global__ void deformable_im2col_kernel( int n, const scalar_t* input_ptr, const scalar_t* offset_ptr, const scalar_t* mask_ptr, int height, int width, int weight_h, int weight_w, int pad_h, int pad_w, int stride_h, int stride_w, int dilation_h, int dilation_w, int batch_sz, int n_in_channels, int n_offset_grps, int out_h, int out_w, bool use_mask, scalar_t* columns_ptr) { CUDA_1D_KERNEL_LOOP(index, n) { const int out_x = index % out_w; const int out_y = (index / out_w) % out_h; const int out_b = (index / (out_w * out_h)) % batch_sz; const int in_c = index / (out_w * out_h * batch_sz); const int out_c = in_c * weight_h * weight_w; int c_per_offset_grp = n_in_channels / n_offset_grps; const int grp_idx = in_c / c_per_offset_grp; columns_ptr += (out_c * (batch_sz * out_h * out_w) + out_b * (out_h * out_w) + out_y * out_w + out_x); input_ptr += (out_b * (n_in_channels * height * width) + in_c * (height * width)); offset_ptr += (out_b * n_offset_grps + grp_idx) * 2 * weight_h * weight_w * out_h * out_w; if (use_mask) { mask_ptr += (out_b * n_offset_grps + grp_idx) * weight_h * weight_w * out_h * out_w; } for (int i = 0; i < weight_h; ++i) { for (int j = 0; j < weight_w; ++j) { const int mask_idx = i * weight_w + j; const int offset_idx = 2 * mask_idx; scalar_t mask_value = 1; if (use_mask) { mask_value = mask_ptr[mask_idx * (out_h * out_w) + out_y * out_w + out_x]; } const scalar_t offset_h = offset_ptr[offset_idx * (out_h * out_w) + out_y * out_w + out_x]; const scalar_t offset_w = offset_ptr [(offset_idx + 1) * (out_h * out_w) + out_y * out_w + out_x]; const scalar_t y = (out_y * stride_h - 
pad_h) + i * dilation_h + offset_h; const scalar_t x = (out_x * stride_w - pad_w) + j * dilation_w + offset_w; *columns_ptr = mask_value * bilinear_interpolate(input_ptr, height, width, y, x); columns_ptr += batch_sz * out_h * out_w; } } } } void deformable_im2col( const at::Tensor& input, const at::Tensor& data_offset, const at::Tensor& data_mask, int n_in_channels, int height, int width, int weight_h, int weight_w, int pad_h, int pad_w, int stride_h, int stride_w, int dilation_h, int dilation_w, int out_h, int out_w, int parallel_imgs, int deformable_group, bool use_mask, at::Tensor data_col) { int num_kernels = n_in_channels * out_h * out_w * parallel_imgs; const unsigned int threads = GET_THREADS(); const unsigned int blocks = GET_BLOCKS(threads, num_kernels); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "deformable_im2col", ([&] { deformable_im2col_kernel<<<blocks, threads>>>( num_kernels, input.data_ptr<scalar_t>(), data_offset.data_ptr<scalar_t>(), data_mask.data_ptr<scalar_t>(), height, width, weight_h, weight_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, parallel_imgs, n_in_channels, deformable_group, out_h, out_w, use_mask, data_col.data_ptr<scalar_t>()); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in deformable_im2col: %s\n", cudaGetErrorString(err)); } } int get_greatest_divisor_below_bound(int n, int bound) { for (int k = bound; k > 1; --k) { if (n % k == 0) { return k; } } return 1; } template <typename scalar_t> __global__ void deformable_col2im_kernel( int n, const scalar_t* col, const scalar_t* offset_ptr, const scalar_t* mask_ptr, int channels, int height, int width, int kernel_h, int kernel_w, int pad_h, int pad_w, int stride_h, int stride_w, int dilation_h, int dilation_w, int batch_sz, int n_offset_grps, int out_h, int out_w, bool use_mask, scalar_t* grad_im) { CUDA_1D_KERNEL_LOOP(index, n) { const int out_x = index % out_w; const int out_y = (index / out_w) % out_h; const int b = (index / (out_w * out_h)) % batch_sz; const int j = (index / (out_w * out_h * batch_sz)) % kernel_w; const int i = (index / (out_w * out_h * batch_sz * kernel_w)) % kernel_h; const int c = index / (out_w * out_h * batch_sz * kernel_w * kernel_h); int c_per_offset_grp = channels / n_offset_grps; const int offset_grp = c / c_per_offset_grp; offset_ptr += (b * n_offset_grps + offset_grp) * 2 * kernel_h * kernel_w * out_h * out_w; if (use_mask) { mask_ptr += (b * n_offset_grps + offset_grp) * kernel_h * kernel_w * out_h * out_w; } const int mask_idx = i * kernel_w + j; const int offset_idx = 2 * mask_idx; const int offset_h_ptr = ((offset_idx)*out_h + out_y) * out_w + out_x; const int offset_w_ptr = ((offset_idx + 1) * out_h + out_y) * out_w + out_x; const scalar_t offset_h = offset_ptr[offset_h_ptr]; const scalar_t offset_w = offset_ptr[offset_w_ptr]; scalar_t mask_value = 1; if (use_mask) { mask_value = mask_ptr[(mask_idx * out_h + out_y) * out_w + out_x]; } const scalar_t y = (out_y * stride_h - pad_h) + i * dilation_h + offset_h; const scalar_t x = (out_x * stride_w - pad_w) + j * dilation_w + offset_w; for (int dy = -1; dy <= 1; dy++) { for (int dx = -1; dx <= 1; dx++) { int yp = int(y) + dy; int xp = int(x) + dx; if (0 <= yp && yp < height && 0 <= xp && xp < width && std::abs(y - yp) < 1 && std::abs(x - xp) < 1) { int grad_pos = ((b * channels + c) * height + yp) * width + xp; scalar_t weight = (1 - std::abs(y - yp)) * (1 - std::abs(x - xp)); atomicAdd(grad_im + grad_pos, mask_value * weight * col[index]); } } } } } void 
compute_grad_input( const at::Tensor& columns, const at::Tensor& offset, const at::Tensor& mask, int channels, int height, int width, int weight_h, int weight_w, int pad_h, int pad_w, int stride_h, int stride_w, int dilation_h, int dilation_w, int parallel_imgs, int n_offset_grps, bool use_mask, at::Tensor grad_im) { int out_h = (height + 2 * pad_h - (dilation_h * (weight_h - 1) + 1)) / stride_h + 1; int out_w = (width + 2 * pad_w - (dilation_w * (weight_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * weight_h * weight_w * out_h * out_w * parallel_imgs; const unsigned int threads = GET_THREADS(); const unsigned int blocks = GET_BLOCKS(threads, num_kernels); AT_DISPATCH_FLOATING_TYPES_AND_HALF( columns.scalar_type(), "compute_grad_input", ([&] { deformable_col2im_kernel<<<blocks, threads>>>( num_kernels, columns.data_ptr<scalar_t>(), offset.data_ptr<scalar_t>(), mask.data_ptr<scalar_t>(), channels, height, width, weight_h, weight_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, parallel_imgs, n_offset_grps, out_h, out_w, use_mask, grad_im.data_ptr<scalar_t>()); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in compute_grad_input: %s\n", cudaGetErrorString(err)); } } template <typename scalar_t> __device__ scalar_t get_coordinate_weight( const scalar_t* im_data, int height, int width, scalar_t y, scalar_t x, bool is_y_direction) { int y_l = floor(y); int x_l = floor(x); int y_h = y_l + 1; int x_h = x_l + 1; bool valid_y_l = 0 <= y_l && y_l < height; bool valid_y_h = 0 <= y_h && y_h < height; bool valid_x_l = 0 <= x_l && x_l < width; bool valid_x_h = 0 <= x_h && x_h < width; scalar_t zero = 0; scalar_t v_yx = (valid_y_l && valid_x_l) ? im_data[y_l * width + x_l] : zero; scalar_t v_yX = (valid_y_l && valid_x_h) ? im_data[y_l * width + x_h] : zero; scalar_t v_Yx = (valid_y_h && valid_x_l) ? im_data[y_h * width + x_l] : zero; scalar_t v_YX = (valid_y_h && valid_x_h) ? 
im_data[y_h * width + x_h] : zero; if (is_y_direction) { scalar_t dx = x - x_l; return dx * (v_YX - v_yX) + (1 - dx) * (v_Yx - v_yx); } else { scalar_t dy = y - y_l; return dy * (v_YX - v_Yx) + (1 - dy) * (v_yX - v_yx); } } template <typename scalar_t> __global__ void deformable_col2im_coord_kernel( int n, const scalar_t* col_ptr, const scalar_t* im_ptr, const scalar_t* offset_ptr, const scalar_t* mask_ptr, int channels, int height, int width, int weight_h, int weight_w, int pad_h, int pad_w, int stride_h, int stride_w, int dilation_h, int dilation_w, int batch_sz, int offset_channels, int n_offset_grps, int out_h, int out_w, const bool use_mask, scalar_t* grad_offset, scalar_t* grad_mask) { CUDA_1D_KERNEL_LOOP(index, n) { scalar_t grad_offset_val = 0; scalar_t grad_mask_val = 0; int w = index % out_w; int h = (index / out_w) % out_h; int w_w = (index / (out_w * out_h * 2)) % weight_w; int w_h = (index / (out_w * out_h * 2 * weight_w)) % weight_h; int c = (index / (out_w * out_h)) % offset_channels; int b = index / (out_w * out_h * offset_channels); const int offset_grp = c / (2 * weight_h * weight_w); const int col_step = weight_h * weight_w; int c_per_offset_grp = channels / n_offset_grps; col_ptr += offset_grp * c_per_offset_grp * weight_h * weight_w * batch_sz * out_w * out_h; im_ptr += (b * n_offset_grps + offset_grp) * c_per_offset_grp * height * width; offset_ptr += (b * n_offset_grps + offset_grp) * 2 * weight_h * weight_w * out_h * out_w; if (use_mask) { mask_ptr += (b * n_offset_grps + offset_grp) * weight_h * weight_w * out_h * out_w; } const int offset_c = c - offset_grp * 2 * weight_h * weight_w; const bool is_y_direction = offset_c % 2 == 0; const int c_bound = c_per_offset_grp * weight_h * weight_w; for (int col_c = (offset_c / 2); col_c < c_bound; col_c += col_step) { const int col_pos = (((col_c * batch_sz + b) * out_h) + h) * out_w + w; int out_x = col_pos % out_w; int out_y = (col_pos / out_w) % out_h; int j = (col_pos / (out_w * out_h * batch_sz)) % weight_w; int i = (col_pos / (out_w * out_h * batch_sz * weight_w)) % weight_h; const int mask_idx = i * weight_w + j; const int offset_h_ptr = (((2 * mask_idx) * out_h + out_y) * out_w + out_x); const int offset_w_ptr = (((2 * mask_idx + 1) * out_h + out_y) * out_w + out_x); const scalar_t offset_h = offset_ptr[offset_h_ptr]; const scalar_t offset_w = offset_ptr[offset_w_ptr]; scalar_t mask_value = 1; if (use_mask) { mask_value = mask_ptr[(mask_idx * out_h + out_y) * out_w + out_x]; } scalar_t y = (out_y * stride_h - pad_h) + i * dilation_h + offset_h; scalar_t x = (out_x * stride_w - pad_w) + j * dilation_w + offset_w; const scalar_t weight = get_coordinate_weight(im_ptr, height, width, y, x, is_y_direction); grad_offset_val += mask_value * weight * col_ptr[col_pos]; if (use_mask && is_y_direction) { grad_mask_val += col_ptr[col_pos] * bilinear_interpolate(im_ptr, height, width, y, x); } im_ptr += height * width; } grad_offset[index] = grad_offset_val; if (use_mask && is_y_direction) { const int idx = ((((b * n_offset_grps + offset_grp) * weight_h + w_h) * weight_w + w_w) * out_h + h) * out_w + w; grad_mask[idx] = grad_mask_val; } } } void compute_grad_offset_and_mask( const at::Tensor& columns, const at::Tensor& input, const at::Tensor& offset, const at::Tensor& mask, int channels, int height, int width, int weight_h, int weight_w, int pad_h, int pad_w, int stride_h, int stride_w, int dilation_h, int dilation_w, int parallel_imgs, int n_offset_grps, bool use_mask, at::Tensor grad_offset, at::Tensor grad_mask) { int out_h 
= (height + 2 * pad_h - (dilation_h * (weight_h - 1) + 1)) / stride_h + 1; int out_w = (width + 2 * pad_w - (dilation_w * (weight_w - 1) + 1)) / stride_w + 1; int num_kernels = out_h * out_w * 2 * weight_h * weight_w * n_offset_grps * parallel_imgs; const unsigned int threads = GET_THREADS(); const unsigned int blocks = GET_BLOCKS(threads, num_kernels); AT_DISPATCH_FLOATING_TYPES_AND_HALF( columns.scalar_type(), "compute_grad_offset_and_mask", ([&] { deformable_col2im_coord_kernel<<<blocks, threads>>>( num_kernels, columns.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), offset.data_ptr<scalar_t>(), mask.data_ptr<scalar_t>(), channels, height, width, weight_h, weight_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, parallel_imgs, 2 * weight_h * weight_w * n_offset_grps, n_offset_grps, out_h, out_w, use_mask, grad_offset.data_ptr<scalar_t>(), grad_mask.data_ptr<scalar_t>()); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf( "error in compute_grad_offset_and_mask: %s\n", cudaGetErrorString(err)); } } std::tuple<at::Tensor, at::Tensor, at::Tensor> backward_gradient_inputs( at::Tensor input, at::Tensor weight, at::Tensor offset, at::Tensor mask, at::Tensor grad_out, int stride_h, int stride_w, int pad_h, int pad_w, int dilation_h, int dilation_w, int n_weight_grps, int n_offset_grps, int n_parallel_imgs, bool use_mask) { at::DeviceGuard guard(input.device()); int batch_sz = input.size(0); long n_in_channels = input.size(1); long in_h = input.size(2); long in_w = input.size(3); n_parallel_imgs = std::min(batch_sz, n_parallel_imgs); long n_out_channels = weight.size(0); int weight_h = weight.size(2); int weight_w = weight.size(3); long out_w = (in_w + 2 * pad_w - (dilation_w * (weight_w - 1) + 1)) / stride_w + 1; long out_h = (in_h + 2 * pad_h - (dilation_h * (weight_h - 1) + 1)) / stride_h + 1; auto grad_input = at::zeros_like(input); auto grad_offset = at::zeros_like(offset); auto grad_mask = at::zeros_like(mask); if (batch_sz == 0) { return std::make_tuple(grad_input, grad_offset, grad_mask); } auto columns = at::empty( {n_in_channels * weight_w * weight_h, n_parallel_imgs * out_h * out_w}, input.options()); // Separate into blocks grad_input = grad_input.reshape( {batch_sz / n_parallel_imgs, n_parallel_imgs, n_in_channels, in_h, in_w}); input = input.reshape( {batch_sz / n_parallel_imgs, n_parallel_imgs, n_in_channels, in_h, in_w}); grad_offset = grad_offset.reshape( {batch_sz / n_parallel_imgs, n_parallel_imgs, n_offset_grps * 2 * weight_h * weight_w, out_h, out_w}); offset = offset.reshape( {batch_sz / n_parallel_imgs, n_parallel_imgs, n_offset_grps * 2 * weight_h * weight_w, out_h, out_w}); if (use_mask) { grad_mask = grad_mask.reshape( {batch_sz / n_parallel_imgs, n_parallel_imgs, n_offset_grps * weight_h * weight_w, out_h, out_w}); mask = mask.reshape( {batch_sz / n_parallel_imgs, n_parallel_imgs, n_offset_grps * weight_h * weight_w, out_h, out_w}); } grad_out = grad_out .reshape( {batch_sz / n_parallel_imgs, n_parallel_imgs, n_weight_grps, n_out_channels / n_weight_grps, out_h, out_w}) .permute({0, 2, 3, 1, 4, 5}); weight = weight.reshape( {n_weight_grps, weight.size(0) / n_weight_grps, weight.size(1), weight.size(2), weight.size(3)}); columns = columns.view( {n_weight_grps, columns.size(0) / n_weight_grps, columns.size(1)}); for (int elt = 0; elt < batch_sz / n_parallel_imgs; elt++) { columns.zero_(); // Separate into weight groups for (int g = 0; g < n_weight_grps; g++) { columns[g] = columns[g].addmm_( weight[g].flatten(1).transpose(0, 1), 
grad_out[elt][g].flatten(1)); } compute_grad_offset_and_mask( columns, input[elt], offset[elt], mask[elt], n_in_channels, in_h, in_w, weight_h, weight_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, n_parallel_imgs, n_offset_grps, use_mask, grad_offset[elt], grad_mask[elt]); compute_grad_input( columns, offset[elt], mask[elt], n_in_channels, in_h, in_w, weight_h, weight_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, n_parallel_imgs, n_offset_grps, use_mask, grad_input[elt]); } grad_input = grad_input.view({batch_sz, n_in_channels, in_h, in_w}); grad_offset = grad_offset.view( {batch_sz, n_offset_grps * 2 * weight_h * weight_w, out_h, out_w}); if (use_mask) { grad_mask = grad_mask.view( {batch_sz, n_offset_grps * weight_h * weight_w, out_h, out_w}); } return std::make_tuple(grad_input, grad_offset, grad_mask); } at::Tensor backward_gradient_parameters( at::Tensor input, const at::Tensor& weight, at::Tensor offset, at::Tensor mask, const at::Tensor& grad_out, int stride_h, int stride_w, int pad_h, int pad_w, int dilation_h, int dilation_w, int n_weight_grps, int n_offset_grps, int n_parallel_imgs, bool use_mask) { at::DeviceGuard guard(input.device()); int batch_sz = input.size(0); long n_in_channels = input.size(1); long in_h = input.size(2); long in_w = input.size(3); n_parallel_imgs = std::min(batch_sz, n_parallel_imgs); long n_out_channels = weight.size(0); int weight_h = weight.size(2); int weight_w = weight.size(3); long out_h = grad_out.size(2); long out_w = grad_out.size(3); auto grad_weight = at::zeros_like(weight); if (batch_sz == 0) { return grad_weight; } at::Tensor grad_out_buf = grad_out .reshape( {batch_sz / n_parallel_imgs, n_parallel_imgs, n_weight_grps, n_out_channels / n_weight_grps, out_h, out_w}) .permute({0, 2, 3, 1, 4, 5}) .contiguous(); input = input.reshape( {batch_sz / n_parallel_imgs, n_parallel_imgs, n_in_channels, in_h, in_w}); offset = offset.reshape( {batch_sz / n_parallel_imgs, n_parallel_imgs, n_offset_grps * 2 * weight_h * weight_w, out_h, out_w}); if (use_mask) { mask = mask.reshape( {batch_sz / n_parallel_imgs, n_parallel_imgs, n_offset_grps * weight_h * weight_w, out_h, out_w}); } grad_weight = grad_weight.reshape( {n_weight_grps, grad_weight.size(0) / n_weight_grps, grad_weight.size(1), grad_weight.size(2), grad_weight.size(3)}); auto columns = at::empty( {n_weight_grps, n_in_channels * weight_w * weight_h / n_weight_grps, n_parallel_imgs * out_h * out_w}, input.options()); for (int elt = 0; elt < batch_sz / n_parallel_imgs; elt++) { deformable_im2col( input[elt], offset[elt], mask[elt], n_in_channels, in_h, in_w, weight_h, weight_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, out_h, out_w, n_parallel_imgs, n_offset_grps, use_mask, columns); for (int g = 0; g < n_weight_grps; g++) { grad_weight[g] = grad_weight[g] .flatten(1) .addmm_( grad_out_buf[elt][g].flatten(1), columns[g].transpose(1, 0)) .view_as(grad_weight[g]); } } grad_weight = grad_weight.view( {grad_weight.size(0) * grad_weight.size(1), grad_weight.size(2), grad_weight.size(3), grad_weight.size(4)}); return grad_weight; } at::Tensor deform_conv2d_forward_kernel( const at::Tensor& input, const at::Tensor& weight, const at::Tensor& offset, const at::Tensor& mask, const at::Tensor& bias, int64_t stride_h, int64_t stride_w, int64_t pad_h, int64_t pad_w, int64_t dilation_h, int64_t dilation_w, int64_t n_weight_grps, int64_t n_offset_grps, bool use_mask) { at::Tensor input_c = input.contiguous(); at::Tensor offset_c = offset.contiguous(); at::Tensor weight_c 
= weight.contiguous(); at::Tensor mask_c = mask.contiguous(); at::Tensor bias_c = bias.contiguous(); TORCH_CHECK(input_c.ndimension() == 4); TORCH_CHECK(offset_c.ndimension() == 4); TORCH_CHECK(!use_mask || mask_c.ndimension() == 4); TORCH_CHECK(weight_c.ndimension() == 4); TORCH_CHECK(input_c.is_cuda(), "input must be a CUDA tensor"); at::DeviceGuard guard(input_c.device()); int batch_sz = input_c.size(0); int in_channels = input_c.size(1); int in_h = input_c.size(2); int in_w = input_c.size(3); int n_parallel_imgs = get_greatest_divisor_below_bound(batch_sz, kMaxParallelImgs); int out_channels = weight_c.size(0); int weight_h = weight_c.size(2); int weight_w = weight_c.size(3); int ker_h = dilation_h * (weight_h - 1) + 1; int ker_w = dilation_w * (weight_w - 1) + 1; int out_h = ((in_h + 2 * pad_h - ker_h) / stride_h) + 1; int out_w = ((in_w + 2 * pad_w - ker_w) / stride_w) + 1; TORCH_CHECK( weight_h > 0 && weight_w > 0, "weight_h: ", weight_h, " weight_w: ", weight_w); TORCH_CHECK( stride_h > 0 && stride_w > 0, "stride_h: ", stride_h, " stride_w: ", stride_w); TORCH_CHECK(pad_h >= 0 && pad_w >= 0, "pad_h: ", pad_h, " pad_w: ", pad_w); TORCH_CHECK( dilation_h > 0 && dilation_w > 0, "dilation_h: ", dilation_h, " dilation_w: ", dilation_w); TORCH_CHECK(weight_c.size(1) * n_weight_grps == input_c.size(1)); TORCH_CHECK(weight_c.size(0) % n_weight_grps == 0); TORCH_CHECK( (offset_c.size(1) == n_offset_grps * 2 * weight_h * weight_w), "offset.shape[1] is not valid: got: ", offset_c.size(1), " expected: ", n_offset_grps * 2 * weight_h * weight_w); TORCH_CHECK( (!use_mask || mask_c.size(1) == n_offset_grps * weight_h * weight_w), "mask.shape[1] is not valid: got: ", mask_c.size(1), " expected: ", n_offset_grps * weight_h * weight_w); TORCH_CHECK(input_c.size(1) % n_offset_grps == 0); TORCH_CHECK( (offset_c.size(0) == input_c.size(0)), "invalid batch size of offset"); TORCH_CHECK( (offset_c.size(2) == out_h && offset_c.size(3) == out_w), "offset output dims: (", offset_c.size(2), ", ", offset_c.size(3), ") - ", "computed output dims: (", out_h, ", ", out_w, ")"); TORCH_CHECK( (mask_c.size(0) == input_c.size(0)), "invalid batch size of mask"); TORCH_CHECK( (!use_mask || (mask_c.size(2) == out_h && mask_c.size(3) == out_w)), "mask output dims: (", mask_c.size(2), ", ", mask_c.size(3), ") - ", "computed output dims: (", out_h, ", ", out_w, ")"); TORCH_CHECK( out_h > 0 && out_w > 0, "Calculated output size too small - out_h: ", out_h, " out_w: ", out_w); auto out = at::zeros({batch_sz, out_channels, out_h, out_w}, input_c.options()); if (batch_sz == 0) { return out; } // Separate batches into blocks out = out.view( {batch_sz / n_parallel_imgs, n_parallel_imgs, out_channels, out_h, out_w}); input_c = input_c.view( {batch_sz / n_parallel_imgs, n_parallel_imgs, in_channels, in_h, in_w}); offset_c = offset_c.view( {batch_sz / n_parallel_imgs, n_parallel_imgs, n_offset_grps * 2 * weight_h * weight_w, out_h, out_w}); if (use_mask) { mask_c = mask_c.view( {batch_sz / n_parallel_imgs, n_parallel_imgs, n_offset_grps * weight_h * weight_w, out_h, out_w}); } at::Tensor out_buf = at::zeros( {batch_sz / n_parallel_imgs, out_channels, n_parallel_imgs * out_h, out_w}, out.options()); // Separate channels into convolution groups out_buf = out_buf.view( {out_buf.size(0), n_weight_grps, out_buf.size(1) / n_weight_grps, out_buf.size(2), out_buf.size(3)}); weight_c = weight_c.view( {n_weight_grps, weight_c.size(0) / n_weight_grps, weight_c.size(1), weight_c.size(2), weight_c.size(3)}); // Sample points and perform 
convolution auto columns = at::zeros( {in_channels * weight_h * weight_w, n_parallel_imgs * out_h * out_w}, input_c.options()); for (int b = 0; b < batch_sz / n_parallel_imgs; b++) { deformable_im2col( input_c[b], offset_c[b], mask_c[b], in_channels, in_h, in_w, weight_h, weight_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, out_h, out_w, n_parallel_imgs, n_offset_grps, use_mask, columns); columns = columns.view( {n_weight_grps, columns.size(0) / n_weight_grps, columns.size(1)}); for (int g = 0; g < n_weight_grps; g++) { out_buf[b][g] = out_buf[b][g] .flatten(1) .addmm_(weight_c[g].flatten(1), columns[g]) .view_as(out_buf[b][g]); } columns = columns.view({columns.size(0) * columns.size(1), columns.size(2)}); } out_buf = out_buf.view( {batch_sz / n_parallel_imgs, out_channels, n_parallel_imgs, out_h, out_w}); out_buf.transpose_(1, 2); out.copy_(out_buf); out = out.view({batch_sz, out_channels, out_h, out_w}); return out + bias_c.view({1, out_channels, 1, 1}); } std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor> deform_conv2d_backward_kernel( const at::Tensor& grad_out, const at::Tensor& input, const at::Tensor& weight, const at::Tensor& offset, const at::Tensor& mask, const at::Tensor& bias, int64_t stride_h, int64_t stride_w, int64_t pad_h, int64_t pad_w, int64_t dilation_h, int64_t dilation_w, int64_t n_weight_grps, int64_t n_offset_grps, bool use_mask) { at::Tensor grad_out_c = grad_out.contiguous(); at::Tensor input_c = input.contiguous(); at::Tensor weight_c = weight.contiguous(); at::Tensor offset_c = offset.contiguous(); at::Tensor mask_c = mask.contiguous(); at::Tensor bias_c = bias.contiguous(); const int batch_sz = input_c.size(0); const int n_parallel_imgs = get_greatest_divisor_below_bound(batch_sz, kMaxParallelImgs); auto grad_input_and_offset_and_mask = backward_gradient_inputs( input_c, weight_c, offset_c, mask_c, grad_out_c, stride_h, stride_w, pad_h, pad_w, dilation_h, dilation_w, n_weight_grps, n_offset_grps, n_parallel_imgs, use_mask); auto grad_input = std::get<0>(grad_input_and_offset_and_mask); auto grad_offset = std::get<1>(grad_input_and_offset_and_mask); auto grad_mask = std::get<2>(grad_input_and_offset_and_mask); auto grad_weight = backward_gradient_parameters( input_c, weight_c, offset_c, mask_c, grad_out_c, stride_h, stride_w, pad_h, pad_w, dilation_h, dilation_w, n_weight_grps, n_offset_grps, n_parallel_imgs, use_mask); auto value = grad_out_c.sum({0, 2, 3}); auto grad_bias = at::ones_like(bias_c) * value; return std::make_tuple( grad_input, grad_weight, grad_offset, grad_mask, grad_bias); } } // namespace TORCH_LIBRARY_IMPL(torchvision, CUDA, m) { m.impl( TORCH_SELECTIVE_NAME("torchvision::deform_conv2d"), TORCH_FN(deform_conv2d_forward_kernel)); m.impl( TORCH_SELECTIVE_NAME("torchvision::_deform_conv2d_backward"), TORCH_FN(deform_conv2d_backward_kernel)); } } // namespace ops } // namespace vision
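// Illustrative note (not part of the file above): the output spatial size and the
// offset/mask channel counts that deform_conv2d_forward_kernel validates all follow
// one dilated-kernel formula. A minimal host-side sketch of that arithmetic; the
// struct and helper names here are made up for illustration only.
#include <cstdint>

struct DeformConvShapes { int64_t out_h, out_w, offset_channels, mask_channels; };

inline DeformConvShapes deform_conv2d_shapes(
    int64_t in_h, int64_t in_w, int64_t weight_h, int64_t weight_w,
    int64_t stride_h, int64_t stride_w, int64_t pad_h, int64_t pad_w,
    int64_t dilation_h, int64_t dilation_w, int64_t n_offset_grps) {
  const int64_t ker_h = dilation_h * (weight_h - 1) + 1; // effective (dilated) kernel extent
  const int64_t ker_w = dilation_w * (weight_w - 1) + 1;
  DeformConvShapes s;
  s.out_h = (in_h + 2 * pad_h - ker_h) / stride_h + 1;
  s.out_w = (in_w + 2 * pad_w - ker_w) / stride_w + 1;
  s.offset_channels = n_offset_grps * 2 * weight_h * weight_w; // a (dy, dx) pair per sampling point
  s.mask_channels   = n_offset_grps * weight_h * weight_w;     // one modulation scalar per point
  return s;
}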
813c991424a76c8924f5878a230f8554dfa030a6.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include <stdio.h> #include <stdint.h> #include <zlib.h> #include <stdlib.h> #include <time.h> #include <math.h> #include <fcntl.h> #include <limits.h> typedef struct __ReadSeqList { char* sequence; unsigned int length; struct __ReadSeqList* next; } ReadSeqList; typedef struct HashTable { unsigned int bits; unsigned int count; unsigned int read_count; unsigned int read_length; unsigned long long int *keys; unsigned int *values; } HashTable; HashTable* HashTable_init(unsigned int bits, unsigned int read_count, unsigned int read_length){ HashTable *ht; ht = (HashTable*)calloc(1, sizeof(HashTable)); ht->read_count = read_count; ht->read_length = read_length; ht->bits = bits; ht->count = 0; return ht; } void HashTable_destory(HashTable *ht) { if (!ht) return; free(ht); } __device__ const unsigned char seq_nt4_table[256] = { // translate ACGT to 0123 0, 1, 2, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 1, 4, 4, 4, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 1, 4, 4, 4, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 }; // funcion para calcular un hash de 64 bits __device__ unsigned int hash_uint64(unsigned long long int key) { key = ~key + (key << 21); key = key ^ key >> 24; key = (key + (key << 3)) + (key << 8); key = key ^ key >> 14; key = (key + (key << 2)) + (key << 4); key = key ^ key >> 28; key = key + (key << 31); return (unsigned int)key; } __device__ unsigned int h2b(unsigned int hash, unsigned int bits) { return hash * 2654435769U >> (32 - bits); } __device__ void hash_insert(HashTable *ht, unsigned long long int kmer) { unsigned int iKey, last; bool end = false; iKey = last = h2b(hash_uint64(kmer), ht->bits); while (true) { unsigned long long int prev = atomicCAS(&(ht->keys[iKey]), 0ULL, kmer); if (prev == 0ULL || prev == kmer) { atomicAdd(&(ht->values[iKey]), 1U); return; } if(end) return; // Collition: Open addressing iKey = (iKey + 1U) & ((1U << ht->bits) - 1); // loop back end = (iKey == last); } } // insert k-mers in $seq to hash table $ht __global__ void kernel_count_seq_kmers(HashTable *ht, int k, char *d_reads) { unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; if(tid < ht->read_count) { unsigned int i, l; unsigned int len = ht->read_length; unsigned long long int x[2], mask = (1ULL<<k*2) - 1, shift = (k - 1) * 2; for (i = l = 0, x[0] = x[1] = 0; i < len; ++i) { int c = seq_nt4_table[(unsigned char)d_reads[(tid*len)+i]]; if (c < 4) { // not an "N" base x[0] = (x[0] << 2 | c) & mask; // forward strand x[1] = x[1] >> 2 | (unsigned long long int)(3 - c) << shift; // reverse strand if (++l >= k) { // we find a k-mer unsigned long long int kmer = x[0] < x[1]? x[0] : x[1]; hash_insert(ht, kmer); // only add one strand! 
} } else l = 0, x[0] = x[1] = 0; // if there is an "N", restart } } } __global__ void kernel_print_hist(const HashTable *ht, unsigned int *cnt_d) { unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; unsigned int pos; if(tid < (1U << ht->bits)) { if (ht->values[tid] > 0) { pos = ht->values[tid] < 256U ? ht->values[tid] : 255; atomicAdd(&(cnt_d[pos]), 1U); } } } static int count_file(const char *fn, int k, unsigned int p) { HashTable *ht; unsigned int i; unsigned int capacity = 1U << p; unsigned int cnt[256]; unsigned int read_count = 0; unsigned int read_length = 0; unsigned int fullength = 0; char *reads; // variables para cuda HashTable *ht_d; char *reads_d; unsigned int *cnt_d; unsigned long long int *keys_d; unsigned int *values_d; FILE * fp; char * line = NULL; size_t len = 0; ssize_t read; fp = fopen(fn, "r"); if (fp == NULL) exit(EXIT_FAILURE); ReadSeqList *current, *head; head = current = NULL; while ((read = getline(&line, &len, fp)) != -1) { read_count++; line[strcspn(line, "\n")] = 0; ReadSeqList *node = (ReadSeqList*)malloc(sizeof(ReadSeqList)); node->sequence = (char*)malloc(strlen(line)); strcpy(node->sequence, line); node->length = strlen(line); node->next = NULL; fullength += strlen(line); if(head == NULL){ current = head = node; } else { current = current->next = node; } } fclose(fp); if (line) free(line); read_length = head->length; // almacenar los caracteres en una array 1D reads = (char*)malloc(read_length * read_count * sizeof(char)); for(i=0, current = head; current; current=current->next){ memcpy(reads + (i * read_length), current->sequence, read_length); i++; } // inicializar hashtable ht = HashTable_init(p, read_count, read_length); printf("read count: %d\t read length: %d\t avg. length: %d\n", read_count, read_length, fullength/read_count); // allocate memory in device hipMalloc((void **)&ht_d, sizeof(HashTable)); hipMalloc((void **)&reads_d, read_length * read_count * sizeof(char)); hipMalloc((void **)&keys_d, capacity * sizeof(unsigned long long int)); hipMalloc((void **)&values_d, capacity * sizeof(unsigned int)); hipMalloc((void **)&cnt_d, 256 * sizeof(unsigned int)); // initialize values hipMemset(keys_d, 0ULL, capacity * sizeof(unsigned long long int)); hipMemset(values_d, 0, capacity * sizeof(unsigned int)); hipMemset(cnt_d, 0, 256 * sizeof(unsigned int)); // copy data to device ht->keys = keys_d; ht->values = values_d; hipMemcpy(ht_d, ht, sizeof(HashTable), hipMemcpyHostToDevice); hipMemcpy(reads_d, reads, read_length * read_count * sizeof (char), hipMemcpyHostToDevice); // invocar kernels unsigned int thr = 1024; hipLaunchKernelGGL(( kernel_count_seq_kmers), dim3(ceil(read_count/(float)thr)), dim3(thr), 0, 0, ht_d, k, reads_d); hipDeviceSynchronize(); hipLaunchKernelGGL(( kernel_print_hist), dim3(ceil(capacity/(float)thr)), dim3(thr), 0, 0, ht_d, cnt_d); hipDeviceSynchronize(); // copy data from device hipMemcpy(ht, ht_d, sizeof(HashTable), hipMemcpyDeviceToHost); hipMemcpy(ht->keys, keys_d, capacity * sizeof(unsigned long long int), hipMemcpyDeviceToHost); hipMemcpy(ht->values, values_d, capacity * sizeof(unsigned int), hipMemcpyDeviceToHost); hipMemcpy(cnt, cnt_d, 256 * sizeof(unsigned int), hipMemcpyDeviceToHost); printf("read count: %d\t read length: %d\t avg. 
length: %d\n", read_count, read_length, fullength/read_count); printf("COUNT: %d\n\n", ht->count); // print histogram for (i = 1; i < 256; ++i) printf("%d\t%d\n", i, cnt[i]); // limpieza hipFree(reads_d); hipFree(ht_d); hipFree(cnt_d); hipFree(keys_d); hipFree(values_d); // limpieza for(current = head; current; current=current->next){ free(current->sequence); free(current); } free(reads); HashTable_destory(ht); return 0; } int main(int argc, char *argv[]) { int k = 31; unsigned int p = 27; k = (int)strtol(argv[1], NULL, 10); p = (unsigned int)strtol(argv[2], NULL, 10); count_file(argv[3], k, p); return 0; }
813c991424a76c8924f5878a230f8554dfa030a6.cu
#include <cuda.h> #include <cuda_fp16.h> #include <stdio.h> #include <stdint.h> #include <zlib.h> #include <stdlib.h> #include <time.h> #include <math.h> #include <fcntl.h> #include <limits.h> typedef struct __ReadSeqList { char* sequence; unsigned int length; struct __ReadSeqList* next; } ReadSeqList; typedef struct HashTable { unsigned int bits; unsigned int count; unsigned int read_count; unsigned int read_length; unsigned long long int *keys; unsigned int *values; } HashTable; HashTable* HashTable_init(unsigned int bits, unsigned int read_count, unsigned int read_length){ HashTable *ht; ht = (HashTable*)calloc(1, sizeof(HashTable)); ht->read_count = read_count; ht->read_length = read_length; ht->bits = bits; ht->count = 0; return ht; } void HashTable_destory(HashTable *ht) { if (!ht) return; free(ht); } __device__ const unsigned char seq_nt4_table[256] = { // translate ACGT to 0123 0, 1, 2, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 1, 4, 4, 4, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 1, 4, 4, 4, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 }; // funcion para calcular un hash de 64 bits __device__ unsigned int hash_uint64(unsigned long long int key) { key = ~key + (key << 21); key = key ^ key >> 24; key = (key + (key << 3)) + (key << 8); key = key ^ key >> 14; key = (key + (key << 2)) + (key << 4); key = key ^ key >> 28; key = key + (key << 31); return (unsigned int)key; } __device__ unsigned int h2b(unsigned int hash, unsigned int bits) { return hash * 2654435769U >> (32 - bits); } __device__ void hash_insert(HashTable *ht, unsigned long long int kmer) { unsigned int iKey, last; bool end = false; iKey = last = h2b(hash_uint64(kmer), ht->bits); while (true) { unsigned long long int prev = atomicCAS(&(ht->keys[iKey]), 0ULL, kmer); if (prev == 0ULL || prev == kmer) { atomicAdd(&(ht->values[iKey]), 1U); return; } if(end) return; // Collition: Open addressing iKey = (iKey + 1U) & ((1U << ht->bits) - 1); // loop back end = (iKey == last); } } // insert k-mers in $seq to hash table $ht __global__ void kernel_count_seq_kmers(HashTable *ht, int k, char *d_reads) { unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; if(tid < ht->read_count) { unsigned int i, l; unsigned int len = ht->read_length; unsigned long long int x[2], mask = (1ULL<<k*2) - 1, shift = (k - 1) * 2; for (i = l = 0, x[0] = x[1] = 0; i < len; ++i) { int c = seq_nt4_table[(unsigned char)d_reads[(tid*len)+i]]; if (c < 4) { // not an "N" base x[0] = (x[0] << 2 | c) & mask; // forward strand x[1] = x[1] >> 2 | (unsigned long long int)(3 - c) << shift; // reverse strand if (++l >= k) { // we find a k-mer unsigned long long int kmer = x[0] < x[1]? x[0] : x[1]; hash_insert(ht, kmer); // only add one strand! 
} } else l = 0, x[0] = x[1] = 0; // if there is an "N", restart } } } __global__ void kernel_print_hist(const HashTable *ht, unsigned int *cnt_d) { unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; unsigned int pos; if(tid < (1U << ht->bits)) { if (ht->values[tid] > 0) { pos = ht->values[tid] < 256U ? ht->values[tid] : 255; atomicAdd(&(cnt_d[pos]), 1U); } } } static int count_file(const char *fn, int k, unsigned int p) { HashTable *ht; unsigned int i; unsigned int capacity = 1U << p; unsigned int cnt[256]; unsigned int read_count = 0; unsigned int read_length = 0; unsigned int fullength = 0; char *reads; // variables para cuda HashTable *ht_d; char *reads_d; unsigned int *cnt_d; unsigned long long int *keys_d; unsigned int *values_d; FILE * fp; char * line = NULL; size_t len = 0; ssize_t read; fp = fopen(fn, "r"); if (fp == NULL) exit(EXIT_FAILURE); ReadSeqList *current, *head; head = current = NULL; while ((read = getline(&line, &len, fp)) != -1) { read_count++; line[strcspn(line, "\n")] = 0; ReadSeqList *node = (ReadSeqList*)malloc(sizeof(ReadSeqList)); node->sequence = (char*)malloc(strlen(line)); strcpy(node->sequence, line); node->length = strlen(line); node->next = NULL; fullength += strlen(line); if(head == NULL){ current = head = node; } else { current = current->next = node; } } fclose(fp); if (line) free(line); read_length = head->length; // almacenar los caracteres en una array 1D reads = (char*)malloc(read_length * read_count * sizeof(char)); for(i=0, current = head; current; current=current->next){ memcpy(reads + (i * read_length), current->sequence, read_length); i++; } // inicializar hashtable ht = HashTable_init(p, read_count, read_length); printf("read count: %d\t read length: %d\t avg. length: %d\n", read_count, read_length, fullength/read_count); // allocate memory in device cudaMalloc((void **)&ht_d, sizeof(HashTable)); cudaMalloc((void **)&reads_d, read_length * read_count * sizeof(char)); cudaMalloc((void **)&keys_d, capacity * sizeof(unsigned long long int)); cudaMalloc((void **)&values_d, capacity * sizeof(unsigned int)); cudaMalloc((void **)&cnt_d, 256 * sizeof(unsigned int)); // initialize values cudaMemset(keys_d, 0ULL, capacity * sizeof(unsigned long long int)); cudaMemset(values_d, 0, capacity * sizeof(unsigned int)); cudaMemset(cnt_d, 0, 256 * sizeof(unsigned int)); // copy data to device ht->keys = keys_d; ht->values = values_d; cudaMemcpy(ht_d, ht, sizeof(HashTable), cudaMemcpyHostToDevice); cudaMemcpy(reads_d, reads, read_length * read_count * sizeof (char), cudaMemcpyHostToDevice); // invocar kernels unsigned int thr = 1024; kernel_count_seq_kmers<<<ceil(read_count/(float)thr), thr>>>(ht_d, k, reads_d); cudaDeviceSynchronize(); kernel_print_hist<<<ceil(capacity/(float)thr), thr>>>(ht_d, cnt_d); cudaDeviceSynchronize(); // copy data from device cudaMemcpy(ht, ht_d, sizeof(HashTable), cudaMemcpyDeviceToHost); cudaMemcpy(ht->keys, keys_d, capacity * sizeof(unsigned long long int), cudaMemcpyDeviceToHost); cudaMemcpy(ht->values, values_d, capacity * sizeof(unsigned int), cudaMemcpyDeviceToHost); cudaMemcpy(cnt, cnt_d, 256 * sizeof(unsigned int), cudaMemcpyDeviceToHost); printf("read count: %d\t read length: %d\t avg. 
length: %d\n", read_count, read_length, fullength/read_count); printf("COUNT: %d\n\n", ht->count); // print histogram for (i = 1; i < 256; ++i) printf("%d\t%d\n", i, cnt[i]); // limpieza cudaFree(reads_d); cudaFree(ht_d); cudaFree(cnt_d); cudaFree(keys_d); cudaFree(values_d); // limpieza for(current = head; current; current=current->next){ free(current->sequence); free(current); } free(reads); HashTable_destory(ht); return 0; } int main(int argc, char *argv[]) { int k = 31; unsigned int p = 27; k = (int)strtol(argv[1], NULL, 10); p = (unsigned int)strtol(argv[2], NULL, 10); count_file(argv[3], k, p); return 0; }
c4de0def0dd4e77e7c140821e2cad3034ee5cb05.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //====================================== // // GPU //====================================== #include"stdafx.h" #include"FullyConnect_DATA.hpp" #include"FullyConnect_FUNC.hpp" #include"FullyConnect_Base.h" #include"FullyConnect_GPU.cuh" #include"FullyConnect_LayerData_GPU.cuh" using namespace Gravisbell; using namespace Gravisbell::Layer::NeuralNetwork; #define BLOCK_SIZE (16) namespace { /** . */ __global__ void cuda_func_multiplVector(const F32* i_lpInputBufferA, const F32* i_lpInputBufferB, F32* o_lpOutputBuffer, U32 i_bufferSize) { const U32 bufferPos = blockIdx.x * BLOCK_SIZE + threadIdx.x; if(bufferPos >= i_bufferSize) // warp... return; o_lpOutputBuffer[bufferPos] = i_lpInputBufferA[bufferPos] * i_lpInputBufferB[bufferPos]; } /** . */ __global__ void cuda_func_multiplVectorWithScaler(const F32* i_lpInputBufferA, const F32* i_lpInputBufferB, F32* o_lpOutputBuffer, U32 i_bufferSize, F32 alpha, F32 beta) { const U32 bufferPos = blockIdx.x * BLOCK_SIZE + threadIdx.x; if(bufferPos >= i_bufferSize) // warp... return; o_lpOutputBuffer[bufferPos] = alpha * i_lpInputBufferA[bufferPos] * i_lpInputBufferB[bufferPos] + beta * o_lpOutputBuffer[bufferPos]; } } namespace Gravisbell { namespace Layer { namespace NeuralNetwork { /** */ FullyConnect_GPU::FullyConnect_GPU(Gravisbell::GUID guid, FullyConnect_LayerData_GPU& i_layerData, const IODataStruct& i_inputDataStruct, Gravisbell::Common::ITemporaryMemoryManager& i_temporaryMemoryManager) : FullyConnect_Base (guid, i_inputDataStruct, i_layerData.GetOutputDataStruct(&i_inputDataStruct, 1)) , layerData (i_layerData) /**< */ , inputBufferCount (0) /**< */ , neuronCount (0) /**< */ , outputBufferCount (0) /**< */ { hipblasCreate(&cublasHandle); } /** */ FullyConnect_GPU::~FullyConnect_GPU() { hipblasDestroy(cublasHandle); } //================================ // //================================ /** */ U32 FullyConnect_GPU::GetLayerKind()const { return Layer::ELayerKind::LAYER_KIND_GPU | GetLayerKindBase(); } /** . @return 0 */ ErrorCode FullyConnect_GPU::Initialize(void) { return this->layerData.Initialize(); } //=========================== // //=========================== /** */ FullyConnect_LayerData_Base& FullyConnect_GPU::GetLayerData() { return this->layerData; } const FullyConnect_LayerData_Base& FullyConnect_GPU::GetLayerData()const { return this->layerData; } //================================ // //================================ /** .() @param batchSize . NN. PreProcessLearnLoop. */ ErrorCode FullyConnect_GPU::PreProcessLearn() { ErrorCode errorCode = this->PreProcessCalculate(); if(errorCode != ErrorCode::ERROR_CODE_NONE) return errorCode; // lpBiasUpdateVector_d.resize(this->GetBatchSize()); { thrust::host_vector<F32> lpBuf(this->GetBatchSize(), 1.0f); this->lpBiasUpdateVector_d = lpBuf; } // this->lpDBias.resize(this->neuronCount); this->lpDNeuron.resize(this->neuronCount * this->inputBufferCount); return ErrorCode::ERROR_CODE_NONE; } /** .() @param batchSize . NN. Calculate. 
*/ ErrorCode FullyConnect_GPU::PreProcessCalculate() { // this->inputBufferCount = this->GetInputBufferCount(); if(this->inputBufferCount == 0) return ErrorCode::ERROR_CODE_FRAUD_INPUT_COUNT; // this->neuronCount = this->GetNeuronCount(); if(this->neuronCount == 0) return ErrorCode::ERROR_CODE_FRAUD_NEURON_COUNT; // this->outputBufferCount = this->GetOutputBufferCount(); if(this->outputBufferCount == 0) return ErrorCode::ERROR_CODE_FRAUD_OUTPUT_COUNT; return ErrorCode::ERROR_CODE_NONE; } /** . Calculate. */ ErrorCode FullyConnect_GPU::PreProcessLoop() { return ErrorCode::ERROR_CODE_NONE; } /** . @param lpInputBuffer . GetInputBufferCount @return 0 */ ErrorCode FullyConnect_GPU::Calculate_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppOutputBuffer) { if(this->GetProcessType() == ProcessType::PROCESSTYPE_LEARN && this->GetRuntimeParameterByStructure().UpdateWeigthWithOutputVariance) { // CPU. // 1 // U32 PROCTIME_MAX = 5; // F32 VARIANCE_TOLERANCE = 0.1f; // () std::vector<F32> lpTmpOutputBuffer(this->GetBatchSize() * this->outputBufferCount); // thrust::device_vector<F32> lpTmpWeight_d(this->layerData.pWeightData->GetWeigthSize()); thrust::device_vector<F32> lpTmpBias_d(this->layerData.pWeightData->GetBiasSize()); // hipMemcpy(thrust::raw_pointer_cast(&lpTmpWeight_d[0]), this->layerData.pWeightData->GetWeight(), sizeof(F32)*lpTmpWeight_d.size(), hipMemcpyDeviceToDevice); hipMemcpy(thrust::raw_pointer_cast(&lpTmpBias_d[0]), this->layerData.pWeightData->GetBias(), sizeof(F32)*lpTmpBias_d.size(), hipMemcpyDeviceToDevice); U32 procTime = 0; do { // ErrorCode err = this->CalculateBase(i_lppInputBuffer, o_lppOutputBuffer, thrust::raw_pointer_cast(&lpTmpWeight_d[0]), thrust::raw_pointer_cast(&lpTmpBias_d[0])); if(err != ErrorCode::ERROR_CODE_NONE) return err; // hipMemcpy(&lpTmpOutputBuffer[0], &o_lppOutputBuffer[0], sizeof(F32)*lpTmpOutputBuffer.size(), hipMemcpyDeviceToHost); // F32 variance = 0.0f; F32 average = 0.0f; { // for(U32 outputNum=0; outputNum<lpTmpOutputBuffer.size(); outputNum++) { average += lpTmpOutputBuffer[outputNum]; } average /= lpTmpOutputBuffer.size(); // for(U32 outputNum=0; outputNum<lpTmpOutputBuffer.size(); outputNum++) { variance += (lpTmpOutputBuffer[outputNum] - average) * (lpTmpOutputBuffer[outputNum] - average); } variance /= lpTmpOutputBuffer.size(); } if( abs(variance - 1.0f) < VARIANCE_TOLERANCE) break; // F32 deviation = sqrtf(variance); { thrust::host_vector<F32> lpTmpNeuron = lpTmpWeight_d; thrust::host_vector<F32> lpTmpBias = lpTmpBias_d; for(U32 neuronNum=0; neuronNum<lpTmpNeuron.size(); neuronNum++) { lpTmpNeuron[neuronNum] /= deviation; } for(U32 neuronNum=0; neuronNum<lpTmpBias.size(); neuronNum++) { lpTmpBias[neuronNum] /= deviation; } lpTmpWeight_d = lpTmpNeuron; lpTmpBias_d = lpTmpBias; } procTime++; }while(procTime < 5); // this->layerData.pWeightData->SetData(thrust::raw_pointer_cast(&lpTmpWeight_d[0]), thrust::raw_pointer_cast(&lpTmpBias_d[0])); } else { ErrorCode err = this->CalculateBase(i_lppInputBuffer, o_lppOutputBuffer, this->layerData.pWeightData->GetWeight(), this->layerData.pWeightData->GetBias()); if(err != ErrorCode::ERROR_CODE_NONE) return err; } return ErrorCode::ERROR_CODE_NONE; } ErrorCode FullyConnect_GPU::CalculateBase(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppOutputBuffer, const F32* lpWeight, const F32* lpBias) { // { for(U32 batchNum=0; batchNum<this->GetBatchSize(); batchNum++) { hipError_t err = hipMemcpy( &o_lppOutputBuffer[batchNum * this->outputBufferCount], lpBias, 
sizeof(F32) * this->neuronCount, hipMemcpyDeviceToDevice); if(err != 0) return ERROR_CODE_CUDA_COPY_MEMORY; } } // T { // C = aAB + bC; // CUBLAS // 0, 4, 8 // 1, 5, 9 // 2, 6, 10 // 3, 7, 11 // F32 alpha = 1.0f; F32 beta = 1.0f; // C1.0 hipblasSgemm( this->cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N, this->neuronCount, // A this->GetBatchSize(), // B this->inputBufferCount, // A,B &alpha, lpWeight, // A this->inputBufferCount, // A i_lppInputBuffer, // B this->inputBufferCount, // B &beta, &o_lppOutputBuffer[0], this->outputBufferCount); } return ErrorCode::ERROR_CODE_NONE; } //================================ // //================================ /** .. Calculate. @param o_lppDInputBuffer . [GetBatchSize()][GetInputBufferCount()]. @param i_lppDOutputBuffer =. [GetBatchSize()][GetOutputBufferCount()]. */ ErrorCode FullyConnect_GPU::CalculateDInput_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppDInputBuffer, CONST_BATCH_BUFFER_POINTER i_lppOutputBuffer, CONST_BATCH_BUFFER_POINTER i_lppDOutputBuffer) { // if(o_lppDInputBuffer) { F32 alpha = 1.0f; F32 beta = 0.0f; hipblasSgemm( this->cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, this->inputBufferCount, // A this->GetBatchSize(), // B this->neuronCount, // A,B &alpha, this->layerData.pWeightData->GetWeight(), // A this->inputBufferCount, // A i_lppDOutputBuffer, // B this->neuronCount, // B &beta, o_lppDInputBuffer, this->inputBufferCount); } return ErrorCode::ERROR_CODE_NONE; } /** . Calculate. @param i_lppDOutputBuffer =. [GetBatchSize()][GetOutputBufferCount()]. */ ErrorCode FullyConnect_GPU::Training_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppDInputBuffer, CONST_BATCH_BUFFER_POINTER i_lppOutputBuffer, CONST_BATCH_BUFFER_POINTER i_lppDOutputBuffer) { std::vector<F32> lpDOutputBuffer_h(this->outputBufferCount * this->GetBatchSize()); hipMemcpy(&lpDOutputBuffer_h[0], i_lppDOutputBuffer, sizeof(F32)*lpDOutputBuffer_h.size(), hipMemcpyDeviceToHost); std::vector<F32> lpInputBuffer_h(this->inputBufferCount * this->GetBatchSize()); hipMemcpy(&lpInputBuffer_h[0], i_lppInputBuffer, sizeof(F32)*lpInputBuffer_h.size(), hipMemcpyDeviceToHost); // Gravisbell::ErrorCode errCode = this->CalculateDInput_device(i_lppInputBuffer, o_lppDInputBuffer, i_lppOutputBuffer, i_lppDOutputBuffer); if(errCode != ErrorCode::ERROR_CODE_NONE) return errCode; // { F32 alpha = 1.0f; F32 beta = 0; hipblasSgemm( this->cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, this->neuronCount, // A 1, // B this->GetBatchSize(), // A,B &alpha, i_lppDOutputBuffer, // A this->neuronCount, // A thrust::raw_pointer_cast(&this->lpBiasUpdateVector_d[0]), // B this->GetBatchSize(), // B &beta, thrust::raw_pointer_cast(&this->lpDBias[0]), this->neuronCount); } // { // { F32 alpha = 1.0f; F32 beta = 0; hipblasSgemm( this->cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_T, this->inputBufferCount, // A this->neuronCount, // B this->GetBatchSize(), // A,B &alpha, i_lppInputBuffer, // A this->inputBufferCount, // A i_lppDOutputBuffer, // B this->neuronCount, // B &beta, thrust::raw_pointer_cast(&this->lpDNeuron[0]), this->inputBufferCount); } } // this->layerData.pWeightData->UpdateData(thrust::raw_pointer_cast(&this->lpDNeuron[0]), thrust::raw_pointer_cast(&this->lpDBias[0])); return ErrorCode::ERROR_CODE_NONE; } } // Gravisbell; } // Layer; } // NeuralNetwork;
c4de0def0dd4e77e7c140821e2cad3034ee5cb05.cu
//====================================== // フィードフォワードニューラルネットワークの統合処理レイヤー // GPU処理用 //====================================== #include"stdafx.h" #include"FullyConnect_DATA.hpp" #include"FullyConnect_FUNC.hpp" #include"FullyConnect_Base.h" #include"FullyConnect_GPU.cuh" #include"FullyConnect_LayerData_GPU.cuh" using namespace Gravisbell; using namespace Gravisbell::Layer::NeuralNetwork; #define BLOCK_SIZE (16) namespace { /** ベクトルの要素同士の掛け算. */ __global__ void cuda_func_multiplVector(const F32* i_lpInputBufferA, const F32* i_lpInputBufferB, F32* o_lpOutputBuffer, U32 i_bufferSize) { const U32 bufferPos = blockIdx.x * BLOCK_SIZE + threadIdx.x; if(bufferPos >= i_bufferSize) // 分岐するが末尾のwarpだけなので、処理速度に影響はないはず... return; o_lpOutputBuffer[bufferPos] = i_lpInputBufferA[bufferPos] * i_lpInputBufferB[bufferPos]; } /** ベクトルの要素同士の掛け算. */ __global__ void cuda_func_multiplVectorWithScaler(const F32* i_lpInputBufferA, const F32* i_lpInputBufferB, F32* o_lpOutputBuffer, U32 i_bufferSize, F32 alpha, F32 beta) { const U32 bufferPos = blockIdx.x * BLOCK_SIZE + threadIdx.x; if(bufferPos >= i_bufferSize) // 分岐するが末尾のwarpだけなので、処理速度に影響はないはず... return; o_lpOutputBuffer[bufferPos] = alpha * i_lpInputBufferA[bufferPos] * i_lpInputBufferB[bufferPos] + beta * o_lpOutputBuffer[bufferPos]; } } namespace Gravisbell { namespace Layer { namespace NeuralNetwork { /** コンストラクタ */ FullyConnect_GPU::FullyConnect_GPU(Gravisbell::GUID guid, FullyConnect_LayerData_GPU& i_layerData, const IODataStruct& i_inputDataStruct, Gravisbell::Common::ITemporaryMemoryManager& i_temporaryMemoryManager) : FullyConnect_Base (guid, i_inputDataStruct, i_layerData.GetOutputDataStruct(&i_inputDataStruct, 1)) , layerData (i_layerData) /**< レイヤーデータ */ , inputBufferCount (0) /**< 入力バッファ数 */ , neuronCount (0) /**< ニューロン数 */ , outputBufferCount (0) /**< 出力バッファ数 */ { cublasCreate(&cublasHandle); } /** デストラクタ */ FullyConnect_GPU::~FullyConnect_GPU() { cublasDestroy(cublasHandle); } //================================ // 基本処理 //================================ /** レイヤー種別の取得 */ U32 FullyConnect_GPU::GetLayerKind()const { return Layer::ELayerKind::LAYER_KIND_GPU | GetLayerKindBase(); } /** 初期化. 各ニューロンの値をランダムに初期化 @return 成功した場合0 */ ErrorCode FullyConnect_GPU::Initialize(void) { return this->layerData.Initialize(); } //=========================== // レイヤーデータ関連 //=========================== /** レイヤーデータを取得する */ FullyConnect_LayerData_Base& FullyConnect_GPU::GetLayerData() { return this->layerData; } const FullyConnect_LayerData_Base& FullyConnect_GPU::GetLayerData()const { return this->layerData; } //================================ // 演算処理 //================================ /** 演算前処理を実行する.(学習用) @param batchSize 同時に演算を行うバッチのサイズ. NN作成後、演算処理を実行する前に一度だけ必ず実行すること。データごとに実行する必要はない. 失敗した場合はPreProcessLearnLoop以降の処理は実行不可. */ ErrorCode FullyConnect_GPU::PreProcessLearn() { ErrorCode errorCode = this->PreProcessCalculate(); if(errorCode != ErrorCode::ERROR_CODE_NONE) return errorCode; // バイアス更新用のベクトルを作成 lpBiasUpdateVector_d.resize(this->GetBatchSize()); { thrust::host_vector<F32> lpBuf(this->GetBatchSize(), 1.0f); this->lpBiasUpdateVector_d = lpBuf; } // パラメータ変化量 this->lpDBias.resize(this->neuronCount); this->lpDNeuron.resize(this->neuronCount * this->inputBufferCount); return ErrorCode::ERROR_CODE_NONE; } /** 演算前処理を実行する.(演算用) @param batchSize 同時に演算を行うバッチのサイズ. NN作成後、演算処理を実行する前に一度だけ必ず実行すること。データごとに実行する必要はない. 失敗した場合はCalculate以降の処理は実行不可. 
*/ ErrorCode FullyConnect_GPU::PreProcessCalculate() { // 入力バッファ数を確認 this->inputBufferCount = this->GetInputBufferCount(); if(this->inputBufferCount == 0) return ErrorCode::ERROR_CODE_FRAUD_INPUT_COUNT; // ニューロン数を確認 this->neuronCount = this->GetNeuronCount(); if(this->neuronCount == 0) return ErrorCode::ERROR_CODE_FRAUD_NEURON_COUNT; // 出力バッファ数を確認 this->outputBufferCount = this->GetOutputBufferCount(); if(this->outputBufferCount == 0) return ErrorCode::ERROR_CODE_FRAUD_OUTPUT_COUNT; return ErrorCode::ERROR_CODE_NONE; } /** ループの初期化処理.データセットの実行開始前に実行する 失敗した場合はCalculate以降の処理は実行不可. */ ErrorCode FullyConnect_GPU::PreProcessLoop() { return ErrorCode::ERROR_CODE_NONE; } /** 演算処理を実行する. @param lpInputBuffer 入力データバッファ. GetInputBufferCountで取得した値の要素数が必要 @return 成功した場合0が返る */ ErrorCode FullyConnect_GPU::Calculate_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppOutputBuffer) { if(this->GetProcessType() == ProcessType::PROCESSTYPE_LEARN && this->GetRuntimeParameterByStructure().UpdateWeigthWithOutputVariance) { // ※とりあえずCPU側で処理. // 基本的に1回しか通らないから処理負荷に影響は与えない・・・はず // 超手抜き U32 PROCTIME_MAX = 5; // 実行最大値 F32 VARIANCE_TOLERANCE = 0.1f; // 分散交差(許容範囲) std::vector<F32> lpTmpOutputBuffer(this->GetBatchSize() * this->outputBufferCount); // バッファを確保 thrust::device_vector<F32> lpTmpWeight_d(this->layerData.pWeightData->GetWeigthSize()); thrust::device_vector<F32> lpTmpBias_d(this->layerData.pWeightData->GetBiasSize()); // バッファをコピー cudaMemcpy(thrust::raw_pointer_cast(&lpTmpWeight_d[0]), this->layerData.pWeightData->GetWeight(), sizeof(F32)*lpTmpWeight_d.size(), cudaMemcpyDeviceToDevice); cudaMemcpy(thrust::raw_pointer_cast(&lpTmpBias_d[0]), this->layerData.pWeightData->GetBias(), sizeof(F32)*lpTmpBias_d.size(), cudaMemcpyDeviceToDevice); U32 procTime = 0; do { // 演算を実行 ErrorCode err = this->CalculateBase(i_lppInputBuffer, o_lppOutputBuffer, thrust::raw_pointer_cast(&lpTmpWeight_d[0]), thrust::raw_pointer_cast(&lpTmpBias_d[0])); if(err != ErrorCode::ERROR_CODE_NONE) return err; // バッファをコピー cudaMemcpy(&lpTmpOutputBuffer[0], &o_lppOutputBuffer[0], sizeof(F32)*lpTmpOutputBuffer.size(), cudaMemcpyDeviceToHost); // 出力の分散を求める F32 variance = 0.0f; F32 average = 0.0f; { // 平均を求める for(U32 outputNum=0; outputNum<lpTmpOutputBuffer.size(); outputNum++) { average += lpTmpOutputBuffer[outputNum]; } average /= lpTmpOutputBuffer.size(); // 分散を求める for(U32 outputNum=0; outputNum<lpTmpOutputBuffer.size(); outputNum++) { variance += (lpTmpOutputBuffer[outputNum] - average) * (lpTmpOutputBuffer[outputNum] - average); } variance /= lpTmpOutputBuffer.size(); } if( abs(variance - 1.0f) < VARIANCE_TOLERANCE) break; // 標準偏差で重みを割って更新する F32 deviation = sqrtf(variance); { thrust::host_vector<F32> lpTmpNeuron = lpTmpWeight_d; thrust::host_vector<F32> lpTmpBias = lpTmpBias_d; for(U32 neuronNum=0; neuronNum<lpTmpNeuron.size(); neuronNum++) { lpTmpNeuron[neuronNum] /= deviation; } for(U32 neuronNum=0; neuronNum<lpTmpBias.size(); neuronNum++) { lpTmpBias[neuronNum] /= deviation; } lpTmpWeight_d = lpTmpNeuron; lpTmpBias_d = lpTmpBias; } procTime++; }while(procTime < 5); // 重みを更新 this->layerData.pWeightData->SetData(thrust::raw_pointer_cast(&lpTmpWeight_d[0]), thrust::raw_pointer_cast(&lpTmpBias_d[0])); } else { ErrorCode err = this->CalculateBase(i_lppInputBuffer, o_lppOutputBuffer, this->layerData.pWeightData->GetWeight(), this->layerData.pWeightData->GetBias()); if(err != ErrorCode::ERROR_CODE_NONE) return err; } return ErrorCode::ERROR_CODE_NONE; } ErrorCode FullyConnect_GPU::CalculateBase(CONST_BATCH_BUFFER_POINTER 
i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppOutputBuffer, const F32* lpWeight, const F32* lpBias) { // バイアスを出力信号にコピーする { for(U32 batchNum=0; batchNum<this->GetBatchSize(); batchNum++) { cudaError_t err = cudaMemcpy( &o_lppOutputBuffer[batchNum * this->outputBufferCount], lpBias, sizeof(F32) * this->neuronCount, cudaMemcpyDeviceToDevice); if(err != 0) return ERROR_CODE_CUDA_COPY_MEMORY; } } // ニューロンT×入力信号 { // C = aAB + bC; // CUBLASは // 0, 4, 8 // 1, 5, 9 // 2, 6, 10 // 3, 7, 11 // のように縦方向にインデックスが進む行列で構成されている F32 alpha = 1.0f; F32 beta = 1.0f; // バイアスがCにコピー済みなのでそのまま利用するために1.0を指定 cublasSgemm( this->cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N, this->neuronCount, // 行列Aの行数 this->GetBatchSize(), // 行列Bの列数 this->inputBufferCount, // 行列Aの列数,行列Bの行数 &alpha, lpWeight, // 行列A this->inputBufferCount, // 行列Aの転置前の行数 i_lppInputBuffer, // 行列B this->inputBufferCount, // 行列Bの転置前の行数 &beta, &o_lppOutputBuffer[0], this->outputBufferCount); } return ErrorCode::ERROR_CODE_NONE; } //================================ // 学習処理 //================================ /** 入力誤差計算をを実行する.学習せずに入力誤差を取得したい場合に使用する. 入力信号、出力信号は直前のCalculateの値を参照する. @param o_lppDInputBuffer 入力誤差差分格納先レイヤー. [GetBatchSize()の戻り値][GetInputBufferCount()の戻り値]の要素数が必要. @param i_lppDOutputBuffer 出力誤差差分=次レイヤーの入力誤差差分. [GetBatchSize()の戻り値][GetOutputBufferCount()の戻り値]の要素数が必要. 直前の計算結果を使用する */ ErrorCode FullyConnect_GPU::CalculateDInput_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppDInputBuffer, CONST_BATCH_BUFFER_POINTER i_lppOutputBuffer, CONST_BATCH_BUFFER_POINTER i_lppDOutputBuffer) { // 入力誤差差分を計算 if(o_lppDInputBuffer) { F32 alpha = 1.0f; F32 beta = 0.0f; cublasSgemm( this->cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, this->inputBufferCount, // 行列Aの行数 this->GetBatchSize(), // 行列Bの列数 this->neuronCount, // 行列Aの列数,行列Bの行数 &alpha, this->layerData.pWeightData->GetWeight(), // 行列A this->inputBufferCount, // 行列Aの転置前の行数 i_lppDOutputBuffer, // 行列B this->neuronCount, // 行列Bの転置前の行数 &beta, o_lppDInputBuffer, this->inputBufferCount); } return ErrorCode::ERROR_CODE_NONE; } /** 学習処理を実行する. 入力信号、出力信号は直前のCalculateの値を参照する. @param i_lppDOutputBuffer 出力誤差差分=次レイヤーの入力誤差差分. [GetBatchSize()の戻り値][GetOutputBufferCount()の戻り値]の要素数が必要. 
直前の計算結果を使用する */ ErrorCode FullyConnect_GPU::Training_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppDInputBuffer, CONST_BATCH_BUFFER_POINTER i_lppOutputBuffer, CONST_BATCH_BUFFER_POINTER i_lppDOutputBuffer) { std::vector<F32> lpDOutputBuffer_h(this->outputBufferCount * this->GetBatchSize()); cudaMemcpy(&lpDOutputBuffer_h[0], i_lppDOutputBuffer, sizeof(F32)*lpDOutputBuffer_h.size(), cudaMemcpyDeviceToHost); std::vector<F32> lpInputBuffer_h(this->inputBufferCount * this->GetBatchSize()); cudaMemcpy(&lpInputBuffer_h[0], i_lppInputBuffer, sizeof(F32)*lpInputBuffer_h.size(), cudaMemcpyDeviceToHost); // 入力誤差計算 Gravisbell::ErrorCode errCode = this->CalculateDInput_device(i_lppInputBuffer, o_lppDInputBuffer, i_lppOutputBuffer, i_lppDOutputBuffer); if(errCode != ErrorCode::ERROR_CODE_NONE) return errCode; // バイアス変化量計算 { F32 alpha = 1.0f; F32 beta = 0; cublasSgemm( this->cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, this->neuronCount, // 行列Aの行数 1, // 行列Bの列数 this->GetBatchSize(), // 行列Aの列数,行列Bの行数 &alpha, i_lppDOutputBuffer, // 行列A this->neuronCount, // 行列Aの転置前の行数 thrust::raw_pointer_cast(&this->lpBiasUpdateVector_d[0]), // 行列B this->GetBatchSize(), // 行列Bの転置前の行数 &beta, thrust::raw_pointer_cast(&this->lpDBias[0]), this->neuronCount); } // ニューロン変化量計算 { // ニューロンの誤差を計算して加算する { F32 alpha = 1.0f; F32 beta = 0; cublasSgemm( this->cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, this->inputBufferCount, // 行列Aの行数 this->neuronCount, // 行列Bの列数 this->GetBatchSize(), // 行列Aの列数,行列Bの行数 &alpha, i_lppInputBuffer, // 行列A this->inputBufferCount, // 行列Aの転置前の行数 i_lppDOutputBuffer, // 行列B this->neuronCount, // 行列Bの転置前の行数 &beta, thrust::raw_pointer_cast(&this->lpDNeuron[0]), this->inputBufferCount); } } // 誤差を反映 this->layerData.pWeightData->UpdateData(thrust::raw_pointer_cast(&this->lpDNeuron[0]), thrust::raw_pointer_cast(&this->lpDBias[0])); return ErrorCode::ERROR_CODE_NONE; } } // Gravisbell; } // Layer; } // NeuralNetwork;
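// CPU reference (illustrative only, not part of either file above) for the forward
// GEMM in CalculateBase: with lda = inputBufferCount and the transposed A operand,
// the weight buffer is read as weight[i + o * inputCount] (inputs contiguous per
// neuron), inputs as in[i + b * inputCount], and the result lands at
// out[o + b * outputCount], seeded with the bias that is memcpy'd in beforehand
// (beta = 1). The function name is made up for this sketch.
static void fully_connect_reference(const float *weight, const float *bias,
                                    const float *in, float *out,
                                    int inputCount, int outputCount, int batchSize) {
  for (int b = 0; b < batchSize; ++b) {
    for (int o = 0; o < outputCount; ++o) {
      float acc = bias[o]; // matches the per-batch bias copy before the Sgemm
      for (int i = 0; i < inputCount; ++i)
        acc += weight[i + o * inputCount] * in[i + b * inputCount];
      out[o + b * outputCount] = acc;
    }
  }
}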
91c2aaa3ed6fe5b034446fffc4bec1b2e28b611b.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "definitions.cuh" #include <time.h> //Number of elements on which to perform CFD unsigned int Ni = 512; // Y elements unsigned int Nj = 512; // X elements unsigned int nIterations = 10000; // No Of Iterations unsigned int kernelVersion =1; // Decides which GPU kernel version to call (Set it to 1 or 2) int main(int argc, char** argv) { //Variables for Timing float cpuTime, gpuTime; // CPU and GPU Pointers ( d_XX : refers to pointer pointing to GPU memory. This is just a convention) float *t = NULL, *t_prev = NULL; float *d_t = NULL,*d_t_prev= NULL; parseCommandLineArguments(argc, (char **)argv); printf("\n Ni= %d, Nj=%d nIteration=%d",Ni,Nj,nIterations); unsigned int size = Ni * Nj * sizeof(float); if(!initializeCPU(&t, &t_prev) ) { printf("\n Error in allocating memory on CPU!!!"); unInitializeCPU(&t, &t_prev); getchar(); return 0; } if (!initializeGPU(&d_t, &d_t_prev)) { printf("\n Error in allocating memory on GPU!!!"); unInitializeCPU(&t, &t_prev); unInitializeGPU(&d_t, &d_t_prev); return 0; } //Perform CFD on CPU performCPUCFD(t,t_prev, &cpuTime); // To temporarily store CPU data. This is just for comparing with GPU output float *tempBuffer = (float*) calloc(Ni*Nj, sizeof(float)); memcpy(tempBuffer, t_prev, size); //Perform CFD on GPU if(!performGPUCFD(d_t,d_t_prev, t, t_prev, &gpuTime)) { printf("\n GPU Kernel failed !!!"); unInitializeCPU(&t, &t_prev); unInitializeGPU(&d_t, &d_t_prev); if(tempBuffer !=NULL) free(tempBuffer); return 0; } printf("\n Is host equal to device = %d", checkHostEqualsDevice(tempBuffer,t)); printf("\n Speedup = %fx", (float)(cpuTime/gpuTime)); unInitializeCPU(&t, &t_prev); unInitializeGPU(&d_t, &d_t_prev); if(tempBuffer !=NULL) free(tempBuffer); printf("\n Finished Processing!!!"); getchar(); } int initializeCPU(float **t, float **t_prev) { *t = (float*) calloc(Ni*Nj, sizeof(float)); *t_prev = (float*) calloc(Ni*Nj, sizeof(float)); if((*t)==NULL || (*t_prev) == NULL) return 0; else return 1; } void unInitializeCPU(float **t, float **t_prev) { if((*t) !=NULL) free(*t); if((*t_prev) != NULL) free(*t_prev); } int initializeGPU(float **d_t, float **d_t_prev) { unsigned int size = Ni * Nj * sizeof(float); // Choose which GPU to run on, change this on a multi-GPU system. hipError_t cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); getchar(); return 0; } // Allocate GPU buffers. cudaStatus = hipMalloc((void**)&(*d_t), size); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); getchar(); return 0; } // Allocate GPU buffers . 
cudaStatus = hipMalloc((void**)&(*d_t_prev), size); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); getchar(); return 0; } // Memset GPU buffers cudaStatus = hipMemset((*d_t),0, size); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemset failed!"); getchar(); return 0; } // Memset GPU buffers cudaStatus = hipMemset((*d_t_prev),0, size); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemset failed!"); getchar(); return 0; } return 1; } void unInitializeGPU(float **d_t, float **d_t_prev) { hipError_t cudaStatus; if((*d_t)!=NULL) cudaStatus = hipFree((*d_t)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipFree failed!"); return; } if((*d_t_prev)!=NULL) cudaStatus = hipFree((*d_t_prev)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipFree failed!"); return; } #if 0 cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); getchar(); return; } #endif } void performCPUCFD(float *t, float *t_prev, float *cpuTime) { float h,x,y; h = 1.0f/(Ni-1); for(unsigned int i=0;i<Ni;i++) { x = i*h; t_prev[i*Nj+0] = x*x; t_prev[i*Nj+(Nj-1)] = x*x + 1.0f; } for(unsigned int j=0;j < Nj; j++) { y = j*h; t_prev[0*Nj+j] = y*y; t_prev[((Ni-1) * Nj) + j] = 1.0f + y*y; } float elapsedTimeInMs = 0.0f; clock_t start = clock(); for(unsigned int k=0;k<nIterations;k++) { for(unsigned int j=1;j<(Nj-1);j++) { for(unsigned int i=1;i<(Ni-1);i++) { t[i*Nj+j] = 0.25f * (t_prev[(i-1)*Nj+j] + t_prev[(i+1)*Nj+j] + t_prev[i*Nj+(j-1)] + t_prev[i*Nj+(j+1)] - 4*h*h); } } float* pingPong = t_prev; t_prev = t; t = pingPong; } clock_t end = clock(); elapsedTimeInMs = (float)((end - start) * 1000 / CLOCKS_PER_SEC); printf("\n CPU Time:: %f ms", elapsedTimeInMs); *cpuTime = elapsedTimeInMs; } int performGPUCFD(float *d_t, float *d_t_prev, float *t, float *t_prev, float*gpuTime) { float h,x,y; const char *str = (char*) malloc(1024); // To store error string //Decide how many blocks per thread and how many blocks per grid dim3 dimBlock(THREADS_PER_BLOCK_X,THREADS_PER_BLOCK_Y); dim3 dimGrid(Nj/dimBlock.x,Ni/dimBlock.y); h = 1.0f/(Ni-1); memset(t_prev, 0, sizeof(float) * Ni * Nj); for(unsigned int i=0;i<Ni;i++) { x = i*h; t_prev[i*Nj+0] = x*x; t_prev[i*Nj+(Nj-1)] = x*x + 1.0f; } for(unsigned int j=0;j < Nj; j++) { y = j*h; t_prev[0*Nj+j] = y*y; t_prev[((Ni-1) * Nj) + j] = 1.0f + y*y; } //Copy data to device hipMemcpy(d_t_prev, t_prev, sizeof(float) * Ni * Nj , hipMemcpyHostToDevice); //Insert event to calculate time hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); //This calls Version 1 of kernel which uses Global memory if(kernelVersion ==1) { printf("\n Inside Kernel ::1\n"); hipEventRecord(start, 0); for(unsigned int k=0;k<nIterations;k++) { // Launch a kernel on the GPU with one thread for each element. hipLaunchKernelGGL(( calculateCFD_V1), dim3(dimGrid),dim3(dimBlock), 0, 0, d_t_prev,d_t, Ni, Nj, h); float* pingPong = d_t_prev; d_t_prev = d_t; d_t = pingPong; } hipEventRecord(stop, 0); hipEventSynchronize(stop); } //This calls Version 2 of kernel which uses optimization by copying data to shared memory else if(kernelVersion ==2) { printf("\n Inside Kernel ::2\n"); hipEventRecord(start, 0); for(unsigned int k=0;k<nIterations;k++) { // Launch a kernel on the GPU with one thread for each element. 
hipLaunchKernelGGL(( calculateCFD_V2), dim3(dimGrid),dim3(dimBlock), 0, 0, d_t_prev,d_t, Ni, Nj, h); float* pingPong = d_t_prev; d_t_prev = d_t; d_t = pingPong; } hipEventRecord(stop, 0); hipEventSynchronize(stop); } hipError_t cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { str = hipGetErrorString(cudaStatus); fprintf(stderr, "CUDA Error!:: %s\n", str); getchar(); return 0; } float elapsedTime; hipEventElapsedTime(&elapsedTime, start, stop); printf("\n GPU Time:: %f ms", elapsedTime); *gpuTime = elapsedTime; cudaStatus = hipMemcpy(t, d_t_prev, sizeof(float) * Ni * Nj , hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); str = hipGetErrorString(cudaStatus); fprintf(stderr, "CUDA Error!:: %s\n", str); getchar(); return 0; } return 1; } int checkHostEqualsDevice(float* o_host, float* o_device) { int flag =1; float tolerance = 0.0001f; //Compare the results for(unsigned int j=0;j<Nj;j++) { for(unsigned int i=0;i<Ni;i++) { if( (o_host[i*Nj+j] - o_device[i*Nj+j]) >= tolerance || (o_host[i*Nj+j] - o_device[i*Nj+j]) <= -tolerance) { printf("\n D=[%f]!=H=[%f] since Diff > tol %f for [%d][%d]",o_device[i*Nj+j], o_host[i*Nj+j],tolerance, i, j); flag =0; getchar(); return flag; } } } return flag; } void parseCommandLineArguments(int argc, char**argv) { if (argc >= 1) { for (int i=1; i < argc; i++) { int bFirstArgIsParam = false; int string_start = 0; while (argv[i][string_start] == '-') string_start++; char *string_argv = &argv[i][string_start]; if (!STRNCASECMP(string_argv, "Ni=", 3)) { bFirstArgIsParam = true; Ni = atoi(&string_argv[3]); continue; } if (!STRNCASECMP(string_argv, "Nj=", 3)) { bFirstArgIsParam = true; Nj = atoi(&string_argv[3]); continue; } if (!STRNCASECMP(string_argv, "iterations=", 11)) { bFirstArgIsParam = true; nIterations = atoi(&string_argv[11]); continue; } if (!STRNCASECMP(string_argv, "kernel=", 7)) { bFirstArgIsParam = true; kernelVersion = atoi(&string_argv[7]); continue; } if (!bFirstArgIsParam) { printf("Invalid arguments\n"); for (int n=0; n < argc; n++) { printf("argv[%d] = %s\n", n, argv[n]); } printf("\n"); exit(0); } } } if(( Ni % THREADS_PER_BLOCK_Y != 0) || (Nj % THREADS_PER_BLOCK_X != 0)) { fprintf(stderr, "Please specify Ni & Nj as multiple of 16 !!!!"); getchar(); exit(0); } }
91c2aaa3ed6fe5b034446fffc4bec1b2e28b611b.cu
#include <iostream> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "definitions.cuh" #include <time.h> //Number of elements on which to perform CFD unsigned int Ni = 512; // Y elements unsigned int Nj = 512; // X elements unsigned int nIterations = 10000; // No Of Iterations unsigned int kernelVersion =1; // Decides which GPU kernel version to call (Set it to 1 or 2) int main(int argc, char** argv) { //Variables for Timing float cpuTime, gpuTime; // CPU and GPU Pointers ( d_XX : refers to pointer pointing to GPU memory. This is just a convention) float *t = NULL, *t_prev = NULL; float *d_t = NULL,*d_t_prev= NULL; parseCommandLineArguments(argc, (char **)argv); printf("\n Ni= %d, Nj=%d nIteration=%d",Ni,Nj,nIterations); unsigned int size = Ni * Nj * sizeof(float); if(!initializeCPU(&t, &t_prev) ) { printf("\n Error in allocating memory on CPU!!!"); unInitializeCPU(&t, &t_prev); getchar(); return 0; } if (!initializeGPU(&d_t, &d_t_prev)) { printf("\n Error in allocating memory on GPU!!!"); unInitializeCPU(&t, &t_prev); unInitializeGPU(&d_t, &d_t_prev); return 0; } //Perform CFD on CPU performCPUCFD(t,t_prev, &cpuTime); // To temporarily store CPU data. This is just for comparing with GPU output float *tempBuffer = (float*) calloc(Ni*Nj, sizeof(float)); memcpy(tempBuffer, t_prev, size); //Perform CFD on GPU if(!performGPUCFD(d_t,d_t_prev, t, t_prev, &gpuTime)) { printf("\n GPU Kernel failed !!!"); unInitializeCPU(&t, &t_prev); unInitializeGPU(&d_t, &d_t_prev); if(tempBuffer !=NULL) free(tempBuffer); return 0; } printf("\n Is host equal to device = %d", checkHostEqualsDevice(tempBuffer,t)); printf("\n Speedup = %fx", (float)(cpuTime/gpuTime)); unInitializeCPU(&t, &t_prev); unInitializeGPU(&d_t, &d_t_prev); if(tempBuffer !=NULL) free(tempBuffer); printf("\n Finished Processing!!!"); getchar(); } int initializeCPU(float **t, float **t_prev) { *t = (float*) calloc(Ni*Nj, sizeof(float)); *t_prev = (float*) calloc(Ni*Nj, sizeof(float)); if((*t)==NULL || (*t_prev) == NULL) return 0; else return 1; } void unInitializeCPU(float **t, float **t_prev) { if((*t) !=NULL) free(*t); if((*t_prev) != NULL) free(*t_prev); } int initializeGPU(float **d_t, float **d_t_prev) { unsigned int size = Ni * Nj * sizeof(float); // Choose which GPU to run on, change this on a multi-GPU system. cudaError_t cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); getchar(); return 0; } // Allocate GPU buffers. cudaStatus = cudaMalloc((void**)&(*d_t), size); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); getchar(); return 0; } // Allocate GPU buffers . 
cudaStatus = cudaMalloc((void**)&(*d_t_prev), size); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); getchar(); return 0; } // Memset GPU buffers cudaStatus = cudaMemset((*d_t),0, size); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemset failed!"); getchar(); return 0; } // Memset GPU buffers cudaStatus = cudaMemset((*d_t_prev),0, size); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemset failed!"); getchar(); return 0; } return 1; } void unInitializeGPU(float **d_t, float **d_t_prev) { cudaError_t cudaStatus; if((*d_t)!=NULL) cudaStatus = cudaFree((*d_t)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaFree failed!"); return; } if((*d_t_prev)!=NULL) cudaStatus = cudaFree((*d_t_prev)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaFree failed!"); return; } #if 0 cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); getchar(); return; } #endif } void performCPUCFD(float *t, float *t_prev, float *cpuTime) { float h,x,y; h = 1.0f/(Ni-1); for(unsigned int i=0;i<Ni;i++) { x = i*h; t_prev[i*Nj+0] = x*x; t_prev[i*Nj+(Nj-1)] = x*x + 1.0f; } for(unsigned int j=0;j < Nj; j++) { y = j*h; t_prev[0*Nj+j] = y*y; t_prev[((Ni-1) * Nj) + j] = 1.0f + y*y; } float elapsedTimeInMs = 0.0f; clock_t start = clock(); for(unsigned int k=0;k<nIterations;k++) { for(unsigned int j=1;j<(Nj-1);j++) { for(unsigned int i=1;i<(Ni-1);i++) { t[i*Nj+j] = 0.25f * (t_prev[(i-1)*Nj+j] + t_prev[(i+1)*Nj+j] + t_prev[i*Nj+(j-1)] + t_prev[i*Nj+(j+1)] - 4*h*h); } } float* pingPong = t_prev; t_prev = t; t = pingPong; } clock_t end = clock(); elapsedTimeInMs = (float)((end - start) * 1000 / CLOCKS_PER_SEC); printf("\n CPU Time:: %f ms", elapsedTimeInMs); *cpuTime = elapsedTimeInMs; } int performGPUCFD(float *d_t, float *d_t_prev, float *t, float *t_prev, float*gpuTime) { float h,x,y; const char *str = (char*) malloc(1024); // To store error string //Decide how many blocks per thread and how many blocks per grid dim3 dimBlock(THREADS_PER_BLOCK_X,THREADS_PER_BLOCK_Y); dim3 dimGrid(Nj/dimBlock.x,Ni/dimBlock.y); h = 1.0f/(Ni-1); memset(t_prev, 0, sizeof(float) * Ni * Nj); for(unsigned int i=0;i<Ni;i++) { x = i*h; t_prev[i*Nj+0] = x*x; t_prev[i*Nj+(Nj-1)] = x*x + 1.0f; } for(unsigned int j=0;j < Nj; j++) { y = j*h; t_prev[0*Nj+j] = y*y; t_prev[((Ni-1) * Nj) + j] = 1.0f + y*y; } //Copy data to device cudaMemcpy(d_t_prev, t_prev, sizeof(float) * Ni * Nj , cudaMemcpyHostToDevice); //Insert event to calculate time cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); //This calls Version 1 of kernel which uses Global memory if(kernelVersion ==1) { printf("\n Inside Kernel ::1\n"); cudaEventRecord(start, 0); for(unsigned int k=0;k<nIterations;k++) { // Launch a kernel on the GPU with one thread for each element. calculateCFD_V1<<<dimGrid,dimBlock>>>(d_t_prev,d_t, Ni, Nj, h); float* pingPong = d_t_prev; d_t_prev = d_t; d_t = pingPong; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); } //This calls Version 2 of kernel which uses optimization by copying data to shared memory else if(kernelVersion ==2) { printf("\n Inside Kernel ::2\n"); cudaEventRecord(start, 0); for(unsigned int k=0;k<nIterations;k++) { // Launch a kernel on the GPU with one thread for each element. 
calculateCFD_V2<<<dimGrid,dimBlock>>>(d_t_prev,d_t, Ni, Nj, h); float* pingPong = d_t_prev; d_t_prev = d_t; d_t = pingPong; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); } cudaError_t cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { str = cudaGetErrorString(cudaStatus); fprintf(stderr, "CUDA Error!:: %s\n", str); getchar(); return 0; } float elapsedTime; cudaEventElapsedTime(&elapsedTime, start, stop); printf("\n GPU Time:: %f ms", elapsedTime); *gpuTime = elapsedTime; cudaStatus = cudaMemcpy(t, d_t_prev, sizeof(float) * Ni * Nj , cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); str = cudaGetErrorString(cudaStatus); fprintf(stderr, "CUDA Error!:: %s\n", str); getchar(); return 0; } return 1; } int checkHostEqualsDevice(float* o_host, float* o_device) { int flag =1; float tolerance = 0.0001f; //Compare the results for(unsigned int j=0;j<Nj;j++) { for(unsigned int i=0;i<Ni;i++) { if( (o_host[i*Nj+j] - o_device[i*Nj+j]) >= tolerance || (o_host[i*Nj+j] - o_device[i*Nj+j]) <= -tolerance) { printf("\n D=[%f]!=H=[%f] since Diff > tol %f for [%d][%d]",o_device[i*Nj+j], o_host[i*Nj+j],tolerance, i, j); flag =0; getchar(); return flag; } } } return flag; } void parseCommandLineArguments(int argc, char**argv) { if (argc >= 1) { for (int i=1; i < argc; i++) { int bFirstArgIsParam = false; int string_start = 0; while (argv[i][string_start] == '-') string_start++; char *string_argv = &argv[i][string_start]; if (!STRNCASECMP(string_argv, "Ni=", 3)) { bFirstArgIsParam = true; Ni = atoi(&string_argv[3]); continue; } if (!STRNCASECMP(string_argv, "Nj=", 3)) { bFirstArgIsParam = true; Nj = atoi(&string_argv[3]); continue; } if (!STRNCASECMP(string_argv, "iterations=", 11)) { bFirstArgIsParam = true; nIterations = atoi(&string_argv[11]); continue; } if (!STRNCASECMP(string_argv, "kernel=", 7)) { bFirstArgIsParam = true; kernelVersion = atoi(&string_argv[7]); continue; } if (!bFirstArgIsParam) { printf("Invalid arguments\n"); for (int n=0; n < argc; n++) { printf("argv[%d] = %s\n", n, argv[n]); } printf("\n"); exit(0); } } } if(( Ni % THREADS_PER_BLOCK_Y != 0) || (Nj % THREADS_PER_BLOCK_X != 0)) { fprintf(stderr, "Please specify Ni & Nj as multiple of 16 !!!!"); getchar(); exit(0); } }
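// The kernels launched above (calculateCFD_V1 / calculateCFD_V2) are declared in
// definitions.cuh, which is not part of this pair. A plausible global-memory
// version of V1 that mirrors the CPU stencil in performCPUCFD (one thread per
// interior node, Jacobi update with ping-pong buffers) might look like the sketch
// below; the _sketch suffix marks it as an assumption, not the shipped kernel.
__global__ void calculateCFD_V1_sketch(const float *t_prev, float *t,
                                       unsigned int Ni, unsigned int Nj, float h) {
  unsigned int i = blockIdx.y * blockDim.y + threadIdx.y; // row (Y)
  unsigned int j = blockIdx.x * blockDim.x + threadIdx.x; // column (X)
  if (i > 0 && i < Ni - 1 && j > 0 && j < Nj - 1) {
    t[i * Nj + j] = 0.25f * (t_prev[(i - 1) * Nj + j] + t_prev[(i + 1) * Nj + j] +
                             t_prev[i * Nj + (j - 1)] + t_prev[i * Nj + (j + 1)] -
                             4.0f * h * h);
  }
}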
aee44ee10a54d4c0a8aa4daa65ceb52404fa48ef.hip
// !!! This is a file automatically generated by hipify!!! // http://www.caam.rice.edu/~timwar/CAAM210/Flows.html #include <math.h> #include <stdlib.h> #include <stdio.h> extern "C" { #include "png_util.h" } #define dfloat float #define FLUID 0 #define WALL 1 #define NSPECIES 9 #include "hip/hip_runtime.h" // loop up 1D array index from 2D node coordinates __host__ __device__ int idx(int N, int n, int m){ return n + m*(N+2); } void lbmInput(const char *imageFileName, dfloat threshold, int *outN, int *outM, unsigned char **rgb, unsigned char **alpha, int **nodeType){ int n,m, N,M; // read png file read_png(imageFileName, &N, &M, rgb, alpha); // pad to guarantee space around obstacle and extend the wake int Npad = 3*N; int Mpad = 2*M; if(Npad>8192) Npad = 8192; if(Mpad>8192) Mpad = 8192; // threshold walls based on gray scale *nodeType = (int*) calloc((Npad+2)*(Mpad+2), sizeof(int)); // mark pixels by gray scale intensity unsigned char *rgbPad = (unsigned char*) calloc(3*(Npad+2)*(Mpad+2), sizeof(unsigned char)); unsigned char *alphaPad = (unsigned char*) calloc((Npad+2)*(Mpad+2), sizeof(unsigned char)); int wallCount = 0; for(m=1;m<=M;++m){ for(n=1;n<=N;++n){ int offset = ((n-1)+(m-1)*N); dfloat r = (*rgb)[3*offset+0]; dfloat g = (*rgb)[3*offset+1]; dfloat b = (*rgb)[3*offset+2]; dfloat a = (*alpha) ? (*alpha)[offset]:255; // center image in padded region (including halo zone) int hoffset = N/4, yoffset = M/2; int id = idx(Npad,n+hoffset,m+yoffset); if(a==0) (*nodeType)[id] = FLUID; else (*nodeType)[id] = WALL*(sqrt(r*r+g*g+b*b)<threshold); wallCount += (*nodeType)[id]; rgbPad[3*id+0] = r; rgbPad[3*id+1] = g; rgbPad[3*id+2] = b; alphaPad[id] = 255; } } for(n=1;n<=Npad;++n){ (*nodeType)[idx(Npad,n,1)] = WALL; (*nodeType)[idx(Npad,n,Mpad)] = WALL; } free(*rgb); free(*alpha); *rgb = rgbPad; *alpha = alphaPad; printf("wallCount = %d (%g percent of %d x %d nodes)\n", wallCount, 100.*((dfloat)wallCount/((Npad+2)*(Mpad+2))), Npad, Mpad); *outN = Npad; *outM = Mpad; } void lbmOutput(const char *fname, const int *nodeType, unsigned char *rgb, unsigned char *alpha, const dfloat c, const dfloat dx, int N, int M, const dfloat *f){ int n,m,s; FILE *bah = fopen(fname, "w"); // compute vorticity dfloat *Ux = (dfloat*) calloc((N+2)*(M+2), sizeof(dfloat)); dfloat *Uy = (dfloat*) calloc((N+2)*(M+2), sizeof(dfloat)); dfloat fnm[NSPECIES]; for(m=1;m<=M;++m){ for(n=1;n<=N;++n){ int base = idx(N, n, m); for(s=0;s<NSPECIES;++s) fnm[s] = f[base+s*(N+2)*(M+2)]; const dfloat rho = fnm[0]+fnm[1]+fnm[2]+fnm[3]+fnm[4]+fnm[5]+fnm[6]+fnm[7]+fnm[8]; // macroscopic momentum Ux[base] = (fnm[1] - fnm[3] + fnm[5] - fnm[6] - fnm[7] + fnm[8])*c/rho; Uy[base] = (fnm[2] - fnm[4] + fnm[5] + fnm[6] - fnm[7] - fnm[8])*c/rho; } } dfloat plotMin = -4, plotMax = 4; for(m=1;m<=M;++m){ for(n=1;n<=N;++n){ int id = idx(N,n,m); // over write pixels in fluid region if(nodeType[id]==FLUID){ unsigned char r,g,b,a; // reconstruct macroscopic density dfloat rho = 0; for(s=0;s<NSPECIES;++s) rho += f[id+s*(N+2)*(M+2)]; rho = ((rho-plotMin)/(plotMax-plotMin)); // rescale dfloat dUxdy = (Ux[idx(N,n,m+1)]-Ux[idx(N,n,m-1)])/(2.*dx); dfloat dUydx = (Uy[idx(N,n+1,m)]-Uy[idx(N,n-1,m)])/(2.*dx); dfloat curlU = dUydx-dUxdy; curlU = ((curlU-plotMin)/(plotMax-plotMin)); #if 0 r = 255*curlU; g = 255*curlU; b = 255*curlU; a = 255; #else a = 255; if(curlU>.55){ r = 255*(curlU-.55)/.45; g = 0; b = 0; } else if(curlU<.45){ r = 0; g = 0; b = 255*(.45-curlU)/.45; } else{ r = 255; g = 255; b = 255; } #endif rgb[idx(N,n,m)*3+0] = r; rgb[idx(N,n,m)*3+1] = g; 
rgb[idx(N,n,m)*3+2] = b; alpha[idx(N,n,m)] = a; } } } write_png(bah, N+2, M+2, rgb, alpha); fclose(bah); free(Ux); free(Uy); } // weights used to compute equilibrium distribution (post collision) const dfloat w0 = 4.f/9.f, w1 = 1.f/9.f, w2 = 1.f/9.f, w3 = 1.f/9.f; const dfloat w4 = 1.f/9.f, w5 = 1.f/36.f, w6 = 1.f/36.f, w7 = 1.f/36.f, w8 = 1.f/36.f; __host__ __device__ void lbmEquilibrium(const dfloat c, const dfloat rho, const dfloat Ux, const dfloat Uy, dfloat * feq){ // resolve macroscopic velocity into lattice particle velocity directions const dfloat U2 = Ux*Ux+Uy*Uy; const dfloat v0 = 0; const dfloat v1 = +Ux/c; const dfloat v2 = +Uy/c; const dfloat v3 = -Ux/c; const dfloat v4 = -Uy/c; const dfloat v5 = (+Ux+Uy)/c; const dfloat v6 = (-Ux+Uy)/c; const dfloat v7 = (-Ux-Uy)/c; const dfloat v8 = (+Ux-Uy)/c; // compute LBM post-collisional feq[0] = rho*w0*(1.f + 3.f*v0 + 4.5f*v0*v0 - 1.5f*U2/(c*c)); feq[1] = rho*w1*(1.f + 3.f*v1 + 4.5f*v1*v1 - 1.5f*U2/(c*c)); feq[2] = rho*w2*(1.f + 3.f*v2 + 4.5f*v2*v2 - 1.5f*U2/(c*c)); feq[3] = rho*w3*(1.f + 3.f*v3 + 4.5f*v3*v3 - 1.5f*U2/(c*c)); feq[4] = rho*w4*(1.f + 3.f*v4 + 4.5f*v4*v4 - 1.5f*U2/(c*c)); feq[5] = rho*w5*(1.f + 3.f*v5 + 4.5f*v5*v5 - 1.5f*U2/(c*c)); feq[6] = rho*w6*(1.f + 3.f*v6 + 4.5f*v6*v6 - 1.5f*U2/(c*c)); feq[7] = rho*w7*(1.f + 3.f*v7 + 4.5f*v7*v7 - 1.5f*U2/(c*c)); feq[8] = rho*w8*(1.f + 3.f*v8 + 4.5f*v8*v8 - 1.5f*U2/(c*c)); } #define TX 32 #define TY 8 // perform lattice streaming and collision steps __global__ void lbmUpdate(const int N, // number of nodes in x const int M, // number of nodes in y const dfloat c, // speed of sound const dfloat * __restrict__ tau, // relaxation rate const int * __restrict__ nodeType, // (N+2) x (M+2) node types const dfloat * __restrict__ f, // (N+2) x (M+2) x 9 fields before streaming and collisions dfloat * __restrict__ fnew){ // (N+2) x (M+2) x 9 fields after streaming and collisions // number of nodes in whole array including halo int Nall = (N+2)*(M+2); // loop over all non-halo nodes in lattice int n = 1 + threadIdx.x + blockIdx.x*TX; int m = 1 + threadIdx.y + blockIdx.y*TY; if(m<M+1 && n<=N+1){ // physics paramaters dfloat tauinv = 1.f/tau[idx(N,n,m)]; // discover type of node (WALL or FLUID) const int nt = nodeType[idx(N,n,m)]; dfloat fnm[NSPECIES]; // OUTFLOW if(n==N+1){ fnm[0] = f[idx(N,n, m) + 0*Nall]; // stationary fnm[1] = f[idx(N,n-1,m) + 1*Nall]; // E bound from W fnm[2] = f[idx(N,n,m-1) + 2*Nall]; // N bound from S fnm[3] = f[idx(N,n,m) + 3*Nall]; // W bound from E fnm[4] = f[idx(N,n,m+1) + 4*Nall]; // S bound from N fnm[5] = f[idx(N,n-1,m-1) + 5*Nall]; // NE bound from SW fnm[6] = f[idx(N,n,m-1) + 6*Nall]; // NW bound from SE fnm[7] = f[idx(N,n,m+1) + 7*Nall]; // SW bound from NE fnm[8] = f[idx(N,n-1,m+1) + 8*Nall]; // SE bound from NW } else if(nt == FLUID){ fnm[0] = f[idx(N,n, m) + 0*Nall]; // stationary fnm[1] = f[idx(N,n-1,m) + 1*Nall]; // E bound from W fnm[2] = f[idx(N,n,m-1) + 2*Nall]; // N bound from S fnm[3] = f[idx(N,n+1,m) + 3*Nall]; // W bound from E fnm[4] = f[idx(N,n,m+1) + 4*Nall]; // S bound from N fnm[5] = f[idx(N,n-1,m-1) + 5*Nall]; // NE bound from SW fnm[6] = f[idx(N,n+1,m-1) + 6*Nall]; // NW bound from SE fnm[7] = f[idx(N,n+1,m+1) + 7*Nall]; // SW bound from NE fnm[8] = f[idx(N,n-1,m+1) + 8*Nall]; // SE bound from NW } else{ // WALL reflects particles fnm[0] = f[idx(N,n,m) + 0*Nall]; // stationary fnm[1] = f[idx(N,n,m) + 3*Nall]; // E bound from W fnm[2] = f[idx(N,n,m) + 4*Nall]; // N bound from S fnm[3] = f[idx(N,n,m) + 1*Nall]; // W bound from E fnm[4] = 
f[idx(N,n,m) + 2*Nall]; // S bound from N fnm[5] = f[idx(N,n,m) + 7*Nall]; // NE bound from SW fnm[6] = f[idx(N,n,m) + 8*Nall]; // NW bound from SE fnm[7] = f[idx(N,n,m) + 5*Nall]; // SW bound from NE fnm[8] = f[idx(N,n,m) + 6*Nall]; // SE bound from NW } // macroscopic density const dfloat rho = fnm[0]+fnm[1]+fnm[2]+fnm[3]+fnm[4]+fnm[5]+fnm[6]+fnm[7]+fnm[8]; // if(rho<1e-4){ printf("rho(%d,%d)=%g\n", n,m,rho); exit(-1); } // macroscopic momentum const dfloat delta2 = 1e-8; const dfloat Ux = (fnm[1] - fnm[3] + fnm[5] - fnm[6] - fnm[7] + fnm[8])*c/sqrt(rho*rho+delta2); const dfloat Uy = (fnm[2] - fnm[4] + fnm[5] + fnm[6] - fnm[7] - fnm[8])*c/sqrt(rho*rho+delta2); // compute equilibrium distribution dfloat feq[NSPECIES]; lbmEquilibrium(c, rho, Ux, Uy, feq); // MRT stabilization const dfloat g0 = 1.f, g1 = -2.f, g2 = -2.f, g3 = -2.f, g4 = -2.f; const dfloat g5 = 4.f, g6 = 4.f, g7 = 4.f, g8 = 4.f; const dfloat R = g0*fnm[0] + g1*fnm[1] + g2*fnm[2]+ g3*fnm[3] + g4*fnm[4] + g5*fnm[5] + g6*fnm[6] + g7*fnm[7] + g8*fnm[8]; // post collision densities fnm[0] -= tauinv*(fnm[0]-feq[0]) + (1.f-tauinv)*w0*g0*R*0.25f; fnm[1] -= tauinv*(fnm[1]-feq[1]) + (1.f-tauinv)*w1*g1*R*0.25f; fnm[2] -= tauinv*(fnm[2]-feq[2]) + (1.f-tauinv)*w2*g2*R*0.25f; fnm[3] -= tauinv*(fnm[3]-feq[3]) + (1.f-tauinv)*w3*g3*R*0.25f; fnm[4] -= tauinv*(fnm[4]-feq[4]) + (1.f-tauinv)*w4*g4*R*0.25f; fnm[5] -= tauinv*(fnm[5]-feq[5]) + (1.f-tauinv)*w5*g5*R*0.25f; fnm[6] -= tauinv*(fnm[6]-feq[6]) + (1.f-tauinv)*w6*g6*R*0.25f; fnm[7] -= tauinv*(fnm[7]-feq[7]) + (1.f-tauinv)*w7*g7*R*0.25f; fnm[8] -= tauinv*(fnm[8]-feq[8]) + (1.f-tauinv)*w8*g8*R*0.25f; // store new densities const int base = idx(N,n,m); fnew[base+0*Nall] = fnm[0]; fnew[base+1*Nall] = fnm[1]; fnew[base+2*Nall] = fnm[2]; fnew[base+3*Nall] = fnm[3]; fnew[base+4*Nall] = fnm[4]; fnew[base+5*Nall] = fnm[5]; fnew[base+6*Nall] = fnm[6]; fnew[base+7*Nall] = fnm[7]; fnew[base+8*Nall] = fnm[8]; } } void lbmCheck(int N, int M, dfloat *f){ int n,m,s; int nanCount = 0; for(s=0;s<NSPECIES;++s){ for(m=0;m<=M+1;++m){ for(n=0;n<=N+1;++n){ nanCount += isnan(f[idx(N,n,m)+s*(N+2)*(M+2)]); } } } if(nanCount){ printf("found %d nans\n", nanCount); exit(-1); } } // set initial conditions (use uniform flow f everywhere) void lbmInitialConditions(dfloat c, int N, int M, int *nodeType, dfloat *f){ int n,m; dfloat feqIC[NSPECIES]; dfloat feqWALL[NSPECIES]; dfloat rhoIC = 1.; dfloat UxIC = 1.; dfloat UyIC = 0.; lbmEquilibrium(c, rhoIC, 0., 0., feqWALL); lbmEquilibrium(c, rhoIC, UxIC, UyIC, feqIC); for(m=0;m<=M+1;++m){ for(n=0;n<=N+1;++n){ int base = idx(N, n, m); int s; if(n==0 || m==0 || m==M+1){ for(s=0;s<NSPECIES;++s){ f[idx(N,n,m)+s*(N+2)*(M+2)] = feqIC[s]; } } else{ for(s=0;s<NSPECIES;++s){ f[idx(N,n,m)+s*(N+2)*(M+2)] = feqWALL[s]; } } } } } int main(int argc, char **argv){ if(argc!=3){ printf("usage: ./cudaLBM foo.png threshold\n"); exit(-1); } // read threshold dfloat threshold = atof(argv[2]); char *imageFileName = strdup(argv[1]); int N, M; // size of lattice unsigned char *rgb, *alpha; int *nodeType; lbmInput(imageFileName, threshold, &N, &M, &rgb, &alpha, &nodeType); // physical parameters dfloat dx = .01; // lattice node spacings dfloat dt = dx*.1; // time step (also determines Mach number) dfloat c = dx/dt; // speed of sound dfloat tau = .8; // relaxation rate dfloat Reynolds = 2./((tau-.5)*c*c*dt/3.); printf("Reynolds number %g\n", Reynolds); // create lattice storage dfloat *h_f = (dfloat*) calloc((N+2)*(M+2)*NSPECIES, sizeof(dfloat)); dfloat *h_fnew = (dfloat*) calloc((N+2)*(M+2)*NSPECIES, 
sizeof(dfloat)); dfloat *h_tau = (dfloat*) calloc((N+2)*(M+2), sizeof(dfloat)); // set initial flow densities lbmInitialConditions(c, N, M, nodeType, h_f); lbmInitialConditions(c, N, M, nodeType, h_fnew); // set tau based on n index dfloat xo = .95; int n,m; for(m=0;m<=M+1;++m){ for(n=0;n<=N+1;++n){ dfloat x = ((double)n)/N; dfloat taunm = tau*(1 + 4*(1+tanh(20*(x-xo)))); h_tau[idx(N,n,m)] = taunm; } } // DEVICE storage dfloat *c_f, *c_fnew, *c_tau; int *c_nodeType; hipMalloc(&c_f, (N+2)*(M+2)*NSPECIES*sizeof(dfloat)); hipMalloc(&c_fnew, (N+2)*(M+2)*NSPECIES*sizeof(dfloat)); hipMalloc(&c_nodeType, (N+2)*(M+2)*sizeof(int)); hipMalloc(&c_tau, (N+2)*(M+2)*sizeof(dfloat)); hipMemcpy(c_f, h_f, (N+2)*(M+2)*NSPECIES*sizeof(dfloat), hipMemcpyHostToDevice); hipMemcpy(c_fnew, h_fnew, (N+2)*(M+2)*NSPECIES*sizeof(dfloat), hipMemcpyHostToDevice); hipMemcpy(c_nodeType, nodeType, (N+2)*(M+2)*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(c_tau, h_tau, (N+2)*(M+2)*sizeof(dfloat), hipMemcpyHostToDevice); int Nsteps = 480000/2, tstep = 0, iostep = 100; // time step for(tstep=0;tstep<Nsteps;++tstep){ // perform two updates dim3 T(TX,TY,1); dim3 B( (N+1+TX-1)/TX, (M+1+TY-1)/TY, 1); hipLaunchKernelGGL(( lbmUpdate) , dim3(B), dim3(T) , 0, 0, N, M, c, c_tau, c_nodeType, c_f, c_fnew); hipLaunchKernelGGL(( lbmUpdate) , dim3(B), dim3(T) , 0, 0, N, M, c, c_tau, c_nodeType, c_fnew, c_f); if(!(tstep%iostep)){ // output an image every iostep printf("tstep = %d\n", tstep); char fname[BUFSIZ]; sprintf(fname, "bah%06d.png", tstep/iostep); hipMemcpy(h_f, c_f, (N+2)*(M+2)*NSPECIES*sizeof(dfloat), hipMemcpyDeviceToHost); lbmOutput(fname, nodeType, rgb, alpha, c, dx, N, M, h_f); lbmCheck(N,M,h_f); } } // output final result as image hipMemcpy(h_f, c_f, (N+2)*(M+2)*NSPECIES*sizeof(dfloat), hipMemcpyDeviceToHost); lbmOutput("bahFinal.png", nodeType, rgb, alpha, c, dx, N, M, h_f); exit(0); return 0; }
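// (illustrative sketch, not part of the original file) The hipified source above differs
// from the CUDA original that follows only in API spelling: cuda.h -> hip/hip_runtime.h,
// cudaMalloc/cudaMemcpy -> hipMalloc/hipMemcpy, and the kernel launch syntax shown here.
// Grid sizing and kernel arguments are copied verbatim from main() above.
static void launchUpdate(int N, int M, dfloat c, dfloat *c_tau, int *c_nodeType,
                         dfloat *c_f, dfloat *c_fnew){
  dim3 T(TX, TY, 1);
  dim3 B((N + 1 + TX - 1) / TX, (M + 1 + TY - 1) / TY, 1);
  // CUDA form (see the .cu version below):
  //   lbmUpdate<<<B, T>>>(N, M, c, c_tau, c_nodeType, c_f, c_fnew);
  // HIP form emitted by hipify; the two extra 0 arguments are the dynamic shared-memory
  // size in bytes and the stream:
  hipLaunchKernelGGL(lbmUpdate, dim3(B), dim3(T), 0, 0, N, M, c, c_tau, c_nodeType, c_f, c_fnew);
}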
aee44ee10a54d4c0a8aa4daa65ceb52404fa48ef.cu
// http://www.caam.rice.edu/~timwar/CAAM210/Flows.html #include <math.h> #include <stdlib.h> #include <stdio.h> extern "C" { #include "png_util.h" } #define dfloat float #define FLUID 0 #define WALL 1 #define NSPECIES 9 #include "cuda.h" // loop up 1D array index from 2D node coordinates __host__ __device__ int idx(int N, int n, int m){ return n + m*(N+2); } void lbmInput(const char *imageFileName, dfloat threshold, int *outN, int *outM, unsigned char **rgb, unsigned char **alpha, int **nodeType){ int n,m, N,M; // read png file read_png(imageFileName, &N, &M, rgb, alpha); // pad to guarantee space around obstacle and extend the wake int Npad = 3*N; int Mpad = 2*M; if(Npad>8192) Npad = 8192; if(Mpad>8192) Mpad = 8192; // threshold walls based on gray scale *nodeType = (int*) calloc((Npad+2)*(Mpad+2), sizeof(int)); // mark pixels by gray scale intensity unsigned char *rgbPad = (unsigned char*) calloc(3*(Npad+2)*(Mpad+2), sizeof(unsigned char)); unsigned char *alphaPad = (unsigned char*) calloc((Npad+2)*(Mpad+2), sizeof(unsigned char)); int wallCount = 0; for(m=1;m<=M;++m){ for(n=1;n<=N;++n){ int offset = ((n-1)+(m-1)*N); dfloat r = (*rgb)[3*offset+0]; dfloat g = (*rgb)[3*offset+1]; dfloat b = (*rgb)[3*offset+2]; dfloat a = (*alpha) ? (*alpha)[offset]:255; // center image in padded region (including halo zone) int hoffset = N/4, yoffset = M/2; int id = idx(Npad,n+hoffset,m+yoffset); if(a==0) (*nodeType)[id] = FLUID; else (*nodeType)[id] = WALL*(sqrt(r*r+g*g+b*b)<threshold); wallCount += (*nodeType)[id]; rgbPad[3*id+0] = r; rgbPad[3*id+1] = g; rgbPad[3*id+2] = b; alphaPad[id] = 255; } } for(n=1;n<=Npad;++n){ (*nodeType)[idx(Npad,n,1)] = WALL; (*nodeType)[idx(Npad,n,Mpad)] = WALL; } free(*rgb); free(*alpha); *rgb = rgbPad; *alpha = alphaPad; printf("wallCount = %d (%g percent of %d x %d nodes)\n", wallCount, 100.*((dfloat)wallCount/((Npad+2)*(Mpad+2))), Npad, Mpad); *outN = Npad; *outM = Mpad; } void lbmOutput(const char *fname, const int *nodeType, unsigned char *rgb, unsigned char *alpha, const dfloat c, const dfloat dx, int N, int M, const dfloat *f){ int n,m,s; FILE *bah = fopen(fname, "w"); // compute vorticity dfloat *Ux = (dfloat*) calloc((N+2)*(M+2), sizeof(dfloat)); dfloat *Uy = (dfloat*) calloc((N+2)*(M+2), sizeof(dfloat)); dfloat fnm[NSPECIES]; for(m=1;m<=M;++m){ for(n=1;n<=N;++n){ int base = idx(N, n, m); for(s=0;s<NSPECIES;++s) fnm[s] = f[base+s*(N+2)*(M+2)]; const dfloat rho = fnm[0]+fnm[1]+fnm[2]+fnm[3]+fnm[4]+fnm[5]+fnm[6]+fnm[7]+fnm[8]; // macroscopic momentum Ux[base] = (fnm[1] - fnm[3] + fnm[5] - fnm[6] - fnm[7] + fnm[8])*c/rho; Uy[base] = (fnm[2] - fnm[4] + fnm[5] + fnm[6] - fnm[7] - fnm[8])*c/rho; } } dfloat plotMin = -4, plotMax = 4; for(m=1;m<=M;++m){ for(n=1;n<=N;++n){ int id = idx(N,n,m); // over write pixels in fluid region if(nodeType[id]==FLUID){ unsigned char r,g,b,a; // reconstruct macroscopic density dfloat rho = 0; for(s=0;s<NSPECIES;++s) rho += f[id+s*(N+2)*(M+2)]; rho = ((rho-plotMin)/(plotMax-plotMin)); // rescale dfloat dUxdy = (Ux[idx(N,n,m+1)]-Ux[idx(N,n,m-1)])/(2.*dx); dfloat dUydx = (Uy[idx(N,n+1,m)]-Uy[idx(N,n-1,m)])/(2.*dx); dfloat curlU = dUydx-dUxdy; curlU = ((curlU-plotMin)/(plotMax-plotMin)); #if 0 r = 255*curlU; g = 255*curlU; b = 255*curlU; a = 255; #else a = 255; if(curlU>.55){ r = 255*(curlU-.55)/.45; g = 0; b = 0; } else if(curlU<.45){ r = 0; g = 0; b = 255*(.45-curlU)/.45; } else{ r = 255; g = 255; b = 255; } #endif rgb[idx(N,n,m)*3+0] = r; rgb[idx(N,n,m)*3+1] = g; rgb[idx(N,n,m)*3+2] = b; alpha[idx(N,n,m)] = a; } } } write_png(bah, N+2, M+2, 
rgb, alpha); fclose(bah); free(Ux); free(Uy); } // weights used to compute equilibrium distribution (post collision) const dfloat w0 = 4.f/9.f, w1 = 1.f/9.f, w2 = 1.f/9.f, w3 = 1.f/9.f; const dfloat w4 = 1.f/9.f, w5 = 1.f/36.f, w6 = 1.f/36.f, w7 = 1.f/36.f, w8 = 1.f/36.f; __host__ __device__ void lbmEquilibrium(const dfloat c, const dfloat rho, const dfloat Ux, const dfloat Uy, dfloat * feq){ // resolve macroscopic velocity into lattice particle velocity directions const dfloat U2 = Ux*Ux+Uy*Uy; const dfloat v0 = 0; const dfloat v1 = +Ux/c; const dfloat v2 = +Uy/c; const dfloat v3 = -Ux/c; const dfloat v4 = -Uy/c; const dfloat v5 = (+Ux+Uy)/c; const dfloat v6 = (-Ux+Uy)/c; const dfloat v7 = (-Ux-Uy)/c; const dfloat v8 = (+Ux-Uy)/c; // compute LBM post-collisional feq[0] = rho*w0*(1.f + 3.f*v0 + 4.5f*v0*v0 - 1.5f*U2/(c*c)); feq[1] = rho*w1*(1.f + 3.f*v1 + 4.5f*v1*v1 - 1.5f*U2/(c*c)); feq[2] = rho*w2*(1.f + 3.f*v2 + 4.5f*v2*v2 - 1.5f*U2/(c*c)); feq[3] = rho*w3*(1.f + 3.f*v3 + 4.5f*v3*v3 - 1.5f*U2/(c*c)); feq[4] = rho*w4*(1.f + 3.f*v4 + 4.5f*v4*v4 - 1.5f*U2/(c*c)); feq[5] = rho*w5*(1.f + 3.f*v5 + 4.5f*v5*v5 - 1.5f*U2/(c*c)); feq[6] = rho*w6*(1.f + 3.f*v6 + 4.5f*v6*v6 - 1.5f*U2/(c*c)); feq[7] = rho*w7*(1.f + 3.f*v7 + 4.5f*v7*v7 - 1.5f*U2/(c*c)); feq[8] = rho*w8*(1.f + 3.f*v8 + 4.5f*v8*v8 - 1.5f*U2/(c*c)); } #define TX 32 #define TY 8 // perform lattice streaming and collision steps __global__ void lbmUpdate(const int N, // number of nodes in x const int M, // number of nodes in y const dfloat c, // speed of sound const dfloat * __restrict__ tau, // relaxation rate const int * __restrict__ nodeType, // (N+2) x (M+2) node types const dfloat * __restrict__ f, // (N+2) x (M+2) x 9 fields before streaming and collisions dfloat * __restrict__ fnew){ // (N+2) x (M+2) x 9 fields after streaming and collisions // number of nodes in whole array including halo int Nall = (N+2)*(M+2); // loop over all non-halo nodes in lattice int n = 1 + threadIdx.x + blockIdx.x*TX; int m = 1 + threadIdx.y + blockIdx.y*TY; if(m<M+1 && n<=N+1){ // physics paramaters dfloat tauinv = 1.f/tau[idx(N,n,m)]; // discover type of node (WALL or FLUID) const int nt = nodeType[idx(N,n,m)]; dfloat fnm[NSPECIES]; // OUTFLOW if(n==N+1){ fnm[0] = f[idx(N,n, m) + 0*Nall]; // stationary fnm[1] = f[idx(N,n-1,m) + 1*Nall]; // E bound from W fnm[2] = f[idx(N,n,m-1) + 2*Nall]; // N bound from S fnm[3] = f[idx(N,n,m) + 3*Nall]; // W bound from E fnm[4] = f[idx(N,n,m+1) + 4*Nall]; // S bound from N fnm[5] = f[idx(N,n-1,m-1) + 5*Nall]; // NE bound from SW fnm[6] = f[idx(N,n,m-1) + 6*Nall]; // NW bound from SE fnm[7] = f[idx(N,n,m+1) + 7*Nall]; // SW bound from NE fnm[8] = f[idx(N,n-1,m+1) + 8*Nall]; // SE bound from NW } else if(nt == FLUID){ fnm[0] = f[idx(N,n, m) + 0*Nall]; // stationary fnm[1] = f[idx(N,n-1,m) + 1*Nall]; // E bound from W fnm[2] = f[idx(N,n,m-1) + 2*Nall]; // N bound from S fnm[3] = f[idx(N,n+1,m) + 3*Nall]; // W bound from E fnm[4] = f[idx(N,n,m+1) + 4*Nall]; // S bound from N fnm[5] = f[idx(N,n-1,m-1) + 5*Nall]; // NE bound from SW fnm[6] = f[idx(N,n+1,m-1) + 6*Nall]; // NW bound from SE fnm[7] = f[idx(N,n+1,m+1) + 7*Nall]; // SW bound from NE fnm[8] = f[idx(N,n-1,m+1) + 8*Nall]; // SE bound from NW } else{ // WALL reflects particles fnm[0] = f[idx(N,n,m) + 0*Nall]; // stationary fnm[1] = f[idx(N,n,m) + 3*Nall]; // E bound from W fnm[2] = f[idx(N,n,m) + 4*Nall]; // N bound from S fnm[3] = f[idx(N,n,m) + 1*Nall]; // W bound from E fnm[4] = f[idx(N,n,m) + 2*Nall]; // S bound from N fnm[5] = f[idx(N,n,m) + 7*Nall]; // NE bound 
from SW fnm[6] = f[idx(N,n,m) + 8*Nall]; // NW bound from SE fnm[7] = f[idx(N,n,m) + 5*Nall]; // SW bound from NE fnm[8] = f[idx(N,n,m) + 6*Nall]; // SE bound from NW } // macroscopic density const dfloat rho = fnm[0]+fnm[1]+fnm[2]+fnm[3]+fnm[4]+fnm[5]+fnm[6]+fnm[7]+fnm[8]; // if(rho<1e-4){ printf("rho(%d,%d)=%g\n", n,m,rho); exit(-1); } // macroscopic momentum const dfloat delta2 = 1e-8; const dfloat Ux = (fnm[1] - fnm[3] + fnm[5] - fnm[6] - fnm[7] + fnm[8])*c/sqrt(rho*rho+delta2); const dfloat Uy = (fnm[2] - fnm[4] + fnm[5] + fnm[6] - fnm[7] - fnm[8])*c/sqrt(rho*rho+delta2); // compute equilibrium distribution dfloat feq[NSPECIES]; lbmEquilibrium(c, rho, Ux, Uy, feq); // MRT stabilization const dfloat g0 = 1.f, g1 = -2.f, g2 = -2.f, g3 = -2.f, g4 = -2.f; const dfloat g5 = 4.f, g6 = 4.f, g7 = 4.f, g8 = 4.f; const dfloat R = g0*fnm[0] + g1*fnm[1] + g2*fnm[2]+ g3*fnm[3] + g4*fnm[4] + g5*fnm[5] + g6*fnm[6] + g7*fnm[7] + g8*fnm[8]; // post collision densities fnm[0] -= tauinv*(fnm[0]-feq[0]) + (1.f-tauinv)*w0*g0*R*0.25f; fnm[1] -= tauinv*(fnm[1]-feq[1]) + (1.f-tauinv)*w1*g1*R*0.25f; fnm[2] -= tauinv*(fnm[2]-feq[2]) + (1.f-tauinv)*w2*g2*R*0.25f; fnm[3] -= tauinv*(fnm[3]-feq[3]) + (1.f-tauinv)*w3*g3*R*0.25f; fnm[4] -= tauinv*(fnm[4]-feq[4]) + (1.f-tauinv)*w4*g4*R*0.25f; fnm[5] -= tauinv*(fnm[5]-feq[5]) + (1.f-tauinv)*w5*g5*R*0.25f; fnm[6] -= tauinv*(fnm[6]-feq[6]) + (1.f-tauinv)*w6*g6*R*0.25f; fnm[7] -= tauinv*(fnm[7]-feq[7]) + (1.f-tauinv)*w7*g7*R*0.25f; fnm[8] -= tauinv*(fnm[8]-feq[8]) + (1.f-tauinv)*w8*g8*R*0.25f; // store new densities const int base = idx(N,n,m); fnew[base+0*Nall] = fnm[0]; fnew[base+1*Nall] = fnm[1]; fnew[base+2*Nall] = fnm[2]; fnew[base+3*Nall] = fnm[3]; fnew[base+4*Nall] = fnm[4]; fnew[base+5*Nall] = fnm[5]; fnew[base+6*Nall] = fnm[6]; fnew[base+7*Nall] = fnm[7]; fnew[base+8*Nall] = fnm[8]; } } void lbmCheck(int N, int M, dfloat *f){ int n,m,s; int nanCount = 0; for(s=0;s<NSPECIES;++s){ for(m=0;m<=M+1;++m){ for(n=0;n<=N+1;++n){ nanCount += isnan(f[idx(N,n,m)+s*(N+2)*(M+2)]); } } } if(nanCount){ printf("found %d nans\n", nanCount); exit(-1); } } // set initial conditions (use uniform flow f everywhere) void lbmInitialConditions(dfloat c, int N, int M, int *nodeType, dfloat *f){ int n,m; dfloat feqIC[NSPECIES]; dfloat feqWALL[NSPECIES]; dfloat rhoIC = 1.; dfloat UxIC = 1.; dfloat UyIC = 0.; lbmEquilibrium(c, rhoIC, 0., 0., feqWALL); lbmEquilibrium(c, rhoIC, UxIC, UyIC, feqIC); for(m=0;m<=M+1;++m){ for(n=0;n<=N+1;++n){ int base = idx(N, n, m); int s; if(n==0 || m==0 || m==M+1){ for(s=0;s<NSPECIES;++s){ f[idx(N,n,m)+s*(N+2)*(M+2)] = feqIC[s]; } } else{ for(s=0;s<NSPECIES;++s){ f[idx(N,n,m)+s*(N+2)*(M+2)] = feqWALL[s]; } } } } } int main(int argc, char **argv){ if(argc!=3){ printf("usage: ./cudaLBM foo.png threshold\n"); exit(-1); } // read threshold dfloat threshold = atof(argv[2]); char *imageFileName = strdup(argv[1]); int N, M; // size of lattice unsigned char *rgb, *alpha; int *nodeType; lbmInput(imageFileName, threshold, &N, &M, &rgb, &alpha, &nodeType); // physical parameters dfloat dx = .01; // lattice node spacings dfloat dt = dx*.1; // time step (also determines Mach number) dfloat c = dx/dt; // speed of sound dfloat tau = .8; // relaxation rate dfloat Reynolds = 2./((tau-.5)*c*c*dt/3.); printf("Reynolds number %g\n", Reynolds); // create lattice storage dfloat *h_f = (dfloat*) calloc((N+2)*(M+2)*NSPECIES, sizeof(dfloat)); dfloat *h_fnew = (dfloat*) calloc((N+2)*(M+2)*NSPECIES, sizeof(dfloat)); dfloat *h_tau = (dfloat*) calloc((N+2)*(M+2), sizeof(dfloat)); // set 
initial flow densities lbmInitialConditions(c, N, M, nodeType, h_f); lbmInitialConditions(c, N, M, nodeType, h_fnew); // set tau based on n index dfloat xo = .95; int n,m; for(m=0;m<=M+1;++m){ for(n=0;n<=N+1;++n){ dfloat x = ((double)n)/N; dfloat taunm = tau*(1 + 4*(1+tanh(20*(x-xo)))); h_tau[idx(N,n,m)] = taunm; } } // DEVICE storage dfloat *c_f, *c_fnew, *c_tau; int *c_nodeType; cudaMalloc(&c_f, (N+2)*(M+2)*NSPECIES*sizeof(dfloat)); cudaMalloc(&c_fnew, (N+2)*(M+2)*NSPECIES*sizeof(dfloat)); cudaMalloc(&c_nodeType, (N+2)*(M+2)*sizeof(int)); cudaMalloc(&c_tau, (N+2)*(M+2)*sizeof(dfloat)); cudaMemcpy(c_f, h_f, (N+2)*(M+2)*NSPECIES*sizeof(dfloat), cudaMemcpyHostToDevice); cudaMemcpy(c_fnew, h_fnew, (N+2)*(M+2)*NSPECIES*sizeof(dfloat), cudaMemcpyHostToDevice); cudaMemcpy(c_nodeType, nodeType, (N+2)*(M+2)*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(c_tau, h_tau, (N+2)*(M+2)*sizeof(dfloat), cudaMemcpyHostToDevice); int Nsteps = 480000/2, tstep = 0, iostep = 100; // time step for(tstep=0;tstep<Nsteps;++tstep){ // perform two updates dim3 T(TX,TY,1); dim3 B( (N+1+TX-1)/TX, (M+1+TY-1)/TY, 1); lbmUpdate <<< B, T >>> (N, M, c, c_tau, c_nodeType, c_f, c_fnew); lbmUpdate <<< B, T >>> (N, M, c, c_tau, c_nodeType, c_fnew, c_f); if(!(tstep%iostep)){ // output an image every iostep printf("tstep = %d\n", tstep); char fname[BUFSIZ]; sprintf(fname, "bah%06d.png", tstep/iostep); cudaMemcpy(h_f, c_f, (N+2)*(M+2)*NSPECIES*sizeof(dfloat), cudaMemcpyDeviceToHost); lbmOutput(fname, nodeType, rgb, alpha, c, dx, N, M, h_f); lbmCheck(N,M,h_f); } } // output final result as image cudaMemcpy(h_f, c_f, (N+2)*(M+2)*NSPECIES*sizeof(dfloat), cudaMemcpyDeviceToHost); lbmOutput("bahFinal.png", nodeType, rgb, alpha, c, dx, N, M, h_f); exit(0); return 0; }
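// (illustrative sketch, not part of the original file) A quick sanity check of
// lbmEquilibrium() as defined above: at rest (Ux = Uy = 0) every velocity term vanishes,
// so feq[s] = rho * w_s and the nine species sum back to rho. The value c = 10 matches
// dx/dt = .01 / (.01 * .1) used in main().
static void checkEquilibriumAtRest(){
  dfloat feq[NSPECIES];
  lbmEquilibrium(10.f, 1.f, 0.f, 0.f, feq);
  dfloat sum = 0.f;
  for(int s = 0; s < NSPECIES; ++s) sum += feq[s];
  printf("sum of feq at rest = %g (expect 1)\n", sum);
}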
1ac6c1af39c59ec926c0f2e34eeb26d6ee8e855d.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "include/nvgraph_lapack.hxx" //#include <f2c.h> //#include <complex> //#define NVGRAPH_USE_LAPACK 1 namespace nvgraph { #define lapackCheckError(status) \ { \ if (status < 0) { \ std::stringstream ss; \ ss << "Lapack error: argument number " << -status << " had an illegal value."; \ FatalError(ss.str(), NVGRAPH_ERR_UNKNOWN); \ } else if (status > 0) \ FatalError("Lapack error: internal error.", NVGRAPH_ERR_UNKNOWN); \ } template <typename T> void Lapack<T>::check_lapack_enabled() { #ifndef NVGRAPH_USE_LAPACK FatalError("Error: LAPACK not enabled.", NVGRAPH_ERR_UNKNOWN); #endif } typedef enum { CUSOLVER_STATUS_SUCCESS = 0, CUSOLVER_STATUS_NOT_INITIALIZED = 1, CUSOLVER_STATUS_ALLOC_FAILED = 2, CUSOLVER_STATUS_INVALID_VALUE = 3, CUSOLVER_STATUS_ARCH_MISMATCH = 4, CUSOLVER_STATUS_MAPPING_ERROR = 5, CUSOLVER_STATUS_EXECUTION_FAILED = 6, CUSOLVER_STATUS_INTERNAL_ERROR = 7, CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED = 8, CUSOLVER_STATUS_NOT_SUPPORTED = 9, CUSOLVER_STATUS_ZERO_PIVOT = 10, CUSOLVER_STATUS_INVALID_LICENSE = 11 } cusolverStatus_t; typedef enum { HIPBLAS_OP_N = 0, HIPBLAS_OP_T = 1, HIPBLAS_OP_C = 2 } hipblasOperation_t; namespace { // XGEMM // extern "C" // void sgemm_(const char *transa, const char *transb, // const int *m, const int *n, const int *k, // const float *alpha, const float *a, const int *lda, // const float *b, const int *ldb, // const float *beta, float *c, const int *ldc); // extern "C" // void dgemm_(const char *transa, const char *transb, // const int *m, const int *n, const int *k, // const double *alpha, const double *a, const int *lda, // const double *b, const int *ldb, // const double *beta, double *c, const int *ldc); extern "C" cusolverStatus_t cusolverDnSgemmHost(hipblasOperation_t transa, hipblasOperation_t transb, int m, int n, int k, const float *alpha, const float *A, int lda, const float *B, int ldb, const float *beta, float *C, int ldc); void lapack_gemm(const char transa, const char transb, int m, int n, int k, float alpha, const float *a, int lda, const float *b, int ldb, float beta, float *c, int ldc) { hipblasOperation_t cublas_transa = (transa == 'N') ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cublas_transb = (transb == 'N') ? HIPBLAS_OP_N : HIPBLAS_OP_T; cusolverDnSgemmHost( cublas_transa, cublas_transb, m, n, k, &alpha, (float *)a, lda, (float *)b, ldb, &beta, c, ldc); } extern "C" cusolverStatus_t cusolverDnDgemmHost(hipblasOperation_t transa, hipblasOperation_t transb, int m, int n, int k, const double *alpha, const double *A, int lda, const double *B, int ldb, const double *beta, double *C, int ldc); void lapack_gemm(const signed char transa, const signed char transb, int m, int n, int k, double alpha, const double *a, int lda, const double *b, int ldb, double beta, double *c, int ldc) { hipblasOperation_t cublas_transa = (transa == 'N') ? 
HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cublas_transb = (transb == 'N') ? HIPBLAS_OP_N : HIPBLAS_OP_T; cusolverDnDgemmHost(cublas_transa, cublas_transb, m, n, k, &alpha, (double *)a, lda, (double *)b, ldb, &beta, c, ldc); } // XSTERF // extern "C" // void ssterf_(const int *n, float *d, float *e, int *info); // // extern "C" // void dsterf_(const int *n, double *d, double *e, int *info); // extern "C" cusolverStatus_t cusolverDnSsterfHost(int n, float *d, float *e, int *info); void lapack_sterf(int n, float *d, float *e, int *info) { cusolverDnSsterfHost(n, d, e, info); } extern "C" cusolverStatus_t cusolverDnDsterfHost(int n, double *d, double *e, int *info); void lapack_sterf(int n, double *d, double *e, int *info) { cusolverDnDsterfHost(n, d, e, info); } // XSTEQR // extern "C" // void ssteqr_(const char *compz, const int *n, float *d, float *e, // float *z, const int *ldz, float *work, int * info); // extern "C" // void dsteqr_(const char *compz, const int *n, double *d, double *e, // double *z, const int *ldz, double *work, int *info); extern "C" cusolverStatus_t cusolverDnSsteqrHost( const signed char *compz, int n, float *d, float *e, float *z, int ldz, float *work, int *info); void lapack_steqr( const signed char compz, int n, float *d, float *e, float *z, int ldz, float *work, int *info) { cusolverDnSsteqrHost(&compz, n, d, e, z, ldz, work, info); } extern "C" cusolverStatus_t cusolverDnDsteqrHost(const signed char *compz, int n, double *d, double *e, double *z, int ldz, double *work, int *info); void lapack_steqr( const signed char compz, int n, double *d, double *e, double *z, int ldz, double *work, int *info) { cusolverDnDsteqrHost(&compz, n, d, e, z, ldz, work, info); } #ifdef NVGRAPH_USE_LAPACK extern "C" void sgeqrf_( int *m, int *n, float *a, int *lda, float *tau, float *work, int *lwork, int *info); extern "C" void dgeqrf_( int *m, int *n, double *a, int *lda, double *tau, double *work, int *lwork, int *info); // extern "C" // void cgeqrf_(int *m, int *n, std::complex<float> *a, int *lda, std::complex<float> *tau, // std::complex<float> *work, int *lwork, int *info); extern "C" void zgeqrf_(int *m, int *n, // std::complex<double> *a, int *lda, std::complex<double> *tau, std::complex<double> *work, int // *lwork, int *info); void lapack_geqrf(int m, int n, float *a, int lda, float *tau, float *work, int *lwork, int *info) { sgeqrf_(&m, &n, a, &lda, tau, work, lwork, info); } void lapack_geqrf( int m, int n, double *a, int lda, double *tau, double *work, int *lwork, int *info) { dgeqrf_(&m, &n, a, &lda, tau, work, lwork, info); } // void lapack_geqrf(int m, int n, std::complex<float> *a, int lda, std::complex<float> *tau, // std::complex<float> *work, int *lwork, int *info) //{ // cgeqrf_(&m, &n, a, &lda, tau, work, lwork, info); //} // void lapack_geqrf(int m, int n, std::complex<double> *a, int lda, std::complex<double> *tau, // std::complex<double> *work, int *lwork, int *info) //{ // zgeqrf_(&m, &n, a, &lda, tau, work, lwork, info); //} extern "C" void sormqr_(char *side, char *trans, int *m, int *n, int *k, float *a, int *lda, const float *tau, float *c, int *ldc, float *work, int *lwork, int *info); extern "C" void dormqr_(char *side, char *trans, int *m, int *n, int *k, double *a, int *lda, const double *tau, double *c, int *ldc, double *work, int *lwork, int *info); // extern "C" // void cunmqr_ (char* side, char* trans, int *m, int *n, int *k, std::complex<float> *a, int *lda, // const std::complex<float> *tau, std::complex<float>* c, int *ldc, 
std::complex<float> *work, int // *lwork, int *info); extern "C" void zunmqr_(char* side, char* trans, int *m, int *n, int *k, // std::complex<double> *a, int *lda, const std::complex<double> *tau, std::complex<double>* c, int // *ldc, std::complex<double> *work, int *lwork, int *info); void lapack_ormqr(char side, char trans, int m, int n, int k, float *a, int lda, float *tau, float *c, int ldc, float *work, int *lwork, int *info) { sormqr_(&side, &trans, &m, &n, &k, a, &lda, tau, c, &ldc, work, lwork, info); } void lapack_ormqr(char side, char trans, int m, int n, int k, double *a, int lda, double *tau, double *c, int ldc, double *work, int *lwork, int *info) { dormqr_(&side, &trans, &m, &n, &k, a, &lda, tau, c, &ldc, work, lwork, info); } // void lapack_unmqr(char side, char trans, int m, int n, int k, std::complex<float> *a, int lda, // std::complex<float> *tau, std::complex<float>* c, int ldc, std::complex<float> *work, int *lwork, // int *info) //{ // cunmqr_(&side, &trans, &m, &n, &k, a, &lda, tau, c, &ldc, work, lwork, info); //} // void lapack_unmqr(char side, char trans, int m, int n, int k, std::complex<double> *a, int lda, // std::complex<double> *tau, std::complex<double>* c, int ldc, std::complex<double> *work, int // *lwork, int *info) //{ // zunmqr_(&side, &trans, &m, &n, &k, a, &lda, tau, c, &ldc, work, lwork, info); //} // extern "C" // void sorgqr_ ( int* m, int* n, int* k, float* a, int* lda, const float* tau, float* work, int* // lwork, int *info ); extern "C" void dorgqr_ ( int* m, int* n, int* k, double* a, int* lda, const // double* tau, double* work, int* lwork, int *info ); // // void lapack_orgqr( int m, int n, int k, float* a, int lda, const float* tau, float* work, int // *lwork, int *info) // { // sorgqr_(&m, &n, &k, a, &lda, tau, work, lwork, info); // } // void lapack_orgqr( int m, int n, int k, double* a, int lda, const double* tau, double* work, int* // lwork, int *info ) // { // dorgqr_(&m, &n, &k, a, &lda, tau, work, lwork, info); // } // int lapack_hseqr_dispatch(char *jobvl, char *jobvr, int* n, int*ilo, int*ihi, // double *h, int* ldh, double *wr, double *wi, double *z, // int*ldz, double *work, int *lwork, int *info) //{ // return dhseqr_(jobvl, jobvr, n, ilo, ihi, h, ldh, wr, wi, z, ldz, work, lwork, info); //} // // int lapack_hseqr_dispatch(char *jobvl, char *jobvr, int* n, int*ilo, int*ihi, // float *h, int* ldh, float *wr, float *wi, float *z, // int*ldz, float *work, int *lwork, int *info) //{ // return shseqr_(jobvl, jobvr, n, ilo, ihi, h, ldh, wr, wi, z, ldz, work, lwork, info); //} // XGEEV extern "C" int dgeev_(char *jobvl, char *jobvr, int *n, double *a, int *lda, double *wr, double *wi, double *vl, int *ldvl, double *vr, int *ldvr, double *work, int *lwork, int *info); extern "C" int sgeev_(char *jobvl, char *jobvr, int *n, float *a, int *lda, float *wr, float *wi, float *vl, int *ldvl, float *vr, int *ldvr, float *work, int *lwork, int *info); // extern "C" // int dhseqr_(char *jobvl, char *jobvr, int* n, int*ilo, int*ihi, // double *h, int* ldh, double *wr, double *wi, double *z, // int*ldz, double *work, int *lwork, int *info); // extern "C" // int shseqr_(char *jobvl, char *jobvr, int* n, int*ilo, int*ihi, // float *h, int* ldh, float *wr, float *wi, float *z, // int*ldz, float *work, int *lwork, int *info); // int lapack_geev_dispatch(char *jobvl, char *jobvr, int *n, double *a, int *lda, double *wr, double *wi, double *vl, int *ldvl, double *vr, int *ldvr, double *work, int *lwork, int *info) { return dgeev_(jobvl, jobvr, n, a, lda, 
wr, wi, vl, ldvl, vr, ldvr, work, lwork, info); } int lapack_geev_dispatch(char *jobvl, char *jobvr, int *n, float *a, int *lda, float *wr, float *wi, float *vl, int *ldvl, float *vr, int *ldvr, float *work, int *lwork, int *info) { return sgeev_(jobvl, jobvr, n, a, lda, wr, wi, vl, ldvl, vr, ldvr, work, lwork, info); } // real eigenvalues template <typename T> void lapack_geev(T *A, T *eigenvalues, int dim, int lda) { char job = 'N'; std::vector<T> WI(dim); int ldv = 1; T *vl = 0; int work_size = 6 * dim; std::vector<T> work(work_size); int info; lapack_geev_dispatch(&job, &job, &dim, A, &lda, eigenvalues, WI.data(), vl, &ldv, vl, &ldv, work.data(), &work_size, &info); lapackCheckError(info); } // real eigenpairs template <typename T> void lapack_geev(T *A, T *eigenvalues, T *eigenvectors, int dim, int lda, int ldvr) { char jobvl = 'N'; char jobvr = 'V'; std::vector<T> WI(dim); int work_size = 6 * dim; T *vl = 0; int ldvl = 1; std::vector<T> work(work_size); int info; lapack_geev_dispatch(&jobvl, &jobvr, &dim, A, &lda, eigenvalues, WI.data(), vl, &ldvl, eigenvectors, &ldvr, work.data(), &work_size, &info); lapackCheckError(info); } // complex eigenpairs template <typename T> void lapack_geev(T *A, T *eigenvalues_r, T *eigenvalues_i, T *eigenvectors_r, T *eigenvectors_i, int dim, int lda, int ldvr) { char jobvl = 'N'; char jobvr = 'V'; int work_size = 8 * dim; int ldvl = 1; std::vector<T> work(work_size); int info; lapack_geev_dispatch(&jobvl, &jobvr, &dim, A, &lda, eigenvalues_r, eigenvalues_i, 0, &ldvl, eigenvectors_r, &ldvr, work.data(), &work_size, &info); lapackCheckError(info); } // template <typename T> // void lapack_hseqr(T* Q, T* H, T* eigenvalues, int dim, int ldh, int ldq) //{ // char job = 'S'; // S compute eigenvalues and the Schur form T. On entry, the upper Hessenberg // matrix H. // // On exit H contains the upper quasi-triangular matrix T from the Schur // decomposition // char jobvr = 'V'; //Take Q on entry, and the product Q*Z is returned. // //ILO and IHI are normally set by a previous call to DGEBAL, Otherwise ILO and IHI should be // set to 1 and N int ilo = 1; int ihi = dim; T* WI = new T[dim]; int ldv = 1; T* vl = 0; int // work_size = 11 * dim; //LWORK as large as 11*N may be required for optimal performance. It is // CPU memory and the matrix is assumed to be small T* work = new T[work_size]; int info; // lapack_hseqr_dispatch(&job, &jobvr, &dim, &ilo, &ihi, H, &ldh, eigenvalues, WI, Q, &ldq, work, // &work_size, &info); lapackCheckError(info); delete [] WI; delete [] work; //} #endif } // end anonymous namespace template <typename T> void Lapack<T>::gemm(bool transa, bool transb, int m, int n, int k, T alpha, const T *A, int lda, const T *B, int ldb, T beta, T *C, int ldc) { // check_lapack_enabled(); //#ifdef NVGRAPH_USE_LAPACK const char transA_char = transa ? 'T' : 'N'; const char transB_char = transb ? 
'T' : 'N'; lapack_gemm(transA_char, transB_char, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); //#endif } template <typename T> void Lapack<T>::sterf(int n, T *d, T *e) { // check_lapack_enabled(); //#ifdef NVGRAPH_USE_LAPACK int info; lapack_sterf(n, d, e, &info); lapackCheckError(info); //#endif } template <typename T> void Lapack<T>::steqr(char compz, int n, T *d, T *e, T *z, int ldz, T *work) { // check_lapack_enabled(); //#ifdef NVGRAPH_USE_LAPACK int info; lapack_steqr(compz, n, d, e, z, ldz, work, &info); lapackCheckError(info); //#endif } template <typename T> void Lapack<T>::geqrf(int m, int n, T *a, int lda, T *tau, T *work, int *lwork) { check_lapack_enabled(); #ifdef NVGRAPH_USE_LAPACK int info; lapack_geqrf(m, n, a, lda, tau, work, lwork, &info); lapackCheckError(info); #endif } template <typename T> void Lapack<T>::ormqr(bool right_side, bool transq, int m, int n, int k, T *a, int lda, T *tau, T *c, int ldc, T *work, int *lwork) { check_lapack_enabled(); #ifdef NVGRAPH_USE_LAPACK char side = right_side ? 'R' : 'L'; char trans = transq ? 'T' : 'N'; int info; lapack_ormqr(side, trans, m, n, k, a, lda, tau, c, ldc, work, lwork, &info); lapackCheckError(info); #endif } // template <typename T> // void Lapack< T >::unmqr(bool right_side, bool transq, int m, int n, int k, T *a, int lda, T *tau, // T *c, int ldc, T *work, int *lwork) //{ // check_lapack_enabled(); // #ifdef NVGRAPH_USE_LAPACK // char side = right_side ? 'R' : 'L'; // char trans = transq ? 'T' : 'N'; // int info; // lapack_unmqr(side, trans, m, n, k, a, lda, tau, c, ldc, work, lwork, &info); // lapackCheckError(info); // #endif //} // template <typename T> // void Lapack< T >::orgqr( int m, int n, int k, T* a, int lda, const T* tau, T* work, int* lwork) //{ // check_lapack_enabled(); // #ifdef NVGRAPH_USE_LAPACK // int info; // lapack_orgqr(m, n, k, a, lda, tau, work, lwork, &info); // lapackCheckError(info); // #endif //} // template <typename T> // void Lapack< T >::qrf(int n, int k, T *H, T *C, T *Q, T *R) //{ // check_lapack_enabled(); // #ifdef NVGRAPH_USE_LAPACK // // int m = n, k = n, lda=n, lwork=2*n, info; // // lapack_geqrf(m, n, H, lda, C, work, lwork, &info); // // lapackCheckError(info); // // lapack_ormqr(m, n, k, H, lda, tau, c, ldc, work, lwork, &info); // // lapackCheckError(info); // #endif //} // real eigenvalues template <typename T> void Lapack<T>::geev(T *A, T *eigenvalues, int dim, int lda) { check_lapack_enabled(); #ifdef NVGRAPH_USE_LAPACK lapack_geev(A, eigenvalues, dim, lda); #endif } // real eigenpairs template <typename T> void Lapack<T>::geev(T *A, T *eigenvalues, T *eigenvectors, int dim, int lda, int ldvr) { check_lapack_enabled(); #ifdef NVGRAPH_USE_LAPACK lapack_geev(A, eigenvalues, eigenvectors, dim, lda, ldvr); #endif } // complex eigenpairs template <typename T> void Lapack<T>::geev(T *A, T *eigenvalues_r, T *eigenvalues_i, T *eigenvectors_r, T *eigenvectors_i, int dim, int lda, int ldvr) { check_lapack_enabled(); #ifdef NVGRAPH_USE_LAPACK lapack_geev(A, eigenvalues_r, eigenvalues_i, eigenvectors_r, eigenvectors_i, dim, lda, ldvr); #endif } // template <typename T> // void Lapack< T >::hseqr(T* Q, T* H, T* eigenvalues,T* eigenvectors, int dim, int ldh, int ldq) //{ // check_lapack_enabled(); //#ifdef NVGRAPH_USE_LAPACK // lapack_hseqr(Q, H, eigenvalues, dim, ldh, ldq); //#endif //} // Explicit instantiation template void Lapack<float>::check_lapack_enabled(); template void Lapack<float>::gemm(bool transa, bool transb, int m, int n, int k, float alpha, const float *A, int lda, 
const float *B, int ldb, float beta, float *C, int ldc); template void Lapack<float>::sterf(int n, float *d, float *e); template void Lapack<float>::geev( float *A, float *eigenvalues, float *eigenvectors, int dim, int lda, int ldvr); template void Lapack<float>::geev(float *A, float *eigenvalues_r, float *eigenvalues_i, float *eigenvectors_r, float *eigenvectors_i, int dim, int lda, int ldvr); // template void Lapack<float>::hseqr(float* Q, float* H, float* eigenvalues, float* eigenvectors, // int dim, int ldh, int ldq); template void Lapack<float>::steqr( char compz, int n, float *d, float *e, float *z, int ldz, float *work); template void Lapack<float>::geqrf( int m, int n, float *a, int lda, float *tau, float *work, int *lwork); template void Lapack<float>::ormqr(bool right_side, bool transq, int m, int n, int k, float *a, int lda, float *tau, float *c, int ldc, float *work, int *lwork); // template void Lapack<float>::orgqr(int m, int n, int k, float* a, int lda, const float* tau, // float* work, int* lwork); template void Lapack<double>::check_lapack_enabled(); template void Lapack<double>::gemm(bool transa, bool transb, int m, int n, int k, double alpha, const double *A, int lda, const double *B, int ldb, double beta, double *C, int ldc); template void Lapack<double>::sterf(int n, double *d, double *e); template void Lapack<double>::geev( double *A, double *eigenvalues, double *eigenvectors, int dim, int lda, int ldvr); template void Lapack<double>::geev(double *A, double *eigenvalues_r, double *eigenvalues_i, double *eigenvectors_r, double *eigenvectors_i, int dim, int lda, int ldvr); // template void Lapack<double>::hseqr(double* Q, double* H, double* eigenvalues, double* // eigenvectors, int dim, int ldh, int ldq); template void Lapack<double>::steqr( char compz, int n, double *d, double *e, double *z, int ldz, double *work); template void Lapack<double>::geqrf( int m, int n, double *a, int lda, double *tau, double *work, int *lwork); template void Lapack<double>::ormqr(bool right_side, bool transq, int m, int n, int k, double *a, int lda, double *tau, double *c, int ldc, double *work, int *lwork); // template void Lapack<double>::orgqr(int m, int n, int k, double* a, int lda, const double* tau, // double* work, int* lwork); // template void Lapack<std::complex<float> >::geqrf(int m, int n, std::complex<float> *a, int lda, // std::complex<float> *tau, std::complex<float> *work, int *lwork); template void // Lapack<std::complex<double> >::geqrf(int m, int n, std::complex<double> *a, int lda, // std::complex<double> *tau, std::complex<double> *work, int *lwork); template void // Lapack<std::complex<float> >::unmqr(bool right_side, bool transq, int m, int n, int k, // std::complex<float> *a, int lda, std::complex<float> *tau, std::complex<float> *c, int ldc, // std::complex<float> *work, int *lwork); template void Lapack<std::complex<double> >::unmqr(bool // right_side, bool transq, int m, int n, int k, std::complex<double> *a, int lda, // std::complex<double> *tau, std::complex<double> *c, int ldc, std::complex<double> *work, int // *lwork); } // end namespace nvgraph
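// (illustrative sketch, not part of the original file; assumes include/nvgraph_lapack.hxx
// declares nvgraph::Lapack<T> with exactly the member signatures instantiated above, and
// that the cusolverDn*GemmHost symbols the wrapper forwards to are available at link time)
// It shows how the bool transpose flags of Lapack<T>::gemm map to the 'T'/'N' characters
// that lapack_gemm() converts into hipblasOperation_t values. Storage is column-major,
// as in BLAS/LAPACK.
#include "include/nvgraph_lapack.hxx"

static void small_gemm_example(){
  const int n = 2;
  double A[4] = {1.0, 0.0, 0.0, 1.0};  // 2x2 identity, column-major
  double B[4] = {1.0, 2.0, 3.0, 4.0};
  double C[4] = {0.0, 0.0, 0.0, 0.0};
  // C = 1.0 * A * B + 0.0 * C   (transa = transb = false -> 'N','N')
  nvgraph::Lapack<double>::gemm(false, false, n, n, n, 1.0, A, n, B, n, 0.0, C, n);
}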
1ac6c1af39c59ec926c0f2e34eeb26d6ee8e855d.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "include/nvgraph_lapack.hxx" //#include <f2c.h> //#include <complex> //#define NVGRAPH_USE_LAPACK 1 namespace nvgraph { #define lapackCheckError(status) \ { \ if (status < 0) { \ std::stringstream ss; \ ss << "Lapack error: argument number " << -status << " had an illegal value."; \ FatalError(ss.str(), NVGRAPH_ERR_UNKNOWN); \ } else if (status > 0) \ FatalError("Lapack error: internal error.", NVGRAPH_ERR_UNKNOWN); \ } template <typename T> void Lapack<T>::check_lapack_enabled() { #ifndef NVGRAPH_USE_LAPACK FatalError("Error: LAPACK not enabled.", NVGRAPH_ERR_UNKNOWN); #endif } typedef enum { CUSOLVER_STATUS_SUCCESS = 0, CUSOLVER_STATUS_NOT_INITIALIZED = 1, CUSOLVER_STATUS_ALLOC_FAILED = 2, CUSOLVER_STATUS_INVALID_VALUE = 3, CUSOLVER_STATUS_ARCH_MISMATCH = 4, CUSOLVER_STATUS_MAPPING_ERROR = 5, CUSOLVER_STATUS_EXECUTION_FAILED = 6, CUSOLVER_STATUS_INTERNAL_ERROR = 7, CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED = 8, CUSOLVER_STATUS_NOT_SUPPORTED = 9, CUSOLVER_STATUS_ZERO_PIVOT = 10, CUSOLVER_STATUS_INVALID_LICENSE = 11 } cusolverStatus_t; typedef enum { CUBLAS_OP_N = 0, CUBLAS_OP_T = 1, CUBLAS_OP_C = 2 } cublasOperation_t; namespace { // XGEMM // extern "C" // void sgemm_(const char *transa, const char *transb, // const int *m, const int *n, const int *k, // const float *alpha, const float *a, const int *lda, // const float *b, const int *ldb, // const float *beta, float *c, const int *ldc); // extern "C" // void dgemm_(const char *transa, const char *transb, // const int *m, const int *n, const int *k, // const double *alpha, const double *a, const int *lda, // const double *b, const int *ldb, // const double *beta, double *c, const int *ldc); extern "C" cusolverStatus_t cusolverDnSgemmHost(cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, const float *alpha, const float *A, int lda, const float *B, int ldb, const float *beta, float *C, int ldc); void lapack_gemm(const char transa, const char transb, int m, int n, int k, float alpha, const float *a, int lda, const float *b, int ldb, float beta, float *c, int ldc) { cublasOperation_t cublas_transa = (transa == 'N') ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cublas_transb = (transb == 'N') ? CUBLAS_OP_N : CUBLAS_OP_T; cusolverDnSgemmHost( cublas_transa, cublas_transb, m, n, k, &alpha, (float *)a, lda, (float *)b, ldb, &beta, c, ldc); } extern "C" cusolverStatus_t cusolverDnDgemmHost(cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, const double *alpha, const double *A, int lda, const double *B, int ldb, const double *beta, double *C, int ldc); void lapack_gemm(const signed char transa, const signed char transb, int m, int n, int k, double alpha, const double *a, int lda, const double *b, int ldb, double beta, double *c, int ldc) { cublasOperation_t cublas_transa = (transa == 'N') ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cublas_transb = (transb == 'N') ? 
CUBLAS_OP_N : CUBLAS_OP_T; cusolverDnDgemmHost(cublas_transa, cublas_transb, m, n, k, &alpha, (double *)a, lda, (double *)b, ldb, &beta, c, ldc); } // XSTERF // extern "C" // void ssterf_(const int *n, float *d, float *e, int *info); // // extern "C" // void dsterf_(const int *n, double *d, double *e, int *info); // extern "C" cusolverStatus_t cusolverDnSsterfHost(int n, float *d, float *e, int *info); void lapack_sterf(int n, float *d, float *e, int *info) { cusolverDnSsterfHost(n, d, e, info); } extern "C" cusolverStatus_t cusolverDnDsterfHost(int n, double *d, double *e, int *info); void lapack_sterf(int n, double *d, double *e, int *info) { cusolverDnDsterfHost(n, d, e, info); } // XSTEQR // extern "C" // void ssteqr_(const char *compz, const int *n, float *d, float *e, // float *z, const int *ldz, float *work, int * info); // extern "C" // void dsteqr_(const char *compz, const int *n, double *d, double *e, // double *z, const int *ldz, double *work, int *info); extern "C" cusolverStatus_t cusolverDnSsteqrHost( const signed char *compz, int n, float *d, float *e, float *z, int ldz, float *work, int *info); void lapack_steqr( const signed char compz, int n, float *d, float *e, float *z, int ldz, float *work, int *info) { cusolverDnSsteqrHost(&compz, n, d, e, z, ldz, work, info); } extern "C" cusolverStatus_t cusolverDnDsteqrHost(const signed char *compz, int n, double *d, double *e, double *z, int ldz, double *work, int *info); void lapack_steqr( const signed char compz, int n, double *d, double *e, double *z, int ldz, double *work, int *info) { cusolverDnDsteqrHost(&compz, n, d, e, z, ldz, work, info); } #ifdef NVGRAPH_USE_LAPACK extern "C" void sgeqrf_( int *m, int *n, float *a, int *lda, float *tau, float *work, int *lwork, int *info); extern "C" void dgeqrf_( int *m, int *n, double *a, int *lda, double *tau, double *work, int *lwork, int *info); // extern "C" // void cgeqrf_(int *m, int *n, std::complex<float> *a, int *lda, std::complex<float> *tau, // std::complex<float> *work, int *lwork, int *info); extern "C" void zgeqrf_(int *m, int *n, // std::complex<double> *a, int *lda, std::complex<double> *tau, std::complex<double> *work, int // *lwork, int *info); void lapack_geqrf(int m, int n, float *a, int lda, float *tau, float *work, int *lwork, int *info) { sgeqrf_(&m, &n, a, &lda, tau, work, lwork, info); } void lapack_geqrf( int m, int n, double *a, int lda, double *tau, double *work, int *lwork, int *info) { dgeqrf_(&m, &n, a, &lda, tau, work, lwork, info); } // void lapack_geqrf(int m, int n, std::complex<float> *a, int lda, std::complex<float> *tau, // std::complex<float> *work, int *lwork, int *info) //{ // cgeqrf_(&m, &n, a, &lda, tau, work, lwork, info); //} // void lapack_geqrf(int m, int n, std::complex<double> *a, int lda, std::complex<double> *tau, // std::complex<double> *work, int *lwork, int *info) //{ // zgeqrf_(&m, &n, a, &lda, tau, work, lwork, info); //} extern "C" void sormqr_(char *side, char *trans, int *m, int *n, int *k, float *a, int *lda, const float *tau, float *c, int *ldc, float *work, int *lwork, int *info); extern "C" void dormqr_(char *side, char *trans, int *m, int *n, int *k, double *a, int *lda, const double *tau, double *c, int *ldc, double *work, int *lwork, int *info); // extern "C" // void cunmqr_ (char* side, char* trans, int *m, int *n, int *k, std::complex<float> *a, int *lda, // const std::complex<float> *tau, std::complex<float>* c, int *ldc, std::complex<float> *work, int // *lwork, int *info); extern "C" void zunmqr_(char* side, char* 
trans, int *m, int *n, int *k, // std::complex<double> *a, int *lda, const std::complex<double> *tau, std::complex<double>* c, int // *ldc, std::complex<double> *work, int *lwork, int *info); void lapack_ormqr(char side, char trans, int m, int n, int k, float *a, int lda, float *tau, float *c, int ldc, float *work, int *lwork, int *info) { sormqr_(&side, &trans, &m, &n, &k, a, &lda, tau, c, &ldc, work, lwork, info); } void lapack_ormqr(char side, char trans, int m, int n, int k, double *a, int lda, double *tau, double *c, int ldc, double *work, int *lwork, int *info) { dormqr_(&side, &trans, &m, &n, &k, a, &lda, tau, c, &ldc, work, lwork, info); } // void lapack_unmqr(char side, char trans, int m, int n, int k, std::complex<float> *a, int lda, // std::complex<float> *tau, std::complex<float>* c, int ldc, std::complex<float> *work, int *lwork, // int *info) //{ // cunmqr_(&side, &trans, &m, &n, &k, a, &lda, tau, c, &ldc, work, lwork, info); //} // void lapack_unmqr(char side, char trans, int m, int n, int k, std::complex<double> *a, int lda, // std::complex<double> *tau, std::complex<double>* c, int ldc, std::complex<double> *work, int // *lwork, int *info) //{ // zunmqr_(&side, &trans, &m, &n, &k, a, &lda, tau, c, &ldc, work, lwork, info); //} // extern "C" // void sorgqr_ ( int* m, int* n, int* k, float* a, int* lda, const float* tau, float* work, int* // lwork, int *info ); extern "C" void dorgqr_ ( int* m, int* n, int* k, double* a, int* lda, const // double* tau, double* work, int* lwork, int *info ); // // void lapack_orgqr( int m, int n, int k, float* a, int lda, const float* tau, float* work, int // *lwork, int *info) // { // sorgqr_(&m, &n, &k, a, &lda, tau, work, lwork, info); // } // void lapack_orgqr( int m, int n, int k, double* a, int lda, const double* tau, double* work, int* // lwork, int *info ) // { // dorgqr_(&m, &n, &k, a, &lda, tau, work, lwork, info); // } // int lapack_hseqr_dispatch(char *jobvl, char *jobvr, int* n, int*ilo, int*ihi, // double *h, int* ldh, double *wr, double *wi, double *z, // int*ldz, double *work, int *lwork, int *info) //{ // return dhseqr_(jobvl, jobvr, n, ilo, ihi, h, ldh, wr, wi, z, ldz, work, lwork, info); //} // // int lapack_hseqr_dispatch(char *jobvl, char *jobvr, int* n, int*ilo, int*ihi, // float *h, int* ldh, float *wr, float *wi, float *z, // int*ldz, float *work, int *lwork, int *info) //{ // return shseqr_(jobvl, jobvr, n, ilo, ihi, h, ldh, wr, wi, z, ldz, work, lwork, info); //} // XGEEV extern "C" int dgeev_(char *jobvl, char *jobvr, int *n, double *a, int *lda, double *wr, double *wi, double *vl, int *ldvl, double *vr, int *ldvr, double *work, int *lwork, int *info); extern "C" int sgeev_(char *jobvl, char *jobvr, int *n, float *a, int *lda, float *wr, float *wi, float *vl, int *ldvl, float *vr, int *ldvr, float *work, int *lwork, int *info); // extern "C" // int dhseqr_(char *jobvl, char *jobvr, int* n, int*ilo, int*ihi, // double *h, int* ldh, double *wr, double *wi, double *z, // int*ldz, double *work, int *lwork, int *info); // extern "C" // int shseqr_(char *jobvl, char *jobvr, int* n, int*ilo, int*ihi, // float *h, int* ldh, float *wr, float *wi, float *z, // int*ldz, float *work, int *lwork, int *info); // int lapack_geev_dispatch(char *jobvl, char *jobvr, int *n, double *a, int *lda, double *wr, double *wi, double *vl, int *ldvl, double *vr, int *ldvr, double *work, int *lwork, int *info) { return dgeev_(jobvl, jobvr, n, a, lda, wr, wi, vl, ldvl, vr, ldvr, work, lwork, info); } int lapack_geev_dispatch(char *jobvl, char 
*jobvr, int *n, float *a, int *lda, float *wr, float *wi, float *vl, int *ldvl, float *vr, int *ldvr, float *work, int *lwork, int *info) { return sgeev_(jobvl, jobvr, n, a, lda, wr, wi, vl, ldvl, vr, ldvr, work, lwork, info); } // real eigenvalues template <typename T> void lapack_geev(T *A, T *eigenvalues, int dim, int lda) { char job = 'N'; std::vector<T> WI(dim); int ldv = 1; T *vl = 0; int work_size = 6 * dim; std::vector<T> work(work_size); int info; lapack_geev_dispatch(&job, &job, &dim, A, &lda, eigenvalues, WI.data(), vl, &ldv, vl, &ldv, work.data(), &work_size, &info); lapackCheckError(info); } // real eigenpairs template <typename T> void lapack_geev(T *A, T *eigenvalues, T *eigenvectors, int dim, int lda, int ldvr) { char jobvl = 'N'; char jobvr = 'V'; std::vector<T> WI(dim); int work_size = 6 * dim; T *vl = 0; int ldvl = 1; std::vector<T> work(work_size); int info; lapack_geev_dispatch(&jobvl, &jobvr, &dim, A, &lda, eigenvalues, WI.data(), vl, &ldvl, eigenvectors, &ldvr, work.data(), &work_size, &info); lapackCheckError(info); } // complex eigenpairs template <typename T> void lapack_geev(T *A, T *eigenvalues_r, T *eigenvalues_i, T *eigenvectors_r, T *eigenvectors_i, int dim, int lda, int ldvr) { char jobvl = 'N'; char jobvr = 'V'; int work_size = 8 * dim; int ldvl = 1; std::vector<T> work(work_size); int info; lapack_geev_dispatch(&jobvl, &jobvr, &dim, A, &lda, eigenvalues_r, eigenvalues_i, 0, &ldvl, eigenvectors_r, &ldvr, work.data(), &work_size, &info); lapackCheckError(info); } // template <typename T> // void lapack_hseqr(T* Q, T* H, T* eigenvalues, int dim, int ldh, int ldq) //{ // char job = 'S'; // S compute eigenvalues and the Schur form T. On entry, the upper Hessenberg // matrix H. // // On exit H contains the upper quasi-triangular matrix T from the Schur // decomposition // char jobvr = 'V'; //Take Q on entry, and the product Q*Z is returned. // //ILO and IHI are normally set by a previous call to DGEBAL, Otherwise ILO and IHI should be // set to 1 and N int ilo = 1; int ihi = dim; T* WI = new T[dim]; int ldv = 1; T* vl = 0; int // work_size = 11 * dim; //LWORK as large as 11*N may be required for optimal performance. It is // CPU memory and the matrix is assumed to be small T* work = new T[work_size]; int info; // lapack_hseqr_dispatch(&job, &jobvr, &dim, &ilo, &ihi, H, &ldh, eigenvalues, WI, Q, &ldq, work, // &work_size, &info); lapackCheckError(info); delete [] WI; delete [] work; //} #endif } // end anonymous namespace template <typename T> void Lapack<T>::gemm(bool transa, bool transb, int m, int n, int k, T alpha, const T *A, int lda, const T *B, int ldb, T beta, T *C, int ldc) { // check_lapack_enabled(); //#ifdef NVGRAPH_USE_LAPACK const char transA_char = transa ? 'T' : 'N'; const char transB_char = transb ? 
'T' : 'N'; lapack_gemm(transA_char, transB_char, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); //#endif } template <typename T> void Lapack<T>::sterf(int n, T *d, T *e) { // check_lapack_enabled(); //#ifdef NVGRAPH_USE_LAPACK int info; lapack_sterf(n, d, e, &info); lapackCheckError(info); //#endif } template <typename T> void Lapack<T>::steqr(char compz, int n, T *d, T *e, T *z, int ldz, T *work) { // check_lapack_enabled(); //#ifdef NVGRAPH_USE_LAPACK int info; lapack_steqr(compz, n, d, e, z, ldz, work, &info); lapackCheckError(info); //#endif } template <typename T> void Lapack<T>::geqrf(int m, int n, T *a, int lda, T *tau, T *work, int *lwork) { check_lapack_enabled(); #ifdef NVGRAPH_USE_LAPACK int info; lapack_geqrf(m, n, a, lda, tau, work, lwork, &info); lapackCheckError(info); #endif } template <typename T> void Lapack<T>::ormqr(bool right_side, bool transq, int m, int n, int k, T *a, int lda, T *tau, T *c, int ldc, T *work, int *lwork) { check_lapack_enabled(); #ifdef NVGRAPH_USE_LAPACK char side = right_side ? 'R' : 'L'; char trans = transq ? 'T' : 'N'; int info; lapack_ormqr(side, trans, m, n, k, a, lda, tau, c, ldc, work, lwork, &info); lapackCheckError(info); #endif } // template <typename T> // void Lapack< T >::unmqr(bool right_side, bool transq, int m, int n, int k, T *a, int lda, T *tau, // T *c, int ldc, T *work, int *lwork) //{ // check_lapack_enabled(); // #ifdef NVGRAPH_USE_LAPACK // char side = right_side ? 'R' : 'L'; // char trans = transq ? 'T' : 'N'; // int info; // lapack_unmqr(side, trans, m, n, k, a, lda, tau, c, ldc, work, lwork, &info); // lapackCheckError(info); // #endif //} // template <typename T> // void Lapack< T >::orgqr( int m, int n, int k, T* a, int lda, const T* tau, T* work, int* lwork) //{ // check_lapack_enabled(); // #ifdef NVGRAPH_USE_LAPACK // int info; // lapack_orgqr(m, n, k, a, lda, tau, work, lwork, &info); // lapackCheckError(info); // #endif //} // template <typename T> // void Lapack< T >::qrf(int n, int k, T *H, T *C, T *Q, T *R) //{ // check_lapack_enabled(); // #ifdef NVGRAPH_USE_LAPACK // // int m = n, k = n, lda=n, lwork=2*n, info; // // lapack_geqrf(m, n, H, lda, C, work, lwork, &info); // // lapackCheckError(info); // // lapack_ormqr(m, n, k, H, lda, tau, c, ldc, work, lwork, &info); // // lapackCheckError(info); // #endif //} // real eigenvalues template <typename T> void Lapack<T>::geev(T *A, T *eigenvalues, int dim, int lda) { check_lapack_enabled(); #ifdef NVGRAPH_USE_LAPACK lapack_geev(A, eigenvalues, dim, lda); #endif } // real eigenpairs template <typename T> void Lapack<T>::geev(T *A, T *eigenvalues, T *eigenvectors, int dim, int lda, int ldvr) { check_lapack_enabled(); #ifdef NVGRAPH_USE_LAPACK lapack_geev(A, eigenvalues, eigenvectors, dim, lda, ldvr); #endif } // complex eigenpairs template <typename T> void Lapack<T>::geev(T *A, T *eigenvalues_r, T *eigenvalues_i, T *eigenvectors_r, T *eigenvectors_i, int dim, int lda, int ldvr) { check_lapack_enabled(); #ifdef NVGRAPH_USE_LAPACK lapack_geev(A, eigenvalues_r, eigenvalues_i, eigenvectors_r, eigenvectors_i, dim, lda, ldvr); #endif } // template <typename T> // void Lapack< T >::hseqr(T* Q, T* H, T* eigenvalues,T* eigenvectors, int dim, int ldh, int ldq) //{ // check_lapack_enabled(); //#ifdef NVGRAPH_USE_LAPACK // lapack_hseqr(Q, H, eigenvalues, dim, ldh, ldq); //#endif //} // Explicit instantiation template void Lapack<float>::check_lapack_enabled(); template void Lapack<float>::gemm(bool transa, bool transb, int m, int n, int k, float alpha, const float *A, int lda, 
const float *B, int ldb, float beta, float *C, int ldc); template void Lapack<float>::sterf(int n, float *d, float *e); template void Lapack<float>::geev( float *A, float *eigenvalues, float *eigenvectors, int dim, int lda, int ldvr); template void Lapack<float>::geev(float *A, float *eigenvalues_r, float *eigenvalues_i, float *eigenvectors_r, float *eigenvectors_i, int dim, int lda, int ldvr); // template void Lapack<float>::hseqr(float* Q, float* H, float* eigenvalues, float* eigenvectors, // int dim, int ldh, int ldq); template void Lapack<float>::steqr( char compz, int n, float *d, float *e, float *z, int ldz, float *work); template void Lapack<float>::geqrf( int m, int n, float *a, int lda, float *tau, float *work, int *lwork); template void Lapack<float>::ormqr(bool right_side, bool transq, int m, int n, int k, float *a, int lda, float *tau, float *c, int ldc, float *work, int *lwork); // template void Lapack<float>::orgqr(int m, int n, int k, float* a, int lda, const float* tau, // float* work, int* lwork); template void Lapack<double>::check_lapack_enabled(); template void Lapack<double>::gemm(bool transa, bool transb, int m, int n, int k, double alpha, const double *A, int lda, const double *B, int ldb, double beta, double *C, int ldc); template void Lapack<double>::sterf(int n, double *d, double *e); template void Lapack<double>::geev( double *A, double *eigenvalues, double *eigenvectors, int dim, int lda, int ldvr); template void Lapack<double>::geev(double *A, double *eigenvalues_r, double *eigenvalues_i, double *eigenvectors_r, double *eigenvectors_i, int dim, int lda, int ldvr); // template void Lapack<double>::hseqr(double* Q, double* H, double* eigenvalues, double* // eigenvectors, int dim, int ldh, int ldq); template void Lapack<double>::steqr( char compz, int n, double *d, double *e, double *z, int ldz, double *work); template void Lapack<double>::geqrf( int m, int n, double *a, int lda, double *tau, double *work, int *lwork); template void Lapack<double>::ormqr(bool right_side, bool transq, int m, int n, int k, double *a, int lda, double *tau, double *c, int ldc, double *work, int *lwork); // template void Lapack<double>::orgqr(int m, int n, int k, double* a, int lda, const double* tau, // double* work, int* lwork); // template void Lapack<std::complex<float> >::geqrf(int m, int n, std::complex<float> *a, int lda, // std::complex<float> *tau, std::complex<float> *work, int *lwork); template void // Lapack<std::complex<double> >::geqrf(int m, int n, std::complex<double> *a, int lda, // std::complex<double> *tau, std::complex<double> *work, int *lwork); template void // Lapack<std::complex<float> >::unmqr(bool right_side, bool transq, int m, int n, int k, // std::complex<float> *a, int lda, std::complex<float> *tau, std::complex<float> *c, int ldc, // std::complex<float> *work, int *lwork); template void Lapack<std::complex<double> >::unmqr(bool // right_side, bool transq, int m, int n, int k, std::complex<double> *a, int lda, // std::complex<double> *tau, std::complex<double> *c, int ldc, std::complex<double> *work, int // *lwork); } // end namespace nvgraph
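// A minimal standalone sketch of the sgeev_ call pattern that lapack_geev above dispatches to,
// assuming a Fortran LAPACK is linked (e.g. -llapack) and column-major storage; the 2x2 matrix,
// the 6*n workspace size and all local names here are illustrative.
#include <cstdio>
#include <vector>

extern "C" void sgeev_(char *jobvl, char *jobvr, int *n, float *a, int *lda,
                       float *wr, float *wi, float *vl, int *ldvl,
                       float *vr, int *ldvr, float *work, int *lwork, int *info);

int main() {
  int n = 2, lda = 2, ldvl = 1, ldvr = 2, lwork = 6 * n, info = 0;
  char jobvl = 'N', jobvr = 'V';                       // eigenvalues + right eigenvectors
  std::vector<float> a = {2.f, 1.f, 1.f, 2.f};         // column-major [[2,1],[1,2]]
  std::vector<float> wr(n), wi(n), vr(n * ldvr), work(lwork);
  sgeev_(&jobvl, &jobvr, &n, a.data(), &lda, wr.data(), wi.data(),
         nullptr, &ldvl, vr.data(), &ldvr, work.data(), &lwork, &info);
  if (info != 0) { std::printf("sgeev_ failed, info = %d\n", info); return 1; }
  for (int i = 0; i < n; ++i)                          // expect 3 and 1 for this matrix
    std::printf("lambda_%d = %g + %gi\n", i, wr[i], wi[i]);
  return 0;
}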
4d95c37e0cf5ed90a1d6ea7df7417e5be4130eb3.hip
// !!! This is a file automatically generated by hipify!!! #include "data.cuh" void Data_t::malloc_data(const int Nx, const int Ny, const int Nz){ const int Ns3 = Tile::Ns*Tile::Ns*Tile::Ns; const size_t sz = long(Nx)*Ny*Nz/Ns3*sizeof(Tile); printf("Total data size = %g GB\n",double(sz)/1024/1024/1024); for(int ipar:{0,1} ) CHECK_ERROR( hipMalloc((void**)&tiles[ipar], sz ) ); CHECK_ERROR( hipHostMalloc((void**)&tilesHost, sz ) ); for(auto itile: tiles ) CHECK_ERROR( hipMemset(itile, 0, sz ) ); CHECK_ERROR( hipMemset(tilesHost, 0, sz ) ); }; void Data_t::copyHost2Dev(){ const int Ns3 = Tile::Ns*Tile::Ns*Tile::Ns; const size_t sz = long(Nx)*Ny*Nz/Ns3*sizeof(Tile); for(auto itiles: tiles ) CHECK_ERROR( hipMemcpy(itiles, tilesHost, sz, hipMemcpyHostToDevice ) ); } void Data_t::copyDev2Host(const int ipar){ const int Ns3 = Tile::Ns*Tile::Ns*Tile::Ns; const size_t sz = long(Nx)*Ny*Nz/Ns3*sizeof(Tile); CHECK_ERROR( hipMemcpy(tilesHost, tiles[ipar], sz, hipMemcpyDeviceToHost ) ); } void Data_t::swap_ptrs(){ Tile* tmpptr = tiles[0]; tiles[0] = tiles[1]; tiles[1] = tmpptr; }
4d95c37e0cf5ed90a1d6ea7df7417e5be4130eb3.cu
#include "data.cuh" void Data_t::malloc_data(const int Nx, const int Ny, const int Nz){ const int Ns3 = Tile::Ns*Tile::Ns*Tile::Ns; const size_t sz = long(Nx)*Ny*Nz/Ns3*sizeof(Tile); printf("Total data size = %g GB\n",double(sz)/1024/1024/1024); for(int ipar:{0,1} ) CHECK_ERROR( cudaMalloc((void**)&tiles[ipar], sz ) ); CHECK_ERROR( cudaMallocHost((void**)&tilesHost, sz ) ); for(auto itile: tiles ) CHECK_ERROR( cudaMemset(itile, 0, sz ) ); CHECK_ERROR( cudaMemset(tilesHost, 0, sz ) ); }; void Data_t::copyHost2Dev(){ const int Ns3 = Tile::Ns*Tile::Ns*Tile::Ns; const size_t sz = long(Nx)*Ny*Nz/Ns3*sizeof(Tile); for(auto itiles: tiles ) CHECK_ERROR( cudaMemcpy(itiles, tilesHost, sz, cudaMemcpyHostToDevice ) ); } void Data_t::copyDev2Host(const int ipar){ const int Ns3 = Tile::Ns*Tile::Ns*Tile::Ns; const size_t sz = long(Nx)*Ny*Nz/Ns3*sizeof(Tile); CHECK_ERROR( cudaMemcpy(tilesHost, tiles[ipar], sz, cudaMemcpyDeviceToHost ) ); } void Data_t::swap_ptrs(){ Tile* tmpptr = tiles[0]; tiles[0] = tiles[1]; tiles[1] = tmpptr; }
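// A minimal sketch of the buffering pattern used by Data_t above: one pinned host mirror
// (cudaMallocHost / hipHostMalloc), two device buffers, and a pointer swap between steps.
// The buffer names, the CHECK macro and the element count are illustrative, not from the file.
#include <cstdio>
#include <cuda_runtime.h>

#define CHECK(call) do { cudaError_t err_ = (call); if (err_ != cudaSuccess) { \
  printf("CUDA error: %s (%s:%d)\n", cudaGetErrorString(err_), __FILE__, __LINE__); return 1; } } while (0)

int main() {
  const size_t n = 1 << 20, sz = n * sizeof(float);
  float *host = nullptr, *dev[2] = {nullptr, nullptr};
  CHECK(cudaMallocHost((void**)&host, sz));            // pinned memory: faster H2D/D2H copies
  for (int i = 0; i < 2; ++i) {
    CHECK(cudaMalloc((void**)&dev[i], sz));
    CHECK(cudaMemset(dev[i], 0, sz));
  }
  for (size_t i = 0; i < n; ++i) host[i] = float(i);
  CHECK(cudaMemcpy(dev[0], host, sz, cudaMemcpyHostToDevice));
  // the swap_ptrs() idea: the freshly written buffer becomes the read side of the next step
  float *tmp = dev[0]; dev[0] = dev[1]; dev[1] = tmp;
  CHECK(cudaMemcpy(host, dev[1], sz, cudaMemcpyDeviceToHost));
  printf("host[123] = %g (expect 123)\n", host[123]);
  CHECK(cudaFreeHost(host));
  for (int i = 0; i < 2; ++i) CHECK(cudaFree(dev[i]));
  return 0;
}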
546f46c56fa805624de8a202f80619ce92a1e51b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layers/multilabel_sigmoid_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void MultiLabelSigmoidLossForwardGPU(const int nthreads, const Dtype* input_data, const Dtype* target, Dtype* loss, Dtype* counts) { CUDA_KERNEL_LOOP(i, nthreads) { // const Dtype target_value = static_cast<Dtype>(target[i]); if (target[i] >= 0) { loss[i] = input_data[i] * (target[i] - (input_data[i] >= 0)) - log(1 + exp(input_data[i] - 2 * input_data[i] * (input_data[i] >= 0))); counts[i] = 1; } else { counts[i] = 0; loss[i] = 0; } } } template <typename Dtype> __global__ void MultiLabelSigmoidLossIgnoreDiffGPU(const int count, const Dtype* target, Dtype* diff, Dtype* counts) { CUDA_KERNEL_LOOP(i, count) { //const Dtype target_value = static_cast<Dtype>(target[i]); if (target[i] < 0) { diff[i] = 0; counts[i] = 0; } else counts[i] = 1; } } template <typename Dtype> void MultiLabelSigmoidLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { // The forward pass computes the sigmoid outputs. sigmoid_bottom_vec_[0] = bottom[0]; sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_); // Compute the loss (negative log likelihood) const int count = bottom[0]->count(); const int num = bottom[0]->num(); // Stable version of loss computation from input data const Dtype* input_data = bottom[0]->gpu_data(); const Dtype* target = bottom[1]->gpu_data(); // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); Dtype* count_data = bottom[1]->mutable_gpu_diff(); Dtype valid_count; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MultiLabelSigmoidLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, input_data, target, loss_data, count_data); caffe_gpu_asum(count, count_data, &valid_count); Dtype loss; caffe_gpu_asum(count, loss_data, &loss); // CPU version divides loss by num, which is the total number of classes, // here we divide by the number of classes that are not "dontcare", as in sigmoidcrossentropyloss if (valid_count > 0) top[0]->mutable_cpu_data()[0] = loss / valid_count; else top[0]->mutable_cpu_data()[0] = loss / num; } template <typename Dtype> void MultiLabelSigmoidLossLayer<Dtype>::Backward_gpu( const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { // First, compute the diff const int count = bottom[0]->count(); const int num = bottom[0]->num(); Dtype* count_data = bottom[1]->mutable_gpu_diff(); Dtype valid_count; const Dtype* sigmoid_output_data = sigmoid_output_->gpu_data(); const Dtype* target = bottom[1]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); caffe_copy(count, sigmoid_output_data, bottom_diff); caffe_gpu_axpy(count, Dtype(-1), target, bottom_diff); // Zero out gradient of ignored targets. 
// NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MultiLabelSigmoidLossIgnoreDiffGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, target, bottom_diff,count_data); caffe_gpu_asum(count, count_data, &valid_count); // Scale down gradient Dtype loss_weight = top[0]->cpu_diff()[0]; // here we should divide by valid_count for consistency if (valid_count) caffe_gpu_scal(count, loss_weight / valid_count, bottom_diff); else caffe_gpu_scal(count, loss_weight / num, bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(MultiLabelSigmoidLossLayer); } // namespace caffe
546f46c56fa805624de8a202f80619ce92a1e51b.cu
#include <vector> #include "caffe/layers/multilabel_sigmoid_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void MultiLabelSigmoidLossForwardGPU(const int nthreads, const Dtype* input_data, const Dtype* target, Dtype* loss, Dtype* counts) { CUDA_KERNEL_LOOP(i, nthreads) { // const Dtype target_value = static_cast<Dtype>(target[i]); if (target[i] >= 0) { loss[i] = input_data[i] * (target[i] - (input_data[i] >= 0)) - log(1 + exp(input_data[i] - 2 * input_data[i] * (input_data[i] >= 0))); counts[i] = 1; } else { counts[i] = 0; loss[i] = 0; } } } template <typename Dtype> __global__ void MultiLabelSigmoidLossIgnoreDiffGPU(const int count, const Dtype* target, Dtype* diff, Dtype* counts) { CUDA_KERNEL_LOOP(i, count) { //const Dtype target_value = static_cast<Dtype>(target[i]); if (target[i] < 0) { diff[i] = 0; counts[i] = 0; } else counts[i] = 1; } } template <typename Dtype> void MultiLabelSigmoidLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { // The forward pass computes the sigmoid outputs. sigmoid_bottom_vec_[0] = bottom[0]; sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_); // Compute the loss (negative log likelihood) const int count = bottom[0]->count(); const int num = bottom[0]->num(); // Stable version of loss computation from input data const Dtype* input_data = bottom[0]->gpu_data(); const Dtype* target = bottom[1]->gpu_data(); // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); Dtype* count_data = bottom[1]->mutable_gpu_diff(); Dtype valid_count; // NOLINT_NEXT_LINE(whitespace/operators) MultiLabelSigmoidLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, input_data, target, loss_data, count_data); caffe_gpu_asum(count, count_data, &valid_count); Dtype loss; caffe_gpu_asum(count, loss_data, &loss); // CPU version divides loss by num, which is the total number of classes, // here we divide by the number of classes that are not "dontcare", as in sigmoidcrossentropyloss if (valid_count > 0) top[0]->mutable_cpu_data()[0] = loss / valid_count; else top[0]->mutable_cpu_data()[0] = loss / num; } template <typename Dtype> void MultiLabelSigmoidLossLayer<Dtype>::Backward_gpu( const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { // First, compute the diff const int count = bottom[0]->count(); const int num = bottom[0]->num(); Dtype* count_data = bottom[1]->mutable_gpu_diff(); Dtype valid_count; const Dtype* sigmoid_output_data = sigmoid_output_->gpu_data(); const Dtype* target = bottom[1]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); caffe_copy(count, sigmoid_output_data, bottom_diff); caffe_gpu_axpy(count, Dtype(-1), target, bottom_diff); // Zero out gradient of ignored targets. 
// NOLINT_NEXT_LINE(whitespace/operators) MultiLabelSigmoidLossIgnoreDiffGPU<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, target, bottom_diff,count_data); caffe_gpu_asum(count, count_data, &valid_count); // Scale down gradient Dtype loss_weight = top[0]->cpu_diff()[0]; // here we should divide by valid_count for consistency if (valid_count) caffe_gpu_scal(count, loss_weight / valid_count, bottom_diff); else caffe_gpu_scal(count, loss_weight / num, bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(MultiLabelSigmoidLossLayer); } // namespace caffe
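// A standalone sketch of the numerically stable per-element term used by the forward kernel above,
//   loss = x * (t - (x >= 0)) - log(1 + exp(x - 2 * x * (x >= 0))),
// with targets < 0 treated as "ignore". A plain grid-stride loop stands in for Caffe's
// CUDA_KERNEL_LOOP macro; all names and test values below are illustrative.
#include <cstdio>
#include <cmath>
#include <cuda_runtime.h>

__global__ void sigmoid_xent_fwd(int n, const float *x, const float *t, float *loss, float *counts) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
    if (t[i] >= 0.f) {
      float pos = (x[i] >= 0.f) ? 1.f : 0.f;
      loss[i]   = x[i] * (t[i] - pos) - logf(1.f + expf(x[i] - 2.f * x[i] * pos));
      counts[i] = 1.f;
    } else {
      loss[i] = 0.f;
      counts[i] = 0.f;
    }
  }
}

int main() {
  const int n = 4;
  float hx[n] = {-3.f, 0.5f, 2.f, 10.f};
  float ht[n] = {0.f, 1.f, -1.f, 1.f};                 // -1 marks an ignored target
  float *dx, *dt, *dl, *dc, hl[n], hc[n];
  cudaMalloc((void**)&dx, n * sizeof(float)); cudaMalloc((void**)&dt, n * sizeof(float));
  cudaMalloc((void**)&dl, n * sizeof(float)); cudaMalloc((void**)&dc, n * sizeof(float));
  cudaMemcpy(dx, hx, n * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(dt, ht, n * sizeof(float), cudaMemcpyHostToDevice);
  sigmoid_xent_fwd<<<1, 64>>>(n, dx, dt, dl, dc);
  cudaMemcpy(hl, dl, n * sizeof(float), cudaMemcpyDeviceToHost);
  cudaMemcpy(hc, dc, n * sizeof(float), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; ++i) {
    // the kernel value equals minus the cross-entropy, so negate it before comparing
    float s = 1.f / (1.f + expf(-hx[i]));
    float ref = (ht[i] >= 0.f) ? -(ht[i] * logf(s) + (1.f - ht[i]) * logf(1.f - s)) : 0.f;
    printf("i=%d  gpu=%f  cpu=%f  counted=%g\n", i, -hl[i], ref, hc[i]);
  }
  cudaFree(dx); cudaFree(dt); cudaFree(dl); cudaFree(dc);
  return 0;
}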
f970117d2614eecb8beaa4833bd6c0a3d26a2b9f.hip
// !!! This is a file automatically generated by hipify!!! #include <cmath> #include <hip/hip_runtime.h> #include <cv.h> #include <iostream> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <stdio.h> #include <time.h> #define CHANNELS 3 #define MASK_WIDTH 3 __constant__ char M1[MASK_WIDTH * MASK_WIDTH]; __constant__ char M2[MASK_WIDTH * MASK_WIDTH]; using namespace cv; using namespace std; __global__ void sobel(unsigned char *in, unsigned char *out, int w, int h) { int Col = blockIdx.x * blockDim.x + threadIdx.x; int Row = blockIdx.y * blockDim.y + threadIdx.y; if (Col < w && Row < h) { int Gx = 0, Gy = 0; int N_start_col = Col - (MASK_WIDTH / 2); int N_start_row = Row - (MASK_WIDTH / 2); for (int j = 0; j < MASK_WIDTH; j++) { for (int k = 0; k < MASK_WIDTH; k++) { int curRow = N_start_row + j; int curCol = N_start_col + k; if (curRow > -1 && curRow < h && curCol > -1 && curCol < w) { Gx += in[curRow * w + curCol] * M1[j * MASK_WIDTH + k]; Gy += in[curRow * w + curCol] * M2[j * MASK_WIDTH + k]; } } } if (Gx < 0) Gx = 0; else { if (Gx > 255) Gx = 255; } if (Gy < 0) Gy = 0; else { if (Gy > 255) Gy = 255; } out[Row * w + Col] = (unsigned char)sqrtf((Gx * Gx) + (Gy * Gy)); } } int main(int argc, char **argv) { // Lectura de la imagen con openCV Mat image; image = imread("perro.jpg", CV_LOAD_IMAGE_COLOR); // Read the file Size s = image.size(); int width = s.width; int height = s.height; // Definicion de mascaras char h_mask1[] = {-1, 0, 1, -2, 0, 2, -1, 0, 1}; char h_mask2[] = {-1, -2, -1, 0, 0, 0, 1, 2, 1}; // Definicion de variables que se manejaran en el device unsigned char *d_image_Gray, *h_imageOutput, *out; // Reserva de memora para variales en host h_imageOutput = (unsigned char *)malloc(sizeof(unsigned char) * width * height); // Reserva de memoria para variables en device hipMalloc((void **)&d_image_Gray, sizeof(unsigned char) * width * height); hipMalloc((void **)&out, sizeof(unsigned char) * width * height); // Definicion de los bloques e hilos por bloques int blockSize = 32; dim3 dimBlock(blockSize, blockSize, 1); dim3 dimGrid(ceil(width / float(blockSize)), ceil(height / float(blockSize)), 1); // Copiando los datos del host al device hipMemcpyToSymbol(M1, h_mask1, sizeof(char) * MASK_WIDTH * MASK_WIDTH); hipMemcpyToSymbol(M2, h_mask2, sizeof(char) * MASK_WIDTH * MASK_WIDTH); // Convirtiendo imagen en escala de grises con openCV Mat grayImg; cvtColor(image, grayImg, CV_BGR2GRAY); // Copiando la imagen del host al device hipMemcpy(d_image_Gray, grayImg.data, width * height * sizeof(unsigned char), hipMemcpyHostToDevice); // Lanzando el kernel hipLaunchKernelGGL(( sobel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_image_Gray, out, width, height); // Copiando el resultado del device al host hipMemcpy(h_imageOutput, out, width * height * sizeof(unsigned char), hipMemcpyDeviceToHost); // Generando la imagen de salida Mat grayImgCuda; grayImgCuda.create(s.height, s.width, CV_8UC1); grayImgCuda.data = h_imageOutput; // Filtro de Sobel con openCV Mat gray_image_opencv, grad_x, abs_grad_x; cvtColor(image, gray_image_opencv, CV_BGR2GRAY); Sobel(gray_image_opencv, grad_x, CV_8UC1, 1, 0, 3, 1, 0, BORDER_DEFAULT); convertScaleAbs(grad_x, abs_grad_x); if (!image.data) // Check for invalid input { cout << "Could not open or find the image" << endl; return -1; } // Guardando la imagen generada por CUDA imwrite("perrogris.png", grayImgCuda); // Guardando la imagen generada por openCV // imwrite("./outputs/1088331150.png", abs_grad_x); cout << "La imagen esta lista." 
<< std::endl; return 0; }
f970117d2614eecb8beaa4833bd6c0a3d26a2b9f.cu
#include <cmath> #include <cuda.h> #include <cv.h> #include <iostream> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <stdio.h> #include <time.h> #define CHANNELS 3 #define MASK_WIDTH 3 __constant__ char M1[MASK_WIDTH * MASK_WIDTH]; __constant__ char M2[MASK_WIDTH * MASK_WIDTH]; using namespace cv; using namespace std; __global__ void sobel(unsigned char *in, unsigned char *out, int w, int h) { int Col = blockIdx.x * blockDim.x + threadIdx.x; int Row = blockIdx.y * blockDim.y + threadIdx.y; if (Col < w && Row < h) { int Gx = 0, Gy = 0; int N_start_col = Col - (MASK_WIDTH / 2); int N_start_row = Row - (MASK_WIDTH / 2); for (int j = 0; j < MASK_WIDTH; j++) { for (int k = 0; k < MASK_WIDTH; k++) { int curRow = N_start_row + j; int curCol = N_start_col + k; if (curRow > -1 && curRow < h && curCol > -1 && curCol < w) { Gx += in[curRow * w + curCol] * M1[j * MASK_WIDTH + k]; Gy += in[curRow * w + curCol] * M2[j * MASK_WIDTH + k]; } } } if (Gx < 0) Gx = 0; else { if (Gx > 255) Gx = 255; } if (Gy < 0) Gy = 0; else { if (Gy > 255) Gy = 255; } out[Row * w + Col] = (unsigned char)sqrtf((Gx * Gx) + (Gy * Gy)); } } int main(int argc, char **argv) { // Lectura de la imagen con openCV Mat image; image = imread("perro.jpg", CV_LOAD_IMAGE_COLOR); // Read the file Size s = image.size(); int width = s.width; int height = s.height; // Definicion de mascaras char h_mask1[] = {-1, 0, 1, -2, 0, 2, -1, 0, 1}; char h_mask2[] = {-1, -2, -1, 0, 0, 0, 1, 2, 1}; // Definicion de variables que se manejaran en el device unsigned char *d_image_Gray, *h_imageOutput, *out; // Reserva de memora para variales en host h_imageOutput = (unsigned char *)malloc(sizeof(unsigned char) * width * height); // Reserva de memoria para variables en device cudaMalloc((void **)&d_image_Gray, sizeof(unsigned char) * width * height); cudaMalloc((void **)&out, sizeof(unsigned char) * width * height); // Definicion de los bloques e hilos por bloques int blockSize = 32; dim3 dimBlock(blockSize, blockSize, 1); dim3 dimGrid(ceil(width / float(blockSize)), ceil(height / float(blockSize)), 1); // Copiando los datos del host al device cudaMemcpyToSymbol(M1, h_mask1, sizeof(char) * MASK_WIDTH * MASK_WIDTH); cudaMemcpyToSymbol(M2, h_mask2, sizeof(char) * MASK_WIDTH * MASK_WIDTH); // Convirtiendo imagen en escala de grises con openCV Mat grayImg; cvtColor(image, grayImg, CV_BGR2GRAY); // Copiando la imagen del host al device cudaMemcpy(d_image_Gray, grayImg.data, width * height * sizeof(unsigned char), cudaMemcpyHostToDevice); // Lanzando el kernel sobel<<<dimGrid, dimBlock>>>(d_image_Gray, out, width, height); // Copiando el resultado del device al host cudaMemcpy(h_imageOutput, out, width * height * sizeof(unsigned char), cudaMemcpyDeviceToHost); // Generando la imagen de salida Mat grayImgCuda; grayImgCuda.create(s.height, s.width, CV_8UC1); grayImgCuda.data = h_imageOutput; // Filtro de Sobel con openCV Mat gray_image_opencv, grad_x, abs_grad_x; cvtColor(image, gray_image_opencv, CV_BGR2GRAY); Sobel(gray_image_opencv, grad_x, CV_8UC1, 1, 0, 3, 1, 0, BORDER_DEFAULT); convertScaleAbs(grad_x, abs_grad_x); if (!image.data) // Check for invalid input { cout << "Could not open or find the image" << endl; return -1; } // Guardando la imagen generada por CUDA imwrite("perrogris.png", grayImgCuda); // Guardando la imagen generada por openCV // imwrite("./outputs/1088331150.png", abs_grad_x); cout << "La imagen esta lista." << std::endl; return 0; }
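// A standalone sketch of the constant-memory convolution pattern used above, with a synthetic
// ramp image instead of OpenCV I/O; image size and all names are illustrative. One difference
// by design: the file above clamps Gx and Gy to [0,255] before taking the magnitude, which
// discards negative gradients, whereas this sketch keeps the signed gradients and clamps only
// the final magnitude.
#include <cstdio>
#include <cmath>
#include <cuda_runtime.h>

#define MW 3
__constant__ char cMx[MW * MW];
__constant__ char cMy[MW * MW];

__global__ void sobel_demo(const unsigned char *in, unsigned char *out, int w, int h) {
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= w || row >= h) return;
  int gx = 0, gy = 0;
  for (int j = 0; j < MW; ++j)
    for (int k = 0; k < MW; ++k) {
      int r = row + j - MW / 2, c = col + k - MW / 2;
      if (r >= 0 && r < h && c >= 0 && c < w) {
        gx += in[r * w + c] * cMx[j * MW + k];
        gy += in[r * w + c] * cMy[j * MW + k];
      }
    }
  float mag = sqrtf(float(gx) * gx + float(gy) * gy);   // signed gradients, then magnitude
  out[row * w + col] = (unsigned char)(mag > 255.f ? 255.f : mag);
}

int main() {
  const int w = 64, h = 64;
  char mx[] = {-1, 0, 1, -2, 0, 2, -1, 0, 1};           // same masks as the file above
  char my[] = {-1, -2, -1, 0, 0, 0, 1, 2, 1};
  cudaMemcpyToSymbol(cMx, mx, sizeof(mx));
  cudaMemcpyToSymbol(cMy, my, sizeof(my));
  unsigned char img[w * h], edges[w * h];
  for (int i = 0; i < w * h; ++i) img[i] = (unsigned char)((i % w) * 4);   // horizontal ramp
  unsigned char *din, *dout;
  cudaMalloc((void**)&din, w * h); cudaMalloc((void**)&dout, w * h);
  cudaMemcpy(din, img, w * h, cudaMemcpyHostToDevice);
  dim3 block(16, 16), grid((w + 15) / 16, (h + 15) / 16);
  sobel_demo<<<grid, block>>>(din, dout, w, h);
  cudaMemcpy(edges, dout, w * h, cudaMemcpyDeviceToHost);
  printf("edge magnitude at (32,32) = %d (expect 32 for a ramp of slope 4)\n", edges[32 * w + 32]);
  cudaFree(din); cudaFree(dout);
  return 0;
}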
338f2c68cd3bbe76314e33d1d5fe2ce98b596b8a.hip
// !!! This is a file automatically generated by hipify!!! /*--------------------------------------------------------------------- Copyright 2019 Paolo G. Crosetto <[email protected]> SPDX-License-Identifier: Apache-2.0 ---------------------------------------------------------------------*/ #include <boost/program_options.hpp> #include <cmath> #include <iostream> #include <vector> // boosts timers #include <boost/timer/timer.hpp> #include "../src/cupq.h" // nvcc -gencode=arch=compute_50,code=sm_50 -std=c++14 test_reverse.cu -O3 // -DNDEBUG -lboost_timer -g int main(int argc, char **argv) { using namespace cupq; boost::program_options::options_description desc("Allowed options"); desc.add_options()("help,h", "print usage message")( "graph,g", boost::program_options::value<std::string>(), "graph filename without extensions"); boost::program_options::variables_map vm; boost::program_options::store( boost::program_options::parse_command_line(argc, argv, desc), vm); std::string graphname_; if (vm.count("help") || !vm.size()) { std::cout << "USAGE:\n\n" "dijkstra_simple_exe -g <graph file> \n\n" "printing this help:\n\n" "dijkstra_exe -h \n" "dijkstra_exe \n" "\n"; return 0; } if (vm.count("graph")) { graphname_ = vm["graph"].as<std::string>(); } else { std::cout << "specify a graph file"; } size_t s; hipDeviceGetLimit(&s, hipLimitPrintfFifoSize); hipDeviceSetLimit(hipLimitPrintfFifoSize, s * 100); using value_t = float; using index_t = int; using graph_t = Graph<value_t, index_t, cupq::Backend::Host>; graph_t graph_; graph_.setup(); graph_.readDIMACS(graphname_.c_str()); // computing heuristic { unsigned N = 92; std::vector<index_t> sources_(N); for (auto i = 0; i < N; ++i) { sources_[i] = i + 1; } std::vector<index_t> destinations_(N); for (auto i = 0; i < N; ++i) { destinations_[i] = graph_.size() - i - 1; } auto rgraph_ = graph_.reverse(); cupq::DijkstraSolver<graph_t> solver_; solver_.setSources(sources_); solver_.setGraph(graph_); solver_.computeDijkstra(-1, -1); solver_.finalize(); auto out = solver_.out(); graph_.free(); solver_.setGraph(rgraph_); solver_.setSources(destinations_); solver_.computeDijkstra(-1, -1); auto out3 = solver_.out(); auto rrgraph_ = rgraph_.reverse(); rgraph_.free(); solver_.setGraph(rrgraph_); solver_.setSources(sources_); solver_.computeDijkstra(-1, -1); auto out2 = solver_.out(); std::cout << "CPU:\n\n"; for (unsigned i = 0; i < N; ++i) { Matrix<value_t, cupq::Backend::Host> d_potential_(graph_.nbNodes() - 1, 1, (value_t)FLT_MAX); Matrix<index_t, cupq::Backend::Host> d_parent_(graph_.nbNodes() - 1, 1, -1); std::vector<index_t> orig_{(index_t)(i + 1)}; dijkstra(graph_, d_potential_, d_parent_, orig_, -1); for (unsigned k = 0; k < d_potential_.nrows(); ++k) if (out.first(i, k) != d_potential_(k, 0)) { if (d_parent_(k, 0) != -1) { std::cout << "error66\n"; std::cout << i << " " << k << " " << out.second(i, k) << " != " << d_parent_(k, 0) << "\n"; std::cout << out.first(i, k) << " != " << d_potential_(k, 0) << "\n"; } } } for (auto i = 0; i < N; ++i) for (unsigned k = 0; k < graph_.size(); ++k) if (out.first(i, k) != out2.first(i, k)) { std::cout << "error2\n"; std::cout << i << " " << k << " " << out.second(i, k) << " != " << out2.second(i, k) << "\n"; std::cout << out.first(i, k) << " != " << out2.first(i, k) << "\n"; break; } auto eps = .1; for (auto i = 0; i < N; ++i) for (auto j = 0; j < N; ++j) if (out.first(i, destinations_[j]) > out3.first(j, sources_[i]) + eps || out.first(i, destinations_[j]) < out3.first(j, sources_[i]) - eps) { std::cout << "error3\n"; 
std::cout << i << " " << j << " " << out.second(i, destinations_[j]) << " != " << out3.second(j, sources_[i]) << "\n"; std::cout << out.first(i, destinations_[j]) << " != " << out3.first(j, sources_[i]) << "\n"; break; } } }
338f2c68cd3bbe76314e33d1d5fe2ce98b596b8a.cu
/*--------------------------------------------------------------------- Copyright 2019 Paolo G. Crosetto <[email protected]> SPDX-License-Identifier: Apache-2.0 ---------------------------------------------------------------------*/ #include <boost/program_options.hpp> #include <cmath> #include <iostream> #include <vector> // boosts timers #include <boost/timer/timer.hpp> #include "../src/cupq.h" // nvcc -gencode=arch=compute_50,code=sm_50 -std=c++14 test_reverse.cu -O3 // -DNDEBUG -lboost_timer -g int main(int argc, char **argv) { using namespace cupq; boost::program_options::options_description desc("Allowed options"); desc.add_options()("help,h", "print usage message")( "graph,g", boost::program_options::value<std::string>(), "graph filename without extensions"); boost::program_options::variables_map vm; boost::program_options::store( boost::program_options::parse_command_line(argc, argv, desc), vm); std::string graphname_; if (vm.count("help") || !vm.size()) { std::cout << "USAGE:\n\n" "dijkstra_simple_exe -g <graph file> \n\n" "printing this help:\n\n" "dijkstra_exe -h \n" "dijkstra_exe \n" "\n"; return 0; } if (vm.count("graph")) { graphname_ = vm["graph"].as<std::string>(); } else { std::cout << "specify a graph file"; } size_t s; cudaDeviceGetLimit(&s, cudaLimitPrintfFifoSize); cudaDeviceSetLimit(cudaLimitPrintfFifoSize, s * 100); using value_t = float; using index_t = int; using graph_t = Graph<value_t, index_t, cupq::Backend::Host>; graph_t graph_; graph_.setup(); graph_.readDIMACS(graphname_.c_str()); // computing heuristic { unsigned N = 92; std::vector<index_t> sources_(N); for (auto i = 0; i < N; ++i) { sources_[i] = i + 1; } std::vector<index_t> destinations_(N); for (auto i = 0; i < N; ++i) { destinations_[i] = graph_.size() - i - 1; } auto rgraph_ = graph_.reverse(); cupq::DijkstraSolver<graph_t> solver_; solver_.setSources(sources_); solver_.setGraph(graph_); solver_.computeDijkstra(-1, -1); solver_.finalize(); auto out = solver_.out(); graph_.free(); solver_.setGraph(rgraph_); solver_.setSources(destinations_); solver_.computeDijkstra(-1, -1); auto out3 = solver_.out(); auto rrgraph_ = rgraph_.reverse(); rgraph_.free(); solver_.setGraph(rrgraph_); solver_.setSources(sources_); solver_.computeDijkstra(-1, -1); auto out2 = solver_.out(); std::cout << "CPU:\n\n"; for (unsigned i = 0; i < N; ++i) { Matrix<value_t, cupq::Backend::Host> d_potential_(graph_.nbNodes() - 1, 1, (value_t)FLT_MAX); Matrix<index_t, cupq::Backend::Host> d_parent_(graph_.nbNodes() - 1, 1, -1); std::vector<index_t> orig_{(index_t)(i + 1)}; dijkstra(graph_, d_potential_, d_parent_, orig_, -1); for (unsigned k = 0; k < d_potential_.nrows(); ++k) if (out.first(i, k) != d_potential_(k, 0)) { if (d_parent_(k, 0) != -1) { std::cout << "error66\n"; std::cout << i << " " << k << " " << out.second(i, k) << " != " << d_parent_(k, 0) << "\n"; std::cout << out.first(i, k) << " != " << d_potential_(k, 0) << "\n"; } } } for (auto i = 0; i < N; ++i) for (unsigned k = 0; k < graph_.size(); ++k) if (out.first(i, k) != out2.first(i, k)) { std::cout << "error2\n"; std::cout << i << " " << k << " " << out.second(i, k) << " != " << out2.second(i, k) << "\n"; std::cout << out.first(i, k) << " != " << out2.first(i, k) << "\n"; break; } auto eps = .1; for (auto i = 0; i < N; ++i) for (auto j = 0; j < N; ++j) if (out.first(i, destinations_[j]) > out3.first(j, sources_[i]) + eps || out.first(i, destinations_[j]) < out3.first(j, sources_[i]) - eps) { std::cout << "error3\n"; std::cout << i << " " << j << " " << out.second(i, 
destinations_[j]) << " != " << out3.second(j, sources_[i]) << "\n"; std::cout << out.first(i, destinations_[j]) << " != " << out3.first(j, sources_[i]) << "\n"; break; } } }
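// A host-only sketch of the identity exercised by the error3 check above: the distance from s to t
// in a graph equals the distance from t to s in the reversed graph. The 4-node graph and the
// dijkstra() / reverse_graph() helpers are illustrative and are not the cupq API.
#include <cstdio>
#include <vector>
#include <queue>
#include <functional>
#include <limits>

typedef std::pair<int, float> Edge;                    // (target, weight)
typedef std::vector<std::vector<Edge> > Graph;

static std::vector<float> dijkstra(const Graph &g, int src) {
  std::vector<float> dist(g.size(), std::numeric_limits<float>::infinity());
  typedef std::pair<float, int> QE;                    // (distance, node)
  std::priority_queue<QE, std::vector<QE>, std::greater<QE> > pq;
  dist[src] = 0.f;
  pq.push(std::make_pair(0.f, src));
  while (!pq.empty()) {
    float d = pq.top().first; int u = pq.top().second; pq.pop();
    if (d > dist[u]) continue;                         // stale queue entry
    for (size_t k = 0; k < g[u].size(); ++k) {
      int v = g[u][k].first; float w = g[u][k].second;
      if (d + w < dist[v]) { dist[v] = d + w; pq.push(std::make_pair(dist[v], v)); }
    }
  }
  return dist;
}

static Graph reverse_graph(const Graph &g) {
  Graph r(g.size());
  for (size_t u = 0; u < g.size(); ++u)
    for (size_t k = 0; k < g[u].size(); ++k)
      r[g[u][k].first].push_back(std::make_pair((int)u, g[u][k].second));
  return r;
}

int main() {
  Graph g(4);
  g[0].push_back(std::make_pair(1, 1.f)); g[0].push_back(std::make_pair(2, 4.f));
  g[1].push_back(std::make_pair(2, 2.f)); g[1].push_back(std::make_pair(3, 6.f));
  g[2].push_back(std::make_pair(3, 3.f));
  float fwd = dijkstra(g, 0)[3];                       // 0 -> 1 -> 2 -> 3 = 6
  float bwd = dijkstra(reverse_graph(g), 3)[0];        // the same path walked backwards
  printf("forward %g, reverse %g, %s\n", fwd, bwd, fwd == bwd ? "identity holds" : "mismatch");
  return 0;
}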
680e97cd3b05fac57177e1cc1528da260df99b24.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void kBiggerThanRowVector(float* mat, float* vec, float* tgtMat, const int width, const int height) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < width * height; i += numThreads) { tgtMat[i] = mat[i] > vec[i % width]; } }
680e97cd3b05fac57177e1cc1528da260df99b24.cu
#include "includes.h" __global__ void kBiggerThanRowVector(float* mat, float* vec, float* tgtMat, const int width, const int height) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < width * height; i += numThreads) { tgtMat[i] = mat[i] > vec[i % width]; } }
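// A host-side sketch that drives the kBiggerThanRowVector kernel above and checks its broadcast
// semantics: vec[i % width] compares every row of mat against one row vector. The kernel is
// restated verbatim so the sketch compiles on its own; matrix sizes and the launch shape are
// illustrative.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void kBiggerThanRowVector(float* mat, float* vec, float* tgtMat,
                                     const int width, const int height) {
  const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
  const unsigned int numThreads = blockDim.x * gridDim.x;
  for (unsigned int i = idx; i < width * height; i += numThreads)
    tgtMat[i] = mat[i] > vec[i % width];
}

int main() {
  const int w = 4, h = 3, n = w * h;
  float mat[n], vec[w] = {0.f, 1.f, 2.f, 3.f}, out[n];
  for (int i = 0; i < n; ++i) mat[i] = float(i % w);   // every row of mat equals vec
  float *dm, *dv, *dt;
  cudaMalloc((void**)&dm, n * sizeof(float));
  cudaMalloc((void**)&dv, w * sizeof(float));
  cudaMalloc((void**)&dt, n * sizeof(float));
  cudaMemcpy(dm, mat, n * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(dv, vec, w * sizeof(float), cudaMemcpyHostToDevice);
  kBiggerThanRowVector<<<2, 32>>>(dm, dv, dt, w, h);
  cudaMemcpy(out, dt, n * sizeof(float), cudaMemcpyDeviceToHost);
  // mat equals the row vector everywhere, so no element is strictly bigger: all zeros expected
  for (int i = 0; i < n; ++i)
    if (out[i] != 0.f) { printf("unexpected 1 at index %d\n", i); return 1; }
  printf("all comparisons are 0, as expected\n");
  cudaFree(dm); cudaFree(dv); cudaFree(dt);
  return 0;
}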
9f467926044d84f2c4ce2aa72c209eee914c3d31.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <vector> #include <cmath> #include <ctime> #include <numeric> #include "book.h" const int Nstart = 1000; const int Ntimes = 4; const int Nhist = 100; #define HISTINT long long using namespace std; // Define a particle storage class struct Particles { vector<float> x, y, z; int N; }; struct ParticlesGPU { float *x, *y, *z; }; void AllocCopyGPU(Particles &p, ParticlesGPU &p2) { // x HANDLE_ERROR( hipMalloc ( (void**)&p2.x, p.N * sizeof(float))); HANDLE_ERROR( hipMemcpy ( p2.x, &p.x[0], p.N * sizeof(float), hipMemcpyHostToDevice)); // y HANDLE_ERROR( hipMalloc ( (void**)&p2.y, p.N * sizeof(float))); HANDLE_ERROR( hipMemcpy ( p2.y, &p.y[0], p.N * sizeof(float), hipMemcpyHostToDevice)); // z HANDLE_ERROR( hipMalloc ( (void**)&p2.z, p.N * sizeof(float))); HANDLE_ERROR( hipMemcpy ( p2.z, &p.z[0], p.N * sizeof(float), hipMemcpyHostToDevice)); } void FreeGPU(ParticlesGPU &p) { hipFree(p.x); hipFree(p.y); hipFree(p.z); } void makeRandomParticles(int N, Particles &p) { // Set number of particles p.N = N; // Resize the vectors p.x.resize(N); p.y.resize(N); p.z.resize(N); // Fill in the vectors for (int ii=0; ii < N; ++ii) { p.x[ii] = float(rand())/float(RAND_MAX); p.y[ii] = float(rand())/float(RAND_MAX); p.z[ii] = float(rand())/float(RAND_MAX); } }; // Define the GPU kernel here __global__ void paircount_kernel( int N1, float *x1, float *y1, float *z1, int N2, float *x2, float *y2, float *z2, int Nh, HISTINT *hist) { // Keep a shared copy of the histogram __shared__ long long _hist[Nhist]; // We distribute p1, but loop through all of p2 int ii, jj, idr; int stride = blockDim.x * gridDim.x; float x, y, z, dx, dy, dz, dr; // Zero histogram ii = threadIdx.x; while (ii < Nhist) { _hist[ii] = 0ll; ii += blockDim.x; } __syncthreads(); ii = threadIdx.x + blockIdx.x * blockDim.x; while (ii < N1) { x = x1[ii]; y = y1[ii]; z = z1[ii]; for (jj = 0; jj < N2; ++jj) { dx = x2[jj] - x; dy = y2[jj] - y; dz = z2[jj] - z; dr = sqrtf(dx*dx + dy*dy + dz*dz); idr = (int) (dr*Nh); if (idr < Nh) atomicAdd( (unsigned long long*) &_hist[idr], 1ll); } ii += stride; } // Synchronize __syncthreads(); // Copy histogram ii = threadIdx.x; while (ii < Nhist) { atomicAdd( (unsigned long long*) &hist[ii], _hist[ii]); ii += blockDim.x; } } void cpu_paircount_v2(const Particles &p1, const Particles &p2, vector<HISTINT>& hist) { float x1, y1, z1, dx, dy, dz; const int nblock=10; float dr[nblock]; int idr; for (int ii =0; ii < p1.N; ++ii) { x1 = p1.x[ii]; y1 = p1.y[ii]; z1 = p1.z[ii]; for (int jj=0; jj < p2.N/nblock; ++jj) { for (int kk=0; kk < nblock; ++kk) { dx = p2.x[jj*nblock+kk]-x1; dy = p2.y[jj*nblock+kk]-y1; dz = p2.z[jj*nblock+kk]-z1; dr[kk] = sqrt(dx*dx + dy*dy + dz*dz); } for (int kk=0; kk < nblock; ++kk) { idr = (int)(dr[kk]*Nhist); if (idr < Nhist) hist[idr]++; } } } } double cpu_harness(int N, int blocks) { Particles p1,p2; ParticlesGPU pg1, pg2; clock_t t0; double dt; float gpu_dt; cout << "Starting harness with N=" << N << endl; // Initialize t0 = clock(); makeRandomParticles(N, p1); makeRandomParticles(N, p2); dt = difftime(clock(), t0)/double(CLOCKS_PER_SEC); cout << " Time to initialize: " << dt << endl; // Set up GPU timers hipEvent_t start, stop; HANDLE_ERROR( hipEventCreate( &start ) ); HANDLE_ERROR( hipEventCreate( &stop ) ); // Move data to GPU HANDLE_ERROR( hipEventRecord( start, 0 ) ); AllocCopyGPU(p1, pg1); AllocCopyGPU(p2, pg2); HANDLE_ERROR( hipEventRecord( stop, 0 ) ); HANDLE_ERROR( 
hipEventSynchronize( stop ) ); HANDLE_ERROR( hipEventElapsedTime( &gpu_dt, start, stop ) ); cout << " Time to move data on to GPU (ms): " << gpu_dt << endl; // Set up the gpu_hist HISTINT *gpu_hist; HANDLE_ERROR( hipMalloc( (void**)&gpu_hist, Nhist*sizeof(HISTINT))); HANDLE_ERROR( hipMemset( gpu_hist, 0, Nhist*sizeof(HISTINT))); HANDLE_ERROR( hipEventRecord( start, 0 ) ); hipLaunchKernelGGL(( paircount_kernel), dim3(blocks), dim3(512), 0, 0, N, pg1.x, pg1.y, pg1.z, N, pg2.x, pg2.y, pg2.z, Nhist, gpu_hist); HANDLE_ERROR( hipEventRecord( stop, 0 ) ); HANDLE_ERROR( hipEventSynchronize( stop ) ); HANDLE_ERROR( hipEventElapsedTime( &gpu_dt, start, stop ) ); cout << " Time for GPU paircounts (ms): " << gpu_dt << endl; // reduce histogram HANDLE_ERROR( hipEventRecord( start, 0 ) ); // Suck back the histogram array vector<HISTINT> hist1(Nhist); HANDLE_ERROR( hipMemcpy( &hist1[0], gpu_hist, Nhist*sizeof(HISTINT), hipMemcpyDeviceToHost)); // Clean up hipFree(gpu_hist); FreeGPU(pg1); FreeGPU(pg2); // Clean up GPU timers HANDLE_ERROR( hipEventDestroy( start ) ); HANDLE_ERROR( hipEventDestroy( stop ) ); // CPU paircounting vector<HISTINT> hist(Nhist,0); t0 = clock(); cpu_paircount_v2(p1, p2, hist); dt = difftime(clock(), t0)/double(CLOCKS_PER_SEC); cout << " Time to count pairs v2: " << dt << endl; //for (int ii = 0; ii < Nhist; ++ii) { // cout << ii << " " << hist[ii] << " " << hist1[ii] << endl; //} // Now compare histograms HISTINT dhist = 0, error = 0, eval = 0; for (int ii =0; ii < Nhist; ++ii) { dhist = abs(hist[ii] - hist1[ii]); if (dhist > error) { error = dhist; eval = hist[ii]; } } cout << " Difference in histograms : " << error << " " << eval << endl; return dt; } int main() { double timing[Ntimes]; int i, N1; cout << "Pair counting timing code...." << endl; // kernel launch - 2x the number of mps gave best timing hipDeviceProp_t prop; HANDLE_ERROR( hipGetDeviceProperties( &prop, 0 ) ); int blocks = prop.multiProcessorCount * 2; cout << "Using blocks = " << blocks << endl; for (i=0, N1=Nstart; i < Ntimes; ++i, N1*=2) { timing[i] = cpu_harness(N1, blocks); } }
9f467926044d84f2c4ce2aa72c209eee914c3d31.cu
#include <iostream> #include <vector> #include <cmath> #include <ctime> #include <numeric> #include "book.h" const int Nstart = 1000; const int Ntimes = 4; const int Nhist = 100; #define HISTINT long long using namespace std; // Define a particle storage class struct Particles { vector<float> x, y, z; int N; }; struct ParticlesGPU { float *x, *y, *z; }; void AllocCopyGPU(Particles &p, ParticlesGPU &p2) { // x HANDLE_ERROR( cudaMalloc ( (void**)&p2.x, p.N * sizeof(float))); HANDLE_ERROR( cudaMemcpy ( p2.x, &p.x[0], p.N * sizeof(float), cudaMemcpyHostToDevice)); // y HANDLE_ERROR( cudaMalloc ( (void**)&p2.y, p.N * sizeof(float))); HANDLE_ERROR( cudaMemcpy ( p2.y, &p.y[0], p.N * sizeof(float), cudaMemcpyHostToDevice)); // z HANDLE_ERROR( cudaMalloc ( (void**)&p2.z, p.N * sizeof(float))); HANDLE_ERROR( cudaMemcpy ( p2.z, &p.z[0], p.N * sizeof(float), cudaMemcpyHostToDevice)); } void FreeGPU(ParticlesGPU &p) { cudaFree(p.x); cudaFree(p.y); cudaFree(p.z); } void makeRandomParticles(int N, Particles &p) { // Set number of particles p.N = N; // Resize the vectors p.x.resize(N); p.y.resize(N); p.z.resize(N); // Fill in the vectors for (int ii=0; ii < N; ++ii) { p.x[ii] = float(rand())/float(RAND_MAX); p.y[ii] = float(rand())/float(RAND_MAX); p.z[ii] = float(rand())/float(RAND_MAX); } }; // Define the GPU kernel here __global__ void paircount_kernel( int N1, float *x1, float *y1, float *z1, int N2, float *x2, float *y2, float *z2, int Nh, HISTINT *hist) { // Keep a shared copy of the histogram __shared__ long long _hist[Nhist]; // We distribute p1, but loop through all of p2 int ii, jj, idr; int stride = blockDim.x * gridDim.x; float x, y, z, dx, dy, dz, dr; // Zero histogram ii = threadIdx.x; while (ii < Nhist) { _hist[ii] = 0ll; ii += blockDim.x; } __syncthreads(); ii = threadIdx.x + blockIdx.x * blockDim.x; while (ii < N1) { x = x1[ii]; y = y1[ii]; z = z1[ii]; for (jj = 0; jj < N2; ++jj) { dx = x2[jj] - x; dy = y2[jj] - y; dz = z2[jj] - z; dr = sqrtf(dx*dx + dy*dy + dz*dz); idr = (int) (dr*Nh); if (idr < Nh) atomicAdd( (unsigned long long*) &_hist[idr], 1ll); } ii += stride; } // Synchronize __syncthreads(); // Copy histogram ii = threadIdx.x; while (ii < Nhist) { atomicAdd( (unsigned long long*) &hist[ii], _hist[ii]); ii += blockDim.x; } } void cpu_paircount_v2(const Particles &p1, const Particles &p2, vector<HISTINT>& hist) { float x1, y1, z1, dx, dy, dz; const int nblock=10; float dr[nblock]; int idr; for (int ii =0; ii < p1.N; ++ii) { x1 = p1.x[ii]; y1 = p1.y[ii]; z1 = p1.z[ii]; for (int jj=0; jj < p2.N/nblock; ++jj) { for (int kk=0; kk < nblock; ++kk) { dx = p2.x[jj*nblock+kk]-x1; dy = p2.y[jj*nblock+kk]-y1; dz = p2.z[jj*nblock+kk]-z1; dr[kk] = sqrt(dx*dx + dy*dy + dz*dz); } for (int kk=0; kk < nblock; ++kk) { idr = (int)(dr[kk]*Nhist); if (idr < Nhist) hist[idr]++; } } } } double cpu_harness(int N, int blocks) { Particles p1,p2; ParticlesGPU pg1, pg2; clock_t t0; double dt; float gpu_dt; cout << "Starting harness with N=" << N << endl; // Initialize t0 = clock(); makeRandomParticles(N, p1); makeRandomParticles(N, p2); dt = difftime(clock(), t0)/double(CLOCKS_PER_SEC); cout << " Time to initialize: " << dt << endl; // Set up GPU timers cudaEvent_t start, stop; HANDLE_ERROR( cudaEventCreate( &start ) ); HANDLE_ERROR( cudaEventCreate( &stop ) ); // Move data to GPU HANDLE_ERROR( cudaEventRecord( start, 0 ) ); AllocCopyGPU(p1, pg1); AllocCopyGPU(p2, pg2); HANDLE_ERROR( cudaEventRecord( stop, 0 ) ); HANDLE_ERROR( cudaEventSynchronize( stop ) ); HANDLE_ERROR( cudaEventElapsedTime( &gpu_dt, start, 
stop ) ); cout << " Time to move data on to GPU (ms): " << gpu_dt << endl; // Set up the gpu_hist HISTINT *gpu_hist; HANDLE_ERROR( cudaMalloc( (void**)&gpu_hist, Nhist*sizeof(HISTINT))); HANDLE_ERROR( cudaMemset( gpu_hist, 0, Nhist*sizeof(HISTINT))); HANDLE_ERROR( cudaEventRecord( start, 0 ) ); paircount_kernel<<<blocks, 512>>>(N, pg1.x, pg1.y, pg1.z, N, pg2.x, pg2.y, pg2.z, Nhist, gpu_hist); HANDLE_ERROR( cudaEventRecord( stop, 0 ) ); HANDLE_ERROR( cudaEventSynchronize( stop ) ); HANDLE_ERROR( cudaEventElapsedTime( &gpu_dt, start, stop ) ); cout << " Time for GPU paircounts (ms): " << gpu_dt << endl; // reduce histogram HANDLE_ERROR( cudaEventRecord( start, 0 ) ); // Suck back the histogram array vector<HISTINT> hist1(Nhist); HANDLE_ERROR( cudaMemcpy( &hist1[0], gpu_hist, Nhist*sizeof(HISTINT), cudaMemcpyDeviceToHost)); // Clean up cudaFree(gpu_hist); FreeGPU(pg1); FreeGPU(pg2); // Clean up GPU timers HANDLE_ERROR( cudaEventDestroy( start ) ); HANDLE_ERROR( cudaEventDestroy( stop ) ); // CPU paircounting vector<HISTINT> hist(Nhist,0); t0 = clock(); cpu_paircount_v2(p1, p2, hist); dt = difftime(clock(), t0)/double(CLOCKS_PER_SEC); cout << " Time to count pairs v2: " << dt << endl; //for (int ii = 0; ii < Nhist; ++ii) { // cout << ii << " " << hist[ii] << " " << hist1[ii] << endl; //} // Now compare histograms HISTINT dhist = 0, error = 0, eval = 0; for (int ii =0; ii < Nhist; ++ii) { dhist = abs(hist[ii] - hist1[ii]); if (dhist > error) { error = dhist; eval = hist[ii]; } } cout << " Difference in histograms : " << error << " " << eval << endl; return dt; } int main() { double timing[Ntimes]; int i, N1; cout << "Pair counting timing code...." << endl; // kernel launch - 2x the number of mps gave best timing cudaDeviceProp prop; HANDLE_ERROR( cudaGetDeviceProperties( &prop, 0 ) ); int blocks = prop.multiProcessorCount * 2; cout << "Using blocks = " << blocks << endl; for (i=0, N1=Nstart; i < Ntimes; ++i, N1*=2) { timing[i] = cpu_harness(N1, blocks); } }
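// A minimal sketch of the shared-memory histogram pattern used by paircount_kernel above:
// zero a per-block copy, accumulate into it with atomicAdd, then merge it into the global
// histogram once per block. The data (random bytes binned into 16 buckets) and all names
// are illustrative.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define NBINS 16

__global__ void hist_shared(const unsigned char *data, int n, unsigned long long *hist) {
  __shared__ unsigned long long local[NBINS];
  for (int b = threadIdx.x; b < NBINS; b += blockDim.x) local[b] = 0ull;   // zero the block copy
  __syncthreads();
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
    atomicAdd(&local[data[i] % NBINS], 1ull);                              // block-local counts
  __syncthreads();
  for (int b = threadIdx.x; b < NBINS; b += blockDim.x)
    atomicAdd(&hist[b], local[b]);                                         // one merge per bin per block
}

int main() {
  const int n = 1 << 20;
  unsigned char *h = (unsigned char *)malloc(n);
  for (int i = 0; i < n; ++i) h[i] = (unsigned char)(rand() & 0xff);
  unsigned char *d; unsigned long long *dhist, hist[NBINS];
  cudaMalloc((void**)&d, n);
  cudaMalloc((void**)&dhist, NBINS * sizeof(unsigned long long));
  cudaMemset(dhist, 0, NBINS * sizeof(unsigned long long));
  cudaMemcpy(d, h, n, cudaMemcpyHostToDevice);
  hist_shared<<<64, 256>>>(d, n, dhist);
  cudaMemcpy(hist, dhist, sizeof(hist), cudaMemcpyDeviceToHost);
  unsigned long long total = 0;
  for (int b = 0; b < NBINS; ++b) total += hist[b];
  printf("binned %llu of %d values\n", total, n);                          // expect the two to match
  cudaFree(d); cudaFree(dhist); free(h);
  return 0;
}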
2de57178fa992b8f848d6c9d68ff3d734ad7b102.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Vector addition on the GPU: C = A + B */ #include <stdio.h> #include <stdlib.h> #include <string.h> #define SIZE 1024 #define BLOCKSIZE 16 // Device function (i.e. kernel) __global__ void VecAdd(float * A, float * B, float * C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; if ( i < N ) { C[i] = A[i] + B[i]; } } // CPU version of the vector addition function void vecAddCPU(float * A, float * B, float * C, int N) { int i; for (i=0; i<N; i++) { C[i] = A[i] + B[i]; } } // Function compares two 1d arrays void compareVecs( float * vec1, float * vec2, int N ) { int i; int vecsEqual = 1; for (i=0; i<N; i++) { if ( abs (vec1[i] - vec2[i]) > 0.00001 ) { printf("vectors not equal! i: %d vec1[i]: %f vec2[i]: %f\n",i,vec1[i],vec2[i]); vecsEqual = 0; } } if ( vecsEqual ) printf("GPU vector addition agrees with CPU version!\n"); } /* Host function for filling vector (1d array) with random numbers between -20.0 and 20.0 */ void fillOutVector( float * vec, int vec_length ) { time_t t; srand((unsigned) time(&t)); // initialize random number generator int i; for (i=0; i<vec_length; i++) { vec[i] = ( (float)rand() / (float)(RAND_MAX) ) * 40.0; vec[i] -= 20.0; } } // Host function for printing a vector (1d array) void printVector( float * vec, int vec_length ) { int i; for (i=0; i<vec_length; i++) { printf("i: %d vec[i]: %f\n",i,vec[i]); } } void printDeviceSpecs() { int devID; hipDeviceProp_t props; hipGetDevice(&devID); hipGetDeviceProperties(&props, devID); printf("Device %d: \"%s\" with Compute %d.%d capability\n", devID, props.name, props.major,props.minor); } void vecMultiplyGPU(float * product_device_array) { // initialize timer events hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // allocate space for host arrays size_t vec_bytes = SIZE * sizeof(float); float * h_A = (float *)malloc( vec_bytes ); float * h_B = (float *)malloc( vec_bytes ); float * h_C = (float *)malloc( vec_bytes ); // initialize arrays fillOutVector( h_A, SIZE ); fillOutVector( h_B, SIZE ); printDeviceSpecs(); // allocate space for A and B on the device and copy data into space float * d_A, * d_B; hipMalloc(&d_A, vec_bytes); hipMalloc(&d_B, vec_bytes); hipMemcpy(d_A, h_A, vec_bytes, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, vec_bytes, hipMemcpyHostToDevice); // launch kernel and get timing info dim3 threadsPerBlock(BLOCKSIZE); dim3 blocksPerGrid( (SIZE + BLOCKSIZE - 1) / BLOCKSIZE ); hipEventRecord(start); hipLaunchKernelGGL(( VecAdd), dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, d_A, d_B, product_device_array, SIZE); hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("kernel time (ms) : %7.5f\n",milliseconds); hipMemcpy(h_C, product_device_array, vec_bytes, hipMemcpyDeviceToHost); // verify you got the correct result float * gold_C = (float *)malloc( vec_bytes ); vecAddCPU( h_A, h_B, gold_C, SIZE ); compareVecs( gold_C, h_C, SIZE ); // clean up hipEventDestroy(start); hipEventDestroy(stop); hipFree(d_A); hipFree(d_B); free(h_A); free(h_B); free(h_C); free(gold_C); } void allocateDeviceSpace(float ** array) { size_t vec_bytes = SIZE * sizeof(float); hipError_t rc; // return code from cuda functions rc = hipMalloc(array, vec_bytes); if ( rc ) printf("Error from hipMalloc: %s\n",hipGetErrorString(rc)); } // program execution begins here int main( int argc, char ** argv ) { int nDevices; hipGetDeviceCount(&nDevices); 
printf("Found %d GPUs!\n",nDevices); float * dev_C[nDevices]; int i; for ( i=0; i<nDevices; i++ ) { hipSetDevice(i); allocateDeviceSpace(&dev_C[i]); vecMultiplyGPU(dev_C[i]); } return 0; }
2de57178fa992b8f848d6c9d68ff3d734ad7b102.cu
/* Vector addition on the GPU: C = A + B */ #include <stdio.h> #include <stdlib.h> #include <string.h> #define SIZE 1024 #define BLOCKSIZE 16 // Device function (i.e. kernel) __global__ void VecAdd(float * A, float * B, float * C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; if ( i < N ) { C[i] = A[i] + B[i]; } } // CPU version of the vector addition function void vecAddCPU(float * A, float * B, float * C, int N) { int i; for (i=0; i<N; i++) { C[i] = A[i] + B[i]; } } // Function compares two 1d arrays void compareVecs( float * vec1, float * vec2, int N ) { int i; int vecsEqual = 1; for (i=0; i<N; i++) { if ( abs (vec1[i] - vec2[i]) > 0.00001 ) { printf("vectors not equal! i: %d vec1[i]: %f vec2[i]: %f\n",i,vec1[i],vec2[i]); vecsEqual = 0; } } if ( vecsEqual ) printf("GPU vector addition agrees with CPU version!\n"); } /* Host function for filling vector (1d array) with random numbers between -20.0 and 20.0 */ void fillOutVector( float * vec, int vec_length ) { time_t t; srand((unsigned) time(&t)); // initialize random number generator int i; for (i=0; i<vec_length; i++) { vec[i] = ( (float)rand() / (float)(RAND_MAX) ) * 40.0; vec[i] -= 20.0; } } // Host function for printing a vector (1d array) void printVector( float * vec, int vec_length ) { int i; for (i=0; i<vec_length; i++) { printf("i: %d vec[i]: %f\n",i,vec[i]); } } void printDeviceSpecs() { int devID; cudaDeviceProp props; cudaGetDevice(&devID); cudaGetDeviceProperties(&props, devID); printf("Device %d: \"%s\" with Compute %d.%d capability\n", devID, props.name, props.major,props.minor); } void vecMultiplyGPU(float * product_device_array) { // initialize timer events cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // allocate space for host arrays size_t vec_bytes = SIZE * sizeof(float); float * h_A = (float *)malloc( vec_bytes ); float * h_B = (float *)malloc( vec_bytes ); float * h_C = (float *)malloc( vec_bytes ); // initialize arrays fillOutVector( h_A, SIZE ); fillOutVector( h_B, SIZE ); printDeviceSpecs(); // allocate space for A and B on the device and copy data into space float * d_A, * d_B; cudaMalloc(&d_A, vec_bytes); cudaMalloc(&d_B, vec_bytes); cudaMemcpy(d_A, h_A, vec_bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, vec_bytes, cudaMemcpyHostToDevice); // launch kernel and get timing info dim3 threadsPerBlock(BLOCKSIZE); dim3 blocksPerGrid( (SIZE + BLOCKSIZE - 1) / BLOCKSIZE ); cudaEventRecord(start); VecAdd<<< blocksPerGrid, threadsPerBlock >>>(d_A, d_B, product_device_array, SIZE); cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("kernel time (ms) : %7.5f\n",milliseconds); cudaMemcpy(h_C, product_device_array, vec_bytes, cudaMemcpyDeviceToHost); // verify you got the correct result float * gold_C = (float *)malloc( vec_bytes ); vecAddCPU( h_A, h_B, gold_C, SIZE ); compareVecs( gold_C, h_C, SIZE ); // clean up cudaEventDestroy(start); cudaEventDestroy(stop); cudaFree(d_A); cudaFree(d_B); free(h_A); free(h_B); free(h_C); free(gold_C); } void allocateDeviceSpace(float ** array) { size_t vec_bytes = SIZE * sizeof(float); cudaError_t rc; // return code from cuda functions rc = cudaMalloc(array, vec_bytes); if ( rc ) printf("Error from cudaMalloc: %s\n",cudaGetErrorString(rc)); } // program execution begins here int main( int argc, char ** argv ) { int nDevices; cudaGetDeviceCount(&nDevices); printf("Found %d GPUs!\n",nDevices); float * dev_C[nDevices]; int i; for ( i=0; i<nDevices; i++ ) { 
cudaSetDevice(i); allocateDeviceSpace(&dev_C[i]); vecMultiplyGPU(dev_C[i]); } return 0; }
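// A minimal sketch of the per-device loop used in main() above: enumerate the GPUs, bind each
// one with cudaSetDevice, and do the allocation work while it is current. The per-device buffer
// is illustrative and no kernel is launched here.
#include <cstdio>
#include <cuda_runtime.h>

int main() {
  int nDevices = 0;
  cudaGetDeviceCount(&nDevices);
  printf("Found %d GPU(s)\n", nDevices);
  for (int i = 0; i < nDevices; ++i) {
    cudaSetDevice(i);                                  // subsequent runtime calls target device i
    cudaDeviceProp props;
    cudaGetDeviceProperties(&props, i);
    float *buf = nullptr;
    cudaError_t rc = cudaMalloc((void**)&buf, 1024 * sizeof(float));
    printf("device %d (%s, CC %d.%d): cudaMalloc -> %s\n", i, props.name,
           props.major, props.minor, cudaGetErrorString(rc));
    if (rc == cudaSuccess) cudaFree(buf);
  }
  return 0;
}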
fdd3d44ceb5a16c559eb01e6239908ba6a9e031a.hip
// !!! This is a file automatically generated by hipify!!!
/**
 **********************************************************************************************************************
 * @file: cuda_mul_test.cu
 * @author: lk
 * @email: [email protected]
 * @date: 2021-06-20 18:36:47 Sunday
 * @brief:
 **********************************************************************************************************************
 **/
#include "hip/hip_runtime.h"
#include "timeutils.hpp"
#include <iostream>
using namespace std;

void get_property() {
  int dev = 0;
  hipDeviceProp_t devProp;
  //CHECK(hipGetDeviceProperties(&devProp, dev));
  hipGetDeviceProperties(&devProp, dev);  // query the device so the fields printed below are initialized
  std::cout << "Using GPU device " << dev << ": " << devProp.name << std::endl;
  std::cout << "Number of SMs: " << devProp.multiProcessorCount << std::endl;
  std::cout << "Shared memory per block: " << devProp.sharedMemPerBlock / 1024.0 << " KB" << std::endl;
  std::cout << "Max threads per block: " << devProp.maxThreadsPerBlock << std::endl;
  std::cout << "Max threads per multiprocessor: " << devProp.maxThreadsPerMultiProcessor << std::endl;
  std::cout << "Max warps per multiprocessor: " << devProp.maxThreadsPerMultiProcessor / 32 << std::endl;
}

__global__ void mul(float *x, float *y, float *z, int n) {
  // global thread index
  int index = threadIdx.x + blockIdx.x * blockDim.x;
  // stride over the whole grid
  int stride = blockDim.x * gridDim.x;
  // printf("%d\t%d\t%d\t%d\t%d\t%d\n", threadIdx.x, threadIdx.y, blockDim.x, gridDim.x, stride);
  for (int i = index; i < n; i += stride) {
    z[i] = x[i] * y[i];
  }
}

void test() {
  int N = 1 << 20;
  int nBytes = N * sizeof(float);
  // allocate managed (unified) memory
  float *x, *y, *z;
  hipMallocManaged((void**)&x, nBytes);
  hipMallocManaged((void**)&y, nBytes);
  hipMallocManaged((void**)&z, nBytes);
  // initialize the data
  for (int i = 0; i < N; ++i) {
    x[i] = 10.0;
    y[i] = 20.0;
  }
  cout << "threadIdx.x\tblockIdx.x\tblockDim.x\tgridDim.x\tstride\n";
  // define the kernel launch configuration
  dim3 blockSize(256);
  dim3 gridSize((N + blockSize.x - 1) / blockSize.x);
  printf("%u %u \n", gridSize.x, blockSize.x);
  // launch the kernel
  mul << < gridSize, blockSize >> >(x, y, z, N);
  // synchronize with the device so the host can safely read the results
  hipDeviceSynchronize();
  // check the results: z[i] = x[i] * y[i] = 10 * 20 = 200
  float maxError = 0.0;
  for (int i = 0; i < N; i++)
    maxError = fmax(maxError, fabs(z[i] - 200.0));
  std::cout << "Max error: " << maxError << std::endl;
  // free the memory
  hipFree(x);
  hipFree(y);
  hipFree(z);
}

int main() {
  get_property();
  TimeUtil t;
  t.startTimer();
  test();
  cout << "cost time: " << t.getDuration<TimeUnit::MILLISEC>() << " ms" << endl;
  return 0;
}
fdd3d44ceb5a16c559eb01e6239908ba6a9e031a.cu
/**
 **********************************************************************************************************************
 * @file: cuda_mul_test.cu
 * @author: lk
 * @email: [email protected]
 * @date: 2021-06-20 18:36:47 Sunday
 * @brief:
 **********************************************************************************************************************
 **/
#include "cuda_runtime.h"
#include "timeutils.hpp"
#include <iostream>
using namespace std;

void get_property() {
  int dev = 0;
  cudaDeviceProp devProp;
  //CHECK(cudaGetDeviceProperties(&devProp, dev));
  cudaGetDeviceProperties(&devProp, dev);  // query the device so the fields printed below are initialized
  std::cout << "Using GPU device " << dev << ": " << devProp.name << std::endl;
  std::cout << "Number of SMs: " << devProp.multiProcessorCount << std::endl;
  std::cout << "Shared memory per block: " << devProp.sharedMemPerBlock / 1024.0 << " KB" << std::endl;
  std::cout << "Max threads per block: " << devProp.maxThreadsPerBlock << std::endl;
  std::cout << "Max threads per multiprocessor: " << devProp.maxThreadsPerMultiProcessor << std::endl;
  std::cout << "Max warps per multiprocessor: " << devProp.maxThreadsPerMultiProcessor / 32 << std::endl;
}

__global__ void mul(float *x, float *y, float *z, int n) {
  // global thread index
  int index = threadIdx.x + blockIdx.x * blockDim.x;
  // stride over the whole grid
  int stride = blockDim.x * gridDim.x;
  // printf("%d\t%d\t%d\t%d\t%d\t%d\n", threadIdx.x, threadIdx.y, blockDim.x, gridDim.x, stride);
  for (int i = index; i < n; i += stride) {
    z[i] = x[i] * y[i];
  }
}

void test() {
  int N = 1 << 20;
  int nBytes = N * sizeof(float);
  // allocate managed (unified) memory
  float *x, *y, *z;
  cudaMallocManaged((void**)&x, nBytes);
  cudaMallocManaged((void**)&y, nBytes);
  cudaMallocManaged((void**)&z, nBytes);
  // initialize the data
  for (int i = 0; i < N; ++i) {
    x[i] = 10.0;
    y[i] = 20.0;
  }
  cout << "threadIdx.x\tblockIdx.x\tblockDim.x\tgridDim.x\tstride\n";
  // define the kernel launch configuration
  dim3 blockSize(256);
  dim3 gridSize((N + blockSize.x - 1) / blockSize.x);
  printf("%u %u \n", gridSize.x, blockSize.x);
  // launch the kernel
  mul << < gridSize, blockSize >> >(x, y, z, N);
  // synchronize with the device so the host can safely read the results
  cudaDeviceSynchronize();
  // check the results: z[i] = x[i] * y[i] = 10 * 20 = 200
  float maxError = 0.0;
  for (int i = 0; i < N; i++)
    maxError = fmax(maxError, fabs(z[i] - 200.0));
  std::cout << "Max error: " << maxError << std::endl;
  // free the memory
  cudaFree(x);
  cudaFree(y);
  cudaFree(z);
}

int main() {
  get_property();
  TimeUtil t;
  t.startTimer();
  test();
  cout << "cost time: " << t.getDuration<TimeUnit::MILLISEC>() << " ms" << endl;
  return 0;
}
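// For comparison with the managed-memory version above, a minimal sketch of the same elementwise
// multiply using explicit device buffers and cudaMemcpy. The kernel and sizes mirror the file;
// all other names are illustrative. With explicit buffers the transfers are visible in the code
// (and in profiles), whereas cudaMallocManaged migrates pages on demand.
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <cuda_runtime.h>

__global__ void mul_explicit(const float *x, const float *y, float *z, int n) {
  for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += blockDim.x * gridDim.x)
    z[i] = x[i] * y[i];
}

int main() {
  const int n = 1 << 20;
  const size_t bytes = n * sizeof(float);
  float *hx = (float *)malloc(bytes), *hy = (float *)malloc(bytes), *hz = (float *)malloc(bytes);
  for (int i = 0; i < n; ++i) { hx[i] = 10.f; hy[i] = 20.f; }
  float *dx, *dy, *dz;
  cudaMalloc((void**)&dx, bytes); cudaMalloc((void**)&dy, bytes); cudaMalloc((void**)&dz, bytes);
  cudaMemcpy(dx, hx, bytes, cudaMemcpyHostToDevice);
  cudaMemcpy(dy, hy, bytes, cudaMemcpyHostToDevice);
  mul_explicit<<<(n + 255) / 256, 256>>>(dx, dy, dz, n);
  cudaMemcpy(hz, dz, bytes, cudaMemcpyDeviceToHost);   // synchronizes with the default stream
  float maxError = 0.f;
  for (int i = 0; i < n; ++i) maxError = fmaxf(maxError, fabsf(hz[i] - 200.f));
  printf("max error: %g\n", maxError);
  cudaFree(dx); cudaFree(dy); cudaFree(dz);
  free(hx); free(hy); free(hz);
  return 0;
}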
e98b91eb801f835a1f5fb513989c1ffac6f12e4d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <math.h> #include<stdio.h> #include <algorithm> #define BLOCK_SIZE 16 //int const Nx = 30, Nz = 20; __global__ void laplacian_GPU (int ordem, int Nz, int Nx,int dz, int dx, float *P, float *Lapla) { int col = blockIdx.x * blockDim.x + threadIdx.x; int colStride = blockDim.x * gridDim.x; int row = blockIdx.y * blockDim.y + threadIdx.y; int rowStride = blockDim.y * gridDim.y; int in_n = ordem / 2; int lim_nx = Nx - ordem / 2; int lim_nz = Nz - ordem / 2; float Pxx,Pzz; // printf("in_n = %d",in_n); printf("lim_nx = %d",lim_nx); for (int i = col; i < lim_nz && i >= in_n; i += colStride){ for (int j = row; j < lim_nx && j >= in_n; j += rowStride){ Pzz = P[(i+1) * Nx + j] + P[(i-1) * Nx + j] - P[i * Nx + j] * 2.0; Pxx = P[i * Nx + j+1] + P[i * Nx + j-1] - P[i * Nx + j] * 2.0; Lapla[i * Nx + j] = Pxx/(dx*dx) + Pzz/(dz*dz); printf("Pxx %.3f, Pzz %.3f,i %d, j %d\n",Pxx,Pzz,i,j); } } } void laplacian(int Nx,int Nz,float dx,float dz) { float **P = new float*[Nx]; P[0] = new float[Nz * Nx]; for (int i = 1; i < Nz; ++i) P[i] = P[i-1] + Nx; float **Lapla = new float*[Nx]; Lapla[0] = new float[Nz * Nx]; for (int i = 1; i < Nz; ++i) Lapla[i] = Lapla[i-1] + Nx; float *dP; hipMalloc((void **) &dP, sizeof(float) * Nz * Nx); float *dLapla; hipMalloc((void **) &dLapla, sizeof(float) * Nz * Nx); for (int i = 0; i < Nz; i++){ for (int j = 0; j < Nx; j++){ P[i][j] = i*j + i*i*i; }; }; hipMemcpy(dP, P[0],sizeof(float) * Nz * Nx, hipMemcpyHostToDevice); dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE); int Gridx = (Nx + dimBlock.x) / dimBlock.x; int Gridy = (Nz + dimBlock.y) / dimBlock.y; dim3 dimGrid(Gridx,Gridy); std::cout<<"dimBlock.x "<<dimBlock.x<<" dimBlock.y "<<dimBlock.y<<std::endl; std::cout<<"dimGrid.x "<<dimGrid.x<<" dimGrid.y "<<dimGrid.y<<std::endl; int ordem=2; hipLaunchKernelGGL(( laplacian_GPU), dim3(dimBlock),dim3(dimGrid), 0, 0, ordem,Nz,Nx,dz,dx,dP,dLapla); hipError_t err = hipGetLastError(); if(err != hipSuccess) std::cout<<"Error: "<<hipGetErrorString(err)<<std::endl; hipDeviceSynchronize(); hipMemcpy(Lapla[0], dLapla,sizeof(float) * Nz * Nx, hipMemcpyDeviceToHost); for(int i=0; i<Nz; i++){ for(int j=0; j<Nx; j++){ std::cout << P[i][j] << " "; } std::cout << std::endl; } for(int i=0; i<Nz; i++){ for(int j=0; j<Nx; j++){ std::cout << Lapla[i][j] << " "; } std::cout << std::endl; } hipFree(P); hipFree(Lapla); } int main() { int Nx = 10; int Nz = 10; float dz = 1; float dx = 1; laplacian(Nx,Nz,dx,dz); return 0; }
e98b91eb801f835a1f5fb513989c1ffac6f12e4d.cu
#include <iostream> #include <math.h> #include<stdio.h> #include <algorithm> #define BLOCK_SIZE 16 //int const Nx = 30, Nz = 20; __global__ void laplacian_GPU (int ordem, int Nz, int Nx,int dz, int dx, float *P, float *Lapla) { int col = blockIdx.x * blockDim.x + threadIdx.x; int colStride = blockDim.x * gridDim.x; int row = blockIdx.y * blockDim.y + threadIdx.y; int rowStride = blockDim.y * gridDim.y; int in_n = ordem / 2; int lim_nx = Nx - ordem / 2; int lim_nz = Nz - ordem / 2; float Pxx,Pzz; // printf("in_n = %d",in_n); printf("lim_nx = %d",lim_nx); for (int i = col; i < lim_nz && i >= in_n; i += colStride){ for (int j = row; j < lim_nx && j >= in_n; j += rowStride){ Pzz = P[(i+1) * Nx + j] + P[(i-1) * Nx + j] - P[i * Nx + j] * 2.0; Pxx = P[i * Nx + j+1] + P[i * Nx + j-1] - P[i * Nx + j] * 2.0; Lapla[i * Nx + j] = Pxx/(dx*dx) + Pzz/(dz*dz); printf("Pxx %.3f, Pzz %.3f,i %d, j %d\n",Pxx,Pzz,i,j); } } } void laplacian(int Nx,int Nz,float dx,float dz) { float **P = new float*[Nx]; P[0] = new float[Nz * Nx]; for (int i = 1; i < Nz; ++i) P[i] = P[i-1] + Nx; float **Lapla = new float*[Nx]; Lapla[0] = new float[Nz * Nx]; for (int i = 1; i < Nz; ++i) Lapla[i] = Lapla[i-1] + Nx; float *dP; cudaMalloc((void **) &dP, sizeof(float) * Nz * Nx); float *dLapla; cudaMalloc((void **) &dLapla, sizeof(float) * Nz * Nx); for (int i = 0; i < Nz; i++){ for (int j = 0; j < Nx; j++){ P[i][j] = i*j + i*i*i; }; }; cudaMemcpy(dP, P[0],sizeof(float) * Nz * Nx, cudaMemcpyHostToDevice); dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE); int Gridx = (Nx + dimBlock.x) / dimBlock.x; int Gridy = (Nz + dimBlock.y) / dimBlock.y; dim3 dimGrid(Gridx,Gridy); std::cout<<"dimBlock.x "<<dimBlock.x<<" dimBlock.y "<<dimBlock.y<<std::endl; std::cout<<"dimGrid.x "<<dimGrid.x<<" dimGrid.y "<<dimGrid.y<<std::endl; int ordem=2; laplacian_GPU<<<dimBlock,dimGrid>>>(ordem,Nz,Nx,dz,dx,dP,dLapla); cudaError_t err = cudaGetLastError(); if(err != cudaSuccess) std::cout<<"Error: "<<cudaGetErrorString(err)<<std::endl; cudaDeviceSynchronize(); cudaMemcpy(Lapla[0], dLapla,sizeof(float) * Nz * Nx, cudaMemcpyDeviceToHost); for(int i=0; i<Nz; i++){ for(int j=0; j<Nx; j++){ std::cout << P[i][j] << " "; } std::cout << std::endl; } for(int i=0; i<Nz; i++){ for(int j=0; j<Nx; j++){ std::cout << Lapla[i][j] << " "; } std::cout << std::endl; } cudaFree(P); cudaFree(Lapla); } int main() { int Nx = 10; int Nz = 10; float dz = 1; float dx = 1; laplacian(Nx,Nz,dx,dz); return 0; }
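// The launches above pass dimBlock in the grid position and dimGrid in the block position; the
// execution configuration is <<<grid, block>>>, and that conventional ordering is used in this
// minimal sketch. It applies a 5-point Laplacian to P(i,j) = i*i + j*j, whose discrete Laplacian
// is 4 everywhere for dx = dz = 1. Sizes and all names are illustrative.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void laplacian5(const float *P, float *L, int nz, int nx, float dz, float dx) {
  int j = blockIdx.x * blockDim.x + threadIdx.x;        // x index (column)
  int i = blockIdx.y * blockDim.y + threadIdx.y;        // z index (row)
  if (i < 1 || j < 1 || i >= nz - 1 || j >= nx - 1) return;
  float pzz = P[(i + 1) * nx + j] + P[(i - 1) * nx + j] - 2.f * P[i * nx + j];
  float pxx = P[i * nx + j + 1] + P[i * nx + j - 1] - 2.f * P[i * nx + j];
  L[i * nx + j] = pxx / (dx * dx) + pzz / (dz * dz);
}

int main() {
  const int nx = 10, nz = 10, n = nx * nz;
  float hP[n], hL[n];
  for (int i = 0; i < nz; ++i)
    for (int j = 0; j < nx; ++j) hP[i * nx + j] = float(i * i + j * j);
  float *dP, *dL;
  cudaMalloc((void**)&dP, n * sizeof(float));
  cudaMalloc((void**)&dL, n * sizeof(float));
  cudaMemcpy(dP, hP, n * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemset(dL, 0, n * sizeof(float));
  dim3 block(16, 16);
  dim3 grid((nx + block.x - 1) / block.x, (nz + block.y - 1) / block.y);
  laplacian5<<<grid, block>>>(dP, dL, nz, nx, 1.f, 1.f);   // grid first, then block
  cudaMemcpy(hL, dL, n * sizeof(float), cudaMemcpyDeviceToHost);
  printf("Laplacian at (5,5) = %g (expect 4)\n", hL[5 * nx + 5]);
  cudaFree(dP); cudaFree(dL);
  return 0;
}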
bed1c99c24d7f9c8e7e3e20c700eef52c4752c08.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2017 the arraydiff authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Caffe copyright: COPYRIGHT All contributions by the University of California: Copyright (c) 2014-2017 The Regents of the University of California (Regents) All rights reserved. All other contributions: Copyright (c) 2014-2017, the respective contributors All rights reserved. Caffe uses a shared copyright model: each contributor holds copyright over their contributions to Caffe. The project versioning records all such contribution and copyright details. If a contributor wants to further mark their specific copyright on a particular contribution, they should indicate their copyright solely in the commit message of the change when it is committed. LICENSE Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "common_hip.cuh" #include <hip/hip_runtime_api.h> #include <math_constants.h> #include <stdint.h> __global__ void max_pool_fwd_f32_kernel( const uint32_t nthreads, const float* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, float* const top_data, int32_t* const mask) { uint32_t index = threadIdx.x + blockDim.x * blockIdx.x; if (index < nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; const int hend = min(hstart + kernel_h, height); const int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); float maxval = -CUDART_INF_F; int maxidx = -1; const float* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (bottom_slice[h * width + w] > maxval) { maxidx = h * width + w; maxval = bottom_slice[maxidx]; } } } if (NULL != top_data) { top_data[index] = maxval; } if (NULL != mask) { mask[index] = maxidx; } } } extern "C" void arraydiff_cuda_kernel_max_pool_fwd_f32( size_t x_w, size_t x_h, size_t chan_dim, size_t batch_sz, size_t y_w, size_t y_h, size_t kernel_w, size_t kernel_h, size_t stride_w, size_t stride_h, size_t pad_w, size_t pad_h, const float *x, float *maybe_y, int32_t *maybe_mask, hipStream_t stream) { uint32_t n = y_w * y_h * chan_dim * batch_sz; hipLaunchKernelGGL(( max_pool_fwd_f32_kernel), dim3((n+1024-1)/1024), dim3(1024), 0, stream, n, x, batch_sz, chan_dim, x_h, x_w, y_h, y_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, maybe_y, maybe_mask); } __global__ void max_pool_bwd_f32_kernel( const uint32_t nthreads, const float* const top_diff, const int32_t* const mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, float* const bottom_diff) { uint32_t index = threadIdx.x + blockDim.x * blockIdx.x; if (index < nthreads) { // find out the local index // find out the local offset const int w = index % width; const int h = (index / width) % height; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int phend = min((h + pad_h) / stride_h + 1, pooled_height); const int pwstart = (w + pad_w < kernel_w) ? 
0 : (w + pad_w - kernel_w) / stride_w + 1; const int pwend = min((w + pad_w) / stride_w + 1, pooled_width); float gradient = 0.0f; const int offset = (n * channels + c) * pooled_height * pooled_width; const float* const top_diff_slice = top_diff + offset; const int* const mask_slice = mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (mask_slice[ph * pooled_width + pw] == h * width + w) { gradient += top_diff_slice[ph * pooled_width + pw]; } } } bottom_diff[index] += gradient; } } extern "C" void arraydiff_cuda_kernel_max_pool_bwd_f32( size_t x_w, size_t x_h, size_t chan_dim, size_t batch_sz, size_t y_w, size_t y_h, size_t kernel_w, size_t kernel_h, size_t stride_w, size_t stride_h, size_t pad_w, size_t pad_h, const float *dy, const int32_t *mask, float *dx, hipStream_t stream) { uint32_t n = x_w * x_h * chan_dim * batch_sz; hipLaunchKernelGGL(( max_pool_bwd_f32_kernel), dim3((n+1024-1)/1024), dim3(1024), 0, stream, n, dy, mask, batch_sz, chan_dim, x_h, x_w, y_h, y_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dx); } __global__ void avg_pool_fwd_f32_kernel( const uint32_t nthreads, const float* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, float* const top_data) { uint32_t index = threadIdx.x + blockDim.x * blockIdx.x; if (index < nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); const int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); float aveval = 0.0f; const float* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { aveval += bottom_slice[h * width + w]; } } top_data[index] = aveval / pool_size; } } extern "C" void arraydiff_cuda_kernel_avg_pool_fwd_f32( size_t x_w, size_t x_h, size_t chan_dim, size_t batch_sz, size_t y_w, size_t y_h, size_t kernel_w, size_t kernel_h, size_t stride_w, size_t stride_h, size_t pad_w, size_t pad_h, const float *x, float *y, hipStream_t stream) { uint32_t n = y_w * y_h * chan_dim * batch_sz; hipLaunchKernelGGL(( avg_pool_fwd_f32_kernel), dim3((n+1024-1)/1024), dim3(1024), 0, stream, n, x, batch_sz, chan_dim, x_h, x_w, y_h, y_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, y); } __global__ void avg_pool_bwd_f32_kernel( const uint32_t nthreads, const float* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, float* const bottom_diff) { uint32_t index = threadIdx.x + blockDim.x * blockIdx.x; if (index < nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_w; const int h = (index / width) % height + pad_h; const int c = (index / width / height) % channels; const int n = index / width / height / 
channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); float gradient = 0.0f; const float* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; } } bottom_diff[index] += gradient; } } extern "C" void arraydiff_cuda_kernel_avg_pool_bwd_f32( size_t x_w, size_t x_h, size_t chan_dim, size_t batch_sz, size_t y_w, size_t y_h, size_t kernel_w, size_t kernel_h, size_t stride_w, size_t stride_h, size_t pad_w, size_t pad_h, const float *dy, float *dx, hipStream_t stream) { uint32_t n = x_w * x_h * chan_dim * batch_sz; hipLaunchKernelGGL(( avg_pool_bwd_f32_kernel), dim3((n+1024-1)/1024), dim3(1024), 0, stream, n, dy, batch_sz, chan_dim, x_h, x_w, y_h, y_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dx); }
bed1c99c24d7f9c8e7e3e20c700eef52c4752c08.cu
/* Copyright 2017 the arraydiff authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Caffe copyright: COPYRIGHT All contributions by the University of California: Copyright (c) 2014-2017 The Regents of the University of California (Regents) All rights reserved. All other contributions: Copyright (c) 2014-2017, the respective contributors All rights reserved. Caffe uses a shared copyright model: each contributor holds copyright over their contributions to Caffe. The project versioning records all such contribution and copyright details. If a contributor wants to further mark their specific copyright on a particular contribution, they should indicate their copyright solely in the commit message of the change when it is committed. LICENSE Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "common.cuh" #include <cuda_runtime_api.h> #include <math_constants.h> #include <stdint.h> __global__ void max_pool_fwd_f32_kernel( const uint32_t nthreads, const float* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, float* const top_data, int32_t* const mask) { uint32_t index = threadIdx.x + blockDim.x * blockIdx.x; if (index < nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; const int hend = min(hstart + kernel_h, height); const int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); float maxval = -CUDART_INF_F; int maxidx = -1; const float* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (bottom_slice[h * width + w] > maxval) { maxidx = h * width + w; maxval = bottom_slice[maxidx]; } } } if (NULL != top_data) { top_data[index] = maxval; } if (NULL != mask) { mask[index] = maxidx; } } } extern "C" void arraydiff_cuda_kernel_max_pool_fwd_f32( size_t x_w, size_t x_h, size_t chan_dim, size_t batch_sz, size_t y_w, size_t y_h, size_t kernel_w, size_t kernel_h, size_t stride_w, size_t stride_h, size_t pad_w, size_t pad_h, const float *x, float *maybe_y, int32_t *maybe_mask, cudaStream_t stream) { uint32_t n = y_w * y_h * chan_dim * batch_sz; max_pool_fwd_f32_kernel<<<(n+1024-1)/1024, 1024, 0, stream>>>( n, x, batch_sz, chan_dim, x_h, x_w, y_h, y_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, maybe_y, maybe_mask); } __global__ void max_pool_bwd_f32_kernel( const uint32_t nthreads, const float* const top_diff, const int32_t* const mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, float* const bottom_diff) { uint32_t index = threadIdx.x + blockDim.x * blockIdx.x; if (index < nthreads) { // find out the local index // find out the local offset const int w = index % width; const int h = (index / width) % height; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int phend = min((h + pad_h) / stride_h + 1, pooled_height); const int pwstart = (w + pad_w < kernel_w) ? 
0 : (w + pad_w - kernel_w) / stride_w + 1; const int pwend = min((w + pad_w) / stride_w + 1, pooled_width); float gradient = 0.0f; const int offset = (n * channels + c) * pooled_height * pooled_width; const float* const top_diff_slice = top_diff + offset; const int* const mask_slice = mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (mask_slice[ph * pooled_width + pw] == h * width + w) { gradient += top_diff_slice[ph * pooled_width + pw]; } } } bottom_diff[index] += gradient; } } extern "C" void arraydiff_cuda_kernel_max_pool_bwd_f32( size_t x_w, size_t x_h, size_t chan_dim, size_t batch_sz, size_t y_w, size_t y_h, size_t kernel_w, size_t kernel_h, size_t stride_w, size_t stride_h, size_t pad_w, size_t pad_h, const float *dy, const int32_t *mask, float *dx, cudaStream_t stream) { uint32_t n = x_w * x_h * chan_dim * batch_sz; max_pool_bwd_f32_kernel<<<(n+1024-1)/1024, 1024, 0, stream>>>( n, dy, mask, batch_sz, chan_dim, x_h, x_w, y_h, y_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dx); } __global__ void avg_pool_fwd_f32_kernel( const uint32_t nthreads, const float* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, float* const top_data) { uint32_t index = threadIdx.x + blockDim.x * blockIdx.x; if (index < nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); const int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); float aveval = 0.0f; const float* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { aveval += bottom_slice[h * width + w]; } } top_data[index] = aveval / pool_size; } } extern "C" void arraydiff_cuda_kernel_avg_pool_fwd_f32( size_t x_w, size_t x_h, size_t chan_dim, size_t batch_sz, size_t y_w, size_t y_h, size_t kernel_w, size_t kernel_h, size_t stride_w, size_t stride_h, size_t pad_w, size_t pad_h, const float *x, float *y, cudaStream_t stream) { uint32_t n = y_w * y_h * chan_dim * batch_sz; avg_pool_fwd_f32_kernel<<<(n+1024-1)/1024, 1024, 0, stream>>>( n, x, batch_sz, chan_dim, x_h, x_w, y_h, y_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, y); } __global__ void avg_pool_bwd_f32_kernel( const uint32_t nthreads, const float* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, float* const bottom_diff) { uint32_t index = threadIdx.x + blockDim.x * blockIdx.x; if (index < nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_w; const int h = (index / width) % height + pad_h; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 
0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); float gradient = 0.0f; const float* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; } } bottom_diff[index] += gradient; } } extern "C" void arraydiff_cuda_kernel_avg_pool_bwd_f32( size_t x_w, size_t x_h, size_t chan_dim, size_t batch_sz, size_t y_w, size_t y_h, size_t kernel_w, size_t kernel_h, size_t stride_w, size_t stride_h, size_t pad_w, size_t pad_h, const float *dy, float *dx, cudaStream_t stream) { uint32_t n = x_w * x_h * chan_dim * batch_sz; avg_pool_bwd_f32_kernel<<<(n+1024-1)/1024, 1024, 0, stream>>>( n, dy, batch_sz, chan_dim, x_h, x_w, y_h, y_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dx); }
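The pooling kernels above recover (n, c, h, w) coordinates from a flat thread index with chained modulo/divide operations over an NCHW layout. A small standalone sketch of the same decode, plus the matching encode, is given below; the helper names are hypothetical, and the layout assumption simply mirrors the kernels.

#include <assert.h>
#include <stdint.h>

struct Nchw { int n, c, h, w; };

// Decode exactly as the kernels do: w varies fastest, then h, then c, then n.
static inline Nchw decode_nchw(uint32_t index, int channels, int height, int width) {
    Nchw p;
    p.w = index % width;
    p.h = (index / width) % height;
    p.c = (index / width / height) % channels;
    p.n = index / width / height / channels;
    return p;
}

static inline uint32_t encode_nchw(Nchw p, int channels, int height, int width) {
    return ((uint32_t)(p.n * channels + p.c) * height + p.h) * width + p.w;
}

int main() {
    // Round trip over a small 2x3x4x5 tensor; every flat index must survive decode/encode.
    for (uint32_t idx = 0; idx < 2 * 3 * 4 * 5; ++idx) {
        assert(encode_nchw(decode_nchw(idx, 3, 4, 5), 3, 4, 5) == idx);
    }
    return 0;
}

The same decomposition is what produces the (n * channels + c) * pooled_height * pooled_width offsets used when slicing top_diff and mask in the backward kernels.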
40cf0ea82f019ebed088d8ee46189a99a37b373f.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include "{{project_name}}.h" /////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void cu_add(int *a, int *b, int *c, int n) { const int i = blockIdx.x*blockDim.x + threadIdx.x; if (i<n) // prevent overflow { c[i]=a[i]+b[i]; } } DLL void add(int *a, int *b, int *c, int n) { dim3 dimGrid((n+63)/64, 1, 1); dim3 dimBlock(64, 1, 1); hipLaunchKernelGGL(( cu_add), dim3(dimGrid), dim3(dimBlock), 0, 0, a,b,c,n); }
40cf0ea82f019ebed088d8ee46189a99a37b373f.cu
#include <cuda.h> #include "{{project_name}}.h" /////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void cu_add(int *a, int *b, int *c, int n) { const int i = blockIdx.x*blockDim.x + threadIdx.x; if (i<n) // prevent overflow { c[i]=a[i]+b[i]; } } DLL void add(int *a, int *b, int *c, int n) { dim3 dimGrid((n+63)/64, 1, 1); dim3 dimBlock(64, 1, 1); cu_add<<<dimGrid, dimBlock>>>(a,b,c,n); }
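The {{project_name}} pair above only defines the kernel and the exported add() launcher; nothing in it shows a caller. The harness below is a sketch under the assumption that add(int*, int*, int*, int) is declared in {{project_name}}.h and linked from the file above; all buffer names are illustrative.

#include <cuda_runtime.h>
#include <cstdio>
#include "{{project_name}}.h"   // assumed to declare: DLL void add(int*, int*, int*, int);

int main() {
    const int n = 1024;
    int ha[1024], hb[1024], hc[1024];
    for (int i = 0; i < n; ++i) { ha[i] = i; hb[i] = 2 * i; }
    int *da, *db, *dc;
    cudaMalloc(&da, n * sizeof(int));
    cudaMalloc(&db, n * sizeof(int));
    cudaMalloc(&dc, n * sizeof(int));
    cudaMemcpy(da, ha, n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(db, hb, n * sizeof(int), cudaMemcpyHostToDevice);
    add(da, db, dc, n);                 // launcher from the file above; expects device pointers
    cudaMemcpy(hc, dc, n * sizeof(int), cudaMemcpyDeviceToHost);
    printf("c[10] = %d (expected %d)\n", hc[10], ha[10] + hb[10]);
    cudaFree(da); cudaFree(db); cudaFree(dc);
    return 0;
}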
53efa07837ba2160c76e22e9c544582e9af405bb.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "scl_i32.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *vector = NULL; hipMalloc(&vector, XSIZE*YSIZE); int value = 2; int *output = NULL; hipMalloc(&output, XSIZE*YSIZE); int len = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( scl_i32), dim3(gridBlock),dim3(threadBlock), 0, 0, vector,value,output,len); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( scl_i32), dim3(gridBlock),dim3(threadBlock), 0, 0, vector,value,output,len); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( scl_i32), dim3(gridBlock),dim3(threadBlock), 0, 0, vector,value,output,len); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
53efa07837ba2160c76e22e9c544582e9af405bb.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "scl_i32.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *vector = NULL; cudaMalloc(&vector, XSIZE*YSIZE); int value = 2; int *output = NULL; cudaMalloc(&output, XSIZE*YSIZE); int len = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); scl_i32<<<gridBlock,threadBlock>>>(vector,value,output,len); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { scl_i32<<<gridBlock,threadBlock>>>(vector,value,output,len); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { scl_i32<<<gridBlock,threadBlock>>>(vector,value,output,len); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
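One caveat about the benchmark harness above: the timed loop takes the end timestamp with std::chrono immediately after enqueueing 1000 launches and never synchronizes inside the timed region, so the printed figure reflects launch/enqueue overhead more than kernel execution time. The fragment below sketches an event-based measurement instead; it assumes the same gridBlock, threadBlock and kernel arguments the harness already sets up and is meant as an illustration, not a drop-in replacement.

cudaEvent_t evStart, evStop;
cudaEventCreate(&evStart);
cudaEventCreate(&evStop);
cudaEventRecord(evStart);
for (int loop_counter = 0; loop_counter < 1000; ++loop_counter) {
    scl_i32<<<gridBlock, threadBlock>>>(vector, value, output, len);
}
cudaEventRecord(evStop);
cudaEventSynchronize(evStop);              // wait for all 1000 launches to finish on the device
float total_ms = 0.0f;
cudaEventElapsedTime(&total_ms, evStart, evStop);
cout << "avg per launch: " << total_ms << " us" << endl;   // with 1000 launches, total ms equals per-launch us
cudaEventDestroy(evStart);
cudaEventDestroy(evStop);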
9a7aed68b4f76521a75d44abed72951274c7573d.hip
// !!! This is a file automatically generated by hipify!!! /* * AllDynamicSTDPSynapses_d.cu * */ #include "AllDynamicSTDPSynapses.h" #include "Book.h" /* * Allocate GPU memories to store all synapses' states, * and copy them from host to GPU memory. * * @param allSynapsesDevice Reference to the allSynapses struct on device memory. * @param sim_info SimulationInfo to refer from. */ void AllDynamicSTDPSynapses::allocSynapseDeviceStruct( void** allSynapsesDevice, const SimulationInfo *sim_info ) { allocSynapseDeviceStruct( allSynapsesDevice, sim_info->totalNeurons, sim_info->maxSynapsesPerNeuron ); } /* * Allocate GPU memories to store all synapses' states, * and copy them from host to GPU memory. * * @param allSynapsesDevice Reference to the allSynapses struct on device memory. * @param num_neurons Number of neurons. * @param maxSynapsesPerNeuron Maximum number of synapses per neuron. */ void AllDynamicSTDPSynapses::allocSynapseDeviceStruct( void** allSynapsesDevice, int num_neurons, int maxSynapsesPerNeuron ) { AllDynamicSTDPSynapses allSynapses; allocDeviceStruct( allSynapses, num_neurons, maxSynapsesPerNeuron ); HANDLE_ERROR( hipMalloc( allSynapsesDevice, sizeof( AllDynamicSTDPSynapses ) ) ); HANDLE_ERROR( hipMemcpy ( *allSynapsesDevice, &allSynapses, sizeof( AllDynamicSTDPSynapses ), hipMemcpyHostToDevice ) ); } /* * Allocate GPU memories to store all synapses' states, * and copy them from host to GPU memory. * (Helper function of allocSynapseDeviceStruct) * * @param allSynapsesDevice Reference to the allSynapses struct on device memory. * @param num_neurons Number of neurons. * @param maxSynapsesPerNeuron Maximum number of synapses per neuron. */ void AllDynamicSTDPSynapses::allocDeviceStruct( AllDynamicSTDPSynapses &allSynapses, int num_neurons, int maxSynapsesPerNeuron ) { AllSTDPSynapses::allocDeviceStruct( allSynapses, num_neurons, maxSynapsesPerNeuron ); BGSIZE max_total_synapses = maxSynapsesPerNeuron * num_neurons; HANDLE_ERROR( hipMalloc( ( void ** ) &allSynapses.lastSpike, max_total_synapses * sizeof( uint64_t ) ) ); HANDLE_ERROR( hipMalloc( ( void ** ) &allSynapses.r, max_total_synapses * sizeof( BGFLOAT ) ) ); HANDLE_ERROR( hipMalloc( ( void ** ) &allSynapses.u, max_total_synapses * sizeof( BGFLOAT ) ) ); HANDLE_ERROR( hipMalloc( ( void ** ) &allSynapses.D, max_total_synapses * sizeof( BGFLOAT ) ) ); HANDLE_ERROR( hipMalloc( ( void ** ) &allSynapses.U, max_total_synapses * sizeof( BGFLOAT ) ) ); HANDLE_ERROR( hipMalloc( ( void ** ) &allSynapses.F, max_total_synapses * sizeof( BGFLOAT ) ) ); } /* * Delete GPU memories. * * @param allSynapsesDevice Reference to the allSynapses struct on device memory. * @param sim_info SimulationInfo to refer from. */ void AllDynamicSTDPSynapses::deleteSynapseDeviceStruct( void* allSynapsesDevice ) { AllDynamicSTDPSynapses allSynapses; HANDLE_ERROR( hipMemcpy ( &allSynapses, allSynapsesDevice, sizeof( AllDynamicSTDPSynapses ), hipMemcpyDeviceToHost ) ); deleteDeviceStruct( allSynapses ); HANDLE_ERROR( hipFree( allSynapsesDevice ) ); } /* * Delete GPU memories. * (Helper function of deleteSynapseDeviceStruct) * * @param allSynapsesDevice Reference to the allSynapses struct on device memory. 
*/ void AllDynamicSTDPSynapses::deleteDeviceStruct( AllDynamicSTDPSynapses& allSynapses ) { HANDLE_ERROR( hipFree( allSynapses.lastSpike ) ); HANDLE_ERROR( hipFree( allSynapses.r ) ); HANDLE_ERROR( hipFree( allSynapses.u ) ); HANDLE_ERROR( hipFree( allSynapses.D ) ); HANDLE_ERROR( hipFree( allSynapses.U ) ); HANDLE_ERROR( hipFree( allSynapses.F ) ); AllSTDPSynapses::deleteDeviceStruct( allSynapses ); } /* * Copy all synapses' data from host to device. * * @param allSynapsesDevice Reference to the allSynapses struct on device memory. * @param sim_info SimulationInfo to refer from. */ void AllDynamicSTDPSynapses::copySynapseHostToDevice( void* allSynapsesDevice, const SimulationInfo *sim_info ) { // copy everything necessary copySynapseHostToDevice( allSynapsesDevice, sim_info->totalNeurons, sim_info->maxSynapsesPerNeuron ); } /* * Copy all synapses' data from host to device. * * @param allSynapsesDevice Reference to the allSynapses struct on device memory. * @param num_neurons Number of neurons. * @param maxSynapsesPerNeuron Maximum number of synapses per neuron. */ void AllDynamicSTDPSynapses::copySynapseHostToDevice( void* allSynapsesDevice, int num_neurons, int maxSynapsesPerNeuron ) { // copy everything necessary AllDynamicSTDPSynapses allSynapses; HANDLE_ERROR( hipMemcpy ( &allSynapses, allSynapsesDevice, sizeof( AllDynamicSTDPSynapses ), hipMemcpyDeviceToHost ) ); copyHostToDevice( allSynapsesDevice, allSynapses, num_neurons, maxSynapsesPerNeuron ); } /* * Copy all synapses' data from host to device. * (Helper function of copySynapseHostToDevice) * * @param allSynapsesDevice Reference to the allSynapses struct on device memory. * @param num_neurons Number of neurons. * @param maxSynapsesPerNeuron Maximum number of synapses per neuron. */ void AllDynamicSTDPSynapses::copyHostToDevice( void* allSynapsesDevice, AllDynamicSTDPSynapses& allSynapses, int num_neurons, int maxSynapsesPerNeuron ) { // copy everything necessary AllSTDPSynapses::copyHostToDevice( allSynapsesDevice, allSynapses, num_neurons, maxSynapsesPerNeuron ); BGSIZE max_total_synapses = maxSynapsesPerNeuron * num_neurons; HANDLE_ERROR( hipMemcpy ( allSynapses.lastSpike, lastSpike, max_total_synapses * sizeof( uint64_t ), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy ( allSynapses.r, r, max_total_synapses * sizeof( BGFLOAT ), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy ( allSynapses.u, u, max_total_synapses * sizeof( BGFLOAT ), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy ( allSynapses.D, D, max_total_synapses * sizeof( BGFLOAT ), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy ( allSynapses.U, U, max_total_synapses * sizeof( BGFLOAT ), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy ( allSynapses.F, F, max_total_synapses * sizeof( BGFLOAT ), hipMemcpyHostToDevice ) ); } /* * Copy all synapses' data from device to host. * * @param allSynapsesDevice Reference to the allSynapses struct on device memory. * @param sim_info SimulationInfo to refer from. */ void AllDynamicSTDPSynapses::copySynapseDeviceToHost( void* allSynapsesDevice, const SimulationInfo *sim_info ) { // copy everything necessary AllDynamicSTDPSynapses allSynapses; HANDLE_ERROR( hipMemcpy ( &allSynapses, allSynapsesDevice, sizeof( AllDynamicSTDPSynapses ), hipMemcpyDeviceToHost ) ); copyDeviceToHost( allSynapses, sim_info ); } /* * Copy all synapses' data from device to host. * (Helper function of copySynapseDeviceToHost) * * @param allSynapsesDevice Reference to the allSynapses struct on device memory. 
* @param num_neurons Number of neurons. * @param maxSynapsesPerNeuron Maximum number of synapses per neuron. */ void AllDynamicSTDPSynapses::copyDeviceToHost( AllDynamicSTDPSynapses& allSynapses, const SimulationInfo *sim_info ) { AllSTDPSynapses::copyDeviceToHost( allSynapses, sim_info ) ; int num_neurons = sim_info->totalNeurons; BGSIZE max_total_synapses = sim_info->maxSynapsesPerNeuron * num_neurons; HANDLE_ERROR( hipMemcpy ( lastSpike, allSynapses.lastSpike, max_total_synapses * sizeof( uint64_t ), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy ( r, allSynapses.r, max_total_synapses * sizeof( BGFLOAT ), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy ( u, allSynapses.u, max_total_synapses * sizeof( BGFLOAT ), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy ( D, allSynapses.D, max_total_synapses * sizeof( BGFLOAT ), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy ( U, allSynapses.U, max_total_synapses * sizeof( BGFLOAT ), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy ( F, allSynapses.F, max_total_synapses * sizeof( BGFLOAT ), hipMemcpyDeviceToHost ) ); } __device__ fpCreateSynapse_t fpCreateDynamicSTDPSynapse_d = (fpCreateSynapse_t)createDynamicSTDPSSynapse; /* * Get a pointer to the device function createDynamicSTDPSSynapse. * The function will be called from updateSynapsesWeightsDevice device function. * Because we cannot use virtual function (Polymorphism) in device functions, * we use this scheme. * * @param fpCreateSynapse_h Reference to the memory location * where the function pointer will be set. */ void AllDynamicSTDPSynapses::getFpCreateSynapse(fpCreateSynapse_t& fpCreateSynapse_h) { HANDLE_ERROR( hipMemcpyFromSymbol(&fpCreateSynapse_h, fpCreateDynamicSTDPSynapse_d, sizeof(fpCreateSynapse_t)) ); } __device__ fpChangeSynapsesPSR_t fpChangeDynamicSTDPSynapsesPSR_d = (fpChangeSynapsesPSR_t)changeDynamicSTDPSynapsePSR; /* * Get a pointer to the device function changeDynamicSTDPSynapsePSR. * The function will be called from advanceSynapsesDevice device function. * Because we cannot use virtual function (Polymorphism) in device functions, * we use this scheme. * * @param fpChangePSR_h Reference to the memory location * where the function pointer will be set. */ void AllDynamicSTDPSynapses::getFpChangePSR(fpChangeSynapsesPSR_t& fpChangePSR_h) { HANDLE_ERROR( hipMemcpyFromSymbol(&fpChangePSR_h, fpChangeDynamicSTDPSynapsesPSR_d, sizeof(fpChangeSynapsesPSR_t)) ); } /* ------------------*\ |* # Global Functions \* ------------------*/ /* ------------------*\ |* # Device Functions \* ------------------*/ /* * Create a Synapse and connect it to the model. * * @param allSynapsesDevice Pointer to the Synapse structures in device memory. * @param neuron_index Index of the source neuron. * @param synapse_index Index of the Synapse to create. * @param source_x X location of source. * @param source_y Y location of source. * @param dest_x X location of destination. * @param dest_y Y location of destination. * @param sum_point Pointer to the summation point. * @param deltaT The time step size. * @param type Type of the Synapse to create. 
*/ __device__ void createDynamicSTDPSSynapse(AllDynamicSTDPSynapses* allSynapsesDevice, const int neuron_index, const int synapse_index, int source_index, int dest_index, BGFLOAT *sum_point, const BGFLOAT deltaT, synapseType type) { BGFLOAT delay; BGSIZE max_synapses = allSynapsesDevice->maxSynapsesPerNeuron; BGSIZE iSyn = max_synapses * neuron_index + synapse_index; allSynapsesDevice->in_use[iSyn] = true; allSynapsesDevice->summationPoint[iSyn] = sum_point; allSynapsesDevice->destNeuronIndex[iSyn] = dest_index; allSynapsesDevice->sourceNeuronIndex[iSyn] = source_index; allSynapsesDevice->W[iSyn] = synSign(type) * 10.0e-9; allSynapsesDevice->delayQueue[iSyn] = 0; allSynapsesDevice->delayIdx[iSyn] = 0; allSynapsesDevice->ldelayQueue[iSyn] = LENGTH_OF_DELAYQUEUE; allSynapsesDevice->psr[iSyn] = 0.0; allSynapsesDevice->r[iSyn] = 1.0; allSynapsesDevice->u[iSyn] = 0.4; // DEFAULT_U allSynapsesDevice->lastSpike[iSyn] = ULONG_MAX; allSynapsesDevice->type[iSyn] = type; allSynapsesDevice->U[iSyn] = DEFAULT_U; allSynapsesDevice->tau[iSyn] = DEFAULT_tau; BGFLOAT U; BGFLOAT D; BGFLOAT F; BGFLOAT tau; switch (type) { case II: U = 0.32; D = 0.144; F = 0.06; tau = 6e-3; delay = 0.8e-3; break; case IE: U = 0.25; D = 0.7; F = 0.02; tau = 6e-3; delay = 0.8e-3; break; case EI: U = 0.05; D = 0.125; F = 1.2; tau = 3e-3; delay = 0.8e-3; break; case EE: U = 0.5; D = 1.1; F = 0.05; tau = 3e-3; delay = 1.5e-3; break; default: break; } allSynapsesDevice->U[iSyn] = U; allSynapsesDevice->D[iSyn] = D; allSynapsesDevice->F[iSyn] = F; allSynapsesDevice->tau[iSyn] = tau; allSynapsesDevice->decay[iSyn] = exp( -deltaT / tau ); allSynapsesDevice->total_delay[iSyn] = static_cast<int>( delay / deltaT ) + 1; uint32_t size = allSynapsesDevice->total_delay[iSyn] / ( sizeof(uint8_t) * 8 ) + 1; assert( size <= BYTES_OF_DELAYQUEUE ); allSynapsesDevice->Apos[iSyn] = 0.5; allSynapsesDevice->Aneg[iSyn] = -0.5; allSynapsesDevice->STDPgap[iSyn] = 2e-3; allSynapsesDevice->total_delayPost[iSyn] = 0; allSynapsesDevice->tauspost[iSyn] = 0; allSynapsesDevice->tauspre[iSyn] = 0; allSynapsesDevice->taupos[iSyn] = 15e-3; allSynapsesDevice->tauneg[iSyn] = 35e-3; allSynapsesDevice->Wex[iSyn] = 1.0; allSynapsesDevice->mupos[iSyn] = 0; allSynapsesDevice->muneg[iSyn] = 0; allSynapsesDevice->useFroemkeDanSTDP[iSyn] = false; } /* * Update PSR (post synapse response) * * @param allSynapsesDevice Reference to the allSynapses struct on device memory. * @param iSyn Index of the synapse to set. * @param simulationStep The current simulation step. * @param deltaT Inner simulation step duration. */ __device__ void changeDynamicSTDPSynapsePSR(AllDynamicSTDPSynapses* allSynapsesDevice, const BGSIZE iSyn, const uint64_t simulationStep, const BGFLOAT deltaT) { uint64_t &lastSpike = allSynapsesDevice->lastSpike[iSyn]; BGFLOAT &r = allSynapsesDevice->r[iSyn]; BGFLOAT &u = allSynapsesDevice->u[iSyn]; BGFLOAT D = allSynapsesDevice->D[iSyn]; BGFLOAT F = allSynapsesDevice->F[iSyn]; BGFLOAT U = allSynapsesDevice->U[iSyn]; BGFLOAT W = allSynapsesDevice->W[iSyn]; BGFLOAT &psr = allSynapsesDevice->psr[iSyn]; BGFLOAT decay = allSynapsesDevice->decay[iSyn]; // adjust synapse parameters if (lastSpike != ULONG_MAX) { BGFLOAT isi = (simulationStep - lastSpike) * deltaT ; r = 1 + ( r * ( 1 - u ) - 1 ) * exp( -isi / D ); u = U + u * ( 1 - U ) * exp( -isi / F ); } psr += ( ( W / decay ) * u * r );// calculate psr lastSpike = simulationStep; // record the time of the spike }
9a7aed68b4f76521a75d44abed72951274c7573d.cu
/* * AllDynamicSTDPSynapses_d.cu * */ #include "AllDynamicSTDPSynapses.h" #include "Book.h" /* * Allocate GPU memories to store all synapses' states, * and copy them from host to GPU memory. * * @param allSynapsesDevice Reference to the allSynapses struct on device memory. * @param sim_info SimulationInfo to refer from. */ void AllDynamicSTDPSynapses::allocSynapseDeviceStruct( void** allSynapsesDevice, const SimulationInfo *sim_info ) { allocSynapseDeviceStruct( allSynapsesDevice, sim_info->totalNeurons, sim_info->maxSynapsesPerNeuron ); } /* * Allocate GPU memories to store all synapses' states, * and copy them from host to GPU memory. * * @param allSynapsesDevice Reference to the allSynapses struct on device memory. * @param num_neurons Number of neurons. * @param maxSynapsesPerNeuron Maximum number of synapses per neuron. */ void AllDynamicSTDPSynapses::allocSynapseDeviceStruct( void** allSynapsesDevice, int num_neurons, int maxSynapsesPerNeuron ) { AllDynamicSTDPSynapses allSynapses; allocDeviceStruct( allSynapses, num_neurons, maxSynapsesPerNeuron ); HANDLE_ERROR( cudaMalloc( allSynapsesDevice, sizeof( AllDynamicSTDPSynapses ) ) ); HANDLE_ERROR( cudaMemcpy ( *allSynapsesDevice, &allSynapses, sizeof( AllDynamicSTDPSynapses ), cudaMemcpyHostToDevice ) ); } /* * Allocate GPU memories to store all synapses' states, * and copy them from host to GPU memory. * (Helper function of allocSynapseDeviceStruct) * * @param allSynapsesDevice Reference to the allSynapses struct on device memory. * @param num_neurons Number of neurons. * @param maxSynapsesPerNeuron Maximum number of synapses per neuron. */ void AllDynamicSTDPSynapses::allocDeviceStruct( AllDynamicSTDPSynapses &allSynapses, int num_neurons, int maxSynapsesPerNeuron ) { AllSTDPSynapses::allocDeviceStruct( allSynapses, num_neurons, maxSynapsesPerNeuron ); BGSIZE max_total_synapses = maxSynapsesPerNeuron * num_neurons; HANDLE_ERROR( cudaMalloc( ( void ** ) &allSynapses.lastSpike, max_total_synapses * sizeof( uint64_t ) ) ); HANDLE_ERROR( cudaMalloc( ( void ** ) &allSynapses.r, max_total_synapses * sizeof( BGFLOAT ) ) ); HANDLE_ERROR( cudaMalloc( ( void ** ) &allSynapses.u, max_total_synapses * sizeof( BGFLOAT ) ) ); HANDLE_ERROR( cudaMalloc( ( void ** ) &allSynapses.D, max_total_synapses * sizeof( BGFLOAT ) ) ); HANDLE_ERROR( cudaMalloc( ( void ** ) &allSynapses.U, max_total_synapses * sizeof( BGFLOAT ) ) ); HANDLE_ERROR( cudaMalloc( ( void ** ) &allSynapses.F, max_total_synapses * sizeof( BGFLOAT ) ) ); } /* * Delete GPU memories. * * @param allSynapsesDevice Reference to the allSynapses struct on device memory. * @param sim_info SimulationInfo to refer from. */ void AllDynamicSTDPSynapses::deleteSynapseDeviceStruct( void* allSynapsesDevice ) { AllDynamicSTDPSynapses allSynapses; HANDLE_ERROR( cudaMemcpy ( &allSynapses, allSynapsesDevice, sizeof( AllDynamicSTDPSynapses ), cudaMemcpyDeviceToHost ) ); deleteDeviceStruct( allSynapses ); HANDLE_ERROR( cudaFree( allSynapsesDevice ) ); } /* * Delete GPU memories. * (Helper function of deleteSynapseDeviceStruct) * * @param allSynapsesDevice Reference to the allSynapses struct on device memory. 
*/ void AllDynamicSTDPSynapses::deleteDeviceStruct( AllDynamicSTDPSynapses& allSynapses ) { HANDLE_ERROR( cudaFree( allSynapses.lastSpike ) ); HANDLE_ERROR( cudaFree( allSynapses.r ) ); HANDLE_ERROR( cudaFree( allSynapses.u ) ); HANDLE_ERROR( cudaFree( allSynapses.D ) ); HANDLE_ERROR( cudaFree( allSynapses.U ) ); HANDLE_ERROR( cudaFree( allSynapses.F ) ); AllSTDPSynapses::deleteDeviceStruct( allSynapses ); } /* * Copy all synapses' data from host to device. * * @param allSynapsesDevice Reference to the allSynapses struct on device memory. * @param sim_info SimulationInfo to refer from. */ void AllDynamicSTDPSynapses::copySynapseHostToDevice( void* allSynapsesDevice, const SimulationInfo *sim_info ) { // copy everything necessary copySynapseHostToDevice( allSynapsesDevice, sim_info->totalNeurons, sim_info->maxSynapsesPerNeuron ); } /* * Copy all synapses' data from host to device. * * @param allSynapsesDevice Reference to the allSynapses struct on device memory. * @param num_neurons Number of neurons. * @param maxSynapsesPerNeuron Maximum number of synapses per neuron. */ void AllDynamicSTDPSynapses::copySynapseHostToDevice( void* allSynapsesDevice, int num_neurons, int maxSynapsesPerNeuron ) { // copy everything necessary AllDynamicSTDPSynapses allSynapses; HANDLE_ERROR( cudaMemcpy ( &allSynapses, allSynapsesDevice, sizeof( AllDynamicSTDPSynapses ), cudaMemcpyDeviceToHost ) ); copyHostToDevice( allSynapsesDevice, allSynapses, num_neurons, maxSynapsesPerNeuron ); } /* * Copy all synapses' data from host to device. * (Helper function of copySynapseHostToDevice) * * @param allSynapsesDevice Reference to the allSynapses struct on device memory. * @param num_neurons Number of neurons. * @param maxSynapsesPerNeuron Maximum number of synapses per neuron. */ void AllDynamicSTDPSynapses::copyHostToDevice( void* allSynapsesDevice, AllDynamicSTDPSynapses& allSynapses, int num_neurons, int maxSynapsesPerNeuron ) { // copy everything necessary AllSTDPSynapses::copyHostToDevice( allSynapsesDevice, allSynapses, num_neurons, maxSynapsesPerNeuron ); BGSIZE max_total_synapses = maxSynapsesPerNeuron * num_neurons; HANDLE_ERROR( cudaMemcpy ( allSynapses.lastSpike, lastSpike, max_total_synapses * sizeof( uint64_t ), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy ( allSynapses.r, r, max_total_synapses * sizeof( BGFLOAT ), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy ( allSynapses.u, u, max_total_synapses * sizeof( BGFLOAT ), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy ( allSynapses.D, D, max_total_synapses * sizeof( BGFLOAT ), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy ( allSynapses.U, U, max_total_synapses * sizeof( BGFLOAT ), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy ( allSynapses.F, F, max_total_synapses * sizeof( BGFLOAT ), cudaMemcpyHostToDevice ) ); } /* * Copy all synapses' data from device to host. * * @param allSynapsesDevice Reference to the allSynapses struct on device memory. * @param sim_info SimulationInfo to refer from. */ void AllDynamicSTDPSynapses::copySynapseDeviceToHost( void* allSynapsesDevice, const SimulationInfo *sim_info ) { // copy everything necessary AllDynamicSTDPSynapses allSynapses; HANDLE_ERROR( cudaMemcpy ( &allSynapses, allSynapsesDevice, sizeof( AllDynamicSTDPSynapses ), cudaMemcpyDeviceToHost ) ); copyDeviceToHost( allSynapses, sim_info ); } /* * Copy all synapses' data from device to host. * (Helper function of copySynapseDeviceToHost) * * @param allSynapsesDevice Reference to the allSynapses struct on device memory. 
* @param num_neurons Number of neurons. * @param maxSynapsesPerNeuron Maximum number of synapses per neuron. */ void AllDynamicSTDPSynapses::copyDeviceToHost( AllDynamicSTDPSynapses& allSynapses, const SimulationInfo *sim_info ) { AllSTDPSynapses::copyDeviceToHost( allSynapses, sim_info ) ; int num_neurons = sim_info->totalNeurons; BGSIZE max_total_synapses = sim_info->maxSynapsesPerNeuron * num_neurons; HANDLE_ERROR( cudaMemcpy ( lastSpike, allSynapses.lastSpike, max_total_synapses * sizeof( uint64_t ), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy ( r, allSynapses.r, max_total_synapses * sizeof( BGFLOAT ), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy ( u, allSynapses.u, max_total_synapses * sizeof( BGFLOAT ), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy ( D, allSynapses.D, max_total_synapses * sizeof( BGFLOAT ), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy ( U, allSynapses.U, max_total_synapses * sizeof( BGFLOAT ), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy ( F, allSynapses.F, max_total_synapses * sizeof( BGFLOAT ), cudaMemcpyDeviceToHost ) ); } __device__ fpCreateSynapse_t fpCreateDynamicSTDPSynapse_d = (fpCreateSynapse_t)createDynamicSTDPSSynapse; /* * Get a pointer to the device function createDynamicSTDPSSynapse. * The function will be called from updateSynapsesWeightsDevice device function. * Because we cannot use virtual function (Polymorphism) in device functions, * we use this scheme. * * @param fpCreateSynapse_h Reference to the memory location * where the function pointer will be set. */ void AllDynamicSTDPSynapses::getFpCreateSynapse(fpCreateSynapse_t& fpCreateSynapse_h) { HANDLE_ERROR( cudaMemcpyFromSymbol(&fpCreateSynapse_h, fpCreateDynamicSTDPSynapse_d, sizeof(fpCreateSynapse_t)) ); } __device__ fpChangeSynapsesPSR_t fpChangeDynamicSTDPSynapsesPSR_d = (fpChangeSynapsesPSR_t)changeDynamicSTDPSynapsePSR; /* * Get a pointer to the device function changeDynamicSTDPSynapsePSR. * The function will be called from advanceSynapsesDevice device function. * Because we cannot use virtual function (Polymorphism) in device functions, * we use this scheme. * * @param fpChangePSR_h Reference to the memory location * where the function pointer will be set. */ void AllDynamicSTDPSynapses::getFpChangePSR(fpChangeSynapsesPSR_t& fpChangePSR_h) { HANDLE_ERROR( cudaMemcpyFromSymbol(&fpChangePSR_h, fpChangeDynamicSTDPSynapsesPSR_d, sizeof(fpChangeSynapsesPSR_t)) ); } /* ------------------*\ |* # Global Functions \* ------------------*/ /* ------------------*\ |* # Device Functions \* ------------------*/ /* * Create a Synapse and connect it to the model. * * @param allSynapsesDevice Pointer to the Synapse structures in device memory. * @param neuron_index Index of the source neuron. * @param synapse_index Index of the Synapse to create. * @param source_x X location of source. * @param source_y Y location of source. * @param dest_x X location of destination. * @param dest_y Y location of destination. * @param sum_point Pointer to the summation point. * @param deltaT The time step size. * @param type Type of the Synapse to create. 
*/ __device__ void createDynamicSTDPSSynapse(AllDynamicSTDPSynapses* allSynapsesDevice, const int neuron_index, const int synapse_index, int source_index, int dest_index, BGFLOAT *sum_point, const BGFLOAT deltaT, synapseType type) { BGFLOAT delay; BGSIZE max_synapses = allSynapsesDevice->maxSynapsesPerNeuron; BGSIZE iSyn = max_synapses * neuron_index + synapse_index; allSynapsesDevice->in_use[iSyn] = true; allSynapsesDevice->summationPoint[iSyn] = sum_point; allSynapsesDevice->destNeuronIndex[iSyn] = dest_index; allSynapsesDevice->sourceNeuronIndex[iSyn] = source_index; allSynapsesDevice->W[iSyn] = synSign(type) * 10.0e-9; allSynapsesDevice->delayQueue[iSyn] = 0; allSynapsesDevice->delayIdx[iSyn] = 0; allSynapsesDevice->ldelayQueue[iSyn] = LENGTH_OF_DELAYQUEUE; allSynapsesDevice->psr[iSyn] = 0.0; allSynapsesDevice->r[iSyn] = 1.0; allSynapsesDevice->u[iSyn] = 0.4; // DEFAULT_U allSynapsesDevice->lastSpike[iSyn] = ULONG_MAX; allSynapsesDevice->type[iSyn] = type; allSynapsesDevice->U[iSyn] = DEFAULT_U; allSynapsesDevice->tau[iSyn] = DEFAULT_tau; BGFLOAT U; BGFLOAT D; BGFLOAT F; BGFLOAT tau; switch (type) { case II: U = 0.32; D = 0.144; F = 0.06; tau = 6e-3; delay = 0.8e-3; break; case IE: U = 0.25; D = 0.7; F = 0.02; tau = 6e-3; delay = 0.8e-3; break; case EI: U = 0.05; D = 0.125; F = 1.2; tau = 3e-3; delay = 0.8e-3; break; case EE: U = 0.5; D = 1.1; F = 0.05; tau = 3e-3; delay = 1.5e-3; break; default: break; } allSynapsesDevice->U[iSyn] = U; allSynapsesDevice->D[iSyn] = D; allSynapsesDevice->F[iSyn] = F; allSynapsesDevice->tau[iSyn] = tau; allSynapsesDevice->decay[iSyn] = exp( -deltaT / tau ); allSynapsesDevice->total_delay[iSyn] = static_cast<int>( delay / deltaT ) + 1; uint32_t size = allSynapsesDevice->total_delay[iSyn] / ( sizeof(uint8_t) * 8 ) + 1; assert( size <= BYTES_OF_DELAYQUEUE ); allSynapsesDevice->Apos[iSyn] = 0.5; allSynapsesDevice->Aneg[iSyn] = -0.5; allSynapsesDevice->STDPgap[iSyn] = 2e-3; allSynapsesDevice->total_delayPost[iSyn] = 0; allSynapsesDevice->tauspost[iSyn] = 0; allSynapsesDevice->tauspre[iSyn] = 0; allSynapsesDevice->taupos[iSyn] = 15e-3; allSynapsesDevice->tauneg[iSyn] = 35e-3; allSynapsesDevice->Wex[iSyn] = 1.0; allSynapsesDevice->mupos[iSyn] = 0; allSynapsesDevice->muneg[iSyn] = 0; allSynapsesDevice->useFroemkeDanSTDP[iSyn] = false; } /* * Update PSR (post synapse response) * * @param allSynapsesDevice Reference to the allSynapses struct on device memory. * @param iSyn Index of the synapse to set. * @param simulationStep The current simulation step. * @param deltaT Inner simulation step duration. */ __device__ void changeDynamicSTDPSynapsePSR(AllDynamicSTDPSynapses* allSynapsesDevice, const BGSIZE iSyn, const uint64_t simulationStep, const BGFLOAT deltaT) { uint64_t &lastSpike = allSynapsesDevice->lastSpike[iSyn]; BGFLOAT &r = allSynapsesDevice->r[iSyn]; BGFLOAT &u = allSynapsesDevice->u[iSyn]; BGFLOAT D = allSynapsesDevice->D[iSyn]; BGFLOAT F = allSynapsesDevice->F[iSyn]; BGFLOAT U = allSynapsesDevice->U[iSyn]; BGFLOAT W = allSynapsesDevice->W[iSyn]; BGFLOAT &psr = allSynapsesDevice->psr[iSyn]; BGFLOAT decay = allSynapsesDevice->decay[iSyn]; // adjust synapse parameters if (lastSpike != ULONG_MAX) { BGFLOAT isi = (simulationStep - lastSpike) * deltaT ; r = 1 + ( r * ( 1 - u ) - 1 ) * exp( -isi / D ); u = U + u * ( 1 - U ) * exp( -isi / F ); } psr += ( ( W / decay ) * u * r );// calculate psr lastSpike = simulationStep; // record the time of the spike }
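The getFpCreateSynapse/getFpChangePSR pair above works around the lack of virtual dispatch in device code by publishing a __device__ function pointer and reading its value back with cudaMemcpyFromSymbol. A stripped-down sketch of the same pattern is shown below with hypothetical names, independent of the synapse structs.

#include <cstdio>
#include <cuda_runtime.h>

typedef float (*unary_fn_t)(float);

__device__ float square_dev(float x) { return x * x; }

// Host code cannot take the address of square_dev directly,
// so the device-side pointer is exported through this symbol.
__device__ unary_fn_t square_fn_d = square_dev;

__global__ void apply(unary_fn_t fn, const float *in, float *out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = fn(in[i]);
}

int main() {
    unary_fn_t fn_h;
    // Copy the function-pointer value from the device symbol to the host...
    cudaMemcpyFromSymbol(&fn_h, square_fn_d, sizeof(unary_fn_t));
    const int n = 4;
    float hin[n] = {1, 2, 3, 4}, hout[n];
    float *din, *dout;
    cudaMalloc(&din, n * sizeof(float));
    cudaMalloc(&dout, n * sizeof(float));
    cudaMemcpy(din, hin, n * sizeof(float), cudaMemcpyHostToDevice);
    apply<<<1, n>>>(fn_h, din, dout, n);   // ...and pass it back into device code as an argument
    cudaMemcpy(hout, dout, n * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; ++i) printf("%f\n", hout[i]);
    cudaFree(din); cudaFree(dout);
    return 0;
}

The value read back is a device code address, so it is only meaningful when handed back to device code, which is exactly how updateSynapsesWeightsDevice and advanceSynapsesDevice consume the pointers in the files above.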
6b61b0c562828b0c68222fbbe9207be373f5723d.hip
// !!! This is a file automatically generated by hipify!!! /** * @copyright (c) 2012- King Abdullah University of Science and * Technology (KAUST). All rights reserved. **/ /** * @file src/blas_l2/dgemv2_offset.cu * KBLAS is a high performance CUDA library for subset of BLAS * and LAPACK routines optimized for NVIDIA GPUs. * KBLAS is provided by KAUST. * * @version 2.0.0 * @author Ahmad Abdelfattah * @date 2017-11-13 **/ #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <rocblas.h> #include "gemv2_offset_core.cuh" #if(SM >= 30) #define dgemvn_offset_nb (32) #define dgemvn_offset_ntcol (4) #define dgemvn_offset_ept (4) #define dgemvn_offset_width (dgemvn_offset_ntcol*dgemvn_offset_ept) #define dgemvn_offset_by (8) #define dgemvt_offset_nb (32) #define dgemvt_offset_ntcol (4) #define dgemvt_offset_ept (2) #define dgemvt_offset_width (dgemvt_offset_ntcol*dgemvt_offset_ept) #define dgemvt_offset_by (4) #else #define dgemvn_offset_nb (64) #define dgemvn_offset_ntcol (8) #define dgemvn_offset_ept (2) #define dgemvn_offset_width (dgemvn_offset_ntcol*dgemvn_offset_ept) #define dgemvn_offset_by (1) #define dgemvt_offset_nb (64) #define dgemvt_offset_ntcol (8) #define dgemvt_offset_ept (2) #define dgemvt_offset_width (dgemvt_offset_ntcol*dgemvt_offset_ept) #define dgemvt_offset_by (1) #endif extern "C" int kblas_dscal_async(int n, double alpha, double *x, int incx, hipStream_t stream); int kblas_dgemv2_offset_driver(char trans, int rows, int cols, double alpha, double *dA, int lda, double *dX, int incx, double beta, double *dY, int incy, int offset_r, int offset_c, hipStream_t stream) { if(trans == 'n' || trans == 'N') { // offset necessary calculations int offset_r_ = offset_r % dgemvn_offset_nb; int offset_c_ = offset_c % dgemvn_offset_width; int rows_ = rows - (offset_r - offset_r_); int cols_ = cols - (offset_c - offset_c_); // Advance pointers dA += (offset_c - offset_c_) * lda + (offset_r - offset_r_); dX += (offset_c - offset_c_) * incx; dY += (offset_r - offset_r_) * incy; // scaling with beta kblas_dscal_async(rows_, beta, dY, incy, stream); int mod_r = rows_ % dgemvn_offset_nb; int mod_c = cols_ % dgemvn_offset_width; int blocks = rows_/dgemvn_offset_nb; if(mod_r != 0) blocks += 1; const int thread_x = dgemvn_offset_nb; const int thread_y = dgemvn_offset_ntcol; const int ept = dgemvn_offset_ept; int threshold = mod_c / ept; int ept_ = mod_c % ept; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, dgemvn_offset_by); //printf("rows_ = %d - cols_ = %d - mod_r = %d - mod_c = %d - offset_r_ = %d - offset_c_ = %d \n", rows_, cols_, mod_r, mod_c, offset_r_, offset_c_); switch(ept_) { case 0:hipLaunchKernelGGL(( gemvn_offset<double, dgemvn_offset_nb, dgemvn_offset_ntcol, ept, dgemvn_offset_width, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, offset_c_); break; case 1:hipLaunchKernelGGL(( gemvn_offset<double, dgemvn_offset_nb, dgemvn_offset_ntcol, ept, dgemvn_offset_width, 1>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, offset_c_); break; case 2:hipLaunchKernelGGL(( gemvn_offset<double, dgemvn_offset_nb, dgemvn_offset_ntcol, ept, dgemvn_offset_width, 2>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, offset_c_); break; case 3:hipLaunchKernelGGL(( gemvn_offset<double, dgemvn_offset_nb, 
dgemvn_offset_ntcol, ept, dgemvn_offset_width, 3>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, offset_c_); break; case 4:hipLaunchKernelGGL(( gemvn_offset<double, dgemvn_offset_nb, dgemvn_offset_ntcol, ept, dgemvn_offset_width, 4>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, offset_c_); break; case 5:hipLaunchKernelGGL(( gemvn_offset<double, dgemvn_offset_nb, dgemvn_offset_ntcol, ept, dgemvn_offset_width, 5>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, offset_c_); break; case 6:hipLaunchKernelGGL(( gemvn_offset<double, dgemvn_offset_nb, dgemvn_offset_ntcol, ept, dgemvn_offset_width, 6>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, offset_c_); break; case 7:hipLaunchKernelGGL(( gemvn_offset<double, dgemvn_offset_nb, dgemvn_offset_ntcol, ept, dgemvn_offset_width, 7>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, offset_c_); break; case 8:hipLaunchKernelGGL(( gemvn_offset<double, dgemvn_offset_nb, dgemvn_offset_ntcol, ept, dgemvn_offset_width, 8>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, offset_c_); break; default: printf("irregular part %d is not supported, please extend the case statement of dgemv\n", ept_); exit(1); } } // end of non-transpose case else if(trans == 't' || trans == 'T' || trans == 'c' || trans == 'C') { // offset necessary calculations int offset_r_ = offset_r % dgemvt_offset_nb; int offset_c_ = offset_c % dgemvt_offset_width; int rows_ = rows - (offset_r - offset_r_); int cols_ = cols - (offset_c - offset_c_); // Advance pointers dA += (offset_c - offset_c_) * lda + (offset_r - offset_r_); dX += (offset_r - offset_r_) * incx; dY += (offset_c - offset_c_) * incy; // scaling with beta kblas_dscal_async(cols_, beta, dY, incy, stream); int mod_r = rows_ % dgemvt_offset_nb; int mod_c = cols_ % dgemvt_offset_width; int blocks = cols_/dgemvt_offset_width; if(mod_c != 0) blocks += 1; const int thread_x = dgemvt_offset_nb; const int thread_y = dgemvt_offset_ntcol; const int ept = dgemvt_offset_ept; int threshold = mod_c / ept; int ept_ = mod_c % ept; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, dgemvt_offset_by); int conj; if(trans == 'c' || trans == 'C')conj = 1; else conj = 0; //printf("modr = %d, modc = %d, threshold = %d, ept_ = %d \n", mod_r, mod_c, threshold, ept_); //printf("rows_ = %d - cols_ = %d - mod_r = %d - mod_c = %d - offset_r_ = %d - offset_c_ = %d \n", rows_, cols_, mod_r, mod_c, offset_r_, offset_c_); switch(ept_) { case 0:hipLaunchKernelGGL(( gemvt_offset<double, dgemvt_offset_nb, dgemvt_offset_ntcol, ept, dgemvt_offset_width, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj, offset_r_, offset_c_); break; case 1:hipLaunchKernelGGL(( gemvt_offset<double, dgemvt_offset_nb, dgemvt_offset_ntcol, ept, dgemvt_offset_width, 1>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj, offset_r_, offset_c_); break; case 2:hipLaunchKernelGGL(( gemvt_offset<double, 
dgemvt_offset_nb, dgemvt_offset_ntcol, ept, dgemvt_offset_width, 2>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj, offset_r_, offset_c_); break; case 3:hipLaunchKernelGGL(( gemvt_offset<double, dgemvt_offset_nb, dgemvt_offset_ntcol, ept, dgemvt_offset_width, 3>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj, offset_r_, offset_c_); break; case 4:hipLaunchKernelGGL(( gemvt_offset<double, dgemvt_offset_nb, dgemvt_offset_ntcol, ept, dgemvt_offset_width, 4>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj, offset_r_, offset_c_); break; case 5:hipLaunchKernelGGL(( gemvt_offset<double, dgemvt_offset_nb, dgemvt_offset_ntcol, ept, dgemvt_offset_width, 5>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj, offset_r_, offset_c_); break; case 6:hipLaunchKernelGGL(( gemvt_offset<double, dgemvt_offset_nb, dgemvt_offset_ntcol, ept, dgemvt_offset_width, 6>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj, offset_r_, offset_c_); break; case 7:hipLaunchKernelGGL(( gemvt_offset<double, dgemvt_offset_nb, dgemvt_offset_ntcol, ept, dgemvt_offset_width, 7>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj, offset_r_, offset_c_); break; case 8:hipLaunchKernelGGL(( gemvt_offset<double, dgemvt_offset_nb, dgemvt_offset_ntcol, ept, dgemvt_offset_width, 8>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj, offset_r_, offset_c_); break; default: printf("irregular part %d is not supported, please extend the case statement of dgemv\n", ept_); exit(1); } } else { printf("DGEMV error: Unrecognized transpose mode %c \n", trans); return -1; } return 0; } extern "C" int kblas_dgemv2_offset(char trans, int rows, int cols, double alpha, double *dA, int lda, double *dX, int incx, double beta, double *dY, int incy, int offset_r, int offset_c) { return kblas_dgemv2_offset_driver(trans, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, offset_r, offset_c, 0); } extern "C" int kblas_dgemv2_offset_async( char trans, int rows, int cols, double alpha, double *dA, int lda, double *dX, int incx, double beta, double *dY, int incy, int offset_r, int offset_c, hipStream_t stream) { return kblas_dgemv2_offset_driver( trans, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, offset_r, offset_c, stream); }
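// The offset handling in kblas_dgemv2_offset_driver above splits the requested
// (offset_r, offset_c) into a block-aligned part, folded directly into the
// dA/dX/dY pointers, and a small residual (offset_r_, offset_c_) that the kernel
// skips itself. Below is a sketch of that decomposition with illustrative
// numbers; decompose_offset is a hypothetical helper, not KBLAS code.
//
// With nb = 32 (rows handled per thread block) and a requested row offset of 70:
//   residual     = 70 % 32       = 6   -> passed to the kernel as offset_r_
//   aligned_part = 70 - residual = 64  -> folded into the pointer advance
static void decompose_offset(int offset_r, int nb, int rows,
                             int *residual, int *aligned_part, int *rows_remaining)
{
    *residual       = offset_r % nb;          // what the kernel still has to skip
    *aligned_part   = offset_r - *residual;   // a multiple of nb
    *rows_remaining = rows - *aligned_part;   // rows_ as computed by the driver
}
// In the non-transpose branch the driver then advances dA by aligned_part rows
// (plus the aligned column part times lda), advances dY by aligned_part * incy,
// and launches the kernel on rows_ rows with offset_r_ as the residual.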
6b61b0c562828b0c68222fbbe9207be373f5723d.cu
/** * @copyright (c) 2012- King Abdullah University of Science and * Technology (KAUST). All rights reserved. **/ /** * @file src/blas_l2/dgemv2_offset.cu * KBLAS is a high performance CUDA library for subset of BLAS * and LAPACK routines optimized for NVIDIA GPUs. * KBLAS is provided by KAUST. * * @version 2.0.0 * @author Ahmad Abdelfattah * @date 2017-11-13 **/ #include <stdio.h> #include <cuda.h> #include <cuda_runtime_api.h> #include <cublas.h> #include "gemv2_offset_core.cuh" #if(SM >= 30) #define dgemvn_offset_nb (32) #define dgemvn_offset_ntcol (4) #define dgemvn_offset_ept (4) #define dgemvn_offset_width (dgemvn_offset_ntcol*dgemvn_offset_ept) #define dgemvn_offset_by (8) #define dgemvt_offset_nb (32) #define dgemvt_offset_ntcol (4) #define dgemvt_offset_ept (2) #define dgemvt_offset_width (dgemvt_offset_ntcol*dgemvt_offset_ept) #define dgemvt_offset_by (4) #else #define dgemvn_offset_nb (64) #define dgemvn_offset_ntcol (8) #define dgemvn_offset_ept (2) #define dgemvn_offset_width (dgemvn_offset_ntcol*dgemvn_offset_ept) #define dgemvn_offset_by (1) #define dgemvt_offset_nb (64) #define dgemvt_offset_ntcol (8) #define dgemvt_offset_ept (2) #define dgemvt_offset_width (dgemvt_offset_ntcol*dgemvt_offset_ept) #define dgemvt_offset_by (1) #endif extern "C" int kblas_dscal_async(int n, double alpha, double *x, int incx, cudaStream_t stream); int kblas_dgemv2_offset_driver(char trans, int rows, int cols, double alpha, double *dA, int lda, double *dX, int incx, double beta, double *dY, int incy, int offset_r, int offset_c, cudaStream_t stream) { if(trans == 'n' || trans == 'N') { // offset necessary calculations int offset_r_ = offset_r % dgemvn_offset_nb; int offset_c_ = offset_c % dgemvn_offset_width; int rows_ = rows - (offset_r - offset_r_); int cols_ = cols - (offset_c - offset_c_); // Advance pointers dA += (offset_c - offset_c_) * lda + (offset_r - offset_r_); dX += (offset_c - offset_c_) * incx; dY += (offset_r - offset_r_) * incy; // scaling with beta kblas_dscal_async(rows_, beta, dY, incy, stream); int mod_r = rows_ % dgemvn_offset_nb; int mod_c = cols_ % dgemvn_offset_width; int blocks = rows_/dgemvn_offset_nb; if(mod_r != 0) blocks += 1; const int thread_x = dgemvn_offset_nb; const int thread_y = dgemvn_offset_ntcol; const int ept = dgemvn_offset_ept; int threshold = mod_c / ept; int ept_ = mod_c % ept; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, dgemvn_offset_by); //printf("rows_ = %d - cols_ = %d - mod_r = %d - mod_c = %d - offset_r_ = %d - offset_c_ = %d \n", rows_, cols_, mod_r, mod_c, offset_r_, offset_c_); switch(ept_) { case 0: gemvn_offset<double, dgemvn_offset_nb, dgemvn_offset_ntcol, ept, dgemvn_offset_width, 0><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, offset_c_); break; case 1: gemvn_offset<double, dgemvn_offset_nb, dgemvn_offset_ntcol, ept, dgemvn_offset_width, 1><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, offset_c_); break; case 2: gemvn_offset<double, dgemvn_offset_nb, dgemvn_offset_ntcol, ept, dgemvn_offset_width, 2><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, offset_c_); break; case 3: gemvn_offset<double, dgemvn_offset_nb, dgemvn_offset_ntcol, ept, dgemvn_offset_width, 3><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, 
offset_c_); break; case 4: gemvn_offset<double, dgemvn_offset_nb, dgemvn_offset_ntcol, ept, dgemvn_offset_width, 4><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, offset_c_); break; case 5: gemvn_offset<double, dgemvn_offset_nb, dgemvn_offset_ntcol, ept, dgemvn_offset_width, 5><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, offset_c_); break; case 6: gemvn_offset<double, dgemvn_offset_nb, dgemvn_offset_ntcol, ept, dgemvn_offset_width, 6><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, offset_c_); break; case 7: gemvn_offset<double, dgemvn_offset_nb, dgemvn_offset_ntcol, ept, dgemvn_offset_width, 7><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, offset_c_); break; case 8: gemvn_offset<double, dgemvn_offset_nb, dgemvn_offset_ntcol, ept, dgemvn_offset_width, 8><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, offset_c_); break; default: printf("irregular part %d is not supported, please extend the case statement of dgemv\n", ept_); exit(1); } } // end of non-transpose case else if(trans == 't' || trans == 'T' || trans == 'c' || trans == 'C') { // offset necessary calculations int offset_r_ = offset_r % dgemvt_offset_nb; int offset_c_ = offset_c % dgemvt_offset_width; int rows_ = rows - (offset_r - offset_r_); int cols_ = cols - (offset_c - offset_c_); // Advance pointers dA += (offset_c - offset_c_) * lda + (offset_r - offset_r_); dX += (offset_r - offset_r_) * incx; dY += (offset_c - offset_c_) * incy; // scaling with beta kblas_dscal_async(cols_, beta, dY, incy, stream); int mod_r = rows_ % dgemvt_offset_nb; int mod_c = cols_ % dgemvt_offset_width; int blocks = cols_/dgemvt_offset_width; if(mod_c != 0) blocks += 1; const int thread_x = dgemvt_offset_nb; const int thread_y = dgemvt_offset_ntcol; const int ept = dgemvt_offset_ept; int threshold = mod_c / ept; int ept_ = mod_c % ept; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, dgemvt_offset_by); int conj; if(trans == 'c' || trans == 'C')conj = 1; else conj = 0; //printf("modr = %d, modc = %d, threshold = %d, ept_ = %d \n", mod_r, mod_c, threshold, ept_); //printf("rows_ = %d - cols_ = %d - mod_r = %d - mod_c = %d - offset_r_ = %d - offset_c_ = %d \n", rows_, cols_, mod_r, mod_c, offset_r_, offset_c_); switch(ept_) { case 0: gemvt_offset<double, dgemvt_offset_nb, dgemvt_offset_ntcol, ept, dgemvt_offset_width, 0><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj, offset_r_, offset_c_); break; case 1: gemvt_offset<double, dgemvt_offset_nb, dgemvt_offset_ntcol, ept, dgemvt_offset_width, 1><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj, offset_r_, offset_c_); break; case 2: gemvt_offset<double, dgemvt_offset_nb, dgemvt_offset_ntcol, ept, dgemvt_offset_width, 2><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj, offset_r_, offset_c_); break; case 3: gemvt_offset<double, dgemvt_offset_nb, dgemvt_offset_ntcol, ept, dgemvt_offset_width, 3><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, 
threshold, conj, offset_r_, offset_c_); break; case 4: gemvt_offset<double, dgemvt_offset_nb, dgemvt_offset_ntcol, ept, dgemvt_offset_width, 4><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj, offset_r_, offset_c_); break; case 5: gemvt_offset<double, dgemvt_offset_nb, dgemvt_offset_ntcol, ept, dgemvt_offset_width, 5><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj, offset_r_, offset_c_); break; case 6: gemvt_offset<double, dgemvt_offset_nb, dgemvt_offset_ntcol, ept, dgemvt_offset_width, 6><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj, offset_r_, offset_c_); break; case 7: gemvt_offset<double, dgemvt_offset_nb, dgemvt_offset_ntcol, ept, dgemvt_offset_width, 7><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj, offset_r_, offset_c_); break; case 8: gemvt_offset<double, dgemvt_offset_nb, dgemvt_offset_ntcol, ept, dgemvt_offset_width, 8><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj, offset_r_, offset_c_); break; default: printf("irregular part %d is not supported, please extend the case statement of dgemv\n", ept_); exit(1); } } else { printf("DGEMV error: Unrecognized transpose mode %c \n", trans); return -1; } return 0; } extern "C" int kblas_dgemv2_offset(char trans, int rows, int cols, double alpha, double *dA, int lda, double *dX, int incx, double beta, double *dY, int incy, int offset_r, int offset_c) { return kblas_dgemv2_offset_driver(trans, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, offset_r, offset_c, 0); } extern "C" int kblas_dgemv2_offset_async( char trans, int rows, int cols, double alpha, double *dA, int lda, double *dX, int incx, double beta, double *dY, int incy, int offset_r, int offset_c, cudaStream_t stream) { return kblas_dgemv2_offset_driver( trans, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, offset_r, offset_c, stream); }
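// The pair above shows the main mechanical change hipify makes to kernel
// launches: CUDA's triple-chevron syntax becomes a call to the
// hipLaunchKernelGGL macro, with the templated kernel name wrapped in an extra
// pair of parentheses so the commas in its template argument list are not split
// into separate macro arguments. A minimal sketch with a hypothetical saxpy
// kernel (not part of KBLAS) standing in for gemvn_offset/gemvt_offset:
#include <cuda_runtime.h>

template <typename T, int EPT>
__global__ void saxpy(int n, T a, const T *x, T *y)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;  // one element per thread
    if (i < n) y[i] = a * x[i] + y[i];
}

static void launch_saxpy(int n, float a, const float *d_x, float *d_y, cudaStream_t stream)
{
    dim3 dimBlock(256);
    dim3 dimGrid((n + dimBlock.x - 1) / dimBlock.x);
    // CUDA form, as written in the .cu file:
    saxpy<float, 4><<<dimGrid, dimBlock, 0, stream>>>(n, a, d_x, d_y);
    // hipify rewrites the same launch as:
    //   hipLaunchKernelGGL((saxpy<float, 4>), dim3(dimGrid), dim3(dimBlock), 0, stream,
    //                      n, a, d_x, d_y);
    // i.e. grid, block, shared-memory bytes and stream become ordinary macro
    // arguments, followed by the kernel arguments.
}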
26a8107b27859a6b7437052193da460f769fb97b.hip
// !!! This is a file automatically generated by hipify!!! // Utilities and system includes #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #define DATA_TYPE 1 // 0-SP, 1-INT, 2-DP #define SIZE 60000000 #define TILE_DIM 1024 #define INNER_REPS 512 template <class T> __global__ void simpleKernel(T *A, T *C1, T *C2, T *C3, T *C4) { int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; T ra, rb, rc, rd; if (xIndex < SIZE) { ra=A[xIndex]; rb=A[SIZE-xIndex]; rc=A[xIndex]; rd=A[SIZE-xIndex]; // rb=A[xIndex]; #pragma unroll 512 for (int i=0;i<INNER_REPS;i++) { ra=ra+rb; rb=rb+rc; rc=rc+rd; rd=rd+ra; } C1[xIndex]=ra; C2[xIndex]=rb; C3[xIndex]=rc; C4[xIndex]=rd; } } int main(int argc, char **argv) { int outer_reps, vector_size, tile_dim; vector_size = SIZE; tile_dim = TILE_DIM; if (argc>1){ outer_reps = atoi(argv[1]); }else{ outer_reps = 1; } // execution configuration parameters dim3 grid(vector_size/tile_dim, 1), threads(tile_dim, 1); // CUDA events hipEvent_t start, stop; size_t mem_size = static_cast<size_t>(sizeof(float) * vector_size); // allocate host memory float *h_iA = (float *) malloc(mem_size); float *h_oC1 = (float *) malloc(mem_size); float *h_oC2 = (float *) malloc(mem_size); float *h_oC3 = (float *) malloc(mem_size); float *h_oC4 = (float *) malloc(mem_size); // initalize host data for (int i = 0; i < vector_size; ++i) { h_iA[i] = (float) i+3; // h_iB[i] = (float) i+3; } // allocate device memory float *d_iA, *d_iB, *d_oC1, *d_oC2, *d_oC3, *d_oC4; hipMalloc((void **) &d_iA, mem_size); // hipMalloc((void **) &d_iB, mem_size); hipMalloc((void **) &d_oC1, mem_size); hipMalloc((void **) &d_oC2, mem_size); hipMalloc((void **) &d_oC3, mem_size); hipMalloc((void **) &d_oC4, mem_size); // copy host data to device hipMemcpy(d_iA, h_iA, mem_size, hipMemcpyHostToDevice); // hipMemcpy(d_iB, h_iB, mem_size, hipMemcpyHostToDevice); // print out common data for all kernels printf("\nVector size: %d TotalBlocks: %d blockSize: %d\n\n", vector_size, grid.x, threads.x); // initialize events hipEventCreate(&start); hipEventCreate(&stop); // take measurements for loop over kernel launches hipEventRecord(start, 0); for (int i=0; i < outer_reps; i++) { hipLaunchKernelGGL(( simpleKernel<float>), dim3(grid), dim3(threads), 0, 0, d_iA, d_oC1, d_oC2, d_oC3, d_oC4); } hipEventRecord(stop, 0); hipEventSynchronize(stop); float kernelTime; hipEventElapsedTime(&kernelTime, start, stop); // take measurements for loop inside kernel hipMemcpy(h_oC1, d_oC1, mem_size, hipMemcpyDeviceToHost); hipMemcpy(h_oC2, d_oC2, mem_size, hipMemcpyDeviceToHost); hipMemcpy(h_oC3, d_oC3, mem_size, hipMemcpyDeviceToHost); hipMemcpy(h_oC4, d_oC4, mem_size, hipMemcpyDeviceToHost); printf("teste: %f\n", h_oC1[0]); // report effective bandwidths float kernelBandwidth = 2.0f * 1000.0f * mem_size/(1024*1024*1024)/(kernelTime/outer_reps); printf("simpleKernel, Throughput = %.4f GB/s, Time = %.5f ms, Size = %u fp32 elements, NumDevsUsed = %u, Workgroup = %u\n", kernelBandwidth, kernelTime/outer_reps, vector_size, 1, tile_dim * 1); free(h_iA); // free(h_iB); free(h_oC1); free(h_oC2); free(h_oC3); free(h_oC4); hipFree(d_iA); // hipFree(d_iB); hipFree(d_oC1); hipFree(d_oC2); hipFree(d_oC3); hipFree(d_oC4); hipEventDestroy(start); hipEventDestroy(stop); hipDeviceReset(); printf("Test passed\n"); exit(EXIT_SUCCESS); }
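// One detail worth noting in simpleKernel above: rb and rd are read from
// A[SIZE - xIndex], which evaluates to A[SIZE] when xIndex == 0, one element
// past the end of an array holding SIZE elements. If a mirrored access pattern
// is intended, a bounds-safe variant indexes from SIZE - 1; the helper below is
// an illustrative sketch, not code from the benchmark.
template <class T>
__device__ void mirrored_loads(const T *A, int xIndex, int size, T &fwd, T &bwd)
{
    fwd = A[xIndex];                // forward read, valid for 0 <= xIndex < size
    bwd = A[(size - 1) - xIndex];   // mirrored read, stays within [0, size-1]
}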
26a8107b27859a6b7437052193da460f769fb97b.cu
// Utilities and system includes #include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <cuda_profiler_api.h> #define DATA_TYPE 1 // 0-SP, 1-INT, 2-DP #define SIZE 60000000 #define TILE_DIM 1024 #define INNER_REPS 512 template <class T> __global__ void simpleKernel(T *A, T *C1, T *C2, T *C3, T *C4) { int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; T ra, rb, rc, rd; if (xIndex < SIZE) { ra=A[xIndex]; rb=A[SIZE-xIndex]; rc=A[xIndex]; rd=A[SIZE-xIndex]; // rb=A[xIndex]; #pragma unroll 512 for (int i=0;i<INNER_REPS;i++) { ra=ra+rb; rb=rb+rc; rc=rc+rd; rd=rd+ra; } C1[xIndex]=ra; C2[xIndex]=rb; C3[xIndex]=rc; C4[xIndex]=rd; } } int main(int argc, char **argv) { int outer_reps, vector_size, tile_dim; vector_size = SIZE; tile_dim = TILE_DIM; if (argc>1){ outer_reps = atoi(argv[1]); }else{ outer_reps = 1; } // execution configuration parameters dim3 grid(vector_size/tile_dim, 1), threads(tile_dim, 1); // CUDA events cudaEvent_t start, stop; size_t mem_size = static_cast<size_t>(sizeof(float) * vector_size); // allocate host memory float *h_iA = (float *) malloc(mem_size); float *h_oC1 = (float *) malloc(mem_size); float *h_oC2 = (float *) malloc(mem_size); float *h_oC3 = (float *) malloc(mem_size); float *h_oC4 = (float *) malloc(mem_size); // initalize host data for (int i = 0; i < vector_size; ++i) { h_iA[i] = (float) i+3; // h_iB[i] = (float) i+3; } // allocate device memory float *d_iA, *d_iB, *d_oC1, *d_oC2, *d_oC3, *d_oC4; cudaMalloc((void **) &d_iA, mem_size); // cudaMalloc((void **) &d_iB, mem_size); cudaMalloc((void **) &d_oC1, mem_size); cudaMalloc((void **) &d_oC2, mem_size); cudaMalloc((void **) &d_oC3, mem_size); cudaMalloc((void **) &d_oC4, mem_size); // copy host data to device cudaMemcpy(d_iA, h_iA, mem_size, cudaMemcpyHostToDevice); // cudaMemcpy(d_iB, h_iB, mem_size, cudaMemcpyHostToDevice); // print out common data for all kernels printf("\nVector size: %d TotalBlocks: %d blockSize: %d\n\n", vector_size, grid.x, threads.x); // initialize events cudaEventCreate(&start); cudaEventCreate(&stop); // take measurements for loop over kernel launches cudaEventRecord(start, 0); for (int i=0; i < outer_reps; i++) { simpleKernel<float><<<grid, threads>>>(d_iA, d_oC1, d_oC2, d_oC3, d_oC4); } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float kernelTime; cudaEventElapsedTime(&kernelTime, start, stop); // take measurements for loop inside kernel cudaMemcpy(h_oC1, d_oC1, mem_size, cudaMemcpyDeviceToHost); cudaMemcpy(h_oC2, d_oC2, mem_size, cudaMemcpyDeviceToHost); cudaMemcpy(h_oC3, d_oC3, mem_size, cudaMemcpyDeviceToHost); cudaMemcpy(h_oC4, d_oC4, mem_size, cudaMemcpyDeviceToHost); printf("teste: %f\n", h_oC1[0]); // report effective bandwidths float kernelBandwidth = 2.0f * 1000.0f * mem_size/(1024*1024*1024)/(kernelTime/outer_reps); printf("simpleKernel, Throughput = %.4f GB/s, Time = %.5f ms, Size = %u fp32 elements, NumDevsUsed = %u, Workgroup = %u\n", kernelBandwidth, kernelTime/outer_reps, vector_size, 1, tile_dim * 1); free(h_iA); // free(h_iB); free(h_oC1); free(h_oC2); free(h_oC3); free(h_oC4); cudaFree(d_iA); // cudaFree(d_iB); cudaFree(d_oC1); cudaFree(d_oC2); cudaFree(d_oC3); cudaFree(d_oC4); cudaEventDestroy(start); cudaEventDestroy(stop); cudaDeviceReset(); printf("Test passed\n"); exit(EXIT_SUCCESS); }
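// Both versions above time outer_reps launches with events (cudaEvent_t in the
// .cu file, hipEvent_t after hipify) and report an effective bandwidth: the
// 1000.0f converts elapsed milliseconds to seconds, the 1024*1024*1024 divisor
// converts bytes to GiB, and the 2.0f appears to count one read plus one write
// per element. A condensed sketch of the event-timing pattern, with a
// placeholder kernel standing in for simpleKernel<float>; time_launches is a
// hypothetical helper, not part of the benchmark.
#include <cuda_runtime.h>

__global__ void placeholder_kernel() {}

// Returns the mean time per launch in milliseconds over `reps` launches.
static float time_launches(int reps)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);
    for (int i = 0; i < reps; ++i)
        placeholder_kernel<<<1, 1>>>();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);          // wait until the last launch has finished

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms / reps;
}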
d52e40b77c8294004775767979cf6582e21c9ad5.hip
// !!! This is a file automatically generated by hipify!!! #include <cmath> #include <cstdlib> #include <iostream> #include <string> #include <vector> #include "solver.h" using namespace std; typedef unsigned char uchar; int num_train = 512, num_test = 500; int reverseInt(int n) { int bytes = 4; unsigned char ch[bytes]; for (int i = 0; i < bytes; i++) { ch[i] = (n >> i * 8) & 255; } int p = 0; for (int i = 0; i < bytes; i++) { p += (int)ch[i] << (bytes - i - 1) * 8; } return p; } void readMNIST(vector<vector<uchar> > &train_images, vector<vector<uchar> > &test_images, vector<uchar> &train_labels, vector<uchar> &test_labels) { string filename_train_images = "data/train-images.idx3-ubyte"; string filename_train_labels = "data/train-labels.idx1-ubyte"; string filename_test_images = "data/t10k-images.idx3-ubyte"; string filename_test_labels = "data/t10k-labels.idx1-ubyte"; // read train/test images for (int i = 0; i < 2; i++) { string filename; if (i == 0) filename = filename_train_images; else filename = filename_test_images; ifstream f(filename.c_str(), ios::binary); if (!f.is_open()) printf("Cannot read MNIST from %s\n", filename.c_str()); // read metadata int magic_number = 0, n_images = 0, n_rows = 0, n_cols = 0; f.read((char *)&magic_number, sizeof(magic_number)); magic_number = reverseInt(magic_number); f.read((char *)&n_images, sizeof(n_images)); n_images = reverseInt(n_images); f.read((char *)&n_rows, sizeof(n_rows)); n_rows = reverseInt(n_rows); f.read((char *)&n_cols, sizeof(n_cols)); n_cols = reverseInt(n_cols); for (int k = 0; k < n_images; k++) { vector<uchar> temp; temp.reserve(n_rows * n_cols); for (int j = 0; j < n_rows * n_cols; j++) { uchar t = 0; f.read((char *)&t, sizeof(t)); temp.push_back(t); } if (i == 0) train_images.push_back(temp); else test_images.push_back(temp); } f.close(); } // read train/test labels for (int i = 0; i < 2; i++) { string filename; if (i == 0) filename = filename_train_labels; else filename = filename_test_labels; ifstream f(filename.c_str(), ios::binary); if (!f.is_open()) printf("Cannot read MNIST from %s\n", filename.c_str()); // read metadata int magic_number = 0, n_labels = 0; f.read((char *)&magic_number, sizeof(magic_number)); magic_number = reverseInt(magic_number); f.read((char *)&n_labels, sizeof(n_labels)); n_labels = reverseInt(n_labels); for (int k = 0; k < n_labels; k++) { uchar t = 0; f.read((char *)&t, sizeof(t)); if (i == 0) train_labels.push_back(t); else test_labels.push_back(t); } f.close(); } } void printTimes(vector<float> &time, string filename); void printvDNNLag(vector<vector<float> > &fwd_vdnn_lag, vector<vector<float> > &bwd_vdnn_lag, string filename); void printComputationTransferTimes(vector<vector<float> > &fwd_times, vector<vector<float> > &bwd_times, bool computation, string filename); int main(int argc, char *argv[]) { // int num_train = 100 * batch_size, num_val = batch_size; // void *X_train = malloc(num_train * input_channels * sizeof(float)); // int *y_train = (int *)malloc(num_train * sizeof(int)); // void *X_val = malloc(num_val * input_channels * sizeof(float)); // int *y_val = (int *)malloc(num_val * sizeof(int)); // for (int i = 0; i < num_train; i++) { // for (int j = 0; j < input_channels; j++) // ((float *)X_train)[i * input_channels + j] = (rand() % 1000) * 1.0 / // 1000; // y_train[i] = 0; // } // for (int i = 0; i < num_val; i++) { // for (int j = 0; j < input_channels; j++) // ((float *)X_val)[i * input_channels + j] = (rand() % 1000) * 1.0 / // 1000; // y_val[i] = rand() % 2; // } // int rows = 28, 
cols = 28, channels = 1; // vector<vector<uchar> > train_images, test_images; // vector<uchar> train_labels, test_labels; // readMNIST(train_images, test_images, train_labels, test_labels); // float *f_train_images, *f_train_labels, *f_test_images, *f_test_labels; float *f_train_images, *f_test_images; int *f_train_labels, *f_test_labels; int rows = 227, cols = 227, channels = 3; int input_size = rows * cols * channels; // f_train_images = (float *)malloc(num_train * input_size * sizeof(float)); // f_train_labels = (int *)malloc(num_train * sizeof(int)); checkCudaErrors( hipHostMalloc(&f_train_images, num_train * input_size * sizeof(float))); checkCudaErrors(hipHostMalloc(&f_train_labels, num_train * sizeof(int))); f_test_images = (float *)malloc(num_test * input_size * sizeof(float)); f_test_labels = (int *)malloc(num_test * sizeof(int)); float *mean_image; mean_image = (float *)malloc(input_size * sizeof(float)); for (int i = 0; i < input_size; i++) { mean_image[i] = 0; for (int k = 0; k < num_train; k++) { mean_image[i] += f_train_images[k * input_size + i]; } mean_image[i] /= num_train; } for (int i = 0; i < num_train; i++) { for (int j = 0; j < input_size; j++) { f_train_images[i * input_size + j] -= mean_image[j]; } } for (int i = 0; i < num_test; i++) { for (int j = 0; j < input_size; j++) { f_test_images[i * input_size + j] -= mean_image[j]; } } // int input_channels = rows * cols * channels * 3, hidden_channels1 = 50, // hidden_channels2 = 100, output_channels = 10; // vector<LayerSpecifier> layer_specifier; // ConvDescriptor layer0; // LayerSpecifier temp; // layer0.initializeValues(1, 3, 3, 3, rows, cols, 1, 1, 1, 1); // temp.initPointer(CONV); // *((ConvDescriptor *)temp.params) = layer0; // layer_specifier.push_back(temp); // ActivationDescriptor layer0_actv; // layer0_actv.initializeValues(RELU, 3, rows, cols); // temp.initPointer(ACTV); // *((ActivationDescriptor *)temp.params) = layer0_actv; // layer_specifier.push_back(temp); // BatchNormDescriptor layer0_bn; // for (int i = 0; i < 200; i++) { // layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows, cols); // temp.initPointer(BATCHNORM); // *((BatchNormDescriptor *)temp.params) = layer0_bn; // layer_specifier.push_back(temp); // layer0.initializeValues(3, 3, 3, 3, rows, cols, 1, 1, 1, 1); // temp.initPointer(CONV); // *((ConvDescriptor *)temp.params) = layer0; // layer_specifier.push_back(temp); // layer0_actv.initializeValues(RELU, 3, rows, cols); // temp.initPointer(ACTV); // *((ActivationDescriptor *)temp.params) = layer0_actv; // layer_specifier.push_back(temp); // } // PoolingDescriptor layer0_pool; // layer0_pool.initializeValues(3, 2, 2, rows, cols, 0, 0, 2, 2, POOLING_MAX); // temp.initPointer(POOLING); // *((PoolingDescriptor *)temp.params) = layer0_pool; // layer_specifier.push_back(temp); // layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows / 2, cols // / 2); // temp.initPointer(BATCHNORM); // *((BatchNormDescriptor *)temp.params) = layer0_bn; // layer_specifier.push_back(temp); // // DropoutDescriptor layer0_dropout; // // layer0_dropout.initializeValues(0.2, 3, rows / 2, cols / 2); // // temp.initPointer(DROPOUT); // // *((DropoutDescriptor *)temp.params) = layer0_dropout; // // layer_specifier.push_back(temp); // layer0.initializeValues(3, 3, 3, 3, rows / 2, cols / 2, 1, 1, 1, 1); // temp.initPointer(CONV); // *((ConvDescriptor *)temp.params) = layer0; // layer_specifier.push_back(temp); // layer0_actv.initializeValues(RELU, 3, rows / 2, cols / 2); // temp.initPointer(ACTV); // 
*((ActivationDescriptor *)temp.params) = layer0_actv; // layer_specifier.push_back(temp); // layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows / 2, cols // / 2); // temp.initPointer(BATCHNORM); // *((BatchNormDescriptor *)temp.params) = layer0_bn; // layer_specifier.push_back(temp); // FCDescriptor layer1; // layer1.initializeValues(input_channels, hidden_channels1); // temp.initPointer(FULLY_CONNECTED); // *((FCDescriptor *)(temp.params)) = layer1; // layer_specifier.push_back(temp); // temp.initPointer(ACTV); // ActivationDescriptor layer1_actv; // layer1_actv.initializeValues(RELU, hidden_channels1, 1, 1); // *((ActivationDescriptor *)temp.params) = layer1_actv; // layer_specifier.push_back(temp); // layer0_bn.initializeValues(BATCHNORM_PER_ACTIVATION, 1e-5, 0.1, // hidden_channels1, 1, 1); // temp.initPointer(BATCHNORM); // *((BatchNormDescriptor *)temp.params) = layer0_bn; // layer_specifier.push_back(temp); // temp.initPointer(FULLY_CONNECTED); // FCDescriptor layer2; // layer2.initializeValues(hidden_channels1, output_channels); // *((FCDescriptor *)temp.params) = layer2; // layer_specifier.push_back(temp); // // temp.initPointer(FULLY_CONNECTED); // // FCDescriptor layer3; // // layer3.initializeValues(hidden_channels2, output_channels); // // *((FCDescriptor *)temp.params) = layer3; // // layer_specifier.push_back(temp); // temp.initPointer(SOFTMAX); // SoftmaxDescriptor smax; // smax.initializeValues(SOFTMAX_ACCURATE, SOFTMAX_MODE_INSTANCE, // output_channels, 1, 1); // *((SoftmaxDescriptor *)(temp.params)) = smax; // layer_specifier.push_back(temp); // AlexNet vector<LayerSpecifier> layer_specifier; { ConvDescriptor layer0; layer0.initializeValues(3, 96, 11, 11, 227, 227, 0, 0, 4, 4, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer0; layer_specifier.push_back(temp); } { PoolingDescriptor layer1; layer1.initializeValues(96, 3, 3, 55, 55, 0, 0, 2, 2, POOLING_MAX); LayerSpecifier temp; temp.initPointer(POOLING); *((PoolingDescriptor *)temp.params) = layer1; layer_specifier.push_back(temp); } { ConvDescriptor layer2; layer2.initializeValues(96, 256, 5, 5, 27, 27, 2, 2, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer2; layer_specifier.push_back(temp); } { PoolingDescriptor layer3; layer3.initializeValues(256, 3, 3, 27, 27, 0, 0, 2, 2, POOLING_MAX); LayerSpecifier temp; temp.initPointer(POOLING); *((PoolingDescriptor *)temp.params) = layer3; layer_specifier.push_back(temp); } { ConvDescriptor layer4; layer4.initializeValues(256, 384, 3, 3, 13, 13, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer4; layer_specifier.push_back(temp); } { ConvDescriptor layer5; layer5.initializeValues(384, 384, 3, 3, 13, 13, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer5; layer_specifier.push_back(temp); } { ConvDescriptor layer6; layer6.initializeValues(384, 256, 3, 3, 13, 13, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer6; layer_specifier.push_back(temp); } { PoolingDescriptor layer7; layer7.initializeValues(256, 3, 3, 13, 13, 0, 0, 2, 2, POOLING_MAX); LayerSpecifier temp; temp.initPointer(POOLING); *((PoolingDescriptor *)temp.params) = layer7; layer_specifier.push_back(temp); } { FCDescriptor layer8; layer8.initializeValues(9216, 4096, RELU); LayerSpecifier temp; temp.initPointer(FULLY_CONNECTED); *((FCDescriptor 
*)temp.params) = layer8; layer_specifier.push_back(temp); } { FCDescriptor layer9; layer9.initializeValues(4096, 4096, RELU); LayerSpecifier temp; temp.initPointer(FULLY_CONNECTED); *((FCDescriptor *)temp.params) = layer9; layer_specifier.push_back(temp); } { FCDescriptor layer10; layer10.initializeValues(4096, 1000); LayerSpecifier temp; temp.initPointer(FULLY_CONNECTED); *((FCDescriptor *)temp.params) = layer10; layer_specifier.push_back(temp); } { SoftmaxDescriptor layer11; layer11.initializeValues(SOFTMAX_ACCURATE, SOFTMAX_MODE_INSTANCE, 1000, 1, 1); LayerSpecifier temp; temp.initPointer(SOFTMAX); *((SoftmaxDescriptor *)temp.params) = layer11; layer_specifier.push_back(temp); } vDNNConvAlgo vdnn_conv_algo = vDNN_PERFORMANCE_OPTIMAL; vDNNType vdnn_type = vDNN_DYN; string filename("vdnn_dyn"); if (argc == 3) { filename.assign("vdnn"); // argv[1] - layers to offload, argv[2] - conv algo to use if (strcmp(argv[1], "dyn") == 0) { vdnn_type = vDNN_DYN; filename.append("_dyn"); } else if (strcmp(argv[1], "conv") == 0) { vdnn_type = vDNN_CONV; filename.append("_conv"); } else if (strcmp(argv[1], "all") == 0) { vdnn_type = vDNN_ALL; filename.append("_all"); } else if (strcmp(argv[1], "alternate_conv") == 0) { vdnn_type = vDNN_ALTERNATE_CONV; filename.append("_alternate_conv"); } else { printf("invalid argument.. using vdnn dynamic\n"); filename.assign("vdnn_dyn"); } if ((strcmp(argv[1], "conv") == 0 or strcmp(argv[1], "all") == 0 or strcmp(argv[1], "alternate_conv") == 0)) { if (strcmp(argv[2], "p") == 0) { vdnn_conv_algo = vDNN_PERFORMANCE_OPTIMAL; filename.append("_p"); } else if (strcmp(argv[2], "m") == 0) { vdnn_conv_algo = vDNN_MEMORY_OPTIMAL; filename.append("_m"); } else { printf("invalid argument.. using vdnn dynamic\n"); filename.assign("vdnn_dyn"); } } } int batch_size = 256; long long dropout_seed = 1; float softmax_eps = 1e-8; float init_std_dev = 0.1; NeuralNet net(layer_specifier, DATA_FLOAT, batch_size, TENSOR_NCHW, dropout_seed, softmax_eps, init_std_dev, vdnn_type, vdnn_conv_algo, SGD); int num_epoch = 1000; double learning_rate = 1e-3; double learning_rate_decay = 0.9; Solver solver(&net, (void *)f_train_images, f_train_labels, (void *)f_train_images, f_train_labels, num_epoch, SGD, learning_rate, learning_rate_decay, num_train, num_train); vector<float> loss; vector<float> time; vector<vector<float> > fwd_vdnn_lag, bwd_vdnn_lag; solver.getTrainTime(loss, time, 100, fwd_vdnn_lag, bwd_vdnn_lag); printTimes(time, filename); printvDNNLag(fwd_vdnn_lag, bwd_vdnn_lag, filename); vector<vector<float> > fwd_computation_time, bwd_computation_time; solver.getComputationTime(1, fwd_computation_time, bwd_computation_time); vector<vector<float> > fwd_transfer_time, bwd_transfer_time; solver.getTransferTime(1, fwd_transfer_time, bwd_transfer_time); printComputationTransferTimes(fwd_computation_time, bwd_computation_time, true, filename); printComputationTransferTimes(fwd_transfer_time, bwd_transfer_time, false, filename); } void printTimes(vector<float> &time, string filename) { float mean_time = 0.0; float std_dev = 0.0; int N = time.size(); for (int i = 0; i < N; i++) { mean_time += time[i]; } mean_time /= N; for (int i = 0; i < N; i++) { std_dev += pow(time[i] - mean_time, 2); } std_dev /= N; std_dev = pow(std_dev, 0.5); cout << "Average time: " << mean_time << endl; cout << "Standard deviation: " << std_dev << endl; filename.append(".dat"); fstream f; f.open(filename.c_str(), ios_base::out); for (int i = 0; i < N; i++) { f << time[i] << endl; } f << "mean_time: " << mean_time << endl; f 
<< "standard_deviation: " << std_dev << endl; f.close(); filename.append(".bin"); fstream f_bin; f_bin.open(filename.c_str(), ios_base::out); f_bin.write((char *)&N, sizeof(N)); for (int i = 0; i < N; i++) { f_bin.write((char *)&time[i], sizeof(time[i])); } f_bin.close(); } void printvDNNLag(vector<vector<float> > &fwd_vdnn_lag, vector<vector<float> > &bwd_vdnn_lag, string filename) { filename.append("_lag.dat"); fstream f; f.open(filename.c_str(), ios_base::out); int N = fwd_vdnn_lag.size(); for (int i = 0; i < N; i++) { for (int j = 0; j < fwd_vdnn_lag[i].size(); j++) { f << "fwd" << j << ": " << fwd_vdnn_lag[i][j] << endl; } for (int j = 0; j < bwd_vdnn_lag[i].size(); j++) { f << "bwd" << j << ": " << bwd_vdnn_lag[i][j] << endl; } f << endl; } f.close(); } void printComputationTransferTimes(vector<vector<float> > &fwd_times, vector<vector<float> > &bwd_times, bool computation, string filename) { if (computation) filename.append("_compute_time.dat"); else filename.append("_transfer_time.dat"); fstream f; f.open(filename.c_str(), ios_base::out); int N = fwd_times.size(); for (int i = 0; i < N; i++) { for (int j = 0; j < fwd_times[i].size(); j++) { f << "fwd" << j << ": " << fwd_times[i][j] << endl; } for (int j = 0; j < bwd_times[i].size(); j++) { f << "bwd" << j << ": " << bwd_times[i][j] << endl; } f << endl; } f.close(); }
d52e40b77c8294004775767979cf6582e21c9ad5.cu
#include <cmath> #include <cstdlib> #include <iostream> #include <string> #include <vector> #include "solver.h" using namespace std; typedef unsigned char uchar; int num_train = 512, num_test = 500; int reverseInt(int n) { int bytes = 4; unsigned char ch[bytes]; for (int i = 0; i < bytes; i++) { ch[i] = (n >> i * 8) & 255; } int p = 0; for (int i = 0; i < bytes; i++) { p += (int)ch[i] << (bytes - i - 1) * 8; } return p; } void readMNIST(vector<vector<uchar> > &train_images, vector<vector<uchar> > &test_images, vector<uchar> &train_labels, vector<uchar> &test_labels) { string filename_train_images = "data/train-images.idx3-ubyte"; string filename_train_labels = "data/train-labels.idx1-ubyte"; string filename_test_images = "data/t10k-images.idx3-ubyte"; string filename_test_labels = "data/t10k-labels.idx1-ubyte"; // read train/test images for (int i = 0; i < 2; i++) { string filename; if (i == 0) filename = filename_train_images; else filename = filename_test_images; ifstream f(filename.c_str(), ios::binary); if (!f.is_open()) printf("Cannot read MNIST from %s\n", filename.c_str()); // read metadata int magic_number = 0, n_images = 0, n_rows = 0, n_cols = 0; f.read((char *)&magic_number, sizeof(magic_number)); magic_number = reverseInt(magic_number); f.read((char *)&n_images, sizeof(n_images)); n_images = reverseInt(n_images); f.read((char *)&n_rows, sizeof(n_rows)); n_rows = reverseInt(n_rows); f.read((char *)&n_cols, sizeof(n_cols)); n_cols = reverseInt(n_cols); for (int k = 0; k < n_images; k++) { vector<uchar> temp; temp.reserve(n_rows * n_cols); for (int j = 0; j < n_rows * n_cols; j++) { uchar t = 0; f.read((char *)&t, sizeof(t)); temp.push_back(t); } if (i == 0) train_images.push_back(temp); else test_images.push_back(temp); } f.close(); } // read train/test labels for (int i = 0; i < 2; i++) { string filename; if (i == 0) filename = filename_train_labels; else filename = filename_test_labels; ifstream f(filename.c_str(), ios::binary); if (!f.is_open()) printf("Cannot read MNIST from %s\n", filename.c_str()); // read metadata int magic_number = 0, n_labels = 0; f.read((char *)&magic_number, sizeof(magic_number)); magic_number = reverseInt(magic_number); f.read((char *)&n_labels, sizeof(n_labels)); n_labels = reverseInt(n_labels); for (int k = 0; k < n_labels; k++) { uchar t = 0; f.read((char *)&t, sizeof(t)); if (i == 0) train_labels.push_back(t); else test_labels.push_back(t); } f.close(); } } void printTimes(vector<float> &time, string filename); void printvDNNLag(vector<vector<float> > &fwd_vdnn_lag, vector<vector<float> > &bwd_vdnn_lag, string filename); void printComputationTransferTimes(vector<vector<float> > &fwd_times, vector<vector<float> > &bwd_times, bool computation, string filename); int main(int argc, char *argv[]) { // int num_train = 100 * batch_size, num_val = batch_size; // void *X_train = malloc(num_train * input_channels * sizeof(float)); // int *y_train = (int *)malloc(num_train * sizeof(int)); // void *X_val = malloc(num_val * input_channels * sizeof(float)); // int *y_val = (int *)malloc(num_val * sizeof(int)); // for (int i = 0; i < num_train; i++) { // for (int j = 0; j < input_channels; j++) // ((float *)X_train)[i * input_channels + j] = (rand() % 1000) * 1.0 / // 1000; // y_train[i] = 0; // } // for (int i = 0; i < num_val; i++) { // for (int j = 0; j < input_channels; j++) // ((float *)X_val)[i * input_channels + j] = (rand() % 1000) * 1.0 / // 1000; // y_val[i] = rand() % 2; // } // int rows = 28, cols = 28, channels = 1; // vector<vector<uchar> > 
train_images, test_images; // vector<uchar> train_labels, test_labels; // readMNIST(train_images, test_images, train_labels, test_labels); // float *f_train_images, *f_train_labels, *f_test_images, *f_test_labels; float *f_train_images, *f_test_images; int *f_train_labels, *f_test_labels; int rows = 227, cols = 227, channels = 3; int input_size = rows * cols * channels; // f_train_images = (float *)malloc(num_train * input_size * sizeof(float)); // f_train_labels = (int *)malloc(num_train * sizeof(int)); checkCudaErrors( cudaMallocHost(&f_train_images, num_train * input_size * sizeof(float))); checkCudaErrors(cudaMallocHost(&f_train_labels, num_train * sizeof(int))); f_test_images = (float *)malloc(num_test * input_size * sizeof(float)); f_test_labels = (int *)malloc(num_test * sizeof(int)); float *mean_image; mean_image = (float *)malloc(input_size * sizeof(float)); for (int i = 0; i < input_size; i++) { mean_image[i] = 0; for (int k = 0; k < num_train; k++) { mean_image[i] += f_train_images[k * input_size + i]; } mean_image[i] /= num_train; } for (int i = 0; i < num_train; i++) { for (int j = 0; j < input_size; j++) { f_train_images[i * input_size + j] -= mean_image[j]; } } for (int i = 0; i < num_test; i++) { for (int j = 0; j < input_size; j++) { f_test_images[i * input_size + j] -= mean_image[j]; } } // int input_channels = rows * cols * channels * 3, hidden_channels1 = 50, // hidden_channels2 = 100, output_channels = 10; // vector<LayerSpecifier> layer_specifier; // ConvDescriptor layer0; // LayerSpecifier temp; // layer0.initializeValues(1, 3, 3, 3, rows, cols, 1, 1, 1, 1); // temp.initPointer(CONV); // *((ConvDescriptor *)temp.params) = layer0; // layer_specifier.push_back(temp); // ActivationDescriptor layer0_actv; // layer0_actv.initializeValues(RELU, 3, rows, cols); // temp.initPointer(ACTV); // *((ActivationDescriptor *)temp.params) = layer0_actv; // layer_specifier.push_back(temp); // BatchNormDescriptor layer0_bn; // for (int i = 0; i < 200; i++) { // layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows, cols); // temp.initPointer(BATCHNORM); // *((BatchNormDescriptor *)temp.params) = layer0_bn; // layer_specifier.push_back(temp); // layer0.initializeValues(3, 3, 3, 3, rows, cols, 1, 1, 1, 1); // temp.initPointer(CONV); // *((ConvDescriptor *)temp.params) = layer0; // layer_specifier.push_back(temp); // layer0_actv.initializeValues(RELU, 3, rows, cols); // temp.initPointer(ACTV); // *((ActivationDescriptor *)temp.params) = layer0_actv; // layer_specifier.push_back(temp); // } // PoolingDescriptor layer0_pool; // layer0_pool.initializeValues(3, 2, 2, rows, cols, 0, 0, 2, 2, POOLING_MAX); // temp.initPointer(POOLING); // *((PoolingDescriptor *)temp.params) = layer0_pool; // layer_specifier.push_back(temp); // layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows / 2, cols // / 2); // temp.initPointer(BATCHNORM); // *((BatchNormDescriptor *)temp.params) = layer0_bn; // layer_specifier.push_back(temp); // // DropoutDescriptor layer0_dropout; // // layer0_dropout.initializeValues(0.2, 3, rows / 2, cols / 2); // // temp.initPointer(DROPOUT); // // *((DropoutDescriptor *)temp.params) = layer0_dropout; // // layer_specifier.push_back(temp); // layer0.initializeValues(3, 3, 3, 3, rows / 2, cols / 2, 1, 1, 1, 1); // temp.initPointer(CONV); // *((ConvDescriptor *)temp.params) = layer0; // layer_specifier.push_back(temp); // layer0_actv.initializeValues(RELU, 3, rows / 2, cols / 2); // temp.initPointer(ACTV); // *((ActivationDescriptor *)temp.params) = layer0_actv; 
// layer_specifier.push_back(temp); // layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows / 2, cols // / 2); // temp.initPointer(BATCHNORM); // *((BatchNormDescriptor *)temp.params) = layer0_bn; // layer_specifier.push_back(temp); // FCDescriptor layer1; // layer1.initializeValues(input_channels, hidden_channels1); // temp.initPointer(FULLY_CONNECTED); // *((FCDescriptor *)(temp.params)) = layer1; // layer_specifier.push_back(temp); // temp.initPointer(ACTV); // ActivationDescriptor layer1_actv; // layer1_actv.initializeValues(RELU, hidden_channels1, 1, 1); // *((ActivationDescriptor *)temp.params) = layer1_actv; // layer_specifier.push_back(temp); // layer0_bn.initializeValues(BATCHNORM_PER_ACTIVATION, 1e-5, 0.1, // hidden_channels1, 1, 1); // temp.initPointer(BATCHNORM); // *((BatchNormDescriptor *)temp.params) = layer0_bn; // layer_specifier.push_back(temp); // temp.initPointer(FULLY_CONNECTED); // FCDescriptor layer2; // layer2.initializeValues(hidden_channels1, output_channels); // *((FCDescriptor *)temp.params) = layer2; // layer_specifier.push_back(temp); // // temp.initPointer(FULLY_CONNECTED); // // FCDescriptor layer3; // // layer3.initializeValues(hidden_channels2, output_channels); // // *((FCDescriptor *)temp.params) = layer3; // // layer_specifier.push_back(temp); // temp.initPointer(SOFTMAX); // SoftmaxDescriptor smax; // smax.initializeValues(SOFTMAX_ACCURATE, SOFTMAX_MODE_INSTANCE, // output_channels, 1, 1); // *((SoftmaxDescriptor *)(temp.params)) = smax; // layer_specifier.push_back(temp); // AlexNet vector<LayerSpecifier> layer_specifier; { ConvDescriptor layer0; layer0.initializeValues(3, 96, 11, 11, 227, 227, 0, 0, 4, 4, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer0; layer_specifier.push_back(temp); } { PoolingDescriptor layer1; layer1.initializeValues(96, 3, 3, 55, 55, 0, 0, 2, 2, POOLING_MAX); LayerSpecifier temp; temp.initPointer(POOLING); *((PoolingDescriptor *)temp.params) = layer1; layer_specifier.push_back(temp); } { ConvDescriptor layer2; layer2.initializeValues(96, 256, 5, 5, 27, 27, 2, 2, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer2; layer_specifier.push_back(temp); } { PoolingDescriptor layer3; layer3.initializeValues(256, 3, 3, 27, 27, 0, 0, 2, 2, POOLING_MAX); LayerSpecifier temp; temp.initPointer(POOLING); *((PoolingDescriptor *)temp.params) = layer3; layer_specifier.push_back(temp); } { ConvDescriptor layer4; layer4.initializeValues(256, 384, 3, 3, 13, 13, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer4; layer_specifier.push_back(temp); } { ConvDescriptor layer5; layer5.initializeValues(384, 384, 3, 3, 13, 13, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer5; layer_specifier.push_back(temp); } { ConvDescriptor layer6; layer6.initializeValues(384, 256, 3, 3, 13, 13, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer6; layer_specifier.push_back(temp); } { PoolingDescriptor layer7; layer7.initializeValues(256, 3, 3, 13, 13, 0, 0, 2, 2, POOLING_MAX); LayerSpecifier temp; temp.initPointer(POOLING); *((PoolingDescriptor *)temp.params) = layer7; layer_specifier.push_back(temp); } { FCDescriptor layer8; layer8.initializeValues(9216, 4096, RELU); LayerSpecifier temp; temp.initPointer(FULLY_CONNECTED); *((FCDescriptor *)temp.params) = layer8; layer_specifier.push_back(temp); } { 
FCDescriptor layer9; layer9.initializeValues(4096, 4096, RELU); LayerSpecifier temp; temp.initPointer(FULLY_CONNECTED); *((FCDescriptor *)temp.params) = layer9; layer_specifier.push_back(temp); } { FCDescriptor layer10; layer10.initializeValues(4096, 1000); LayerSpecifier temp; temp.initPointer(FULLY_CONNECTED); *((FCDescriptor *)temp.params) = layer10; layer_specifier.push_back(temp); } { SoftmaxDescriptor layer11; layer11.initializeValues(SOFTMAX_ACCURATE, SOFTMAX_MODE_INSTANCE, 1000, 1, 1); LayerSpecifier temp; temp.initPointer(SOFTMAX); *((SoftmaxDescriptor *)temp.params) = layer11; layer_specifier.push_back(temp); } vDNNConvAlgo vdnn_conv_algo = vDNN_PERFORMANCE_OPTIMAL; vDNNType vdnn_type = vDNN_DYN; string filename("vdnn_dyn"); if (argc == 3) { filename.assign("vdnn"); // argv[1] - layers to offload, argv[2] - conv algo to use if (strcmp(argv[1], "dyn") == 0) { vdnn_type = vDNN_DYN; filename.append("_dyn"); } else if (strcmp(argv[1], "conv") == 0) { vdnn_type = vDNN_CONV; filename.append("_conv"); } else if (strcmp(argv[1], "all") == 0) { vdnn_type = vDNN_ALL; filename.append("_all"); } else if (strcmp(argv[1], "alternate_conv") == 0) { vdnn_type = vDNN_ALTERNATE_CONV; filename.append("_alternate_conv"); } else { printf("invalid argument.. using vdnn dynamic\n"); filename.assign("vdnn_dyn"); } if ((strcmp(argv[1], "conv") == 0 or strcmp(argv[1], "all") == 0 or strcmp(argv[1], "alternate_conv") == 0)) { if (strcmp(argv[2], "p") == 0) { vdnn_conv_algo = vDNN_PERFORMANCE_OPTIMAL; filename.append("_p"); } else if (strcmp(argv[2], "m") == 0) { vdnn_conv_algo = vDNN_MEMORY_OPTIMAL; filename.append("_m"); } else { printf("invalid argument.. using vdnn dynamic\n"); filename.assign("vdnn_dyn"); } } } int batch_size = 256; long long dropout_seed = 1; float softmax_eps = 1e-8; float init_std_dev = 0.1; NeuralNet net(layer_specifier, DATA_FLOAT, batch_size, TENSOR_NCHW, dropout_seed, softmax_eps, init_std_dev, vdnn_type, vdnn_conv_algo, SGD); int num_epoch = 1000; double learning_rate = 1e-3; double learning_rate_decay = 0.9; Solver solver(&net, (void *)f_train_images, f_train_labels, (void *)f_train_images, f_train_labels, num_epoch, SGD, learning_rate, learning_rate_decay, num_train, num_train); vector<float> loss; vector<float> time; vector<vector<float> > fwd_vdnn_lag, bwd_vdnn_lag; solver.getTrainTime(loss, time, 100, fwd_vdnn_lag, bwd_vdnn_lag); printTimes(time, filename); printvDNNLag(fwd_vdnn_lag, bwd_vdnn_lag, filename); vector<vector<float> > fwd_computation_time, bwd_computation_time; solver.getComputationTime(1, fwd_computation_time, bwd_computation_time); vector<vector<float> > fwd_transfer_time, bwd_transfer_time; solver.getTransferTime(1, fwd_transfer_time, bwd_transfer_time); printComputationTransferTimes(fwd_computation_time, bwd_computation_time, true, filename); printComputationTransferTimes(fwd_transfer_time, bwd_transfer_time, false, filename); } void printTimes(vector<float> &time, string filename) { float mean_time = 0.0; float std_dev = 0.0; int N = time.size(); for (int i = 0; i < N; i++) { mean_time += time[i]; } mean_time /= N; for (int i = 0; i < N; i++) { std_dev += pow(time[i] - mean_time, 2); } std_dev /= N; std_dev = pow(std_dev, 0.5); cout << "Average time: " << mean_time << endl; cout << "Standard deviation: " << std_dev << endl; filename.append(".dat"); fstream f; f.open(filename.c_str(), ios_base::out); for (int i = 0; i < N; i++) { f << time[i] << endl; } f << "mean_time: " << mean_time << endl; f << "standard_deviation: " << std_dev << endl; f.close(); 
filename.append(".bin"); fstream f_bin; f_bin.open(filename.c_str(), ios_base::out); f_bin.write((char *)&N, sizeof(N)); for (int i = 0; i < N; i++) { f_bin.write((char *)&time[i], sizeof(time[i])); } f_bin.close(); } void printvDNNLag(vector<vector<float> > &fwd_vdnn_lag, vector<vector<float> > &bwd_vdnn_lag, string filename) { filename.append("_lag.dat"); fstream f; f.open(filename.c_str(), ios_base::out); int N = fwd_vdnn_lag.size(); for (int i = 0; i < N; i++) { for (int j = 0; j < fwd_vdnn_lag[i].size(); j++) { f << "fwd" << j << ": " << fwd_vdnn_lag[i][j] << endl; } for (int j = 0; j < bwd_vdnn_lag[i].size(); j++) { f << "bwd" << j << ": " << bwd_vdnn_lag[i][j] << endl; } f << endl; } f.close(); } void printComputationTransferTimes(vector<vector<float> > &fwd_times, vector<vector<float> > &bwd_times, bool computation, string filename) { if (computation) filename.append("_compute_time.dat"); else filename.append("_transfer_time.dat"); fstream f; f.open(filename.c_str(), ios_base::out); int N = fwd_times.size(); for (int i = 0; i < N; i++) { for (int j = 0; j < fwd_times[i].size(); j++) { f << "fwd" << j << ": " << fwd_times[i][j] << endl; } for (int j = 0; j < bwd_times[i].size(); j++) { f << "bwd" << j << ": " << bwd_times[i][j] << endl; } f << endl; } f.close(); }
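// In the pair above, the training buffers are allocated with cudaMallocHost in
// the .cu file, which hipify maps to hipHostMalloc with the same arguments.
// Pinned (page-locked) host memory is what lets the vDNN offload/prefetch copies
// run asynchronously alongside compute. A self-contained sketch of the
// allocation pattern; alloc_pinned_floats is a hypothetical helper, and the
// element count is left to the caller (the real file uses num_train * input_size).
#include <cstdio>
#include <cuda_runtime.h>

static float *alloc_pinned_floats(size_t n_elems)
{
    float *h_ptr = NULL;
    cudaError_t err = cudaMallocHost((void **)&h_ptr, n_elems * sizeof(float));
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaMallocHost failed: %s\n", cudaGetErrorString(err));
        return NULL;
    }
    return h_ptr;   // release later with cudaFreeHost(h_ptr)
}
// e.g. float *f_train_images = alloc_pinned_floats((size_t)num_train * input_size);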
8d8993077adc3ebcf8555c091deea164ddae3b42.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "reduce_v4.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *in = NULL; hipMalloc(&in, XSIZE*YSIZE); float *out = NULL; hipMalloc(&out, XSIZE*YSIZE); int n = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( reduce_v4), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out,n); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( reduce_v4), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( reduce_v4), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
8d8993077adc3ebcf8555c091deea164ddae3b42.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "reduce_v4.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *in = NULL; cudaMalloc(&in, XSIZE*YSIZE); float *out = NULL; cudaMalloc(&out, XSIZE*YSIZE); int n = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); reduce_v4<<<gridBlock,threadBlock>>>(in,out,n); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { reduce_v4<<<gridBlock,threadBlock>>>(in,out,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { reduce_v4<<<gridBlock,threadBlock>>>(in,out,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
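// A sizing caveat for the sweep harness above: cudaMalloc and hipMalloc take a
// size in bytes, and the calls pass XSIZE*YSIZE directly while the kernel is
// handed n = XSIZE*YSIZE as an element count, so a kernel that reads n floats
// would touch four times the allocated range. If element counts are intended,
// the usual idiom multiplies by sizeof(float); alloc_device_floats below is a
// hypothetical helper, not part of the generated harness.
#include <cuda_runtime.h>

static float *alloc_device_floats(size_t n_elems)
{
    float *d_ptr = NULL;
    cudaMalloc((void **)&d_ptr, n_elems * sizeof(float));   // size in bytes
    return d_ptr;
}
// e.g. float *in  = alloc_device_floats((size_t)XSIZE * YSIZE);
//      float *out = alloc_device_floats((size_t)XSIZE * YSIZE);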
regression_obj.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2015-2022 by XGBoost Contributors * \file regression_obj.cu * \brief Definition of single-value regression and classification objectives. * \author Tianqi Chen, Kailong Chen */ #include <dmlc/omp.h> #include <xgboost/logging.h> #include <xgboost/objective.h> #include <xgboost/tree_model.h> #include <cmath> #include <memory> #include <vector> #include "../common/common.h" #include "../common/linalg_op.h" #include "../common/pseudo_huber.h" #include "../common/threading_utils.h" #include "../common/transform.h" #include "./regression_loss.h" #include "adaptive.h" #include "xgboost/base.h" #include "xgboost/data.h" #include "xgboost/generic_parameters.h" #include "xgboost/host_device_vector.h" #include "xgboost/json.h" #include "xgboost/linalg.h" #include "xgboost/parameter.h" #include "xgboost/span.h" #if defined(XGBOOST_USE_CUDA) #include "../common/device_helpers.cuh" #include "../common/linalg_op.cuh" #endif // defined(XGBOOST_USE_CUDA) namespace xgboost { namespace obj { namespace { void CheckRegInputs(MetaInfo const& info, HostDeviceVector<bst_float> const& preds) { CHECK_EQ(info.labels.Shape(0), info.num_row_) << "Invalid shape of labels."; CHECK_EQ(info.labels.Size(), preds.Size()) << "Invalid shape of labels."; if (!info.weights_.Empty()) { CHECK_EQ(info.weights_.Size(), info.num_row_) << "Number of weights should be equal to number of data points."; } } } // anonymous namespace #if defined(XGBOOST_USE_CUDA) DMLC_REGISTRY_FILE_TAG(regression_obj_gpu); #endif // defined(XGBOOST_USE_CUDA) struct RegLossParam : public XGBoostParameter<RegLossParam> { float scale_pos_weight; // declare parameters DMLC_DECLARE_PARAMETER(RegLossParam) { DMLC_DECLARE_FIELD(scale_pos_weight).set_default(1.0f).set_lower_bound(0.0f) .describe("Scale the weight of positive examples by this factor"); } }; template<typename Loss> class RegLossObj : public ObjFunction { protected: HostDeviceVector<float> additional_input_; public: // 0 - label_correct flag, 1 - scale_pos_weight, 2 - is_null_weight RegLossObj(): additional_input_(3) {} void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); } ObjInfo Task() const override { return Loss::Info(); } uint32_t Targets(MetaInfo const& info) const override { // Multi-target regression. return ::max(static_cast<size_t>(1), info.labels.Shape(1)); } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair>* out_gpair) override { CheckRegInputs(info, preds); size_t const ndata = preds.Size(); out_gpair->Resize(ndata); auto device = ctx_->gpu_id; additional_input_.HostVector().begin()[0] = 1; // Fill the label_correct flag bool is_null_weight = info.weights_.Size() == 0; auto scale_pos_weight = param_.scale_pos_weight; additional_input_.HostVector().begin()[1] = scale_pos_weight; additional_input_.HostVector().begin()[2] = is_null_weight; const size_t nthreads = ctx_->Threads(); bool on_device = device >= 0; // On CPU we run the transformation each thread processing a contigious block of data // for better performance. const size_t n_data_blocks = ::max(static_cast<size_t>(1), (on_device ? 
ndata : nthreads)); const size_t block_size = ndata / n_data_blocks + !!(ndata % n_data_blocks); auto const n_targets = ::max(info.labels.Shape(1), static_cast<size_t>(1)); common::Transform<>::Init( [block_size, ndata, n_targets] XGBOOST_DEVICE( size_t data_block_idx, common::Span<float> _additional_input, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { const bst_float* preds_ptr = _preds.data(); const bst_float* labels_ptr = _labels.data(); const bst_float* weights_ptr = _weights.data(); GradientPair* out_gpair_ptr = _out_gpair.data(); const size_t begin = data_block_idx*block_size; const size_t end = ::min(ndata, begin + block_size); const float _scale_pos_weight = _additional_input[1]; const bool _is_null_weight = _additional_input[2]; for (size_t idx = begin; idx < end; ++idx) { bst_float p = Loss::PredTransform(preds_ptr[idx]); bst_float w = _is_null_weight ? 1.0f : weights_ptr[idx / n_targets]; bst_float label = labels_ptr[idx]; if (label == 1.0f) { w *= _scale_pos_weight; } if (!Loss::CheckLabel(label)) { // If there is an incorrect label, the host code will know. _additional_input[0] = 0; } out_gpair_ptr[idx] = GradientPair(Loss::FirstOrderGradient(p, label) * w, Loss::SecondOrderGradient(p, label) * w); } }, common::Range{0, static_cast<int64_t>(n_data_blocks)}, nthreads, device) .Eval(&additional_input_, out_gpair, &preds, info.labels.Data(), &info.weights_); auto const flag = additional_input_.HostVector().begin()[0]; if (flag == 0) { LOG(FATAL) << Loss::LabelErrorMsg(); } } public: const char* DefaultEvalMetric() const override { return Loss::DefaultEvalMetric(); } void PredTransform(HostDeviceVector<float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<float> _preds) { _preds[_idx] = Loss::PredTransform(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } float ProbToMargin(float base_score) const override { return Loss::ProbToMargin(base_score); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String(Loss::Name()); out["reg_loss_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["reg_loss_param"], &param_); } protected: RegLossParam param_; }; // register the objective functions DMLC_REGISTER_PARAMETER(RegLossParam); XGBOOST_REGISTER_OBJECTIVE(SquaredLossRegression, LinearSquareLoss::Name()) .describe("Regression with squared error.") .set_body([]() { return new RegLossObj<LinearSquareLoss>(); }); XGBOOST_REGISTER_OBJECTIVE(SquareLogError, SquaredLogError::Name()) .describe("Regression with root mean squared logarithmic error.") .set_body([]() { return new RegLossObj<SquaredLogError>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticRegression, LogisticRegression::Name()) .describe("Logistic regression for probability regression task.") .set_body([]() { return new RegLossObj<LogisticRegression>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticClassification, LogisticClassification::Name()) .describe("Logistic regression for binary classification task.") .set_body([]() { return new RegLossObj<LogisticClassification>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticRaw, LogisticRaw::Name()) .describe("Logistic regression for classification, output score " "before logistic transformation.") .set_body([]() { return new RegLossObj<LogisticRaw>(); }); // Deprecated functions 
XGBOOST_REGISTER_OBJECTIVE(LinearRegression, "reg:linear") .describe("Regression with squared error.") .set_body([]() { LOG(WARNING) << "reg:linear is now deprecated in favor of reg:squarederror."; return new RegLossObj<LinearSquareLoss>(); }); // End deprecated class PseudoHuberRegression : public ObjFunction { PesudoHuberParam param_; public: void Configure(Args const& args) override { param_.UpdateAllowUnknown(args); } ObjInfo Task() const override { return ObjInfo::kRegression; } uint32_t Targets(MetaInfo const& info) const override { return ::max(static_cast<size_t>(1), info.labels.Shape(1)); } void GetGradient(HostDeviceVector<bst_float> const& preds, const MetaInfo& info, int iter, HostDeviceVector<GradientPair>* out_gpair) override { CheckRegInputs(info, preds); auto slope = param_.huber_slope; CHECK_NE(slope, 0.0) << "slope for pseudo huber cannot be 0."; auto labels = info.labels.View(ctx_->gpu_id); out_gpair->SetDevice(ctx_->gpu_id); out_gpair->Resize(info.labels.Size()); auto gpair = linalg::MakeVec(out_gpair); preds.SetDevice(ctx_->gpu_id); auto predt = linalg::MakeVec(&preds); info.weights_.SetDevice(ctx_->gpu_id); common::OptionalWeights weight{ctx_->IsCPU() ? info.weights_.ConstHostSpan() : info.weights_.ConstDeviceSpan()}; linalg::ElementWiseKernel(ctx_, labels, [=] XGBOOST_DEVICE(size_t i, float const y) mutable { auto sample_id = std::get<0>(linalg::UnravelIndex(i, labels.Shape())); const float z = predt(i) - y; const float scale_sqrt = std::sqrt(1 + common::Sqr(z) / common::Sqr(slope)); float grad = z / scale_sqrt; auto scale = common::Sqr(slope) + common::Sqr(z); float hess = common::Sqr(slope) / (scale * scale_sqrt); auto w = weight[sample_id]; gpair(i) = {grad * w, hess * w}; }); } const char* DefaultEvalMetric() const override { return "mphe"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:pseudohubererror"); out["pseudo_huber_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { auto const& config = get<Object const>(in); if (config.find("pseudo_huber_param") == config.cend()) { // The parameter is added in 1.6. return; } FromJson(in["pseudo_huber_param"], &param_); } }; XGBOOST_REGISTER_OBJECTIVE(PseudoHuberRegression, "reg:pseudohubererror") .describe("Regression Pseudo Huber error.") .set_body([]() { return new PseudoHuberRegression(); }); // declare parameter struct PoissonRegressionParam : public XGBoostParameter<PoissonRegressionParam> { float max_delta_step; DMLC_DECLARE_PARAMETER(PoissonRegressionParam) { DMLC_DECLARE_FIELD(max_delta_step).set_lower_bound(0.0f).set_default(0.7f) .describe("Maximum delta step we allow each weight estimation to be." 
\ " This parameter is required for possion regression."); } }; // poisson regression for count class PoissonRegression : public ObjFunction { public: // declare functions void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); } ObjInfo Task() const override { return ObjInfo::kRegression; } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; size_t const ndata = preds.Size(); out_gpair->Resize(ndata); auto device = ctx_->gpu_id; label_correct_.Resize(1); label_correct_.Fill(1); bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } bst_float max_delta_step = param_.max_delta_step; common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y < 0.0f) { _label_correct[0] = 0; } _out_gpair[_idx] = GradientPair{(expf(p) - y) * w, expf(p + max_delta_step) * w}; }, common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval( &label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "PoissonRegression: label must be nonnegative"; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return ::log(base_score); } const char* DefaultEvalMetric() const override { return "poisson-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("count:poisson"); out["poisson_regression_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["poisson_regression_param"], &param_); } private: PoissonRegressionParam param_; HostDeviceVector<int> label_correct_; }; // register the objective functions DMLC_REGISTER_PARAMETER(PoissonRegressionParam); XGBOOST_REGISTER_OBJECTIVE(PoissonRegression, "count:poisson") .describe("Poisson regression for count data.") .set_body([]() { return new PoissonRegression(); }); // cox regression for survival data (negative values mean they are censored) class CoxRegression : public ObjFunction { public: void Configure(Args const&) override {} ObjInfo Task() const override { return ObjInfo::kRegression; } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << 
"labels are not correctly provided"; const auto& preds_h = preds.HostVector(); out_gpair->Resize(preds_h.size()); auto& gpair = out_gpair->HostVector(); const std::vector<size_t> &label_order = info.LabelAbsSort(); const omp_ulong ndata = static_cast<omp_ulong>(preds_h.size()); // NOLINT(*) const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } // pre-compute a sum double exp_p_sum = 0; // we use double because we might need the precision with large datasets for (omp_ulong i = 0; i < ndata; ++i) { exp_p_sum += ::exp(preds_h[label_order[i]]); } // start calculating grad and hess const auto& labels = info.labels.HostView(); double r_k = 0; double s_k = 0; double last_exp_p = 0.0; double last_abs_y = 0.0; double accumulated_sum = 0; for (omp_ulong i = 0; i < ndata; ++i) { // NOLINT(*) const size_t ind = label_order[i]; const double p = preds_h[ind]; const double exp_p = ::exp(p); const double w = info.GetWeight(ind); const double y = labels(ind); const double abs_y = std::abs(y); // only update the denominator after we move forward in time (labels are sorted) // this is Breslow's method for ties accumulated_sum += last_exp_p; if (last_abs_y < abs_y) { exp_p_sum -= accumulated_sum; accumulated_sum = 0; } else { CHECK(last_abs_y <= abs_y) << "CoxRegression: labels must be in sorted order, " << "MetaInfo::LabelArgsort failed!"; } if (y > 0) { r_k += 1.0/exp_p_sum; s_k += 1.0/(exp_p_sum*exp_p_sum); } const double grad = exp_p*r_k - static_cast<bst_float>(y > 0); const double hess = exp_p*r_k - exp_p*exp_p * s_k; gpair.at(ind) = GradientPair(grad * w, hess * w); last_abs_y = abs_y; last_exp_p = exp_p; } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { std::vector<bst_float> &preds = io_preds->HostVector(); const long ndata = static_cast<long>(preds.size()); // NOLINT(*) common::ParallelFor(ndata, ctx_->Threads(), [&](long j) { // NOLINT(*) preds[j] = ::exp(preds[j]); }); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return ::log(base_score); } const char* DefaultEvalMetric() const override { return "cox-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("survival:cox"); } void LoadConfig(Json const&) override {} }; // register the objective function XGBOOST_REGISTER_OBJECTIVE(CoxRegression, "survival:cox") .describe("Cox regression for censored survival data (negative labels are considered censored).") .set_body([]() { return new CoxRegression(); }); // gamma regression class GammaRegression : public ObjFunction { public: void Configure(Args const&) override {} ObjInfo Task() const override { return ObjInfo::kRegression; } void GetGradient(const HostDeviceVector<bst_float> &preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; const size_t ndata = preds.Size(); auto device = ctx_->gpu_id; out_gpair->Resize(ndata); label_correct_.Resize(1); label_correct_.Fill(1); const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, 
common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y <= 0.0f) { _label_correct[0] = 0; } _out_gpair[_idx] = GradientPair((1 - y / expf(p)) * w, y / expf(p) * w); }, common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval( &label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "GammaRegression: label must be positive."; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return ::log(base_score); } const char* DefaultEvalMetric() const override { return "gamma-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:gamma"); } void LoadConfig(Json const&) override {} private: HostDeviceVector<int> label_correct_; }; // register the objective functions XGBOOST_REGISTER_OBJECTIVE(GammaRegression, "reg:gamma") .describe("Gamma regression for severity data.") .set_body([]() { return new GammaRegression(); }); // declare parameter struct TweedieRegressionParam : public XGBoostParameter<TweedieRegressionParam> { float tweedie_variance_power; DMLC_DECLARE_PARAMETER(TweedieRegressionParam) { DMLC_DECLARE_FIELD(tweedie_variance_power).set_range(1.0f, 2.0f).set_default(1.5f) .describe("Tweedie variance power. Must be between in range [1, 2)."); } }; // tweedie regression class TweedieRegression : public ObjFunction { public: // declare functions void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); std::ostringstream os; os << "tweedie-nloglik@" << param_.tweedie_variance_power; metric_ = os.str(); } ObjInfo Task() const override { return ObjInfo::kRegression; } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; const size_t ndata = preds.Size(); out_gpair->Resize(ndata); auto device = ctx_->gpu_id; label_correct_.Resize(1); label_correct_.Fill(1); const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } const float rho = param_.tweedie_variance_power; common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 
1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y < 0.0f) { _label_correct[0] = 0; } bst_float grad = -y * expf((1 - rho) * p) + expf((2 - rho) * p); bst_float hess = -y * (1 - rho) * \ ::exp((1 - rho) * p) + (2 - rho) * expf((2 - rho) * p); _out_gpair[_idx] = GradientPair(grad * w, hess * w); }, common::Range{0, static_cast<int64_t>(ndata), 1}, this->ctx_->Threads(), device) .Eval(&label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "TweedieRegression: label must be nonnegative"; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return ::log(base_score); } const char* DefaultEvalMetric() const override { return metric_.c_str(); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:tweedie"); out["tweedie_regression_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["tweedie_regression_param"], &param_); } private: std::string metric_; TweedieRegressionParam param_; HostDeviceVector<int> label_correct_; }; // register the objective functions DMLC_REGISTER_PARAMETER(TweedieRegressionParam); XGBOOST_REGISTER_OBJECTIVE(TweedieRegression, "reg:tweedie") .describe("Tweedie regression for insurance data.") .set_body([]() { return new TweedieRegression(); }); class MeanAbsoluteError : public ObjFunction { public: void Configure(Args const&) override {} ObjInfo Task() const override { return {ObjInfo::kRegression, true, true}; } void GetGradient(HostDeviceVector<bst_float> const& preds, const MetaInfo& info, int iter, HostDeviceVector<GradientPair>* out_gpair) override { CheckRegInputs(info, preds); auto labels = info.labels.View(ctx_->gpu_id); out_gpair->SetDevice(ctx_->gpu_id); out_gpair->Resize(info.labels.Size()); auto gpair = linalg::MakeVec(out_gpair); preds.SetDevice(ctx_->gpu_id); auto predt = linalg::MakeVec(&preds); info.weights_.SetDevice(ctx_->gpu_id); common::OptionalWeights weight{ctx_->IsCPU() ? 
info.weights_.ConstHostSpan() : info.weights_.ConstDeviceSpan()}; linalg::ElementWiseKernel(ctx_, labels, [=] XGBOOST_DEVICE(size_t i, float const y) mutable { auto sign = [](auto x) { return (x > static_cast<decltype(x)>(0)) - (x < static_cast<decltype(x)>(0)); }; auto sample_id = std::get<0>(linalg::UnravelIndex(i, labels.Shape())); auto grad = sign(predt(i) - y) * weight[i]; auto hess = weight[sample_id]; gpair(i) = GradientPair{grad, hess}; }); } void UpdateTreeLeaf(HostDeviceVector<bst_node_t> const& position, MetaInfo const& info, HostDeviceVector<float> const& prediction, RegTree* p_tree) const override { if (ctx_->IsCPU()) { auto const& h_position = position.ConstHostVector(); detail::UpdateTreeLeafHost(ctx_, h_position, info, prediction, 0.5, p_tree); } else { #if defined(XGBOOST_USE_CUDA) position.SetDevice(ctx_->gpu_id); auto d_position = position.ConstDeviceSpan(); detail::UpdateTreeLeafDevice(ctx_, d_position, info, prediction, 0.5, p_tree); #else common::AssertGPUSupport(); #endif // defined(XGBOOST_USE_CUDA) } } const char* DefaultEvalMetric() const override { return "mae"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:absoluteerror"); } void LoadConfig(Json const& in) override {} }; XGBOOST_REGISTER_OBJECTIVE(MeanAbsoluteError, "reg:absoluteerror") .describe("Mean absoluate error.") .set_body([]() { return new MeanAbsoluteError(); }); } // namespace obj } // namespace xgboost
regression_obj.cu
/*! * Copyright 2015-2022 by XGBoost Contributors * \file regression_obj.cu * \brief Definition of single-value regression and classification objectives. * \author Tianqi Chen, Kailong Chen */ #include <dmlc/omp.h> #include <xgboost/logging.h> #include <xgboost/objective.h> #include <xgboost/tree_model.h> #include <cmath> #include <memory> #include <vector> #include "../common/common.h" #include "../common/linalg_op.h" #include "../common/pseudo_huber.h" #include "../common/threading_utils.h" #include "../common/transform.h" #include "./regression_loss.h" #include "adaptive.h" #include "xgboost/base.h" #include "xgboost/data.h" #include "xgboost/generic_parameters.h" #include "xgboost/host_device_vector.h" #include "xgboost/json.h" #include "xgboost/linalg.h" #include "xgboost/parameter.h" #include "xgboost/span.h" #if defined(XGBOOST_USE_CUDA) #include "../common/device_helpers.cuh" #include "../common/linalg_op.cuh" #endif // defined(XGBOOST_USE_CUDA) namespace xgboost { namespace obj { namespace { void CheckRegInputs(MetaInfo const& info, HostDeviceVector<bst_float> const& preds) { CHECK_EQ(info.labels.Shape(0), info.num_row_) << "Invalid shape of labels."; CHECK_EQ(info.labels.Size(), preds.Size()) << "Invalid shape of labels."; if (!info.weights_.Empty()) { CHECK_EQ(info.weights_.Size(), info.num_row_) << "Number of weights should be equal to number of data points."; } } } // anonymous namespace #if defined(XGBOOST_USE_CUDA) DMLC_REGISTRY_FILE_TAG(regression_obj_gpu); #endif // defined(XGBOOST_USE_CUDA) struct RegLossParam : public XGBoostParameter<RegLossParam> { float scale_pos_weight; // declare parameters DMLC_DECLARE_PARAMETER(RegLossParam) { DMLC_DECLARE_FIELD(scale_pos_weight).set_default(1.0f).set_lower_bound(0.0f) .describe("Scale the weight of positive examples by this factor"); } }; template<typename Loss> class RegLossObj : public ObjFunction { protected: HostDeviceVector<float> additional_input_; public: // 0 - label_correct flag, 1 - scale_pos_weight, 2 - is_null_weight RegLossObj(): additional_input_(3) {} void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); } ObjInfo Task() const override { return Loss::Info(); } uint32_t Targets(MetaInfo const& info) const override { // Multi-target regression. return std::max(static_cast<size_t>(1), info.labels.Shape(1)); } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair>* out_gpair) override { CheckRegInputs(info, preds); size_t const ndata = preds.Size(); out_gpair->Resize(ndata); auto device = ctx_->gpu_id; additional_input_.HostVector().begin()[0] = 1; // Fill the label_correct flag bool is_null_weight = info.weights_.Size() == 0; auto scale_pos_weight = param_.scale_pos_weight; additional_input_.HostVector().begin()[1] = scale_pos_weight; additional_input_.HostVector().begin()[2] = is_null_weight; const size_t nthreads = ctx_->Threads(); bool on_device = device >= 0; // On CPU we run the transformation each thread processing a contigious block of data // for better performance. const size_t n_data_blocks = std::max(static_cast<size_t>(1), (on_device ? 
ndata : nthreads)); const size_t block_size = ndata / n_data_blocks + !!(ndata % n_data_blocks); auto const n_targets = std::max(info.labels.Shape(1), static_cast<size_t>(1)); common::Transform<>::Init( [block_size, ndata, n_targets] XGBOOST_DEVICE( size_t data_block_idx, common::Span<float> _additional_input, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { const bst_float* preds_ptr = _preds.data(); const bst_float* labels_ptr = _labels.data(); const bst_float* weights_ptr = _weights.data(); GradientPair* out_gpair_ptr = _out_gpair.data(); const size_t begin = data_block_idx*block_size; const size_t end = std::min(ndata, begin + block_size); const float _scale_pos_weight = _additional_input[1]; const bool _is_null_weight = _additional_input[2]; for (size_t idx = begin; idx < end; ++idx) { bst_float p = Loss::PredTransform(preds_ptr[idx]); bst_float w = _is_null_weight ? 1.0f : weights_ptr[idx / n_targets]; bst_float label = labels_ptr[idx]; if (label == 1.0f) { w *= _scale_pos_weight; } if (!Loss::CheckLabel(label)) { // If there is an incorrect label, the host code will know. _additional_input[0] = 0; } out_gpair_ptr[idx] = GradientPair(Loss::FirstOrderGradient(p, label) * w, Loss::SecondOrderGradient(p, label) * w); } }, common::Range{0, static_cast<int64_t>(n_data_blocks)}, nthreads, device) .Eval(&additional_input_, out_gpair, &preds, info.labels.Data(), &info.weights_); auto const flag = additional_input_.HostVector().begin()[0]; if (flag == 0) { LOG(FATAL) << Loss::LabelErrorMsg(); } } public: const char* DefaultEvalMetric() const override { return Loss::DefaultEvalMetric(); } void PredTransform(HostDeviceVector<float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<float> _preds) { _preds[_idx] = Loss::PredTransform(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } float ProbToMargin(float base_score) const override { return Loss::ProbToMargin(base_score); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String(Loss::Name()); out["reg_loss_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["reg_loss_param"], &param_); } protected: RegLossParam param_; }; // register the objective functions DMLC_REGISTER_PARAMETER(RegLossParam); XGBOOST_REGISTER_OBJECTIVE(SquaredLossRegression, LinearSquareLoss::Name()) .describe("Regression with squared error.") .set_body([]() { return new RegLossObj<LinearSquareLoss>(); }); XGBOOST_REGISTER_OBJECTIVE(SquareLogError, SquaredLogError::Name()) .describe("Regression with root mean squared logarithmic error.") .set_body([]() { return new RegLossObj<SquaredLogError>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticRegression, LogisticRegression::Name()) .describe("Logistic regression for probability regression task.") .set_body([]() { return new RegLossObj<LogisticRegression>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticClassification, LogisticClassification::Name()) .describe("Logistic regression for binary classification task.") .set_body([]() { return new RegLossObj<LogisticClassification>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticRaw, LogisticRaw::Name()) .describe("Logistic regression for classification, output score " "before logistic transformation.") .set_body([]() { return new RegLossObj<LogisticRaw>(); }); // Deprecated functions 
XGBOOST_REGISTER_OBJECTIVE(LinearRegression, "reg:linear") .describe("Regression with squared error.") .set_body([]() { LOG(WARNING) << "reg:linear is now deprecated in favor of reg:squarederror."; return new RegLossObj<LinearSquareLoss>(); }); // End deprecated class PseudoHuberRegression : public ObjFunction { PesudoHuberParam param_; public: void Configure(Args const& args) override { param_.UpdateAllowUnknown(args); } ObjInfo Task() const override { return ObjInfo::kRegression; } uint32_t Targets(MetaInfo const& info) const override { return std::max(static_cast<size_t>(1), info.labels.Shape(1)); } void GetGradient(HostDeviceVector<bst_float> const& preds, const MetaInfo& info, int iter, HostDeviceVector<GradientPair>* out_gpair) override { CheckRegInputs(info, preds); auto slope = param_.huber_slope; CHECK_NE(slope, 0.0) << "slope for pseudo huber cannot be 0."; auto labels = info.labels.View(ctx_->gpu_id); out_gpair->SetDevice(ctx_->gpu_id); out_gpair->Resize(info.labels.Size()); auto gpair = linalg::MakeVec(out_gpair); preds.SetDevice(ctx_->gpu_id); auto predt = linalg::MakeVec(&preds); info.weights_.SetDevice(ctx_->gpu_id); common::OptionalWeights weight{ctx_->IsCPU() ? info.weights_.ConstHostSpan() : info.weights_.ConstDeviceSpan()}; linalg::ElementWiseKernel(ctx_, labels, [=] XGBOOST_DEVICE(size_t i, float const y) mutable { auto sample_id = std::get<0>(linalg::UnravelIndex(i, labels.Shape())); const float z = predt(i) - y; const float scale_sqrt = std::sqrt(1 + common::Sqr(z) / common::Sqr(slope)); float grad = z / scale_sqrt; auto scale = common::Sqr(slope) + common::Sqr(z); float hess = common::Sqr(slope) / (scale * scale_sqrt); auto w = weight[sample_id]; gpair(i) = {grad * w, hess * w}; }); } const char* DefaultEvalMetric() const override { return "mphe"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:pseudohubererror"); out["pseudo_huber_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { auto const& config = get<Object const>(in); if (config.find("pseudo_huber_param") == config.cend()) { // The parameter is added in 1.6. return; } FromJson(in["pseudo_huber_param"], &param_); } }; XGBOOST_REGISTER_OBJECTIVE(PseudoHuberRegression, "reg:pseudohubererror") .describe("Regression Pseudo Huber error.") .set_body([]() { return new PseudoHuberRegression(); }); // declare parameter struct PoissonRegressionParam : public XGBoostParameter<PoissonRegressionParam> { float max_delta_step; DMLC_DECLARE_PARAMETER(PoissonRegressionParam) { DMLC_DECLARE_FIELD(max_delta_step).set_lower_bound(0.0f).set_default(0.7f) .describe("Maximum delta step we allow each weight estimation to be." 
\ " This parameter is required for possion regression."); } }; // poisson regression for count class PoissonRegression : public ObjFunction { public: // declare functions void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); } ObjInfo Task() const override { return ObjInfo::kRegression; } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; size_t const ndata = preds.Size(); out_gpair->Resize(ndata); auto device = ctx_->gpu_id; label_correct_.Resize(1); label_correct_.Fill(1); bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } bst_float max_delta_step = param_.max_delta_step; common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y < 0.0f) { _label_correct[0] = 0; } _out_gpair[_idx] = GradientPair{(expf(p) - y) * w, expf(p + max_delta_step) * w}; }, common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval( &label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "PoissonRegression: label must be nonnegative"; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return std::log(base_score); } const char* DefaultEvalMetric() const override { return "poisson-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("count:poisson"); out["poisson_regression_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["poisson_regression_param"], &param_); } private: PoissonRegressionParam param_; HostDeviceVector<int> label_correct_; }; // register the objective functions DMLC_REGISTER_PARAMETER(PoissonRegressionParam); XGBOOST_REGISTER_OBJECTIVE(PoissonRegression, "count:poisson") .describe("Poisson regression for count data.") .set_body([]() { return new PoissonRegression(); }); // cox regression for survival data (negative values mean they are censored) class CoxRegression : public ObjFunction { public: void Configure(Args const&) override {} ObjInfo Task() const override { return ObjInfo::kRegression; } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << 
"labels are not correctly provided"; const auto& preds_h = preds.HostVector(); out_gpair->Resize(preds_h.size()); auto& gpair = out_gpair->HostVector(); const std::vector<size_t> &label_order = info.LabelAbsSort(); const omp_ulong ndata = static_cast<omp_ulong>(preds_h.size()); // NOLINT(*) const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } // pre-compute a sum double exp_p_sum = 0; // we use double because we might need the precision with large datasets for (omp_ulong i = 0; i < ndata; ++i) { exp_p_sum += std::exp(preds_h[label_order[i]]); } // start calculating grad and hess const auto& labels = info.labels.HostView(); double r_k = 0; double s_k = 0; double last_exp_p = 0.0; double last_abs_y = 0.0; double accumulated_sum = 0; for (omp_ulong i = 0; i < ndata; ++i) { // NOLINT(*) const size_t ind = label_order[i]; const double p = preds_h[ind]; const double exp_p = std::exp(p); const double w = info.GetWeight(ind); const double y = labels(ind); const double abs_y = std::abs(y); // only update the denominator after we move forward in time (labels are sorted) // this is Breslow's method for ties accumulated_sum += last_exp_p; if (last_abs_y < abs_y) { exp_p_sum -= accumulated_sum; accumulated_sum = 0; } else { CHECK(last_abs_y <= abs_y) << "CoxRegression: labels must be in sorted order, " << "MetaInfo::LabelArgsort failed!"; } if (y > 0) { r_k += 1.0/exp_p_sum; s_k += 1.0/(exp_p_sum*exp_p_sum); } const double grad = exp_p*r_k - static_cast<bst_float>(y > 0); const double hess = exp_p*r_k - exp_p*exp_p * s_k; gpair.at(ind) = GradientPair(grad * w, hess * w); last_abs_y = abs_y; last_exp_p = exp_p; } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { std::vector<bst_float> &preds = io_preds->HostVector(); const long ndata = static_cast<long>(preds.size()); // NOLINT(*) common::ParallelFor(ndata, ctx_->Threads(), [&](long j) { // NOLINT(*) preds[j] = std::exp(preds[j]); }); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return std::log(base_score); } const char* DefaultEvalMetric() const override { return "cox-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("survival:cox"); } void LoadConfig(Json const&) override {} }; // register the objective function XGBOOST_REGISTER_OBJECTIVE(CoxRegression, "survival:cox") .describe("Cox regression for censored survival data (negative labels are considered censored).") .set_body([]() { return new CoxRegression(); }); // gamma regression class GammaRegression : public ObjFunction { public: void Configure(Args const&) override {} ObjInfo Task() const override { return ObjInfo::kRegression; } void GetGradient(const HostDeviceVector<bst_float> &preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; const size_t ndata = preds.Size(); auto device = ctx_->gpu_id; out_gpair->Resize(ndata); label_correct_.Resize(1); label_correct_.Fill(1); const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } common::Transform<>::Init( [=] 
XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y <= 0.0f) { _label_correct[0] = 0; } _out_gpair[_idx] = GradientPair((1 - y / expf(p)) * w, y / expf(p) * w); }, common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval( &label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "GammaRegression: label must be positive."; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return std::log(base_score); } const char* DefaultEvalMetric() const override { return "gamma-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:gamma"); } void LoadConfig(Json const&) override {} private: HostDeviceVector<int> label_correct_; }; // register the objective functions XGBOOST_REGISTER_OBJECTIVE(GammaRegression, "reg:gamma") .describe("Gamma regression for severity data.") .set_body([]() { return new GammaRegression(); }); // declare parameter struct TweedieRegressionParam : public XGBoostParameter<TweedieRegressionParam> { float tweedie_variance_power; DMLC_DECLARE_PARAMETER(TweedieRegressionParam) { DMLC_DECLARE_FIELD(tweedie_variance_power).set_range(1.0f, 2.0f).set_default(1.5f) .describe("Tweedie variance power. Must be between in range [1, 2)."); } }; // tweedie regression class TweedieRegression : public ObjFunction { public: // declare functions void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); std::ostringstream os; os << "tweedie-nloglik@" << param_.tweedie_variance_power; metric_ = os.str(); } ObjInfo Task() const override { return ObjInfo::kRegression; } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; const size_t ndata = preds.Size(); out_gpair->Resize(ndata); auto device = ctx_->gpu_id; label_correct_.Resize(1); label_correct_.Fill(1); const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } const float rho = param_.tweedie_variance_power; common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 
1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y < 0.0f) { _label_correct[0] = 0; } bst_float grad = -y * expf((1 - rho) * p) + expf((2 - rho) * p); bst_float hess = -y * (1 - rho) * \ std::exp((1 - rho) * p) + (2 - rho) * expf((2 - rho) * p); _out_gpair[_idx] = GradientPair(grad * w, hess * w); }, common::Range{0, static_cast<int64_t>(ndata), 1}, this->ctx_->Threads(), device) .Eval(&label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "TweedieRegression: label must be nonnegative"; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return std::log(base_score); } const char* DefaultEvalMetric() const override { return metric_.c_str(); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:tweedie"); out["tweedie_regression_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["tweedie_regression_param"], &param_); } private: std::string metric_; TweedieRegressionParam param_; HostDeviceVector<int> label_correct_; }; // register the objective functions DMLC_REGISTER_PARAMETER(TweedieRegressionParam); XGBOOST_REGISTER_OBJECTIVE(TweedieRegression, "reg:tweedie") .describe("Tweedie regression for insurance data.") .set_body([]() { return new TweedieRegression(); }); class MeanAbsoluteError : public ObjFunction { public: void Configure(Args const&) override {} ObjInfo Task() const override { return {ObjInfo::kRegression, true, true}; } void GetGradient(HostDeviceVector<bst_float> const& preds, const MetaInfo& info, int iter, HostDeviceVector<GradientPair>* out_gpair) override { CheckRegInputs(info, preds); auto labels = info.labels.View(ctx_->gpu_id); out_gpair->SetDevice(ctx_->gpu_id); out_gpair->Resize(info.labels.Size()); auto gpair = linalg::MakeVec(out_gpair); preds.SetDevice(ctx_->gpu_id); auto predt = linalg::MakeVec(&preds); info.weights_.SetDevice(ctx_->gpu_id); common::OptionalWeights weight{ctx_->IsCPU() ? 
info.weights_.ConstHostSpan() : info.weights_.ConstDeviceSpan()}; linalg::ElementWiseKernel(ctx_, labels, [=] XGBOOST_DEVICE(size_t i, float const y) mutable { auto sign = [](auto x) { return (x > static_cast<decltype(x)>(0)) - (x < static_cast<decltype(x)>(0)); }; auto sample_id = std::get<0>(linalg::UnravelIndex(i, labels.Shape())); auto grad = sign(predt(i) - y) * weight[i]; auto hess = weight[sample_id]; gpair(i) = GradientPair{grad, hess}; }); } void UpdateTreeLeaf(HostDeviceVector<bst_node_t> const& position, MetaInfo const& info, HostDeviceVector<float> const& prediction, RegTree* p_tree) const override { if (ctx_->IsCPU()) { auto const& h_position = position.ConstHostVector(); detail::UpdateTreeLeafHost(ctx_, h_position, info, prediction, 0.5, p_tree); } else { #if defined(XGBOOST_USE_CUDA) position.SetDevice(ctx_->gpu_id); auto d_position = position.ConstDeviceSpan(); detail::UpdateTreeLeafDevice(ctx_, d_position, info, prediction, 0.5, p_tree); #else common::AssertGPUSupport(); #endif // defined(XGBOOST_USE_CUDA) } } const char* DefaultEvalMetric() const override { return "mae"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:absoluteerror"); } void LoadConfig(Json const& in) override {} }; XGBOOST_REGISTER_OBJECTIVE(MeanAbsoluteError, "reg:absoluteerror") .describe("Mean absoluate error.") .set_body([]() { return new MeanAbsoluteError(); }); } // namespace obj } // namespace xgboost
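For the logistic losses registered above, the per-element work inside RegLossObj's Transform lambda reduces to the familiar sigmoid gradient pair. The standalone CUDA sketch below illustrates that computation, including the scale_pos_weight up-weighting of positive labels; the struct, kernel name, and epsilon clamp are illustrative only, since the real implementation goes through common::Transform and the Loss policy classes.

// Standalone sketch of the per-element gradient/hessian for a logistic loss
// (names hypothetical; no XGBoost types).
#include <cuda_runtime.h>

struct GradPair { float grad; float hess; };

__global__ void logistic_grad_kernel(const float* preds, const float* labels,
                                     const float* weights,   // may be nullptr
                                     float scale_pos_weight,
                                     GradPair* out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    float p = 1.0f / (1.0f + expf(-preds[i]));        // prediction transform (sigmoid)
    float y = labels[i];
    float w = (weights != nullptr) ? weights[i] : 1.0f;
    if (y == 1.0f) w *= scale_pos_weight;             // same up-weighting as the lambda
    out[i].grad = (p - y) * w;                        // first-order gradient
    out[i].hess = fmaxf(p * (1.0f - p), 1e-16f) * w;  // second-order gradient, clamped away from 0
}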
59ceec477246b9987e7aad7a91f025d8ed0a8b76.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include "fbgemm_gpu/embedding_backward_template_helpers.cuh" using Tensor = at::Tensor; using namespace fbgemm_gpu; template <typename index_t> __device__ void adjust_offset_kernel( index_t& indices_start, index_t& indices_end, const index_t num_indices, index_t* const offset_acc_start, index_t* const offset_acc_end) { indices_start = ::max(static_cast<index_t>(0), ::min(indices_start, num_indices)); indices_end = ::max(indices_start, ::min(indices_end, num_indices)); *offset_acc_start = indices_start; *offset_acc_end = indices_end; } template <typename index_t, bool vbe> __global__ __launch_bounds__(kMaxThreads) void bounds_check_indices_kernel( const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> rows_per_table, at::PackedTensorAccessor32<index_t, 1, at::RestrictPtrTraits> indices, at::PackedTensorAccessor32<index_t, 1, at::RestrictPtrTraits> offsets, const int32_t* const vbe_metadata, const int64_t bounds_check_mode_, at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> warning, FixedDivisor fd) { int32_t T = rows_per_table.size(0); int32_t b_t = blockIdx.x * blockDim.y + threadIdx.y; int32_t b; int32_t t; int32_t B = 0; int32_t total_B = offsets.size(0) - 1; if (!vbe && b_t >= total_B) { return; } fd.DivMod(b_t, &t, &b); if (vbe) { // Check if t is valid if (t >= T) { return; } const auto B_start = vbe_metadata[t]; B = vbe_metadata[t + 1] - B_start; // Check if b is valid if (b >= B) { return; } // Update b_t value b_t = B_start + b; } else { B = total_B / T; } const auto bounds_check_mode = static_cast<BoundsCheckMode>(bounds_check_mode_); const auto num_rows = rows_per_table[t]; auto indices_start = offsets[b_t]; auto indices_end = offsets[b_t + 1]; const index_t num_indices = indices.size(0); if (bounds_check_mode == BoundsCheckMode::FATAL) { CUDA_KERNEL_ASSERT(indices_start >= 0); CUDA_KERNEL_ASSERT(indices_start <= indices_end); CUDA_KERNEL_ASSERT(indices_end <= num_indices); } else if (bounds_check_mode == BoundsCheckMode::WARNING) { if (indices_start < 0 || indices_start > indices_end || indices_end > num_indices) { if (gpuAtomicIncrement(&warning[0]) == 0) { printf( "EmbeddingBoundsCheck (VBE %s): (at least one) Out of bounds access for " "batch: %d, table: %d, indices_start: %lld, indices_end: %lld," " num_indices: %lld. Setting indices_start and indices_end within " "the range.\n", vbe ? "true" : "false", b, t, static_cast<int64_t>(indices_start), static_cast<int64_t>(indices_end), static_cast<int64_t>(num_indices)); } adjust_offset_kernel( indices_start, indices_end, num_indices, &offsets[b_t], &offsets[b_t + 1]); } } else if (bounds_check_mode == BoundsCheckMode::IGNORE) { adjust_offset_kernel( indices_start, indices_end, num_indices, &offsets[b_t], &offsets[b_t + 1]); } const auto L = indices_end - indices_start; for (index_t i = static_cast<index_t>(threadIdx.x); i < L; i += static_cast<index_t>(fbgemm_gpu::kWarpSize)) { const auto idx = indices[indices_start + i]; if (idx == -1) { // -1 indicates pruned rows. 
continue; } if (bounds_check_mode == BoundsCheckMode::FATAL) { CUDA_KERNEL_ASSERT(idx >= 0 && "Failed idx >= 0 in bounds_check_indices"); CUDA_KERNEL_ASSERT( idx < num_rows && "Failed idx < num_rows in bounds_check_indices"); } else if (bounds_check_mode == BoundsCheckMode::WARNING) { if (idx < 0 || idx >= num_rows) { if (gpuAtomicIncrement(&warning[0]) == 0) { printf( "EmbeddingBoundsCheck (VBE %s): (at least one) Out of bounds access for batch: %d, table: %d, bag element: %lld, idx: %lld, num_rows: %lld, indices_start: %lld, indices_end: %lld, T: %d, B: %d, b_t: %d. Setting idx to zero.\n", vbe ? "true" : "false", b, t, static_cast<int64_t>(i), static_cast<int64_t>(idx), num_rows, static_cast<int64_t>(indices_start), static_cast<int64_t>(indices_end), T, B, b_t); } indices[indices_start + i] = 0; } } else if (bounds_check_mode == BoundsCheckMode::IGNORE) { if (idx < 0 || idx >= num_rows) { indices[indices_start + i] = 0; } } } if (bounds_check_mode == BoundsCheckMode::FATAL) { CUDA_KERNEL_ASSERT(num_indices == offsets[total_B]); } else if (bounds_check_mode == BoundsCheckMode::WARNING) { if (num_indices != offsets[total_B]) { if (gpuAtomicIncrement(&warning[0]) == 0) { printf( "EmbeddingBoundsCheck (VBE %s): the last element in offsets is incorrect for " "total batch size %s: %d, total table num T: %d, " " last element in offsets: %lld, indices size: %lld. " " Setting the last element in offsets to be indices size.\n", vbe ? "true" : "false", vbe ? "total_B" : "B", vbe ? total_B : B, T, static_cast<int64_t>(offsets[total_B]), static_cast<int64_t>(num_indices)); } offsets[total_B] = num_indices; } } else if (bounds_check_mode == BoundsCheckMode::IGNORE) { if (num_indices != offsets[total_B]) { offsets[total_B] = num_indices; } } } void bounds_check_indices_cuda( Tensor& rows_per_table, Tensor& indices, Tensor& offsets, int64_t bounds_check_mode_, Tensor& warning, const c10::optional<Tensor>& weights, const c10::optional<Tensor>& vbe_metadata, const int64_t max_B) { TENSOR_ON_CUDA_GPU(rows_per_table); TENSOR_ON_CUDA_GPU(indices); TENSOR_ON_CUDA_GPU(offsets); TENSOR_ON_CUDA_GPU(warning); TENSOR_EMPTY_OR_ON_CUDA_GPU(weights); TENSOR_EMPTY_OR_ON_CUDA_GPU(vbe_metadata); at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(rows_per_table.get_device()); const int32_t T = rows_per_table.size(0); const int32_t total_B = offsets.size(0) - 1; const int32_t B = (total_B) / T; if (total_B == 0 || T == 0) { return; } const auto bounds_check_mode = static_cast<BoundsCheckMode>(bounds_check_mode_); if (bounds_check_mode == BoundsCheckMode::WARNING) { warning.zero_(); } const int64_t num_indices = indices.size(0); const auto vbe = vbe_metadata.has_value(); if (vbe) { TORCH_CHECK(max_B >= 0); } else { TORCH_CHECK( offsets.size(0) == B * T + 1, "offsets size " + std::to_string(offsets.size(0)) + " is not equal to B (" + std::to_string(B) + ") * T (" + std::to_string(T) + ") + 1"); } if (weights.has_value()) { TORCH_CHECK( weights.value().size(0) == num_indices, "weights size " + std::to_string(weights.value().size(0)) + " is not equal to indices size " + std::to_string(num_indices)); } constexpr size_t kNumThreads = 256; const auto max_B_ = vbe ? max_B : B; AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "bounds_check_indices", [&] { const auto bounds_check_kernel = (vbe ? 
bounds_check_indices_kernel<index_t, true> : bounds_check_indices_kernel<index_t, false>); hipLaunchKernelGGL(( bounds_check_kernel), dim3(div_round_up(max_B_ * T, kNumThreads / fbgemm_gpu::kWarpSize)), dim3(dim3(fbgemm_gpu::kWarpSize, kNumThreads / fbgemm_gpu::kWarpSize)), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), rows_per_table.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), indices.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), offsets.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), vbe ? vbe_metadata.value().data_ptr<int32_t>() : nullptr, bounds_check_mode_, warning.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), FixedDivisor(max_B_)); C10_HIP_KERNEL_LAUNCH_CHECK(); }); }
59ceec477246b9987e7aad7a91f025d8ed0a8b76.cu
/* * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include "fbgemm_gpu/embedding_backward_template_helpers.cuh" using Tensor = at::Tensor; using namespace fbgemm_gpu; template <typename index_t> __device__ void adjust_offset_kernel( index_t& indices_start, index_t& indices_end, const index_t num_indices, index_t* const offset_acc_start, index_t* const offset_acc_end) { indices_start = std::max(static_cast<index_t>(0), std::min(indices_start, num_indices)); indices_end = std::max(indices_start, std::min(indices_end, num_indices)); *offset_acc_start = indices_start; *offset_acc_end = indices_end; } template <typename index_t, bool vbe> __global__ __launch_bounds__(kMaxThreads) void bounds_check_indices_kernel( const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> rows_per_table, at::PackedTensorAccessor32<index_t, 1, at::RestrictPtrTraits> indices, at::PackedTensorAccessor32<index_t, 1, at::RestrictPtrTraits> offsets, const int32_t* const vbe_metadata, const int64_t bounds_check_mode_, at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> warning, FixedDivisor fd) { int32_t T = rows_per_table.size(0); int32_t b_t = blockIdx.x * blockDim.y + threadIdx.y; int32_t b; int32_t t; int32_t B = 0; int32_t total_B = offsets.size(0) - 1; if (!vbe && b_t >= total_B) { return; } fd.DivMod(b_t, &t, &b); if (vbe) { // Check if t is valid if (t >= T) { return; } const auto B_start = vbe_metadata[t]; B = vbe_metadata[t + 1] - B_start; // Check if b is valid if (b >= B) { return; } // Update b_t value b_t = B_start + b; } else { B = total_B / T; } const auto bounds_check_mode = static_cast<BoundsCheckMode>(bounds_check_mode_); const auto num_rows = rows_per_table[t]; auto indices_start = offsets[b_t]; auto indices_end = offsets[b_t + 1]; const index_t num_indices = indices.size(0); if (bounds_check_mode == BoundsCheckMode::FATAL) { CUDA_KERNEL_ASSERT(indices_start >= 0); CUDA_KERNEL_ASSERT(indices_start <= indices_end); CUDA_KERNEL_ASSERT(indices_end <= num_indices); } else if (bounds_check_mode == BoundsCheckMode::WARNING) { if (indices_start < 0 || indices_start > indices_end || indices_end > num_indices) { if (gpuAtomicIncrement(&warning[0]) == 0) { printf( "EmbeddingBoundsCheck (VBE %s): (at least one) Out of bounds access for " "batch: %d, table: %d, indices_start: %lld, indices_end: %lld," " num_indices: %lld. Setting indices_start and indices_end within " "the range.\n", vbe ? "true" : "false", b, t, static_cast<int64_t>(indices_start), static_cast<int64_t>(indices_end), static_cast<int64_t>(num_indices)); } adjust_offset_kernel( indices_start, indices_end, num_indices, &offsets[b_t], &offsets[b_t + 1]); } } else if (bounds_check_mode == BoundsCheckMode::IGNORE) { adjust_offset_kernel( indices_start, indices_end, num_indices, &offsets[b_t], &offsets[b_t + 1]); } const auto L = indices_end - indices_start; for (index_t i = static_cast<index_t>(threadIdx.x); i < L; i += static_cast<index_t>(fbgemm_gpu::kWarpSize)) { const auto idx = indices[indices_start + i]; if (idx == -1) { // -1 indicates pruned rows. 
continue; } if (bounds_check_mode == BoundsCheckMode::FATAL) { CUDA_KERNEL_ASSERT(idx >= 0 && "Failed idx >= 0 in bounds_check_indices"); CUDA_KERNEL_ASSERT( idx < num_rows && "Failed idx < num_rows in bounds_check_indices"); } else if (bounds_check_mode == BoundsCheckMode::WARNING) { if (idx < 0 || idx >= num_rows) { if (gpuAtomicIncrement(&warning[0]) == 0) { printf( "EmbeddingBoundsCheck (VBE %s): (at least one) Out of bounds access for batch: %d, table: %d, bag element: %lld, idx: %lld, num_rows: %lld, indices_start: %lld, indices_end: %lld, T: %d, B: %d, b_t: %d. Setting idx to zero.\n", vbe ? "true" : "false", b, t, static_cast<int64_t>(i), static_cast<int64_t>(idx), num_rows, static_cast<int64_t>(indices_start), static_cast<int64_t>(indices_end), T, B, b_t); } indices[indices_start + i] = 0; } } else if (bounds_check_mode == BoundsCheckMode::IGNORE) { if (idx < 0 || idx >= num_rows) { indices[indices_start + i] = 0; } } } if (bounds_check_mode == BoundsCheckMode::FATAL) { CUDA_KERNEL_ASSERT(num_indices == offsets[total_B]); } else if (bounds_check_mode == BoundsCheckMode::WARNING) { if (num_indices != offsets[total_B]) { if (gpuAtomicIncrement(&warning[0]) == 0) { printf( "EmbeddingBoundsCheck (VBE %s): the last element in offsets is incorrect for " "total batch size %s: %d, total table num T: %d, " " last element in offsets: %lld, indices size: %lld. " " Setting the last element in offsets to be indices size.\n", vbe ? "true" : "false", vbe ? "total_B" : "B", vbe ? total_B : B, T, static_cast<int64_t>(offsets[total_B]), static_cast<int64_t>(num_indices)); } offsets[total_B] = num_indices; } } else if (bounds_check_mode == BoundsCheckMode::IGNORE) { if (num_indices != offsets[total_B]) { offsets[total_B] = num_indices; } } } void bounds_check_indices_cuda( Tensor& rows_per_table, Tensor& indices, Tensor& offsets, int64_t bounds_check_mode_, Tensor& warning, const c10::optional<Tensor>& weights, const c10::optional<Tensor>& vbe_metadata, const int64_t max_B) { TENSOR_ON_CUDA_GPU(rows_per_table); TENSOR_ON_CUDA_GPU(indices); TENSOR_ON_CUDA_GPU(offsets); TENSOR_ON_CUDA_GPU(warning); TENSOR_EMPTY_OR_ON_CUDA_GPU(weights); TENSOR_EMPTY_OR_ON_CUDA_GPU(vbe_metadata); at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(rows_per_table.get_device()); const int32_t T = rows_per_table.size(0); const int32_t total_B = offsets.size(0) - 1; const int32_t B = (total_B) / T; if (total_B == 0 || T == 0) { return; } const auto bounds_check_mode = static_cast<BoundsCheckMode>(bounds_check_mode_); if (bounds_check_mode == BoundsCheckMode::WARNING) { warning.zero_(); } const int64_t num_indices = indices.size(0); const auto vbe = vbe_metadata.has_value(); if (vbe) { TORCH_CHECK(max_B >= 0); } else { TORCH_CHECK( offsets.size(0) == B * T + 1, "offsets size " + std::to_string(offsets.size(0)) + " is not equal to B (" + std::to_string(B) + ") * T (" + std::to_string(T) + ") + 1"); } if (weights.has_value()) { TORCH_CHECK( weights.value().size(0) == num_indices, "weights size " + std::to_string(weights.value().size(0)) + " is not equal to indices size " + std::to_string(num_indices)); } constexpr size_t kNumThreads = 256; const auto max_B_ = vbe ? max_B : B; AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "bounds_check_indices", [&] { const auto bounds_check_kernel = (vbe ? 
bounds_check_indices_kernel<index_t, true> : bounds_check_indices_kernel<index_t, false>); bounds_check_kernel<<< div_round_up(max_B_ * T, kNumThreads / fbgemm_gpu::kWarpSize), dim3(fbgemm_gpu::kWarpSize, kNumThreads / fbgemm_gpu::kWarpSize), 0, at::cuda::getCurrentCUDAStream()>>>( rows_per_table.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), indices.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), offsets.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), vbe ? vbe_metadata.value().data_ptr<int32_t>() : nullptr, bounds_check_mode_, warning.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), FixedDivisor(max_B_)); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); }
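A hypothetical caller for bounds_check_indices_cuda, sketched only to show the tensor shapes the non-VBE path expects: one table of num_rows rows, B bags of fixed length L, and an offsets tensor of B*T + 1 entries. The helper name and the numeric value used for WARNING mode are assumptions, not taken from fbgemm_gpu headers.

#include <ATen/ATen.h>

// Illustrative only; bounds_check_mode_ = 1 is assumed to map to BoundsCheckMode::WARNING.
void check_single_table_example(int64_t num_rows, int64_t B, int64_t L) {
  auto long_opts = at::TensorOptions().dtype(at::kLong).device(at::kCUDA);
  auto rows_per_table = at::full({1}, num_rows, long_opts);       // T = 1
  auto indices = at::randint(num_rows, {B * L}, long_opts);       // flattened bags
  auto offsets = at::arange(0, B * L + 1, L, long_opts);          // B*T + 1 entries
  auto warning = at::zeros({1}, long_opts);
  bounds_check_indices_cuda(rows_per_table, indices, offsets,
                            /*bounds_check_mode_=*/1, warning,
                            /*weights=*/c10::nullopt,
                            /*vbe_metadata=*/c10::nullopt,
                            /*max_B=*/-1);                        // max_B only checked for VBE
}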
640e297b6b3d63fa17991b130a76ebe644ff002d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

__global__ void subdiagMult(float *d_Subdiag, float *d_Data, float *d_Output,
                            int shiftSubdiag, int shiftV, int N, int subdiagOffset) {
    const int pos = blockIdx.x * blockDim.x + threadIdx.x;
    float *d_Src = d_Data + blockIdx.y * N;
    float *d_Dst = d_Output + blockIdx.y * N;
    float *d_Sub = d_Subdiag + blockIdx.y * subdiagOffset;
    // for (int pos = tid; pos < N; pos += numThreads) {
    if (pos < N) {
        d_Dst[pos] = d_Sub[(pos + shiftSubdiag + N) % N] * d_Src[(pos + shiftV + N) % N];
    }
}

void subdiagMultGPU(float *d_Subdiag, float *d_Data, float *d_Output,
                    int shiftSubdiag, int shiftV, int batchSize, int N, bool batchedSubdiag) {
    const int THREAD_N = 256;
    dim3 grid((N + THREAD_N - 1) / THREAD_N, batchSize);
    int subdiagOffset = batchedSubdiag ? N : 0;
    hipLaunchKernelGGL((subdiagMult), dim3(grid), dim3(THREAD_N), 0, 0,
                       d_Subdiag, d_Data, d_Output, shiftSubdiag, shiftV, N, subdiagOffset);
}
640e297b6b3d63fa17991b130a76ebe644ff002d.cu
__global__ void subdiagMult(float *d_Subdiag, float *d_Data, float *d_Output,
                            int shiftSubdiag, int shiftV, int N, int subdiagOffset) {
    const int pos = blockIdx.x * blockDim.x + threadIdx.x;
    float *d_Src = d_Data + blockIdx.y * N;
    float *d_Dst = d_Output + blockIdx.y * N;
    float *d_Sub = d_Subdiag + blockIdx.y * subdiagOffset;
    // for (int pos = tid; pos < N; pos += numThreads) {
    if (pos < N) {
        d_Dst[pos] = d_Sub[(pos + shiftSubdiag + N) % N] * d_Src[(pos + shiftV + N) % N];
    }
}

void subdiagMultGPU(float *d_Subdiag, float *d_Data, float *d_Output,
                    int shiftSubdiag, int shiftV, int batchSize, int N, bool batchedSubdiag) {
    const int THREAD_N = 256;
    dim3 grid((N + THREAD_N - 1) / THREAD_N, batchSize);
    int subdiagOffset = batchedSubdiag ? N : 0;
    subdiagMult<<<grid, THREAD_N>>>(d_Subdiag, d_Data, d_Output,
                                    shiftSubdiag, shiftV, N, subdiagOffset);
}
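A minimal host-side driver for the subdiagMultGPU entry point above (CUDA flavor). It is not part of the original file pair; the sizes and values below are illustrative. Each of batchSize length-N rows is multiplied elementwise by one shared subdiagonal (batchedSubdiag = false) with both shifts at zero.

#include <cuda_runtime.h>
#include <vector>

void subdiagMultGPU(float *d_Subdiag, float *d_Data, float *d_Output,
                    int shiftSubdiag, int shiftV, int batchSize, int N, bool batchedSubdiag);

int main() {
    const int N = 1024, batchSize = 4;
    std::vector<float> subdiag(N, 2.0f), data(batchSize * N, 1.0f), out(batchSize * N);

    float *d_sub, *d_data, *d_out;
    cudaMalloc(&d_sub, N * sizeof(float));
    cudaMalloc(&d_data, batchSize * N * sizeof(float));
    cudaMalloc(&d_out, batchSize * N * sizeof(float));
    cudaMemcpy(d_sub, subdiag.data(), N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_data, data.data(), batchSize * N * sizeof(float), cudaMemcpyHostToDevice);

    // batchedSubdiag = false: every batch row reuses the same subdiagonal (subdiagOffset = 0).
    subdiagMultGPU(d_sub, d_data, d_out, 0, 0, batchSize, N, false);

    cudaMemcpy(out.data(), d_out, batchSize * N * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_sub); cudaFree(d_data); cudaFree(d_out);
    return 0;
}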
77ed71010f7d03610e38d9f1050b1ad77cd942af.hip
// !!! This is a file automatically generated by hipify!!! /** * @file pctdemo_life_cuda_texture.cu * @brief Example of implementing a stencil operation on the GPU using texture memory. * * This file contains both host and device code. The MEX function that calls this * code is in pctdemo_life_mex_texture.cu. * Three features of this file enable the use of texture memory in the device function. * (1) The texture reference is declared at the top of the MEX-file. * (2) The CUDA device function fetches the input from the texture reference. * (3) The MEX-file binds the texture reference to the input buffer. * * Copyright 2013 The MathWorks, Inc. */ #include <algorithm> #include <hip/hip_runtime_api.h> #include "pctdemo_life_texture.hpp" /** * (1) Declare the texture reference. * There is no texture reference to bool: we use uint8_T instead. */ texture<uint8_T, hipTextureType1D> texRef; /** * Calculate the proper value of the element at <row,col> on the * next board given the present board state. */ __device__ uint8_T gameOfLifeCalculation(unsigned int const row, unsigned int const col, unsigned int const N, size_t const offset) { // Work out the linear row and column indices for the previous/next // rows/columns, repeating the edge value at the boundary. unsigned int const top = (row > 0) ? row-1 : 0; unsigned int const middle = (row < N) ? row : N-1; unsigned int const bottom = (row < (N-1)) ? row+1 : N-1; unsigned int const left = (col > 0) ? (col-1)*N : 0; unsigned int const centre = (col < N) ? col*N : (N-1)*N; unsigned int const right = (col < (N-1)) ? (col+1)*N : (N-1)*N; // Work out if this cell should be alive // (2) Fetch the input from the texture reference. bool const alive = static_cast<bool>(tex1Dfetch(texRef, offset + middle + centre)); unsigned int const liveNeighbours = tex1Dfetch(texRef, offset + top + left) + tex1Dfetch(texRef, offset + middle + left) + tex1Dfetch(texRef, offset + bottom + left) + tex1Dfetch(texRef, offset + top + centre) + tex1Dfetch(texRef, offset + bottom + centre) + tex1Dfetch(texRef, offset + top + right) + tex1Dfetch(texRef, offset + middle + right) + tex1Dfetch(texRef, offset + bottom + right); // Finally, set the element of "newboard". return static_cast<uint8_T>((alive && (liveNeighbours == 2)) || (liveNeighbours == 3)); } /** * One generation of the game of life. Calculates "newboard" from "board". * Both arrays are of assumed to be size NxN. */ __global__ void life(uint8_T * const newboard, uint8_T const * const board, unsigned int const N, size_t const offset) { // Coordinates for this thread within the board unsigned int const row = blockIdx.x * blockDim.x + threadIdx.x; unsigned int const col = blockIdx.y * blockDim.y + threadIdx.y; // Only threads inside the grid need to compute a result if ((row < N) && (col < N)) { // Game of life stencil computation newboard[row+col*N] = gameOfLifeCalculation(row, col, N, static_cast<uint32_T>(offset)); } } /** * Host function called by MEX gateway. Sets up and calls the device function * for each generation. */ bool playGameOfLife(bool const * const pInitialBoard, uint8_T * const d_board1, uint8_T * const d_board2, int const boardDim, size_t const numGenerations) { // Choose a reasonably sized number of threads in each dimension for the block. int const threadsPerBlockEachDim = 16; // Copy the initial values from the host to the first workspace gpuArray. 
int const boardBytes = boardDim*boardDim*sizeof(uint8_T); hipMemcpy(d_board1, pInitialBoard, boardBytes, hipMemcpyHostToDevice); // Compute the thread block and grid sizes based on the board dimensions. int const blocksPerGridEachDim = (boardDim + threadsPerBlockEachDim - 1) / threadsPerBlockEachDim; dim3 const dimBlock(blocksPerGridEachDim, blocksPerGridEachDim); dim3 const dimThread(threadsPerBlockEachDim, threadsPerBlockEachDim); // In each iteration, we treat one workspace as the input and one as the output. uint8_T * d_bufferIn = d_board1; uint8_T * d_bufferOut = d_board2; // If numGenerations = 0, the output is the initial value. bool boardOneIsCurrentOutput = true; for (size_t ix = 0; ix < numGenerations; ++ix) { // (3) Bind the texture reference to the input workspace. size_t offset; hipBindTexture(&offset, texRef, d_bufferIn, boardBytes); // Play one generation of the game. hipLaunchKernelGGL(( life), dim3(dimBlock), dim3(dimThread), 0, 0, d_bufferOut, d_bufferIn, boardDim, offset); // Swap the input and output workspace pointers for the next generation. std::swap(d_bufferOut, d_bufferIn); // Keep track of the current output workspace. boardOneIsCurrentOutput = !boardOneIsCurrentOutput; // Undo the current texture binding so we leave things in a good state // for the next loop iteration or upon exiting. hipUnbindTexture(texRef); } return boardOneIsCurrentOutput; }
77ed71010f7d03610e38d9f1050b1ad77cd942af.cu
/** * @file pctdemo_life_cuda_texture.cu * @brief Example of implementing a stencil operation on the GPU using texture memory. * * This file contains both host and device code. The MEX function that calls this * code is in pctdemo_life_mex_texture.cu. * Three features of this file enable the use of texture memory in the device function. * (1) The texture reference is declared at the top of the MEX-file. * (2) The CUDA device function fetches the input from the texture reference. * (3) The MEX-file binds the texture reference to the input buffer. * * Copyright 2013 The MathWorks, Inc. */ #include <algorithm> #include <cuda_runtime_api.h> #include "pctdemo_life_texture.hpp" /** * (1) Declare the texture reference. * There is no texture reference to bool: we use uint8_T instead. */ texture<uint8_T, cudaTextureType1D> texRef; /** * Calculate the proper value of the element at <row,col> on the * next board given the present board state. */ __device__ uint8_T gameOfLifeCalculation(unsigned int const row, unsigned int const col, unsigned int const N, size_t const offset) { // Work out the linear row and column indices for the previous/next // rows/columns, repeating the edge value at the boundary. unsigned int const top = (row > 0) ? row-1 : 0; unsigned int const middle = (row < N) ? row : N-1; unsigned int const bottom = (row < (N-1)) ? row+1 : N-1; unsigned int const left = (col > 0) ? (col-1)*N : 0; unsigned int const centre = (col < N) ? col*N : (N-1)*N; unsigned int const right = (col < (N-1)) ? (col+1)*N : (N-1)*N; // Work out if this cell should be alive // (2) Fetch the input from the texture reference. bool const alive = static_cast<bool>(tex1Dfetch(texRef, offset + middle + centre)); unsigned int const liveNeighbours = tex1Dfetch(texRef, offset + top + left) + tex1Dfetch(texRef, offset + middle + left) + tex1Dfetch(texRef, offset + bottom + left) + tex1Dfetch(texRef, offset + top + centre) + tex1Dfetch(texRef, offset + bottom + centre) + tex1Dfetch(texRef, offset + top + right) + tex1Dfetch(texRef, offset + middle + right) + tex1Dfetch(texRef, offset + bottom + right); // Finally, set the element of "newboard". return static_cast<uint8_T>((alive && (liveNeighbours == 2)) || (liveNeighbours == 3)); } /** * One generation of the game of life. Calculates "newboard" from "board". * Both arrays are of assumed to be size NxN. */ __global__ void life(uint8_T * const newboard, uint8_T const * const board, unsigned int const N, size_t const offset) { // Coordinates for this thread within the board unsigned int const row = blockIdx.x * blockDim.x + threadIdx.x; unsigned int const col = blockIdx.y * blockDim.y + threadIdx.y; // Only threads inside the grid need to compute a result if ((row < N) && (col < N)) { // Game of life stencil computation newboard[row+col*N] = gameOfLifeCalculation(row, col, N, static_cast<uint32_T>(offset)); } } /** * Host function called by MEX gateway. Sets up and calls the device function * for each generation. */ bool playGameOfLife(bool const * const pInitialBoard, uint8_T * const d_board1, uint8_T * const d_board2, int const boardDim, size_t const numGenerations) { // Choose a reasonably sized number of threads in each dimension for the block. int const threadsPerBlockEachDim = 16; // Copy the initial values from the host to the first workspace gpuArray. int const boardBytes = boardDim*boardDim*sizeof(uint8_T); cudaMemcpy(d_board1, pInitialBoard, boardBytes, cudaMemcpyHostToDevice); // Compute the thread block and grid sizes based on the board dimensions. 
int const blocksPerGridEachDim = (boardDim + threadsPerBlockEachDim - 1) / threadsPerBlockEachDim; dim3 const dimBlock(blocksPerGridEachDim, blocksPerGridEachDim); dim3 const dimThread(threadsPerBlockEachDim, threadsPerBlockEachDim); // In each iteration, we treat one workspace as the input and one as the output. uint8_T * d_bufferIn = d_board1; uint8_T * d_bufferOut = d_board2; // If numGenerations = 0, the output is the initial value. bool boardOneIsCurrentOutput = true; for (size_t ix = 0; ix < numGenerations; ++ix) { // (3) Bind the texture reference to the input workspace. size_t offset; cudaBindTexture(&offset, texRef, d_bufferIn, boardBytes); // Play one generation of the game. life<<<dimBlock, dimThread>>>(d_bufferOut, d_bufferIn, boardDim, offset); // Swap the input and output workspace pointers for the next generation. std::swap(d_bufferOut, d_bufferIn); // Keep track of the current output workspace. boardOneIsCurrentOutput = !boardOneIsCurrentOutput; // Undo the current texture binding so we leave things in a good state // for the next loop iteration or upon exiting. cudaUnbindTexture(texRef); } return boardOneIsCurrentOutput; }
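Both versions above go through a module-scope texture reference, which newer CUDA toolkits no longer support. A minimal sketch of the texture-object equivalent for the same linear board buffer; the helper name is invented here and unsigned char stands in for uint8_T.

#include <cuda_runtime.h>

// Illustrative only: wrap a linear device buffer in a texture object.
cudaTextureObject_t makeBoardTexture(unsigned char *d_buffer, size_t numBytes) {
    cudaResourceDesc resDesc = {};
    resDesc.resType = cudaResourceTypeLinear;
    resDesc.res.linear.devPtr = d_buffer;
    resDesc.res.linear.desc = cudaCreateChannelDesc<unsigned char>();
    resDesc.res.linear.sizeInBytes = numBytes;

    cudaTextureDesc texDesc = {};
    texDesc.readMode = cudaReadModeElementType;

    cudaTextureObject_t texObj = 0;
    cudaCreateTextureObject(&texObj, &resDesc, &texDesc, nullptr);
    return texObj;
}
// A kernel would then take the cudaTextureObject_t as an argument and read with
// tex1Dfetch<unsigned char>(texObj, index), which also removes the per-generation
// bind/unbind and offset bookkeeping used above.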
ac2065efcf37a4842adb2d242803a7b353057101.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//******************************************************************************************************************//
// Copyright (c) 2021, University of North Carolina at Charlotte
// and Lawrence Livermore National Security, LLC.
// SPDX-License-Identifier: (BSD-3-Clause)
//*****************************************************************************************************************//
#include "axpy.h"
#include"cuda_runtime.h"
#include"device_launch_parameters.h"
#include <stdio.h>

texture<float, 1, hipReadModeElementType> rT1;

__global__ void axpy_cudakernel_warmingup(REAL* x, REAL* y, int n, REAL a)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) y[i] += a*x[i];
}

__global__ void axpy_cudakernel_1perThread_texture(REAL* y, int n, REAL a)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) y[i] += a * tex1Dfetch(rT1, i);
}

__global__ void axpy_cudakernel_1perThread(REAL* x, REAL* y, int n, REAL a)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) y[i] += a*x[i];
}

void axpy_cuda(REAL* x, REAL* y, int n, REAL a) {
    REAL *d_x, *d_y;
    hipMalloc(&d_x, n*sizeof(REAL));
    hipMalloc(&d_y, n*sizeof(REAL));

    hipMemcpy(d_x, x, n*sizeof(REAL), hipMemcpyHostToDevice);
    hipMemcpy(d_y, y, n*sizeof(REAL), hipMemcpyHostToDevice);

    hipBindTexture(0, rT1, d_x);

    // Perform axpy elements
    hipLaunchKernelGGL(( axpy_cudakernel_warmingup), dim3((n+255)/256), dim3(256), 0, 0, d_x, d_y, n, a);
    hipDeviceSynchronize();
    hipLaunchKernelGGL(( axpy_cudakernel_1perThread_texture), dim3((n+255)/256), dim3(256), 0, 0, d_y, n, a);
    hipDeviceSynchronize();
    hipLaunchKernelGGL(( axpy_cudakernel_1perThread), dim3((n+255)/256), dim3(256), 0, 0, d_x, d_y, n, a);
    hipDeviceSynchronize();

    hipMemcpy(y, d_y, n*sizeof(REAL), hipMemcpyDeviceToHost);

    hipUnbindTexture(rT1);
    hipFree(d_x);
    hipFree(d_y);
}
ac2065efcf37a4842adb2d242803a7b353057101.cu
//******************************************************************************************************************//
// Copyright (c) 2021, University of North Carolina at Charlotte
// and Lawrence Livermore National Security, LLC.
// SPDX-License-Identifier: (BSD-3-Clause)
//*****************************************************************************************************************//
#include "axpy.h"
#include"cuda_runtime.h"
#include"device_launch_parameters.h"
#include <stdio.h>

texture<float, 1, cudaReadModeElementType> rT1;

__global__ void axpy_cudakernel_warmingup(REAL* x, REAL* y, int n, REAL a)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) y[i] += a*x[i];
}

__global__ void axpy_cudakernel_1perThread_texture(REAL* y, int n, REAL a)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) y[i] += a * tex1Dfetch(rT1, i);
}

__global__ void axpy_cudakernel_1perThread(REAL* x, REAL* y, int n, REAL a)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) y[i] += a*x[i];
}

void axpy_cuda(REAL* x, REAL* y, int n, REAL a) {
    REAL *d_x, *d_y;
    cudaMalloc(&d_x, n*sizeof(REAL));
    cudaMalloc(&d_y, n*sizeof(REAL));

    cudaMemcpy(d_x, x, n*sizeof(REAL), cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, n*sizeof(REAL), cudaMemcpyHostToDevice);

    cudaBindTexture(0, rT1, d_x);

    // Perform axpy elements
    axpy_cudakernel_warmingup<<<(n+255)/256, 256>>>(d_x, d_y, n, a);
    cudaDeviceSynchronize();
    axpy_cudakernel_1perThread_texture<<<(n+255)/256, 256>>>(d_y, n, a);
    cudaDeviceSynchronize();
    axpy_cudakernel_1perThread<<<(n+255)/256, 256>>>(d_x, d_y, n, a);
    cudaDeviceSynchronize();

    cudaMemcpy(y, d_y, n*sizeof(REAL), cudaMemcpyDeviceToHost);

    cudaUnbindTexture(rT1);
    cudaFree(d_x);
    cudaFree(d_y);
}
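A hypothetical verification driver for axpy_cuda, not part of the original pair. It assumes axpy.h defines REAL as float. Note that axpy_cuda launches three kernels that each add a*x into y (warm-up, texture, and plain), so the CPU reference applies the update three times.

#include <cstdio>
#include <cstdlib>
#include <cmath>

typedef float REAL;                       // assumption: matches the definition in axpy.h
void axpy_cuda(REAL* x, REAL* y, int n, REAL a);

int main() {
    const int n = 1 << 20;
    const REAL a = 2.5f;
    REAL *x = (REAL*)malloc(n * sizeof(REAL));
    REAL *y = (REAL*)malloc(n * sizeof(REAL));
    REAL *ref = (REAL*)malloc(n * sizeof(REAL));
    for (int i = 0; i < n; ++i) { x[i] = 1.0f; y[i] = ref[i] = 0.5f; }

    axpy_cuda(x, y, n, a);

    // y was incremented by a*x three times on the device; mirror that on the host.
    double maxErr = 0.0;
    for (int i = 0; i < n; ++i) {
        ref[i] += 3 * a * x[i];
        maxErr = fmax(maxErr, fabs((double)ref[i] - (double)y[i]));
    }
    printf("max abs error: %g\n", maxErr);
    free(x); free(y); free(ref);
    return 0;
}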
21373d1b2c123e90a26b79f424524471fefa6e7c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Created by wei on 17-10-26. // #include "primal_dual.h" #include <device_launch_parameters.h> #include <glog/logging.h> #include "geometry/primal_dual_query.h" __device__ inline float Huber(float x, float alpha) { return (fabsf(x) < alpha) ? 0.5f * (x*x)/alpha : (fabsf(x) - 0.5f*alpha); } __global__ void PrimalDualInitKernel( EntryArray candidate_entries, BlockArray blocks, HashTable hash_table, GeometryHelper geometry_helper ) { const HashEntry& entry = candidate_entries[blockIdx.x]; Block& block = blocks[entry.ptr]; PrimalDualVariables& primal_dual_variables = block.primal_dual_variables[threadIdx.x]; Voxel& voxel = block.voxels[threadIdx.x]; if (voxel.inv_sigma2 < EPSILON) return; int3 voxel_base_pos = geometry_helper.BlockToVoxel(entry.pos); uint3 offset = geometry_helper.DevectorizeIndex(threadIdx.x); int3 voxel_pos = voxel_base_pos + make_int3(offset); float3 gradient; GetInitSDFGradient(entry, voxel_pos, blocks, hash_table, geometry_helper, &gradient); // primal primal_dual_variables.Clear(); primal_dual_variables.inv_sigma2 = expf(length(gradient) / geometry_helper.voxel_size); primal_dual_variables.sdf0 = voxel.sdf; primal_dual_variables.mask = true; } __global__ /** * Primal dual: dual step * @param candidate_entries * @param blocks * @param hash_table * @param geometry_helper * @param lambda * @param sigma * @param tau */ void PrimalDualIteratePass1Kernel( EntryArray candidate_entries, BlockArray blocks, HashTable hash_table, GeometryHelper geometry_helper, float lambda, float sigma, float tau, float* err_data, float* err_tv ) { const float alpha = 0.02; const HashEntry &entry = candidate_entries[blockIdx.x]; Block& block = blocks[entry.ptr]; Voxel &voxel = block.voxels[threadIdx.x]; PrimalDualVariables &primal_dual_variable = block.primal_dual_variables[threadIdx.x]; if (voxel.inv_sigma2 < EPSILON) return; int3 voxel_base_pos = geometry_helper.BlockToVoxel(entry.pos); uint3 offset = geometry_helper.DevectorizeIndex(threadIdx.x); int3 voxel_pos = voxel_base_pos + make_int3(offset); // Compute error float data_diff = fabsf(voxel.sdf - primal_dual_variable.sdf0); data_diff *= data_diff; if (voxel.inv_sigma2 > EPSILON) { atomicAdd(err_data, data_diff); } float3 gradient; GetSDFGradient(entry, voxel_pos, blocks, hash_table, geometry_helper, &gradient); atomicAdd(err_tv, Huber(length(gradient), alpha)); // Dual step // p_{n+1} = prox_F* (p_{n} + \sigma \nabla x_bar{n}) // prox_F* (y) = \delta (y) (projection function) float3 primal_gradient; GetPrimalGradient( entry, voxel_pos, blocks, hash_table, geometry_helper, &primal_gradient ); //float tv_diff = primal_dual_variable.p = primal_dual_variable.p + sigma * primal_gradient; // huber primal_dual_variable.p /= (1 + sigma * alpha); primal_dual_variable.p /= fmaxf(1, length(primal_dual_variable.p)); } __global__ void PrimalDualIteratePass2Kernel( EntryArray candidate_entries, BlockArray blocks, HashTable hash_table, GeometryHelper geometry_helper, float lambda, float sigma, float tau ) { const HashEntry &entry = candidate_entries[blockIdx.x]; Block& block = blocks[entry.ptr]; Voxel &voxel = block.voxels[threadIdx.x]; PrimalDualVariables& primal_dual_variables = block.primal_dual_variables[threadIdx.x]; if (voxel.inv_sigma2 < EPSILON) return; int3 voxel_base_pos = geometry_helper.BlockToVoxel(entry.pos); uint3 offset = geometry_helper.DevectorizeIndex(threadIdx.x); int3 voxel_pos = voxel_base_pos + make_int3(offset); float voxel_sdf_prev = 
voxel.sdf; // Primal step // x_{n+1} = prox_G (x_{n} - \tau -Div p_{n+1}) // prox_G = (1 + \lambda y) / (1 + \lambda) float dual_divergence = 0; GetDualDivergence( entry, voxel_pos, blocks, hash_table, geometry_helper, &dual_divergence ); lambda *= primal_dual_variables.inv_sigma2; voxel.sdf = voxel.sdf - tau * dual_divergence; voxel.sdf = (voxel.sdf + lambda * tau * primal_dual_variables.sdf0) / (1 + lambda * tau); if (primal_dual_variables.inv_sigma2 > 2.0f) voxel.sdf = primal_dual_variables.sdf0; // Extrapolation primal_dual_variables.sdf_bar = 2 * voxel.sdf - voxel_sdf_prev; } void PrimalDualInit( EntryArray& candidate_entries, BlockArray& blocks, HashTable& hash_table, GeometryHelper& geometry_helper ) { const uint threads_per_block = BLOCK_SIZE; uint candidate_entry_count = candidate_entries.count(); if (candidate_entry_count <= 0) return; const dim3 grid_size(candidate_entry_count, 1); const dim3 block_size(threads_per_block, 1); hipLaunchKernelGGL(( PrimalDualInitKernel), dim3(grid_size), dim3(block_size), 0, 0, candidate_entries, blocks, hash_table, geometry_helper); checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipGetLastError()); } void PrimalDualIterate( EntryArray& candidate_entries, BlockArray& blocks, HashTable& hash_table, GeometryHelper& geometry_helper, const float lambda, const float sigma, const float tau ) { const uint threads_per_block = BLOCK_SIZE; uint candidate_entry_count = candidate_entries.count(); if (candidate_entry_count <= 0) return; const dim3 grid_size(candidate_entry_count, 1); const dim3 block_size(threads_per_block, 1); float* err_data, *err_tv; checkCudaErrors(hipMalloc(&err_data, sizeof(float))); checkCudaErrors(hipMemset(err_data, 0, sizeof(float))); checkCudaErrors(hipMalloc(&err_tv, sizeof(float))); checkCudaErrors(hipMemset(err_tv, 0, sizeof(float))); hipLaunchKernelGGL(( PrimalDualIteratePass1Kernel) , dim3(grid_size), dim3(block_size), 0, 0, candidate_entries, blocks, hash_table, geometry_helper, lambda, sigma, tau, err_data, err_tv); checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipGetLastError()); float err_data_cpu, err_tv_cpu; checkCudaErrors(hipMemcpy(&err_data_cpu, err_data, sizeof(float), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(&err_tv_cpu, err_tv, sizeof(float), hipMemcpyDeviceToHost)); std::cout << err_data_cpu * lambda / 2 + err_tv_cpu << " " << err_data_cpu << " " << err_tv_cpu << std::endl; hipLaunchKernelGGL(( PrimalDualIteratePass2Kernel) , dim3(grid_size), dim3(block_size), 0, 0, candidate_entries, blocks, hash_table, geometry_helper, lambda, sigma, tau); checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipGetLastError()); }
21373d1b2c123e90a26b79f424524471fefa6e7c.cu
// // Created by wei on 17-10-26. // #include "primal_dual.h" #include <device_launch_parameters.h> #include <glog/logging.h> #include "geometry/primal_dual_query.h" __device__ inline float Huber(float x, float alpha) { return (fabsf(x) < alpha) ? 0.5f * (x*x)/alpha : (fabsf(x) - 0.5f*alpha); } __global__ void PrimalDualInitKernel( EntryArray candidate_entries, BlockArray blocks, HashTable hash_table, GeometryHelper geometry_helper ) { const HashEntry& entry = candidate_entries[blockIdx.x]; Block& block = blocks[entry.ptr]; PrimalDualVariables& primal_dual_variables = block.primal_dual_variables[threadIdx.x]; Voxel& voxel = block.voxels[threadIdx.x]; if (voxel.inv_sigma2 < EPSILON) return; int3 voxel_base_pos = geometry_helper.BlockToVoxel(entry.pos); uint3 offset = geometry_helper.DevectorizeIndex(threadIdx.x); int3 voxel_pos = voxel_base_pos + make_int3(offset); float3 gradient; GetInitSDFGradient(entry, voxel_pos, blocks, hash_table, geometry_helper, &gradient); // primal primal_dual_variables.Clear(); primal_dual_variables.inv_sigma2 = expf(length(gradient) / geometry_helper.voxel_size); primal_dual_variables.sdf0 = voxel.sdf; primal_dual_variables.mask = true; } __global__ /** * Primal dual: dual step * @param candidate_entries * @param blocks * @param hash_table * @param geometry_helper * @param lambda * @param sigma * @param tau */ void PrimalDualIteratePass1Kernel( EntryArray candidate_entries, BlockArray blocks, HashTable hash_table, GeometryHelper geometry_helper, float lambda, float sigma, float tau, float* err_data, float* err_tv ) { const float alpha = 0.02; const HashEntry &entry = candidate_entries[blockIdx.x]; Block& block = blocks[entry.ptr]; Voxel &voxel = block.voxels[threadIdx.x]; PrimalDualVariables &primal_dual_variable = block.primal_dual_variables[threadIdx.x]; if (voxel.inv_sigma2 < EPSILON) return; int3 voxel_base_pos = geometry_helper.BlockToVoxel(entry.pos); uint3 offset = geometry_helper.DevectorizeIndex(threadIdx.x); int3 voxel_pos = voxel_base_pos + make_int3(offset); // Compute error float data_diff = fabsf(voxel.sdf - primal_dual_variable.sdf0); data_diff *= data_diff; if (voxel.inv_sigma2 > EPSILON) { atomicAdd(err_data, data_diff); } float3 gradient; GetSDFGradient(entry, voxel_pos, blocks, hash_table, geometry_helper, &gradient); atomicAdd(err_tv, Huber(length(gradient), alpha)); // Dual step // p_{n+1} = prox_F* (p_{n} + \sigma \nabla x_bar{n}) // prox_F* (y) = \delta (y) (projection function) float3 primal_gradient; GetPrimalGradient( entry, voxel_pos, blocks, hash_table, geometry_helper, &primal_gradient ); //float tv_diff = primal_dual_variable.p = primal_dual_variable.p + sigma * primal_gradient; // huber primal_dual_variable.p /= (1 + sigma * alpha); primal_dual_variable.p /= fmaxf(1, length(primal_dual_variable.p)); } __global__ void PrimalDualIteratePass2Kernel( EntryArray candidate_entries, BlockArray blocks, HashTable hash_table, GeometryHelper geometry_helper, float lambda, float sigma, float tau ) { const HashEntry &entry = candidate_entries[blockIdx.x]; Block& block = blocks[entry.ptr]; Voxel &voxel = block.voxels[threadIdx.x]; PrimalDualVariables& primal_dual_variables = block.primal_dual_variables[threadIdx.x]; if (voxel.inv_sigma2 < EPSILON) return; int3 voxel_base_pos = geometry_helper.BlockToVoxel(entry.pos); uint3 offset = geometry_helper.DevectorizeIndex(threadIdx.x); int3 voxel_pos = voxel_base_pos + make_int3(offset); float voxel_sdf_prev = voxel.sdf; // Primal step // x_{n+1} = prox_G (x_{n} - \tau -Div p_{n+1}) // prox_G = (1 + 
\lambda y) / (1 + \lambda) float dual_divergence = 0; GetDualDivergence( entry, voxel_pos, blocks, hash_table, geometry_helper, &dual_divergence ); lambda *= primal_dual_variables.inv_sigma2; voxel.sdf = voxel.sdf - tau * dual_divergence; voxel.sdf = (voxel.sdf + lambda * tau * primal_dual_variables.sdf0) / (1 + lambda * tau); if (primal_dual_variables.inv_sigma2 > 2.0f) voxel.sdf = primal_dual_variables.sdf0; // Extrapolation primal_dual_variables.sdf_bar = 2 * voxel.sdf - voxel_sdf_prev; } void PrimalDualInit( EntryArray& candidate_entries, BlockArray& blocks, HashTable& hash_table, GeometryHelper& geometry_helper ) { const uint threads_per_block = BLOCK_SIZE; uint candidate_entry_count = candidate_entries.count(); if (candidate_entry_count <= 0) return; const dim3 grid_size(candidate_entry_count, 1); const dim3 block_size(threads_per_block, 1); PrimalDualInitKernel<<<grid_size, block_size>>> (candidate_entries, blocks, hash_table, geometry_helper); checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaGetLastError()); } void PrimalDualIterate( EntryArray& candidate_entries, BlockArray& blocks, HashTable& hash_table, GeometryHelper& geometry_helper, const float lambda, const float sigma, const float tau ) { const uint threads_per_block = BLOCK_SIZE; uint candidate_entry_count = candidate_entries.count(); if (candidate_entry_count <= 0) return; const dim3 grid_size(candidate_entry_count, 1); const dim3 block_size(threads_per_block, 1); float* err_data, *err_tv; checkCudaErrors(cudaMalloc(&err_data, sizeof(float))); checkCudaErrors(cudaMemset(err_data, 0, sizeof(float))); checkCudaErrors(cudaMalloc(&err_tv, sizeof(float))); checkCudaErrors(cudaMemset(err_tv, 0, sizeof(float))); PrimalDualIteratePass1Kernel <<<grid_size, block_size>>> ( candidate_entries, blocks, hash_table, geometry_helper, lambda, sigma, tau, err_data, err_tv); checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaGetLastError()); float err_data_cpu, err_tv_cpu; checkCudaErrors(cudaMemcpy(&err_data_cpu, err_data, sizeof(float), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(&err_tv_cpu, err_tv, sizeof(float), cudaMemcpyDeviceToHost)); std::cout << err_data_cpu * lambda / 2 + err_tv_cpu << " " << err_data_cpu << " " << err_tv_cpu << std::endl; PrimalDualIteratePass2Kernel <<<grid_size, block_size>>> ( candidate_entries, blocks, hash_table, geometry_helper, lambda, sigma, tau); checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaGetLastError()); }
56fdf7fca1c698fc3e297581b154301d959ba320.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

__device__ double computeMatch(unsigned char *im, int im_step,
                               unsigned char *bl, int bl_step, int bl_cols, int bl_rows,
                               int oi, int oj, int stride){
    if (!im || !bl)
        return 0.0;
    double nb = (bl_cols*bl_rows);
    double x = 0;
    for(int i = 0;i < bl_rows-stride+1;i+= stride){
        for(int j = 0;j < bl_cols-stride+1;j+= stride){
            unsigned char v1 = im[INDXs(im_step,oi+i,oj+j)];
            unsigned char v2 = bl[INDXs(bl_step,i,j)];
            x += (v2-v1)*(v2-v1);
            //im[INDXs(im_step,oi+i,oj+j)] = ABS(v2-v1);
        }
    }
    x = x / nb;
    // printf("%f\n",x);
    return x;
}

__global__ void blockMatching_kernel( int jend,int stride,int im, int im_step, int bl_step,int bl_cols,int bl_rows){
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    for(int j = 0;j < jend-stride+1;j+=stride){
        double x = computeMatch(im,im_step, bl,bl_step,bl_cols,bl_rows, tid,j,stride);
        if(x < minVal){
            minVal = x;
            coord_i_min = tid;
            coord_j_min = j;
            bestScale = s;
            bestRotation = r;
        }
    }
}
56fdf7fca1c698fc3e297581b154301d959ba320.cu
__device__ double computeMatch(unsigned char *im, int im_step,
                               unsigned char *bl, int bl_step, int bl_cols, int bl_rows,
                               int oi, int oj, int stride){
    if (!im || !bl)
        return 0.0;
    double nb = (bl_cols*bl_rows);
    double x = 0;
    for(int i = 0;i < bl_rows-stride+1;i+= stride){
        for(int j = 0;j < bl_cols-stride+1;j+= stride){
            unsigned char v1 = im[INDXs(im_step,oi+i,oj+j)];
            unsigned char v2 = bl[INDXs(bl_step,i,j)];
            x += (v2-v1)*(v2-v1);
            //im[INDXs(im_step,oi+i,oj+j)] = ABS(v2-v1);
        }
    }
    x = x / nb;
    // printf("%f\n",x);
    return x;
}

__global__ void blockMatching_kernel( int jend,int stride,int im, int im_step, int bl_step,int bl_cols,int bl_rows){
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    for(int j = 0;j < jend-stride+1;j+=stride){
        double x = computeMatch(im,im_step, bl,bl_step,bl_cols,bl_rows, tid,j,stride);
        if(x < minVal){
            minVal = x;
            coord_i_min = tid;
            coord_j_min = j;
            bestScale = s;
            bestRotation = r;
        }
    }
}
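The kernel in this pair references bl, minVal, coord_i_min, coord_j_min, bestScale, bestRotation, s and r, none of which are declared in the file, and takes the image as `int im`, so it does not compile standalone. One self-contained variant (illustrative only, reusing computeMatch from above): pass the buffers explicitly and let each thread record its own best column, leaving the global reduction to the host.

__global__ void blockMatching_kernel_sketch(unsigned char *im, int im_step,
                                            unsigned char *bl, int bl_step,
                                            int bl_cols, int bl_rows,
                                            int jend, int stride,
                                            double *bestVal, int *bestCol) {
    // One thread per candidate row; the caller should launch no more threads
    // than there are rows to test.
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    double minVal = 1e300;
    int jmin = -1;
    for (int j = 0; j < jend - stride + 1; j += stride) {
        double x = computeMatch(im, im_step, bl, bl_step, bl_cols, bl_rows, tid, j, stride);
        if (x < minVal) { minVal = x; jmin = j; }
    }
    bestVal[tid] = minVal;   // the row index is tid itself
    bestCol[tid] = jmin;
}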
8cea1c26763401ce0a41b8706d0b57fdafa9e084.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "simOBEcudaCoPolTest.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *AC = NULL; hipMalloc(&AC, XSIZE*YSIZE); const double *Delaylist = NULL; hipMalloc(&Delaylist, XSIZE*YSIZE); const double w = 1; const double FWHM = 1; const double G1 = 1; const double G2 = 1; const double G3 = 1; const double t_min = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( simOBEcudaCoPolTest), dim3(gridBlock),dim3(threadBlock), 0, 0, AC,Delaylist,w,FWHM,G1,G2,G3,t_min); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( simOBEcudaCoPolTest), dim3(gridBlock),dim3(threadBlock), 0, 0, AC,Delaylist,w,FWHM,G1,G2,G3,t_min); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( simOBEcudaCoPolTest), dim3(gridBlock),dim3(threadBlock), 0, 0, AC,Delaylist,w,FWHM,G1,G2,G3,t_min); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
8cea1c26763401ce0a41b8706d0b57fdafa9e084.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "simOBEcudaCoPolTest.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *AC = NULL; cudaMalloc(&AC, XSIZE*YSIZE); const double *Delaylist = NULL; cudaMalloc(&Delaylist, XSIZE*YSIZE); const double w = 1; const double FWHM = 1; const double G1 = 1; const double G2 = 1; const double G3 = 1; const double t_min = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); simOBEcudaCoPolTest<<<gridBlock,threadBlock>>>(AC,Delaylist,w,FWHM,G1,G2,G3,t_min); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { simOBEcudaCoPolTest<<<gridBlock,threadBlock>>>(AC,Delaylist,w,FWHM,G1,G2,G3,t_min); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { simOBEcudaCoPolTest<<<gridBlock,threadBlock>>>(AC,Delaylist,w,FWHM,G1,G2,G3,t_min); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
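One caveat with the benchmark harness above: the second steady_clock::now() is taken without synchronizing after the 1000 launches, so some kernel work may still be in flight when `end` is read. A sketch of the same loop timed with CUDA events instead, reusing gridBlock, threadBlock and the kernel arguments already set up in the harness:

cudaEvent_t evStart, evStop;
cudaEventCreate(&evStart);
cudaEventCreate(&evStop);
cudaEventRecord(evStart);
for (int loop_counter = 0; loop_counter < 1000; ++loop_counter) {
    simOBEcudaCoPolTest<<<gridBlock, threadBlock>>>(AC, Delaylist, w, FWHM, G1, G2, G3, t_min);
}
cudaEventRecord(evStop);
cudaEventSynchronize(evStop);           // wait until the last kernel has finished
float ms = 0.0f;
cudaEventElapsedTime(&ms, evStart, evStop);
cudaEventDestroy(evStart);
cudaEventDestroy(evStop);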
8aadbdc89bb0882bc7910704866849864030a23e.hip
// !!! This is a file automatically generated by hipify!!! #include <math.h> #include <stdio.h> #include <rocblas.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand.h> #include <thrust/binary_search.h> #include <thrust/execution_policy.h> #include "implicit/gpu/als.h" #include "implicit/gpu/dot.cuh" #include "implicit/gpu/utils.h" namespace implicit { namespace gpu { __global__ void bpr_update_kernel(int samples, unsigned int *random_likes, unsigned int *random_dislikes, int *itemids, int *userids, int *indptr, int factors, float *X, float *Y, float learning_rate, float reg, bool verify_negative_samples, int *stats) { extern __shared__ float shared_memory[]; float *temp = &shared_memory[0]; int correct = 0, skipped = 0; for (int i = blockIdx.x; i < samples; i += gridDim.x) { int liked_index = random_likes[i] % samples, disliked_index = random_dislikes[i] % samples; int userid = userids[liked_index], likedid = itemids[liked_index], dislikedid = itemids[disliked_index]; if (verify_negative_samples && thrust::binary_search(thrust::seq, &itemids[indptr[userid]], &itemids[indptr[userid + 1]], dislikedid)) { skipped += 1; continue; } float *user = &X[userid * factors], *liked = &Y[likedid * factors], *disliked = &Y[dislikedid * factors]; float user_val = user[threadIdx.x], liked_val = liked[threadIdx.x], disliked_val = disliked[threadIdx.x]; float score = dot(user_val, liked_val - disliked_val, temp); float z = 1.0 / (1.0 + exp(score)); if (z < .5) correct++; liked[threadIdx.x] += learning_rate * (z * user_val - reg * liked_val); disliked[threadIdx.x] += learning_rate * (-z * user_val - reg * disliked_val); // We're storing the item bias in the last column of the matrix - with the // user = 1 in that column. Don't update the user value in that case if (threadIdx.x < factors) { user[threadIdx.x] += learning_rate * (z * (liked_val - disliked_val) - reg * user_val); } } if (threadIdx.x == 0) { atomicAdd(stats, correct); atomicAdd(stats + 1, skipped); } } std::pair<int, int> bpr_update(const Vector<int> &userids, const Vector<int> &itemids, const Vector<int> &indptr, Matrix *X, Matrix *Y, float learning_rate, float reg, long seed, bool verify_negative_samples) { if (X->cols != Y->cols) throw std::invalid_argument( "X and Y should have the same number of columns"); if (userids.size != itemids.size) throw std::invalid_argument( "userids and itemids should have same number of elements"); // todo: check indptr = X->rows + 1 int nonzeros = userids.size; // allocate some memory int *stats; CHECK_CUDA(hipMalloc(&stats, sizeof(int) * 2)); CHECK_CUDA(hipMemset(stats, 0, sizeof(int) * 2)); // initialize memory for randomly picked positive/negative items unsigned int *random_likes, *random_dislikes; CHECK_CUDA(hipMalloc(&random_likes, nonzeros * sizeof(unsigned int))); CHECK_CUDA(hipMalloc(&random_dislikes, nonzeros * sizeof(unsigned int))); // Create a seeded RNG hiprandGenerator_t rng; CHECK_CURAND(hiprandCreateGenerator(&rng, HIPRAND_RNG_PSEUDO_DEFAULT)); CHECK_CURAND(hiprandSetPseudoRandomGeneratorSeed(rng, seed)); // Randomly pick values CHECK_CURAND(hiprandGenerate(rng, random_likes, nonzeros)); CHECK_CURAND(hiprandGenerate(rng, random_dislikes, nonzeros)); // TODO: multi-gpu support int devId; CHECK_CUDA(hipGetDevice(&devId)); int multiprocessor_count; CHECK_CUDA(hipDeviceGetAttribute(&multiprocessor_count, hipDeviceAttributeMultiprocessorCount, devId)); int factors = X->cols; int block_count = 256 * multiprocessor_count; int thread_count = factors; int shared_memory_size = sizeof(float) * 
(factors); // TODO: get rows passed in here hipLaunchKernelGGL(( bpr_update_kernel), dim3(block_count), dim3(thread_count), shared_memory_size, 0, nonzeros, random_likes, random_dislikes, itemids.data, userids.data, indptr.data, factors, X->data, Y->data, learning_rate, reg, verify_negative_samples, stats); CHECK_CUDA(hipDeviceSynchronize()); // we're returning the number of correctly ranked items, get that value from // the device int output[2]; CHECK_CUDA( hipMemcpy(output, stats, 2 * sizeof(int), hipMemcpyDeviceToHost)); CHECK_CUDA(hipFree(random_likes)); CHECK_CUDA(hipFree(random_dislikes)); CHECK_CUDA(hipFree(stats)); hiprandDestroyGenerator(rng); return std::make_pair(output[0], output[1]); } } // namespace gpu } // namespace implicit
8aadbdc89bb0882bc7910704866849864030a23e.cu
#include <math.h> #include <stdio.h> #include <cublas_v2.h> #include <cuda_runtime.h> #include <curand.h> #include <thrust/binary_search.h> #include <thrust/execution_policy.h> #include "implicit/gpu/als.h" #include "implicit/gpu/dot.cuh" #include "implicit/gpu/utils.h" namespace implicit { namespace gpu { __global__ void bpr_update_kernel(int samples, unsigned int *random_likes, unsigned int *random_dislikes, int *itemids, int *userids, int *indptr, int factors, float *X, float *Y, float learning_rate, float reg, bool verify_negative_samples, int *stats) { extern __shared__ float shared_memory[]; float *temp = &shared_memory[0]; int correct = 0, skipped = 0; for (int i = blockIdx.x; i < samples; i += gridDim.x) { int liked_index = random_likes[i] % samples, disliked_index = random_dislikes[i] % samples; int userid = userids[liked_index], likedid = itemids[liked_index], dislikedid = itemids[disliked_index]; if (verify_negative_samples && thrust::binary_search(thrust::seq, &itemids[indptr[userid]], &itemids[indptr[userid + 1]], dislikedid)) { skipped += 1; continue; } float *user = &X[userid * factors], *liked = &Y[likedid * factors], *disliked = &Y[dislikedid * factors]; float user_val = user[threadIdx.x], liked_val = liked[threadIdx.x], disliked_val = disliked[threadIdx.x]; float score = dot(user_val, liked_val - disliked_val, temp); float z = 1.0 / (1.0 + exp(score)); if (z < .5) correct++; liked[threadIdx.x] += learning_rate * (z * user_val - reg * liked_val); disliked[threadIdx.x] += learning_rate * (-z * user_val - reg * disliked_val); // We're storing the item bias in the last column of the matrix - with the // user = 1 in that column. Don't update the user value in that case if (threadIdx.x < factors) { user[threadIdx.x] += learning_rate * (z * (liked_val - disliked_val) - reg * user_val); } } if (threadIdx.x == 0) { atomicAdd(stats, correct); atomicAdd(stats + 1, skipped); } } std::pair<int, int> bpr_update(const Vector<int> &userids, const Vector<int> &itemids, const Vector<int> &indptr, Matrix *X, Matrix *Y, float learning_rate, float reg, long seed, bool verify_negative_samples) { if (X->cols != Y->cols) throw std::invalid_argument( "X and Y should have the same number of columns"); if (userids.size != itemids.size) throw std::invalid_argument( "userids and itemids should have same number of elements"); // todo: check indptr = X->rows + 1 int nonzeros = userids.size; // allocate some memory int *stats; CHECK_CUDA(cudaMalloc(&stats, sizeof(int) * 2)); CHECK_CUDA(cudaMemset(stats, 0, sizeof(int) * 2)); // initialize memory for randomly picked positive/negative items unsigned int *random_likes, *random_dislikes; CHECK_CUDA(cudaMalloc(&random_likes, nonzeros * sizeof(unsigned int))); CHECK_CUDA(cudaMalloc(&random_dislikes, nonzeros * sizeof(unsigned int))); // Create a seeded RNG curandGenerator_t rng; CHECK_CURAND(curandCreateGenerator(&rng, CURAND_RNG_PSEUDO_DEFAULT)); CHECK_CURAND(curandSetPseudoRandomGeneratorSeed(rng, seed)); // Randomly pick values CHECK_CURAND(curandGenerate(rng, random_likes, nonzeros)); CHECK_CURAND(curandGenerate(rng, random_dislikes, nonzeros)); // TODO: multi-gpu support int devId; CHECK_CUDA(cudaGetDevice(&devId)); int multiprocessor_count; CHECK_CUDA(cudaDeviceGetAttribute(&multiprocessor_count, cudaDevAttrMultiProcessorCount, devId)); int factors = X->cols; int block_count = 256 * multiprocessor_count; int thread_count = factors; int shared_memory_size = sizeof(float) * (factors); // TODO: get rows passed in here bpr_update_kernel<<<block_count, 
thread_count, shared_memory_size>>>( nonzeros, random_likes, random_dislikes, itemids.data, userids.data, indptr.data, factors, X->data, Y->data, learning_rate, reg, verify_negative_samples, stats); CHECK_CUDA(cudaDeviceSynchronize()); // we're returning the number of correctly ranked items, get that value from // the device int output[2]; CHECK_CUDA( cudaMemcpy(output, stats, 2 * sizeof(int), cudaMemcpyDeviceToHost)); CHECK_CUDA(cudaFree(random_likes)); CHECK_CUDA(cudaFree(random_dislikes)); CHECK_CUDA(cudaFree(stats)); curandDestroyGenerator(rng); return std::make_pair(output[0], output[1]); } } // namespace gpu } // namespace implicit
b61aaaf53e41ff3d1b40be2101a9e4a1770df650.hip
// !!! This is a file automatically generated by hipify!!! /** MIT License Copyright (c) 2018 NVIDIA CORPORATION. All rights reserved. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * */ #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <stdint.h> #include <stdio.h> #include <string.h> inline __device__ float sigmoidGPU(const float& x) { return 1.0f / (1.0f + __expf(-x)); } __global__ void gpuYoloLayerV3(const float* input, float* output, const uint gridSizeW, const uint gridSizeH, const uint numOutputClasses, const uint numBBoxes) { uint x_id = blockIdx.x * blockDim.x + threadIdx.x; uint y_id = blockIdx.y * blockDim.y + threadIdx.y; uint z_id = blockIdx.z * blockDim.z + threadIdx.z; if ((x_id >= gridSizeW) || (y_id >= gridSizeH) || (z_id >= numBBoxes)) { return; } const int numGridCells = gridSizeW * gridSizeH; const int bbindex = y_id * gridSizeW + x_id; output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)] = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)]); output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 1)] = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 1)]); output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 2)] = __expf(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 2)]); output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 3)] = __expf(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 3)]); output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 4)] = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 4)]); for (uint i = 0; i < numOutputClasses; ++i) { output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))] = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))]); } } hipError_t cudaYoloLayerV3(const void* input, void* output, const uint& batchSize, const uint& gridSizeW, const uint& gridSizeH, const uint& numOutputClasses, const uint& numBBoxes, uint64_t outputSize, hipStream_t stream) { dim3 threads_per_block(16, 16, 4); dim3 number_of_blocks((gridSizeW / threads_per_block.x) + 1, (gridSizeH / threads_per_block.y) + 1, (numBBoxes / threads_per_block.z) + 1); for (int batch = 0; batch < batchSize; ++batch) { hipLaunchKernelGGL(( gpuYoloLayerV3), dim3(number_of_blocks), dim3(threads_per_block), 0, stream, reinterpret_cast<const float*>(input) + (batch * outputSize), reinterpret_cast<float*>(output) + (batch * outputSize), gridSizeW, gridSizeH, numOutputClasses, numBBoxes); } return 
hipGetLastError(); }
b61aaaf53e41ff3d1b40be2101a9e4a1770df650.cu
/** MIT License Copyright (c) 2018 NVIDIA CORPORATION. All rights reserved. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * */ #include <cuda.h> #include <cuda_runtime.h> #include <stdint.h> #include <stdio.h> #include <string.h> inline __device__ float sigmoidGPU(const float& x) { return 1.0f / (1.0f + __expf(-x)); } __global__ void gpuYoloLayerV3(const float* input, float* output, const uint gridSizeW, const uint gridSizeH, const uint numOutputClasses, const uint numBBoxes) { uint x_id = blockIdx.x * blockDim.x + threadIdx.x; uint y_id = blockIdx.y * blockDim.y + threadIdx.y; uint z_id = blockIdx.z * blockDim.z + threadIdx.z; if ((x_id >= gridSizeW) || (y_id >= gridSizeH) || (z_id >= numBBoxes)) { return; } const int numGridCells = gridSizeW * gridSizeH; const int bbindex = y_id * gridSizeW + x_id; output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)] = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)]); output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 1)] = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 1)]); output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 2)] = __expf(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 2)]); output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 3)] = __expf(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 3)]); output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 4)] = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 4)]); for (uint i = 0; i < numOutputClasses; ++i) { output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))] = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))]); } } cudaError_t cudaYoloLayerV3(const void* input, void* output, const uint& batchSize, const uint& gridSizeW, const uint& gridSizeH, const uint& numOutputClasses, const uint& numBBoxes, uint64_t outputSize, cudaStream_t stream) { dim3 threads_per_block(16, 16, 4); dim3 number_of_blocks((gridSizeW / threads_per_block.x) + 1, (gridSizeH / threads_per_block.y) + 1, (numBBoxes / threads_per_block.z) + 1); for (int batch = 0; batch < batchSize; ++batch) { gpuYoloLayerV3<<<number_of_blocks, threads_per_block, 0, stream>>>( reinterpret_cast<const float*>(input) + (batch * outputSize), reinterpret_cast<float*>(output) + (batch * outputSize), gridSizeW, gridSizeH, numOutputClasses, numBBoxes); } return cudaGetLastError(); }
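A hypothetical call site for cudaYoloLayerV3, not part of the original pair. The grid, anchor and class counts are example values, and the per-sample element count follows the indexing used inside the kernel: gridSizeW * gridSizeH * numBBoxes * (5 + numOutputClasses).

#include <cstdio>
#include <cstdint>
#include <cuda_runtime.h>

cudaError_t cudaYoloLayerV3(const void* input, void* output, const unsigned int& batchSize,
                            const unsigned int& gridSizeW, const unsigned int& gridSizeH,
                            const unsigned int& numOutputClasses, const unsigned int& numBBoxes,
                            uint64_t outputSize, cudaStream_t stream);

void runYoloHeadExample(const float* d_in, float* d_out, cudaStream_t stream) {
    const unsigned int batchSize = 1, gridSizeW = 13, gridSizeH = 13;   // example values
    const unsigned int numOutputClasses = 80, numBBoxes = 3;
    const uint64_t outputSize =
        (uint64_t)gridSizeW * gridSizeH * numBBoxes * (5 + numOutputClasses);
    cudaError_t status = cudaYoloLayerV3(d_in, d_out, batchSize, gridSizeW, gridSizeH,
                                         numOutputClasses, numBBoxes, outputSize, stream);
    if (status != cudaSuccess) {
        printf("yolo layer failed: %s\n", cudaGetErrorString(status));
    }
}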
153c813d5fb4ad9de500df3014c2a72cbb1f803c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ================================================================ * * PyCA Project * * Copyright (c) J. Samuel Preston, Linh K. Ha, Sarang C. Joshi. All * rights reserved. See Copyright.txt or for details. * * This software is distributed WITHOUT ANY WARRANTY; without even the * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR * PURPOSE. See the above copyright notice for more information. * * ================================================================ */ #include "GFluidKernelFFTKernels.h" #include <pycaUtils.h> // TEST make sure boost isn't included in nvcc code #if defined(BOOST_COMPILER) int bla[-1]; #endif namespace PyCA { // ================================================================ // Begin Device Code // ================================================================ __device__ __constant__ float sdCosWX[MAX_FFT_TABLE_LENGTH]; __device__ __constant__ float sdSinWX[MAX_FFT_TABLE_LENGTH]; __device__ __constant__ float sdCosWY[MAX_FFT_TABLE_LENGTH]; __device__ __constant__ float sdSinWY[MAX_FFT_TABLE_LENGTH]; __device__ __constant__ float sdCosWZ[MAX_FFT_TABLE_LENGTH]; __device__ __constant__ float sdSinWZ[MAX_FFT_TABLE_LENGTH]; template<class T> __device__ void InverseOperatorMultiply(ComplexT<T>& bX, ComplexT<T>& bY, ComplexT<T>& bZ, T L00, T L10, T L11, T L20, T L21, T L22) { T G00; T G10, G11; T G20, G21, G22; T y0, y1, y2; // // Given that A is pos-def symetric matrix, solve Ax=b by finding // cholesky decomposition GG'=A // and then performing 2 back-solves, Gy=b and then G'x=y to get x. // // 1. find cholesky decomposition by finding G such that GG'=A. // A must be positive definite symetric (we assume that here) // G is then lower triangular, see algorithm 4.2.1 p142-3 // in Golub and VanLoan // Note: these are in matlab notation 1:3 // [ G(1,1) 0 0 ] [ G(1,1) G(2,1) G(3,1) ] // [ G(2,1) G(2,2) 0 ] * [ 0 G(2,2) G(3,2) ] = Amatrix // [ G(3,1) G(3,2) G(3,3) ] [ 0 0 G(3,3) ] T bRealX = bX.x; T bRealY = bY.x; T bRealZ = bZ.x; T bImagX = bX.y; T bImagY = bY.y; T bImagZ = bZ.y; T& vRealX = bX.x; T& vRealY = bY.x; T& vRealZ = bZ.x; T& vImagX = bX.y; T& vImagY = bY.y; T& vImagZ = bZ.y; G00 = sqrt(L00); G10 = L10 / G00; G20 = L20 / G00; G11 = L11 - G10 * G10; G21 = L21 - G20 * G10; G11 = sqrt(G11); G21 = G21 / G11; G22 = L22 - (G20*G20 + G21*G21); G22 = sqrt(G22); // back-solve Gy=b to get a temporary vector y // back-solve G'x=y to get answer in x // // Note: these are in matlab notation 1:3 // [ G(1,1) 0 0 ] [ y(1) ] = b(1) // [ G(2,1) G(2,2) 0 ] * [ y(2) ] = b(2) // [ G(3,1) G(3,2) G(3,3) ] [ y(3) ] = b(3) // // [ G(1,1) G(2,1) G(3,1) ] [ x(1) ] = y(1) // [ 0 G(2,2) G(3,2) ] * [ x(2) ] = y(2) // [ 0 0 G(3,3) ] [ x(3) ] = y(3) y0 = bRealX / G00; y1 = (bRealY - G10*y0) / G11; y2 = (bRealZ - G20*y0 - G21*y1) / G22; vRealZ = y2 / G22; vRealY = (y1 - G21*vRealZ) / G11; vRealX = (y0 - G10*vRealY - G20*vRealZ) / G00; y0 = bImagX / G00; y1 = (bImagY - G10*y0) / G11; y2 = (bImagZ - G20*y0 - G21*y1) / G22; vImagZ = y2 / G22; vImagY = (y1 - G21*vImagZ) / G11; vImagX = (y0 - G10*vImagY - G20*vImagZ) / G00; } //-------------------------------------------------------------------------------- // General Navier Stoker solver with beta is different than 0 // //-------------------------------------------------------------------------------- template<class T> __device__ void OperatorMultiply(ComplexT<T>& bX, ComplexT<T>& bY, ComplexT<T>& bZ, T L00, T L10, T L11, T L20, T L21, T L22) { T 
bRealX = bX.x; T bRealY = bY.x; T bRealZ = bZ.x; T bImagX = bX.y; T bImagY = bY.y; T bImagZ = bZ.y; T& vRealX = bX.x; T& vRealY = bY.x; T& vRealZ = bZ.x; T& vImagX = bX.y; T& vImagY = bY.y; T& vImagZ = bZ.y; vRealX = L00*bRealX + L10*bRealY + L20*bRealZ; vRealY = L10*bRealX + L11*bRealY + L21*bRealZ; vRealZ = L20*bRealX + L21*bRealY + L22*bRealZ; vImagX = L00*bImagX + L10*bImagY + L20*bImagZ; vImagY = L10*bImagX + L11*bImagY + L21*bImagZ; vImagZ = L20*bImagX + L21*bImagY + L22*bImagZ; } template<class T> __device__ inline void ProjectIncomp(ComplexT<T>& bX, ComplexT<T>& bY, ComplexT<T>& bZ, T sWXx, T sWYy, T sWZz) { // in Fourier space we project onto (-i*sin(u),-i*sin(v),-i*sin(w)) and remove that component // 2008 jdh T bRealX = bX.x; T bRealY = bY.x; T bRealZ = bZ.x; T bImagX = bX.y; T bImagY = bY.y; T bImagZ = bZ.y; T& vRealX = bX.x; T& vRealY = bY.x; T& vRealZ = bZ.x; T& vImagX = bX.y; T& vImagY = bY.y; T& vImagZ = bZ.y; T nsq = sWXx*sWXx + sWYy*sWYy + sWZz*sWZz; // norm squared of projection vector // S=(sinwx,sinwy,sinwz) // Real part of S dot V in Fourier T ReSdotV = ( bRealX*sWXx +bRealY*sWYy +bRealZ*sWZz); // Imag part of S dot V in Fourier T ImSdotV = ( bImagX*sWXx +bImagY*sWYy +bImagZ*sWZz); // Subtract S dot V (normalizing S) vRealX = bRealX - ReSdotV*sWXx/nsq; vRealY = bRealY - ReSdotV*sWYy/nsq; vRealZ = bRealZ - ReSdotV*sWZz/nsq; vImagX = bImagX - ImSdotV*sWXx/nsq; vImagY = bImagY - ImSdotV*sWYy/nsq; vImagZ = bImagZ - ImSdotV*sWZz/nsq; } template<bool inverseOp, bool incomp, class T> __global__ void fullNavierStokesSolver3D_C3_kernel(ComplexT<T>* bX, ComplexT<T>* bY, ComplexT<T>* bZ, const T alpha, const T beta, const T gamma, const int sizeX, const int sizeY, const int sizeZ) { uint x = blockIdx.x * blockDim.x + threadIdx.x; uint y = blockIdx.y * blockDim.y + threadIdx.y; T lambda; T L00; T L10, L11; T L20, L21, L22; uint index = x + y * sizeX; uint planeSize = sizeX * sizeY; if ( x < sizeX && y < sizeY){ T wx = sdCosWX[x]; T wy = sdCosWY[y]; for (int z=0; z < sizeZ ; ++z, index+=planeSize){ // // compute L (it is symmetric, only need lower triangular part) // lambda = -alpha * (wx + wy + sdCosWZ[z]) + gamma; L00 = lambda - beta * sdCosWX[x]; L11 = lambda - beta * sdCosWY[y]; L22 = lambda - beta * sdCosWZ[z]; L10 = beta * sdSinWX[x] * sdSinWY[y]; L20 = beta * sdSinWX[x] * sdSinWZ[z]; L21 = beta * sdSinWY[y] * sdSinWZ[z]; if(inverseOp){ InverseOperatorMultiply<T>(bX[index], bY[index], bZ[index], L00, L10, L11, L20, L21, L22); }else{ OperatorMultiply<T>(bX[index], bY[index], bZ[index], L00, L10, L11, L20, L21, L22); } if(incomp){ if (index > 0) // do nothing with zero component ProjectIncomp<T>(bX[index], bY[index], bZ[index], sdSinWX[x], sdSinWY[y], sdSinWZ[z]); } } } } // ================================================================ // End Device Code // ================================================================ template<class T> void UploadFFTLookupTable3D (const T* cosWX, const T* cosWY, const T* cosWZ, const T* sinWX, const T* sinWY, const T* sinWZ, size_t sz_x, size_t sz_y, size_t sz_z) { hipMemcpyToSymbol(sdCosWX, cosWX, sz_x * sizeof(T)); hipMemcpyToSymbol(sdSinWX, sinWX, sz_x * sizeof(T)); hipMemcpyToSymbol(sdCosWY, cosWY, sz_y * sizeof(T)); hipMemcpyToSymbol(sdSinWY, sinWY, sz_y * sizeof(T)); hipMemcpyToSymbol(sdCosWZ, cosWZ, sz_z * sizeof(T)); hipMemcpyToSymbol(sdSinWZ, sinWZ, sz_z * sizeof(T)); } template<class T> void frequencyDomainApply(ComplexT<T>* fftX, ComplexT<T>* fftY, ComplexT<T>* fftZ, float alpha, float beta, float gamma, int cSz_x, int cSz_y, 
int cSz_z, bool inverseOp, bool divergenceFree, StreamT stream) { dim3 threads(16,16); dim3 grids(iDivUp(cSz_x, threads.x), iDivUp(cSz_y, threads.y)); if (inverseOp == true) { if (divergenceFree == true) { hipLaunchKernelGGL(( fullNavierStokesSolver3D_C3_kernel<true, true, T>) , dim3(grids), dim3(threads), 0, stream, fftX, fftY, fftZ, alpha, beta, gamma, cSz_x, cSz_y, cSz_z); } else { hipLaunchKernelGGL(( fullNavierStokesSolver3D_C3_kernel<true, false, T>) , dim3(grids), dim3(threads), 0, stream, fftX, fftY, fftZ, alpha, beta, gamma, cSz_x, cSz_y, cSz_z); } } else { if (divergenceFree == true) { hipLaunchKernelGGL(( fullNavierStokesSolver3D_C3_kernel<false, true, T>) , dim3(grids), dim3(threads), 0, stream, fftX, fftY, fftZ, alpha, beta, gamma, cSz_x, cSz_y, cSz_z); } else { hipLaunchKernelGGL(( fullNavierStokesSolver3D_C3_kernel<false, false, T>) , dim3(grids), dim3(threads), 0, stream, fftX, fftY, fftZ, alpha, beta, gamma, cSz_x, cSz_y, cSz_z); } } } // template instantiation template void UploadFFTLookupTable3D<float> (const float*, const float*, const float*, const float*, const float*, const float*, size_t, size_t, size_t); template void frequencyDomainApply<float> (ComplexT<float>*, ComplexT<float>*, ComplexT<float>*, float alpha, float beta, float gamma, int cSz_x, int cSz_y, int cSz_z, bool inverseOp, bool divergenceFree, StreamT stream); } // end namespace PyCA
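For reference, the per-frequency 3x3 system assembled by fullNavierStokesSolver3D_C3_kernel above can be written out as follows. This is a direct transcription of the code into math; cW*/sW* stand for the uploaded cosine/sine lookup tables and are ad-hoc notation, not PyCA's.

\[
\lambda = -\alpha\,(cW_x + cW_y + cW_z) + \gamma,
\qquad
L = \begin{bmatrix}
\lambda - \beta\,cW_x & \beta\,sW_x sW_y & \beta\,sW_x sW_z \\
\beta\,sW_x sW_y & \lambda - \beta\,cW_y & \beta\,sW_y sW_z \\
\beta\,sW_x sW_z & \beta\,sW_y sW_z & \lambda - \beta\,cW_z
\end{bmatrix}.
\]

OperatorMultiply applies \(L\) to the real and imaginary parts of \((b_x, b_y, b_z)\) separately; InverseOperatorMultiply instead solves \(Lv = b\) through a Cholesky factorization, and ProjectIncomp optionally removes the component along \((sW_x, sW_y, sW_z)\).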
153c813d5fb4ad9de500df3014c2a72cbb1f803c.cu
/* ================================================================ * * PyCA Project * * Copyright (c) J. Samuel Preston, Linh K. Ha, Sarang C. Joshi. All * rights reserved. See Copyright.txt or for details. * * This software is distributed WITHOUT ANY WARRANTY; without even the * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR * PURPOSE. See the above copyright notice for more information. * * ================================================================ */ #include "GFluidKernelFFTKernels.h" #include <pycaUtils.h> // TEST make sure boost isn't included in nvcc code #if defined(BOOST_COMPILER) int bla[-1]; #endif namespace PyCA { // ================================================================ // Begin Device Code // ================================================================ __device__ __constant__ float sdCosWX[MAX_FFT_TABLE_LENGTH]; __device__ __constant__ float sdSinWX[MAX_FFT_TABLE_LENGTH]; __device__ __constant__ float sdCosWY[MAX_FFT_TABLE_LENGTH]; __device__ __constant__ float sdSinWY[MAX_FFT_TABLE_LENGTH]; __device__ __constant__ float sdCosWZ[MAX_FFT_TABLE_LENGTH]; __device__ __constant__ float sdSinWZ[MAX_FFT_TABLE_LENGTH]; template<class T> __device__ void InverseOperatorMultiply(ComplexT<T>& bX, ComplexT<T>& bY, ComplexT<T>& bZ, T L00, T L10, T L11, T L20, T L21, T L22) { T G00; T G10, G11; T G20, G21, G22; T y0, y1, y2; // // Given that A is pos-def symetric matrix, solve Ax=b by finding // cholesky decomposition GG'=A // and then performing 2 back-solves, Gy=b and then G'x=y to get x. // // 1. find cholesky decomposition by finding G such that GG'=A. // A must be positive definite symetric (we assume that here) // G is then lower triangular, see algorithm 4.2.1 p142-3 // in Golub and VanLoan // Note: these are in matlab notation 1:3 // [ G(1,1) 0 0 ] [ G(1,1) G(2,1) G(3,1) ] // [ G(2,1) G(2,2) 0 ] * [ 0 G(2,2) G(3,2) ] = Amatrix // [ G(3,1) G(3,2) G(3,3) ] [ 0 0 G(3,3) ] T bRealX = bX.x; T bRealY = bY.x; T bRealZ = bZ.x; T bImagX = bX.y; T bImagY = bY.y; T bImagZ = bZ.y; T& vRealX = bX.x; T& vRealY = bY.x; T& vRealZ = bZ.x; T& vImagX = bX.y; T& vImagY = bY.y; T& vImagZ = bZ.y; G00 = sqrt(L00); G10 = L10 / G00; G20 = L20 / G00; G11 = L11 - G10 * G10; G21 = L21 - G20 * G10; G11 = sqrt(G11); G21 = G21 / G11; G22 = L22 - (G20*G20 + G21*G21); G22 = sqrt(G22); // back-solve Gy=b to get a temporary vector y // back-solve G'x=y to get answer in x // // Note: these are in matlab notation 1:3 // [ G(1,1) 0 0 ] [ y(1) ] = b(1) // [ G(2,1) G(2,2) 0 ] * [ y(2) ] = b(2) // [ G(3,1) G(3,2) G(3,3) ] [ y(3) ] = b(3) // // [ G(1,1) G(2,1) G(3,1) ] [ x(1) ] = y(1) // [ 0 G(2,2) G(3,2) ] * [ x(2) ] = y(2) // [ 0 0 G(3,3) ] [ x(3) ] = y(3) y0 = bRealX / G00; y1 = (bRealY - G10*y0) / G11; y2 = (bRealZ - G20*y0 - G21*y1) / G22; vRealZ = y2 / G22; vRealY = (y1 - G21*vRealZ) / G11; vRealX = (y0 - G10*vRealY - G20*vRealZ) / G00; y0 = bImagX / G00; y1 = (bImagY - G10*y0) / G11; y2 = (bImagZ - G20*y0 - G21*y1) / G22; vImagZ = y2 / G22; vImagY = (y1 - G21*vImagZ) / G11; vImagX = (y0 - G10*vImagY - G20*vImagZ) / G00; } //-------------------------------------------------------------------------------- // General Navier Stoker solver with beta is different than 0 // //-------------------------------------------------------------------------------- template<class T> __device__ void OperatorMultiply(ComplexT<T>& bX, ComplexT<T>& bY, ComplexT<T>& bZ, T L00, T L10, T L11, T L20, T L21, T L22) { T bRealX = bX.x; T bRealY = bY.x; T bRealZ = bZ.x; T bImagX = bX.y; T bImagY = bY.y; T 
bImagZ = bZ.y; T& vRealX = bX.x; T& vRealY = bY.x; T& vRealZ = bZ.x; T& vImagX = bX.y; T& vImagY = bY.y; T& vImagZ = bZ.y; vRealX = L00*bRealX + L10*bRealY + L20*bRealZ; vRealY = L10*bRealX + L11*bRealY + L21*bRealZ; vRealZ = L20*bRealX + L21*bRealY + L22*bRealZ; vImagX = L00*bImagX + L10*bImagY + L20*bImagZ; vImagY = L10*bImagX + L11*bImagY + L21*bImagZ; vImagZ = L20*bImagX + L21*bImagY + L22*bImagZ; } template<class T> __device__ inline void ProjectIncomp(ComplexT<T>& bX, ComplexT<T>& bY, ComplexT<T>& bZ, T sWXx, T sWYy, T sWZz) { // in Fourier space we project onto (-i*sin(u),-i*sin(v),-i*sin(w)) and remove that component // 2008 jdh T bRealX = bX.x; T bRealY = bY.x; T bRealZ = bZ.x; T bImagX = bX.y; T bImagY = bY.y; T bImagZ = bZ.y; T& vRealX = bX.x; T& vRealY = bY.x; T& vRealZ = bZ.x; T& vImagX = bX.y; T& vImagY = bY.y; T& vImagZ = bZ.y; T nsq = sWXx*sWXx + sWYy*sWYy + sWZz*sWZz; // norm squared of projection vector // S=(sinwx,sinwy,sinwz) // Real part of S dot V in Fourier T ReSdotV = ( bRealX*sWXx +bRealY*sWYy +bRealZ*sWZz); // Imag part of S dot V in Fourier T ImSdotV = ( bImagX*sWXx +bImagY*sWYy +bImagZ*sWZz); // Subtract S dot V (normalizing S) vRealX = bRealX - ReSdotV*sWXx/nsq; vRealY = bRealY - ReSdotV*sWYy/nsq; vRealZ = bRealZ - ReSdotV*sWZz/nsq; vImagX = bImagX - ImSdotV*sWXx/nsq; vImagY = bImagY - ImSdotV*sWYy/nsq; vImagZ = bImagZ - ImSdotV*sWZz/nsq; } template<bool inverseOp, bool incomp, class T> __global__ void fullNavierStokesSolver3D_C3_kernel(ComplexT<T>* bX, ComplexT<T>* bY, ComplexT<T>* bZ, const T alpha, const T beta, const T gamma, const int sizeX, const int sizeY, const int sizeZ) { uint x = blockIdx.x * blockDim.x + threadIdx.x; uint y = blockIdx.y * blockDim.y + threadIdx.y; T lambda; T L00; T L10, L11; T L20, L21, L22; uint index = x + y * sizeX; uint planeSize = sizeX * sizeY; if ( x < sizeX && y < sizeY){ T wx = sdCosWX[x]; T wy = sdCosWY[y]; for (int z=0; z < sizeZ ; ++z, index+=planeSize){ // // compute L (it is symmetric, only need lower triangular part) // lambda = -alpha * (wx + wy + sdCosWZ[z]) + gamma; L00 = lambda - beta * sdCosWX[x]; L11 = lambda - beta * sdCosWY[y]; L22 = lambda - beta * sdCosWZ[z]; L10 = beta * sdSinWX[x] * sdSinWY[y]; L20 = beta * sdSinWX[x] * sdSinWZ[z]; L21 = beta * sdSinWY[y] * sdSinWZ[z]; if(inverseOp){ InverseOperatorMultiply<T>(bX[index], bY[index], bZ[index], L00, L10, L11, L20, L21, L22); }else{ OperatorMultiply<T>(bX[index], bY[index], bZ[index], L00, L10, L11, L20, L21, L22); } if(incomp){ if (index > 0) // do nothing with zero component ProjectIncomp<T>(bX[index], bY[index], bZ[index], sdSinWX[x], sdSinWY[y], sdSinWZ[z]); } } } } // ================================================================ // End Device Code // ================================================================ template<class T> void UploadFFTLookupTable3D (const T* cosWX, const T* cosWY, const T* cosWZ, const T* sinWX, const T* sinWY, const T* sinWZ, size_t sz_x, size_t sz_y, size_t sz_z) { cudaMemcpyToSymbol(sdCosWX, cosWX, sz_x * sizeof(T)); cudaMemcpyToSymbol(sdSinWX, sinWX, sz_x * sizeof(T)); cudaMemcpyToSymbol(sdCosWY, cosWY, sz_y * sizeof(T)); cudaMemcpyToSymbol(sdSinWY, sinWY, sz_y * sizeof(T)); cudaMemcpyToSymbol(sdCosWZ, cosWZ, sz_z * sizeof(T)); cudaMemcpyToSymbol(sdSinWZ, sinWZ, sz_z * sizeof(T)); } template<class T> void frequencyDomainApply(ComplexT<T>* fftX, ComplexT<T>* fftY, ComplexT<T>* fftZ, float alpha, float beta, float gamma, int cSz_x, int cSz_y, int cSz_z, bool inverseOp, bool divergenceFree, StreamT stream) { dim3 
threads(16,16); dim3 grids(iDivUp(cSz_x, threads.x), iDivUp(cSz_y, threads.y)); if (inverseOp == true) { if (divergenceFree == true) { fullNavierStokesSolver3D_C3_kernel<true, true, T> <<<grids, threads, 0, stream>>> (fftX, fftY, fftZ, alpha, beta, gamma, cSz_x, cSz_y, cSz_z); } else { fullNavierStokesSolver3D_C3_kernel<true, false, T> <<<grids, threads, 0, stream>>> (fftX, fftY, fftZ, alpha, beta, gamma, cSz_x, cSz_y, cSz_z); } } else { if (divergenceFree == true) { fullNavierStokesSolver3D_C3_kernel<false, true, T> <<<grids, threads, 0, stream>>> (fftX, fftY, fftZ, alpha, beta, gamma, cSz_x, cSz_y, cSz_z); } else { fullNavierStokesSolver3D_C3_kernel<false, false, T> <<<grids, threads, 0, stream>>> (fftX, fftY, fftZ, alpha, beta, gamma, cSz_x, cSz_y, cSz_z); } } } // template instantiation template void UploadFFTLookupTable3D<float> (const float*, const float*, const float*, const float*, const float*, const float*, size_t, size_t, size_t); template void frequencyDomainApply<float> (ComplexT<float>*, ComplexT<float>*, ComplexT<float>*, float alpha, float beta, float gamma, int cSz_x, int cSz_y, int cSz_z, bool inverseOp, bool divergenceFree, StreamT stream); } // end namespace PyCA
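The comment block in InverseOperatorMultiply describes a Cholesky factorization GG' = A followed by a forward and a backward triangular solve. Below is a minimal host-side sketch of that exact sequence, usable as a sanity check; it is standalone test code, not part of PyCA, and the matrix entries are arbitrary SPD values.

#include <cstdio>
#include <cmath>

int main() {
    // Lower triangle of an arbitrary symmetric positive-definite 3x3 matrix,
    // named like the kernel's operator entries.
    double L00 = 4.0, L10 = 1.0, L11 = 3.0, L20 = 0.5, L21 = 0.25, L22 = 2.0;
    double b0 = 1.0, b1 = 2.0, b2 = 3.0;

    // Cholesky factorization G*G^T = A, same operation order as the kernel.
    double G00 = std::sqrt(L00);
    double G10 = L10 / G00;
    double G20 = L20 / G00;
    double G11 = std::sqrt(L11 - G10 * G10);
    double G21 = (L21 - G20 * G10) / G11;
    double G22 = std::sqrt(L22 - (G20 * G20 + G21 * G21));

    // Forward solve G*y = b, then back solve G^T*x = y.
    double y0 = b0 / G00;
    double y1 = (b1 - G10 * y0) / G11;
    double y2 = (b2 - G20 * y0 - G21 * y1) / G22;
    double x2 = y2 / G22;
    double x1 = (y1 - G21 * x2) / G11;
    double x0 = (y0 - G10 * x1 - G20 * x2) / G00;

    // A*x should reproduce b.
    std::printf("Ax = (%f, %f, %f) vs b = (%f, %f, %f)\n",
                L00 * x0 + L10 * x1 + L20 * x2,
                L10 * x0 + L11 * x1 + L21 * x2,
                L20 * x0 + L21 * x1 + L22 * x2,
                b0, b1, b2);
    return 0;
}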
c2b18e1d8b580fc9f4ad3991abc61151aecbf799.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>

int main()
{
    int deviceCount;
    hipDeviceProp_t devProp;

    hipGetDeviceCount(&deviceCount);
    printf("Found %d devices\n", deviceCount);

    for(int device = 0; device < deviceCount; device++)
    {
        hipGetDeviceProperties(&devProp, device);
        printf("Device %d\n", device);
        printf("Compute capability : %d.%d\n", devProp.major, devProp.minor);
        printf("Name : %s\n", devProp.name);
        printf("Total Global Memory : %zu\n", devProp.totalGlobalMem);
        printf("Shared memory per block : %zu\n", devProp.sharedMemPerBlock);
        printf("Registers per block : %d\n", devProp.regsPerBlock);
        printf("Warp size : %d\n", devProp.warpSize);
        printf("Max threads per block : (%d, %d, %d)\n", devProp.maxThreadsDim[0], devProp.maxThreadsDim[1], devProp.maxThreadsDim[2]);
        printf("Max block : (%d, %d, %d)\n", devProp.maxGridSize[0], devProp.maxGridSize[1], devProp.maxGridSize[2]);
        printf("Total constant memory : %zu\n", devProp.totalConstMem);
        printf("Multiprocessors count : %d\n", devProp.multiProcessorCount);
    }
    return 0;
}
c2b18e1d8b580fc9f4ad3991abc61151aecbf799.cu
#include <stdio.h>

int main()
{
    int deviceCount;
    cudaDeviceProp devProp;

    cudaGetDeviceCount(&deviceCount);
    printf("Found %d devices\n", deviceCount);

    for(int device = 0; device < deviceCount; device++)
    {
        cudaGetDeviceProperties(&devProp, device);
        printf("Device %d\n", device);
        printf("Compute capability : %d.%d\n", devProp.major, devProp.minor);
        printf("Name : %s\n", devProp.name);
        printf("Total Global Memory : %zu\n", devProp.totalGlobalMem);
        printf("Shared memory per block : %zu\n", devProp.sharedMemPerBlock);
        printf("Registers per block : %d\n", devProp.regsPerBlock);
        printf("Warp size : %d\n", devProp.warpSize);
        printf("Max threads per block : (%d, %d, %d)\n", devProp.maxThreadsDim[0], devProp.maxThreadsDim[1], devProp.maxThreadsDim[2]);
        printf("Max block : (%d, %d, %d)\n", devProp.maxGridSize[0], devProp.maxGridSize[1], devProp.maxGridSize[2]);
        printf("Total constant memory : %zu\n", devProp.totalConstMem);
        printf("Multiprocessors count : %d\n", devProp.multiProcessorCount);
    }
    return 0;
}
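Both versions of this query program discard the return codes of the runtime API calls. A hedged variant with a minimal error-checking wrapper follows; the CHECK_CUDA macro and the condensed printout are illustrative additions, not part of the original file.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CHECK_CUDA(call)                                              \
    do {                                                              \
        cudaError_t err_ = (call);                                    \
        if (err_ != cudaSuccess) {                                    \
            fprintf(stderr, "CUDA error %s at %s:%d\n",               \
                    cudaGetErrorString(err_), __FILE__, __LINE__);    \
            exit(EXIT_FAILURE);                                       \
        }                                                             \
    } while (0)

int main() {
    int deviceCount = 0;
    CHECK_CUDA(cudaGetDeviceCount(&deviceCount));
    printf("Found %d devices\n", deviceCount);
    for (int device = 0; device < deviceCount; device++) {
        cudaDeviceProp devProp;
        CHECK_CUDA(cudaGetDeviceProperties(&devProp, device));
        printf("Device %d: %s (compute %d.%d)\n",
               device, devProp.name, devProp.major, devProp.minor);
    }
    return 0;
}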
7628d9669370716fecf1a6f625f742e400e1642c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <cutil_inline.h> #include "GpuMathKernel.cu" static const int max_thread = 2; static const int max_block = 3; void getDimension(int numElement, unsigned int *blockDimx, unsigned int *gridDimx, unsigned int *gridDimy) { int numthread = numElement>>2; if (*blockDimx > max_thread) { *blockDimx = max_thread; *gridDimx = (numthread-1)/max_thread + 1; if (*gridDimx > max_block) { *gridDimy = (*gridDimx - 1)/max_block + 1; *gridDimx = max_block; } } } extern "C" void omgSin(int numElement, const float *bufferin, float *bufferout) { dim3 threads(1,1,1); dim3 grid(1,1,1); getDimension(numElement, &(threads.x), &(grid.x), &(grid.y)); // printf("numthreads(%d) grid.x(%d), grid.y(%d), threads(%d)\n", numthread, grid.x, grid.y, threads.x); hipLaunchKernelGGL(( OmgSin_kernel), dim3(grid), dim3(threads), 0, 0, numElement, bufferin,bufferout); } extern "C" void omgCos(int numElement, const float *bufferin, float *bufferout) { dim3 threads(1,1,1); dim3 grid(1,1,1); getDimension(numElement, &(threads.x), &(grid.x), &(grid.y)); hipLaunchKernelGGL(( OmgCos_kernel), dim3(grid), dim3(threads), 0, 0, numElement, bufferin,bufferout); } extern "C" void omgTan(int numElement, const float *bufferin, float *bufferout) { dim3 threads(1,1,1); dim3 grid(1,1,1); getDimension(numElement, &(threads.x), &(grid.x), &(grid.y)); hipLaunchKernelGGL(( OmgTan_kernel), dim3(grid), dim3(threads), 0, 0, numElement, bufferin,bufferout); } extern "C" void omgCot(int numElement, const float *bufferin, float *bufferout) { dim3 threads(1,1,1); dim3 grid(1,1,1); getDimension(numElement, &(threads.x), &(grid.x), &(grid.y)); hipLaunchKernelGGL(( OmgCot_kernel), dim3(grid), dim3(threads), 0, 0, numElement, bufferin,bufferout); } extern "C" void omgLog(int numElement, const float *bufferin, float *bufferout) { dim3 threads(1,1,1); dim3 grid(1,1,1); getDimension(numElement, &(threads.x), &(grid.x), &(grid.y)); hipLaunchKernelGGL(( OmgLog_kernel), dim3(grid), dim3(threads), 0, 0, numElement, bufferin,bufferout); } extern "C" void omgLog2(int numElement, const float *bufferin, float *bufferout) { dim3 threads(1,1,1); dim3 grid(1,1,1); getDimension(numElement, &(threads.x), &(grid.x), &(grid.y)); hipLaunchKernelGGL(( OmgLog2_kernel), dim3(grid), dim3(threads), 0, 0, numElement, bufferin,bufferout); } extern "C" void omgLog10(int numElement, const float *bufferin, float *bufferout) { dim3 threads(1,1,1); dim3 grid(1,1,1); getDimension(numElement, &(threads.x), &(grid.x), &(grid.y)); hipLaunchKernelGGL(( OmgLog10_kernel), dim3(grid), dim3(threads), 0, 0, numElement, bufferin,bufferout); } extern "C" void omgExp(int numElement, const float *bufferin, float *bufferout) { dim3 threads(1,1,1); dim3 grid(1,1,1); getDimension(numElement, &(threads.x), &(grid.x), &(grid.y)); hipLaunchKernelGGL(( OmgExp_kernel), dim3(grid), dim3(threads), 0, 0, numElement, bufferin,bufferout); } extern "C" void omgAbs(int numElement, const float *bufferin, float *bufferout) { dim3 threads(1,1,1); dim3 grid(1,1,1); getDimension(numElement, &(threads.x), &(grid.x), &(grid.y)); hipLaunchKernelGGL(( OmgAbs_kernel), dim3(grid), dim3(threads), 0, 0, numElement, bufferin,bufferout); } extern "C" void omgFix(int numElement, const float *bufferin, float *bufferout) { dim3 threads(1,1,1); dim3 grid(1,1,1); getDimension(numElement, &(threads.x), &(grid.x), &(grid.y)); hipLaunchKernelGGL(( 
OmgFix_kernel), dim3(grid), dim3(threads), 0, 0, numElement, bufferin,bufferout); } extern "C" void omgCeil(int numElement, const float *bufferin, float *bufferout) { dim3 threads(1,1,1); dim3 grid(1,1,1); getDimension(numElement, &(threads.x), &(grid.x), &(grid.y)); hipLaunchKernelGGL(( OmgCeil_kernel), dim3(grid), dim3(threads), 0, 0, numElement, bufferin,bufferout); } extern "C" void omgFloor(int numElement, const float *bufferin, float *bufferout) { dim3 threads(1,1,1); dim3 grid(1,1,1); getDimension(numElement, &(threads.x), &(grid.x), &(grid.y)); hipLaunchKernelGGL(( OmgFloor_kernel), dim3(grid), dim3(threads), 0, 0, numElement, bufferin,bufferout); } extern "C" void omgRound(int numElement, const float *bufferin, float *bufferout) { dim3 threads(1,1,1); dim3 grid(1,1,1); getDimension(numElement, &(threads.x), &(grid.x), &(grid.y)); hipLaunchKernelGGL(( OmgRound_kernel), dim3(grid), dim3(threads), 0, 0, numElement, bufferin,bufferout); }
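One thing worth noting about getDimension() above: every wrapper passes threads(1,1,1), and since *blockDimx (== 1) never exceeds max_thread (== 2), the scaling branch is skipped and each kernel launches with a single block of one thread. The standalone sketch below (my own test harness, not from the file) seeds the block size with a realistic value instead, to show the arithmetic the helper performs when the branch does fire.

#include <cstdio>

static const int max_thread = 2;   // same debug-sized limits as the file above
static const int max_block  = 3;

// Same body as getDimension() above, reproduced here so the sketch is self-contained.
void getDimensionSketch(int numElement, unsigned int *blockDimx,
                        unsigned int *gridDimx, unsigned int *gridDimy) {
    int numthread = numElement >> 2;                    // four elements per thread
    if (*blockDimx > (unsigned int)max_thread) {
        *blockDimx = max_thread;
        *gridDimx  = (numthread - 1) / max_thread + 1;
        if (*gridDimx > (unsigned int)max_block) {
            *gridDimy = (*gridDimx - 1) / max_block + 1;
            *gridDimx = max_block;
        }
    }
}

int main() {
    unsigned int bx = 256, gx = 1, gy = 1;              // seed with a real block size
    getDimensionSketch(1024, &bx, &gx, &gy);
    // numthread = 256 -> gridDim.x would be 128 > max_block, so it is folded
    // into gridDim.y: blockDim.x = 2, gridDim.x = 3, gridDim.y = 43.
    printf("blockDim.x=%u gridDim.x=%u gridDim.y=%u\n", bx, gx, gy);
    return 0;
}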
7628d9669370716fecf1a6f625f742e400e1642c.cu
// includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <cutil_inline.h> #include "GpuMathKernel.cu" static const int max_thread = 2; static const int max_block = 3; void getDimension(int numElement, unsigned int *blockDimx, unsigned int *gridDimx, unsigned int *gridDimy) { int numthread = numElement>>2; if (*blockDimx > max_thread) { *blockDimx = max_thread; *gridDimx = (numthread-1)/max_thread + 1; if (*gridDimx > max_block) { *gridDimy = (*gridDimx - 1)/max_block + 1; *gridDimx = max_block; } } } extern "C" void omgSin(int numElement, const float *bufferin, float *bufferout) { dim3 threads(1,1,1); dim3 grid(1,1,1); getDimension(numElement, &(threads.x), &(grid.x), &(grid.y)); // printf("numthreads(%d) grid.x(%d), grid.y(%d), threads(%d)\n", numthread, grid.x, grid.y, threads.x); OmgSin_kernel<<<grid, threads>>>(numElement, bufferin,bufferout); } extern "C" void omgCos(int numElement, const float *bufferin, float *bufferout) { dim3 threads(1,1,1); dim3 grid(1,1,1); getDimension(numElement, &(threads.x), &(grid.x), &(grid.y)); OmgCos_kernel<<<grid, threads>>>(numElement, bufferin,bufferout); } extern "C" void omgTan(int numElement, const float *bufferin, float *bufferout) { dim3 threads(1,1,1); dim3 grid(1,1,1); getDimension(numElement, &(threads.x), &(grid.x), &(grid.y)); OmgTan_kernel<<<grid, threads>>>(numElement, bufferin,bufferout); } extern "C" void omgCot(int numElement, const float *bufferin, float *bufferout) { dim3 threads(1,1,1); dim3 grid(1,1,1); getDimension(numElement, &(threads.x), &(grid.x), &(grid.y)); OmgCot_kernel<<<grid, threads>>>(numElement, bufferin,bufferout); } extern "C" void omgLog(int numElement, const float *bufferin, float *bufferout) { dim3 threads(1,1,1); dim3 grid(1,1,1); getDimension(numElement, &(threads.x), &(grid.x), &(grid.y)); OmgLog_kernel<<<grid, threads>>>(numElement, bufferin,bufferout); } extern "C" void omgLog2(int numElement, const float *bufferin, float *bufferout) { dim3 threads(1,1,1); dim3 grid(1,1,1); getDimension(numElement, &(threads.x), &(grid.x), &(grid.y)); OmgLog2_kernel<<<grid, threads>>>(numElement, bufferin,bufferout); } extern "C" void omgLog10(int numElement, const float *bufferin, float *bufferout) { dim3 threads(1,1,1); dim3 grid(1,1,1); getDimension(numElement, &(threads.x), &(grid.x), &(grid.y)); OmgLog10_kernel<<<grid, threads>>>(numElement, bufferin,bufferout); } extern "C" void omgExp(int numElement, const float *bufferin, float *bufferout) { dim3 threads(1,1,1); dim3 grid(1,1,1); getDimension(numElement, &(threads.x), &(grid.x), &(grid.y)); OmgExp_kernel<<<grid, threads>>>(numElement, bufferin,bufferout); } extern "C" void omgAbs(int numElement, const float *bufferin, float *bufferout) { dim3 threads(1,1,1); dim3 grid(1,1,1); getDimension(numElement, &(threads.x), &(grid.x), &(grid.y)); OmgAbs_kernel<<<grid, threads>>>(numElement, bufferin,bufferout); } extern "C" void omgFix(int numElement, const float *bufferin, float *bufferout) { dim3 threads(1,1,1); dim3 grid(1,1,1); getDimension(numElement, &(threads.x), &(grid.x), &(grid.y)); OmgFix_kernel<<<grid, threads>>>(numElement, bufferin,bufferout); } extern "C" void omgCeil(int numElement, const float *bufferin, float *bufferout) { dim3 threads(1,1,1); dim3 grid(1,1,1); getDimension(numElement, &(threads.x), &(grid.x), &(grid.y)); OmgCeil_kernel<<<grid, threads>>>(numElement, bufferin,bufferout); } extern "C" void omgFloor(int numElement, const float *bufferin, float *bufferout) { dim3 threads(1,1,1); dim3 grid(1,1,1); 
getDimension(numElement, &(threads.x), &(grid.x), &(grid.y)); OmgFloor_kernel<<<grid, threads>>>(numElement, bufferin,bufferout); } extern "C" void omgRound(int numElement, const float *bufferin, float *bufferout) { dim3 threads(1,1,1); dim3 grid(1,1,1); getDimension(numElement, &(threads.x), &(grid.x), &(grid.y)); OmgRound_kernel<<<grid, threads>>>(numElement, bufferin,bufferout); }
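A hedged usage sketch for these wrappers: it assumes, as the in-file launches suggest, that bufferin and bufferout are device pointers, and it has to be linked against the translation unit above; the sizes and printed element are illustrative only.

#include <cstdio>
#include <cuda_runtime.h>

extern "C" void omgSin(int numElement, const float *bufferin, float *bufferout);

int main() {
    const int n = 1024;
    float h_in[1024], h_out[1024];
    for (int i = 0; i < n; ++i) h_in[i] = 0.001f * i;

    float *d_in = nullptr, *d_out = nullptr;
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_out, n * sizeof(float));
    cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);

    omgSin(n, d_in, d_out);              // wrapper defined in the file above
    cudaDeviceSynchronize();             // wait for the asynchronous launch

    cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);
    printf("out[1] = %f\n", h_out[1]);

    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}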
2548d91db7c920aee9b8ee10397d81856bae6334.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // #ifndef TF32_LAUNCHER_CU #define TF32_LAUNCHER_CU #include "../cuda_kernels/dot_based_interact_tf32.cu" #include "../launchers/dot_based_interact_fp32_launcher.cu" inline void dotBasedInteractTF32Fwd(const void *input, const void *bottom_mlp_output, void *output, uint batch_size, uint num_rows, uint num_cols, hipStream_t stream) { const uint kWarpSize = 32; const uint kWarpSizeLog2 = Log2<kWarpSize>::value; const uint kTileLength = 16; const uint kTileLengthLog2 = Log2<kTileLength>::value; const uint kTileWidth = 8; const uint kTileWidthLog2 = Log2<kTileWidth>::value; const uint kWarpsPerBlock = 2; const uint kThreadBlockSize = kWarpsPerBlock * kWarpSize; const uint kRowTilesPerStep = 2; const uint kColTilesPerStep = 1; const uint kSkewFloat = 4; // Ensures we are 16 byte align as required by nvcuda::wmma::load_matrix_sync // num tiles uint mat_a_num_row_tiles = (num_rows + kTileLength - 1) >> kTileLengthLog2; uint mat_a_num_col_tiles = (num_cols + kTileWidth - 1) >> kTileWidthLog2; // const uint &mat_b_num_row_tiles = mat_a_num_col_tiles; // const uint &mat_b_num_col_tiles = mat_a_num_row_tiles; // number of rows and columns after padding uint num_rows_after_padding = mat_a_num_row_tiles << kTileLengthLog2; uint num_cols_after_padding = mat_a_num_col_tiles << kTileWidthLog2; uint num_row_steps = mat_a_num_row_tiles / kRowTilesPerStep; uint num_col_steps = mat_a_num_col_tiles / kColTilesPerStep; const uint smem_stride = num_cols_after_padding + kSkewFloat; const uint smem_elems_per_warp_mat = num_rows_after_padding * smem_stride; const uint smem_stride_acc = num_rows_after_padding + kSkewFloat; const uint smem_elems_per_warp_acc = num_rows_after_padding * smem_stride_acc; const uint smem_elems_per_warp = smem_elems_per_warp_mat > smem_elems_per_warp_acc ? 
smem_elems_per_warp_mat : smem_elems_per_warp_acc; uint raw_output_size = num_cols + ((num_rows * (num_rows - 1)) >> 1); uint output_size = ((raw_output_size-1)/8 + 1)*8; //round up to multiple of 8 uint padding_size = output_size-raw_output_size; bool float4_predicate = !((num_cols & 7) || (output_size & 7)); if (float4_predicate) { hipLaunchKernelGGL(( dotBasedInteractTF32FwdKernel<kWarpsPerBlock, kThreadBlockSize, kWarpSize, kWarpSizeLog2, kTileLength, kTileLengthLog2, kTileWidth, kTileWidthLog2, kRowTilesPerStep>) , dim3((batch_size + kWarpsPerBlock - 1) / kWarpsPerBlock), dim3(kThreadBlockSize), kWarpsPerBlock * smem_elems_per_warp * sizeof(float), stream, (const float *)input, (float *)output, batch_size, num_rows, num_cols, num_rows_after_padding, num_cols_after_padding, smem_elems_per_warp, output_size, num_row_steps, num_col_steps, smem_stride, smem_stride_acc, padding_size); } else { // GENERIC VERSION IS UNFINISHED: Use FP32 instead for now dotBasedInteractFP32Fwd(input, bottom_mlp_output, output, batch_size, num_rows, num_cols, stream); } } inline void dotBasedInteractTF32Bwd(const void *input, const void *upstream_grad, void *grad, void *bottom_mlp_grad, uint batch_size, uint num_rows, uint num_cols, hipStream_t stream) { // Fragment Settings const uint kFragARows = 2; const uint kFragBCols = 2; const uint kTileLength = 16; const uint kTileLengthLog2 = Log2<kTileLength>::value; const uint kTileWidth = 8; const uint kTileWidthLog2 = Log2<kTileWidth>::value; const uint kWarpSize = 32; const uint kWarpSizeLog2 = Log2<kWarpSize>::value; const uint kSkewFloat = 4; const uint kWarpsPerBlock = 1; const uint kWarpsPerBlockLog2 = Log2<kWarpsPerBlock>::value; const uint kNumThreads = kWarpsPerBlock * kWarpSize; // num tiles uint mat_a_num_row_tiles = (num_rows + kTileLength - 1) >> kTileLengthLog2; uint mat_a_num_col_tiles = (num_rows + kTileWidth - 1) >> kTileWidthLog2; // const uint &mat_b_num_row_tiles = mat_a_num_col_tiles; uint mat_b_num_col_tiles = (num_cols + kTileLength - 1) >> kTileLengthLog2; // number of rows and columns after padding uint num_rows_after_padding = mat_a_num_row_tiles << kTileLengthLog2; uint num_cols_after_padding = mat_b_num_col_tiles << kTileLengthLog2; // 2D ugrad size and stride uint interaction_ugrad_2D_stride = num_rows_after_padding + kSkewFloat; uint interaction_ugrad_2D_size_elems = num_rows_after_padding * interaction_ugrad_2D_stride; // 1D ugrad size uint interaction_ugrad_size = num_rows * (num_rows - 1) >> 1; uint interaction_ugrad_size_with_padding = ((interaction_ugrad_size-1)/8 + 1)*8; // in_out place size and stride uint input_stride = num_cols_after_padding + kSkewFloat; uint input_size_elems = num_rows_after_padding * input_stride; // sample size uint sample_size = num_rows * num_cols; // output size uint output_size_elems = kTileLength * kTileLength * kFragARows * kFragBCols; // Shared memory size uint shared_mem_per_warp_size_elems = interaction_ugrad_2D_size_elems + input_size_elems + output_size_elems; uint shared_mem_size_elems = kWarpsPerBlock * shared_mem_per_warp_size_elems; uint shared_mem_size_bytes = shared_mem_size_elems * sizeof(float); uint num_blocks = (batch_size + kWarpsPerBlock - 1) >> kWarpsPerBlockLog2; uint num_k_steps = mat_a_num_col_tiles; uint num_n_steps = mat_b_num_col_tiles / kFragBCols; bool float4_predicate = !((interaction_ugrad_size_with_padding & 7) || (num_cols & 7)); if (float4_predicate) { hipLaunchKernelGGL(( dotBasedInteractTF32BwdKernel<kWarpsPerBlock, kNumThreads, kWarpSize, kWarpSizeLog2, kFragARows, 
kFragBCols, kTileLength, kTileLengthLog2, kTileWidth, kTileWidthLog2>) , dim3(num_blocks), dim3(kNumThreads), shared_mem_size_bytes, stream, (const float *)input, (const float *)upstream_grad, (float *)grad, (float *)bottom_mlp_grad, batch_size, num_rows, num_cols, num_rows_after_padding, num_cols_after_padding, sample_size, interaction_ugrad_size, interaction_ugrad_size_with_padding, interaction_ugrad_2D_size_elems, interaction_ugrad_2D_stride, input_size_elems, input_stride, shared_mem_per_warp_size_elems, num_k_steps, num_n_steps); } else { // GENERIC VERSION IS UNFINISHED: Use FP32 instead for now dotBasedInteractFP32Bwd(input, upstream_grad, grad, bottom_mlp_grad, batch_size, num_rows, num_cols, stream); } } #endif /* TF32_LAUNCHER_CU */
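A worked example of the forward launcher's output padding, with hypothetical DLRM-like sizes (the numbers are mine, not from the file). The raw interaction output is num_cols plus the strictly lower triangle of the num_rows x num_rows interaction matrix, rounded up to a multiple of 8 floats so the float4 kernel path can be taken.

#include <cstdio>
typedef unsigned int uint;

int main() {
    uint num_rows = 27, num_cols = 128;   // illustrative embedding count / width
    uint raw_output_size = num_cols + ((num_rows * (num_rows - 1)) >> 1);   // 128 + 351 = 479
    uint output_size  = ((raw_output_size - 1) / 8 + 1) * 8;                // rounded up to 480
    uint padding_size = output_size - raw_output_size;                      // 1 padding float
    bool float4_predicate = !((num_cols & 7) || (output_size & 7));         // true -> TF32 kernel path
    printf("raw=%u padded=%u pad=%u float4=%d\n",
           raw_output_size, output_size, padding_size, (int)float4_predicate);
    return 0;
}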
2548d91db7c920aee9b8ee10397d81856bae6334.cu
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // #ifndef TF32_LAUNCHER_CU #define TF32_LAUNCHER_CU #include "../cuda_kernels/dot_based_interact_tf32.cu" #include "../launchers/dot_based_interact_fp32_launcher.cu" inline void dotBasedInteractTF32Fwd(const void *input, const void *bottom_mlp_output, void *output, uint batch_size, uint num_rows, uint num_cols, cudaStream_t stream) { const uint kWarpSize = 32; const uint kWarpSizeLog2 = Log2<kWarpSize>::value; const uint kTileLength = 16; const uint kTileLengthLog2 = Log2<kTileLength>::value; const uint kTileWidth = 8; const uint kTileWidthLog2 = Log2<kTileWidth>::value; const uint kWarpsPerBlock = 2; const uint kThreadBlockSize = kWarpsPerBlock * kWarpSize; const uint kRowTilesPerStep = 2; const uint kColTilesPerStep = 1; const uint kSkewFloat = 4; // Ensures we are 16 byte align as required by nvcuda::wmma::load_matrix_sync // num tiles uint mat_a_num_row_tiles = (num_rows + kTileLength - 1) >> kTileLengthLog2; uint mat_a_num_col_tiles = (num_cols + kTileWidth - 1) >> kTileWidthLog2; // const uint &mat_b_num_row_tiles = mat_a_num_col_tiles; // const uint &mat_b_num_col_tiles = mat_a_num_row_tiles; // number of rows and columns after padding uint num_rows_after_padding = mat_a_num_row_tiles << kTileLengthLog2; uint num_cols_after_padding = mat_a_num_col_tiles << kTileWidthLog2; uint num_row_steps = mat_a_num_row_tiles / kRowTilesPerStep; uint num_col_steps = mat_a_num_col_tiles / kColTilesPerStep; const uint smem_stride = num_cols_after_padding + kSkewFloat; const uint smem_elems_per_warp_mat = num_rows_after_padding * smem_stride; const uint smem_stride_acc = num_rows_after_padding + kSkewFloat; const uint smem_elems_per_warp_acc = num_rows_after_padding * smem_stride_acc; const uint smem_elems_per_warp = smem_elems_per_warp_mat > smem_elems_per_warp_acc ? 
smem_elems_per_warp_mat : smem_elems_per_warp_acc; uint raw_output_size = num_cols + ((num_rows * (num_rows - 1)) >> 1); uint output_size = ((raw_output_size-1)/8 + 1)*8; //round up to multiple of 8 uint padding_size = output_size-raw_output_size; bool float4_predicate = !((num_cols & 7) || (output_size & 7)); if (float4_predicate) { dotBasedInteractTF32FwdKernel<kWarpsPerBlock, kThreadBlockSize, kWarpSize, kWarpSizeLog2, kTileLength, kTileLengthLog2, kTileWidth, kTileWidthLog2, kRowTilesPerStep> <<<(batch_size + kWarpsPerBlock - 1) / kWarpsPerBlock, kThreadBlockSize, kWarpsPerBlock * smem_elems_per_warp * sizeof(float), stream>>>((const float *)input, (float *)output, batch_size, num_rows, num_cols, num_rows_after_padding, num_cols_after_padding, smem_elems_per_warp, output_size, num_row_steps, num_col_steps, smem_stride, smem_stride_acc, padding_size); } else { // GENERIC VERSION IS UNFINISHED: Use FP32 instead for now dotBasedInteractFP32Fwd(input, bottom_mlp_output, output, batch_size, num_rows, num_cols, stream); } } inline void dotBasedInteractTF32Bwd(const void *input, const void *upstream_grad, void *grad, void *bottom_mlp_grad, uint batch_size, uint num_rows, uint num_cols, cudaStream_t stream) { // Fragment Settings const uint kFragARows = 2; const uint kFragBCols = 2; const uint kTileLength = 16; const uint kTileLengthLog2 = Log2<kTileLength>::value; const uint kTileWidth = 8; const uint kTileWidthLog2 = Log2<kTileWidth>::value; const uint kWarpSize = 32; const uint kWarpSizeLog2 = Log2<kWarpSize>::value; const uint kSkewFloat = 4; const uint kWarpsPerBlock = 1; const uint kWarpsPerBlockLog2 = Log2<kWarpsPerBlock>::value; const uint kNumThreads = kWarpsPerBlock * kWarpSize; // num tiles uint mat_a_num_row_tiles = (num_rows + kTileLength - 1) >> kTileLengthLog2; uint mat_a_num_col_tiles = (num_rows + kTileWidth - 1) >> kTileWidthLog2; // const uint &mat_b_num_row_tiles = mat_a_num_col_tiles; uint mat_b_num_col_tiles = (num_cols + kTileLength - 1) >> kTileLengthLog2; // number of rows and columns after padding uint num_rows_after_padding = mat_a_num_row_tiles << kTileLengthLog2; uint num_cols_after_padding = mat_b_num_col_tiles << kTileLengthLog2; // 2D ugrad size and stride uint interaction_ugrad_2D_stride = num_rows_after_padding + kSkewFloat; uint interaction_ugrad_2D_size_elems = num_rows_after_padding * interaction_ugrad_2D_stride; // 1D ugrad size uint interaction_ugrad_size = num_rows * (num_rows - 1) >> 1; uint interaction_ugrad_size_with_padding = ((interaction_ugrad_size-1)/8 + 1)*8; // in_out place size and stride uint input_stride = num_cols_after_padding + kSkewFloat; uint input_size_elems = num_rows_after_padding * input_stride; // sample size uint sample_size = num_rows * num_cols; // output size uint output_size_elems = kTileLength * kTileLength * kFragARows * kFragBCols; // Shared memory size uint shared_mem_per_warp_size_elems = interaction_ugrad_2D_size_elems + input_size_elems + output_size_elems; uint shared_mem_size_elems = kWarpsPerBlock * shared_mem_per_warp_size_elems; uint shared_mem_size_bytes = shared_mem_size_elems * sizeof(float); uint num_blocks = (batch_size + kWarpsPerBlock - 1) >> kWarpsPerBlockLog2; uint num_k_steps = mat_a_num_col_tiles; uint num_n_steps = mat_b_num_col_tiles / kFragBCols; bool float4_predicate = !((interaction_ugrad_size_with_padding & 7) || (num_cols & 7)); if (float4_predicate) { dotBasedInteractTF32BwdKernel<kWarpsPerBlock, kNumThreads, kWarpSize, kWarpSizeLog2, kFragARows, kFragBCols, kTileLength, kTileLengthLog2, 
kTileWidth, kTileWidthLog2> <<<num_blocks, kNumThreads, shared_mem_size_bytes, stream>>>((const float *)input, (const float *)upstream_grad, (float *)grad, (float *)bottom_mlp_grad, batch_size, num_rows, num_cols, num_rows_after_padding, num_cols_after_padding, sample_size, interaction_ugrad_size, interaction_ugrad_size_with_padding, interaction_ugrad_2D_size_elems, interaction_ugrad_2D_stride, input_size_elems, input_stride, shared_mem_per_warp_size_elems, num_k_steps, num_n_steps); } else { // GENERIC VERSION IS UNFINISHED: Use FP32 instead for now dotBasedInteractFP32Bwd(input, upstream_grad, grad, bottom_mlp_grad, batch_size, num_rows, num_cols, stream); } } #endif /* TF32_LAUNCHER_CU */
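And a companion worked example for the backward launcher's dynamic shared memory request, using the same hypothetical sizes (again my numbers, not from the file).

#include <cstdio>
typedef unsigned int uint;

int main() {
    uint num_rows = 27, num_cols = 128;                       // illustrative shape
    const uint kTileLength = 16, kSkewFloat = 4;
    const uint kFragARows = 2, kFragBCols = 2, kWarpsPerBlock = 1;

    uint mat_a_num_row_tiles = (num_rows + kTileLength - 1) / kTileLength;      // 2
    uint mat_b_num_col_tiles = (num_cols + kTileLength - 1) / kTileLength;      // 8
    uint num_rows_after_padding = mat_a_num_row_tiles * kTileLength;            // 32
    uint num_cols_after_padding = mat_b_num_col_tiles * kTileLength;            // 128

    uint ugrad_2D_elems = num_rows_after_padding * (num_rows_after_padding + kSkewFloat); // 32*36 = 1152
    uint input_elems    = num_rows_after_padding * (num_cols_after_padding + kSkewFloat); // 32*132 = 4224
    uint output_elems   = kTileLength * kTileLength * kFragARows * kFragBCols;            // 1024

    uint bytes = (uint)(kWarpsPerBlock * (ugrad_2D_elems + input_elems + output_elems) * sizeof(float));
    printf("dynamic shared memory per block: %u bytes\n", bytes);   // 25600, under the 48 KB default
    return 0;
}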
e6d9e8ae5911486d028e86c4e67bfabbb31ac470.hip
// !!! This is a file automatically generated by hipify!!! #include "dataset.cuh" DataSet::DataSet(std::string mnist_data_path, bool shuffle) : shuffle(shuffle), train_data_index(0), test_data_index(0) { // train data this->read_images(mnist_data_path + "/train-images-idx3-ubyte", this->train_data); this->read_labels(mnist_data_path + "/train-labels-idx1-ubyte", this->train_label); // test data this->read_images(mnist_data_path + "/t10k-images-idx3-ubyte", this->test_data); this->read_labels(mnist_data_path + "/t10k-labels-idx1-ubyte", this->test_label); } void DataSet::reset() { this->train_data_index = 0; this->test_data_index = 0; if (shuffle) { // keep random seed same unsigned int seed = std::chrono::system_clock::now().time_since_epoch().count() % 1234; std::shuffle(this->train_data.begin(), this->train_data.end(), std::default_random_engine(seed)); std::shuffle(this->train_label.begin(), this->train_label.end(), std::default_random_engine(seed)); } } void DataSet::forward(int batch_size, bool is_train) { if (is_train) { int start = this->train_data_index; int end = ::min(this->train_data_index + batch_size, (int)this->train_data.size()); this->train_data_index = end; int size = end - start; // init device memory std::vector<int> output_shape{size, 1, this->height, this->width}; std::vector<int> output_label_shape{size, 10}; INIT_STORAGE(this->output, output_shape); INIT_STORAGE(this->output_label, output_label_shape); thrust::fill(this->output_label->get_data().begin(), this->output_label->get_data().end(), 0); // copy to device memory int im_stride = 1 * this->height * this->width; int one_hot_stride = 10; thrust::host_vector< float, thrust::system::cuda::experimental::pinned_allocator<float>> train_data_buffer; train_data_buffer.reserve(size * im_stride); for (int i = start; i < end; i++) { train_data_buffer.insert(train_data_buffer.end(), this->train_data[i].begin(), this->train_data[i].end()); this->output_label ->get_data()[(i - start) * one_hot_stride + this->train_label[i]] = 1; } this->output->get_data() = train_data_buffer; } else { int start = this->test_data_index; int end = ::min(this->test_data_index + batch_size, (int)this->test_data.size()); this->test_data_index = end; int size = end - start; // init device memory std::vector<int> output_shape{size, 1, this->height, this->width}; std::vector<int> output_label_shape{size, 10}; INIT_STORAGE(this->output, output_shape); INIT_STORAGE(this->output_label, output_label_shape); thrust::fill(this->output_label->get_data().begin(), this->output_label->get_data().end(), 0); // copy to device memory int im_stride = 1 * this->height * this->width; int one_hot_stride = 10; thrust::host_vector< float, thrust::system::cuda::experimental::pinned_allocator<float>> test_data_buffer; test_data_buffer.reserve(size * im_stride); for (int i = start; i < end; i++) { test_data_buffer.insert(test_data_buffer.end(), this->test_data[i].begin(), this->test_data[i].end()); this->output_label ->get_data()[(i - start) * one_hot_stride + this->test_label[i]] = 1; } this->output->get_data() = test_data_buffer; } } bool DataSet::has_next(bool is_train) { if (is_train) { return this->train_data_index < this->train_data.size(); } else { return this->test_data_index < this->test_data.size(); } } void DataSet::print_im() { int size = this->output->get_shape()[0]; int im_stride = 1 * height * width; for (int k = 0; k < size; k++) { int max_pos = -1; float max_value = -FLT_MAX; for (int i = 0; i < 10; i++) { float val = this->output_label->get_data()[k * 10 
+ i]; if (val > max_value) { max_value = val; max_pos = i; } } std::cout << max_pos << std::endl; auto& data = this->output->get_data(); for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { std::cout << (data[k * im_stride + i * width + j] > 0 ? "* " : " "); } std::cout << std::endl; } } } unsigned int DataSet::reverse_int(unsigned int i) { unsigned char ch1, ch2, ch3, ch4; ch1 = i & 255; ch2 = (i >> 8) & 255; ch3 = (i >> 16) & 255; ch4 = (i >> 24) & 255; return ((unsigned int)ch1 << 24) + ((unsigned int)ch2 << 16) + ((unsigned int)ch3 << 8) + ch4; } void DataSet::read_images(std::string file_name, std::vector<std::vector<float>>& output) { std::ifstream file(file_name, std::ios::binary); if (file.is_open()) { unsigned int magic_number = 0; unsigned int number_of_images = 0; unsigned int n_rows = 0; unsigned int n_cols = 0; file.read((char*)&magic_number, sizeof(magic_number)); file.read((char*)&number_of_images, sizeof(number_of_images)); file.read((char*)&n_rows, sizeof(n_rows)); file.read((char*)&n_cols, sizeof(n_cols)); magic_number = this->reverse_int(magic_number); number_of_images = this->reverse_int(number_of_images); n_rows = this->reverse_int(n_rows); n_cols = this->reverse_int(n_cols); this->height = n_rows; this->width = n_cols; std::vector<unsigned char> image(n_rows * n_cols); std::vector<float> normalized_image(n_rows * n_cols); for (int i = 0; i < number_of_images; i++) { file.read((char*)&image[0], sizeof(unsigned char) * n_rows * n_cols); for (int i = 0; i < n_rows * n_cols; i++) { normalized_image[i] = (float)image[i] / 255 - 0.5; } output.push_back(normalized_image); } } } void DataSet::read_labels(std::string file_name, std::vector<unsigned char>& output) { std::ifstream file(file_name, std::ios::binary); if (file.is_open()) { unsigned int magic_number = 0; unsigned int number_of_images = 0; file.read((char*)&magic_number, sizeof(magic_number)); file.read((char*)&number_of_images, sizeof(number_of_images)); magic_number = this->reverse_int(magic_number); number_of_images = this->reverse_int(number_of_images); for (int i = 0; i < number_of_images; i++) { unsigned char label = 0; file.read((char*)&label, sizeof(label)); output.push_back(label); } } }
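DataSet::forward() above stages each batch in a thrust::host_vector backed by the pinned allocator before assigning it to device storage, so the host-to-device copy runs from page-locked memory. A stripped-down sketch of that pattern follows; it is standalone, the header path matches classic Thrust releases and is an assumption, and the 28x28 size is illustrative.

#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/system/cuda/experimental/pinned_allocator.h>   // assumed header for the allocator used above

int main() {
    // Pinned (page-locked) host staging buffer, one 28x28 image's worth of floats.
    thrust::host_vector<float,
        thrust::system::cuda::experimental::pinned_allocator<float>> staging(28 * 28, 0.5f);

    // Constructing a device_vector from it performs the host-to-device transfer.
    thrust::device_vector<float> d_batch = staging;

    return d_batch.size() == staging.size() ? 0 : 1;
}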
e6d9e8ae5911486d028e86c4e67bfabbb31ac470.cu
#include "dataset.cuh" DataSet::DataSet(std::string mnist_data_path, bool shuffle) : shuffle(shuffle), train_data_index(0), test_data_index(0) { // train data this->read_images(mnist_data_path + "/train-images-idx3-ubyte", this->train_data); this->read_labels(mnist_data_path + "/train-labels-idx1-ubyte", this->train_label); // test data this->read_images(mnist_data_path + "/t10k-images-idx3-ubyte", this->test_data); this->read_labels(mnist_data_path + "/t10k-labels-idx1-ubyte", this->test_label); } void DataSet::reset() { this->train_data_index = 0; this->test_data_index = 0; if (shuffle) { // keep random seed same unsigned int seed = std::chrono::system_clock::now().time_since_epoch().count() % 1234; std::shuffle(this->train_data.begin(), this->train_data.end(), std::default_random_engine(seed)); std::shuffle(this->train_label.begin(), this->train_label.end(), std::default_random_engine(seed)); } } void DataSet::forward(int batch_size, bool is_train) { if (is_train) { int start = this->train_data_index; int end = std::min(this->train_data_index + batch_size, (int)this->train_data.size()); this->train_data_index = end; int size = end - start; // init device memory std::vector<int> output_shape{size, 1, this->height, this->width}; std::vector<int> output_label_shape{size, 10}; INIT_STORAGE(this->output, output_shape); INIT_STORAGE(this->output_label, output_label_shape); thrust::fill(this->output_label->get_data().begin(), this->output_label->get_data().end(), 0); // copy to device memory int im_stride = 1 * this->height * this->width; int one_hot_stride = 10; thrust::host_vector< float, thrust::system::cuda::experimental::pinned_allocator<float>> train_data_buffer; train_data_buffer.reserve(size * im_stride); for (int i = start; i < end; i++) { train_data_buffer.insert(train_data_buffer.end(), this->train_data[i].begin(), this->train_data[i].end()); this->output_label ->get_data()[(i - start) * one_hot_stride + this->train_label[i]] = 1; } this->output->get_data() = train_data_buffer; } else { int start = this->test_data_index; int end = std::min(this->test_data_index + batch_size, (int)this->test_data.size()); this->test_data_index = end; int size = end - start; // init device memory std::vector<int> output_shape{size, 1, this->height, this->width}; std::vector<int> output_label_shape{size, 10}; INIT_STORAGE(this->output, output_shape); INIT_STORAGE(this->output_label, output_label_shape); thrust::fill(this->output_label->get_data().begin(), this->output_label->get_data().end(), 0); // copy to device memory int im_stride = 1 * this->height * this->width; int one_hot_stride = 10; thrust::host_vector< float, thrust::system::cuda::experimental::pinned_allocator<float>> test_data_buffer; test_data_buffer.reserve(size * im_stride); for (int i = start; i < end; i++) { test_data_buffer.insert(test_data_buffer.end(), this->test_data[i].begin(), this->test_data[i].end()); this->output_label ->get_data()[(i - start) * one_hot_stride + this->test_label[i]] = 1; } this->output->get_data() = test_data_buffer; } } bool DataSet::has_next(bool is_train) { if (is_train) { return this->train_data_index < this->train_data.size(); } else { return this->test_data_index < this->test_data.size(); } } void DataSet::print_im() { int size = this->output->get_shape()[0]; int im_stride = 1 * height * width; for (int k = 0; k < size; k++) { int max_pos = -1; float max_value = -FLT_MAX; for (int i = 0; i < 10; i++) { float val = this->output_label->get_data()[k * 10 + i]; if (val > max_value) { max_value = val; 
max_pos = i; } } std::cout << max_pos << std::endl; auto& data = this->output->get_data(); for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { std::cout << (data[k * im_stride + i * width + j] > 0 ? "* " : " "); } std::cout << std::endl; } } } unsigned int DataSet::reverse_int(unsigned int i) { unsigned char ch1, ch2, ch3, ch4; ch1 = i & 255; ch2 = (i >> 8) & 255; ch3 = (i >> 16) & 255; ch4 = (i >> 24) & 255; return ((unsigned int)ch1 << 24) + ((unsigned int)ch2 << 16) + ((unsigned int)ch3 << 8) + ch4; } void DataSet::read_images(std::string file_name, std::vector<std::vector<float>>& output) { std::ifstream file(file_name, std::ios::binary); if (file.is_open()) { unsigned int magic_number = 0; unsigned int number_of_images = 0; unsigned int n_rows = 0; unsigned int n_cols = 0; file.read((char*)&magic_number, sizeof(magic_number)); file.read((char*)&number_of_images, sizeof(number_of_images)); file.read((char*)&n_rows, sizeof(n_rows)); file.read((char*)&n_cols, sizeof(n_cols)); magic_number = this->reverse_int(magic_number); number_of_images = this->reverse_int(number_of_images); n_rows = this->reverse_int(n_rows); n_cols = this->reverse_int(n_cols); this->height = n_rows; this->width = n_cols; std::vector<unsigned char> image(n_rows * n_cols); std::vector<float> normalized_image(n_rows * n_cols); for (int i = 0; i < number_of_images; i++) { file.read((char*)&image[0], sizeof(unsigned char) * n_rows * n_cols); for (int i = 0; i < n_rows * n_cols; i++) { normalized_image[i] = (float)image[i] / 255 - 0.5; } output.push_back(normalized_image); } } } void DataSet::read_labels(std::string file_name, std::vector<unsigned char>& output) { std::ifstream file(file_name, std::ios::binary); if (file.is_open()) { unsigned int magic_number = 0; unsigned int number_of_images = 0; file.read((char*)&magic_number, sizeof(magic_number)); file.read((char*)&number_of_images, sizeof(number_of_images)); magic_number = this->reverse_int(magic_number); number_of_images = this->reverse_int(number_of_images); for (int i = 0; i < number_of_images; i++) { unsigned char label = 0; file.read((char*)&label, sizeof(label)); output.push_back(label); } } }
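The reverse_int() helper above exists because the MNIST IDX headers store their 32-bit fields big-endian. A small standalone check follows; the hard-coded value is what a little-endian host reads for the documented image-file magic number 2051.

#include <cstdio>

// Same byte swap as DataSet::reverse_int above, reproduced so the sketch is self-contained.
unsigned int reverse_int(unsigned int i) {
    unsigned char ch1 = i & 255;
    unsigned char ch2 = (i >> 8) & 255;
    unsigned char ch3 = (i >> 16) & 255;
    unsigned char ch4 = (i >> 24) & 255;
    return ((unsigned int)ch1 << 24) + ((unsigned int)ch2 << 16) +
           ((unsigned int)ch3 << 8) + ch4;
}

int main() {
    // Header bytes 00 00 08 03 read into a little-endian uint come out as 0x03080000.
    unsigned int as_read = 0x03080000u;
    printf("raw=0x%08x swapped=%u (expected 2051)\n", as_read, reverse_int(as_read));
    return 0;
}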
a89b01506bdebb745e2b295db7cf427ee9ed02c4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" ///////////////////////// // matrixVecMult.cu // // Andrew Krepps // // Module 6 Assignment // // 3/12/2018 // ///////////////////////// #include <chrono> #include <stdio.h> #include <stdlib.h> #define MAX_SIZE 8192 /////////////////////////////////////////////////////////////////////////////// /// \brief perform matrix vector multiplication for a single output element /// /// \param [in] inMat the input matrix /// \param [in] inVec the input vector /// \param [out] outVec the output vector /// \param [in] m the number of matrix rows and the output vector length /// \param [in] n the number of matrix columns and the input vector length /////////////////////////////////////////////////////////////////////////////// __device__ void performMatVecMult( const float* inMat, const float* inVec, float* outVec, const unsigned int m, const unsigned int n) { const unsigned int outIdx = blockIdx.x*blockDim.x + threadIdx.x; if (outIdx < m) { // intermediate results are stored in registers // before being written back to the output float sum = 0.0f; unsigned int matRowStart = outIdx*n; for (unsigned int i = 0; i < n; i++) { unsigned int matIdx = matRowStart + i; sum += inMat[matIdx]*inVec[i]; } outVec[outIdx] = sum; } } /////////////////////////////////////////////////////////////////////////////// /// \brief perform matrix vector multiplication using global memory /// /// \param [in] inMat the input matrix /// \param [in] inVec the input vector /// \param [out] outVec the output vector /// \param [in] m the number of matrix rows and the output vector length /// \param [in] n the number of matrix columns and the input vector length /////////////////////////////////////////////////////////////////////////////// __global__ void matVecMultGlobalMem( const float* inMat, const float* inVec, float* outVec, const unsigned int m, const unsigned int n) { // we're just using global memory, so directly perform multiplication performMatVecMult(inMat, inVec, outVec, m, n); } /////////////////////////////////////////////////////////////////////////////// // \brief constant memory for storing input vector /////////////////////////////////////////////////////////////////////////////// __constant__ float inVecConstantMem[MAX_SIZE]; /////////////////////////////////////////////////////////////////////////////// /// \brief perform matrix vector mutiplication using constant memory /// /// This assumes that the input vector has already been copied to constant /// memory using the symbol inVecConstantMem /// /// \param [in] inMat the input matrix /// \param [out] outVec the output vector /// \param [in] m the number of matrix rows and the output vector length /// \param [in] n the number of matrix columns and the input vector length /////////////////////////////////////////////////////////////////////////////// __global__ void matVecMultConstantMem( const float* inMat, float* outVec, const unsigned int m, const unsigned int n) { // perform the multiplication using input vector bound to constant memory performMatVecMult(inMat, inVecConstantMem, outVec, m, n); } /////////////////////////////////////////////////////////////////////////////// /// \brief perform matrix vector multiplication using shared memory /// /// \param [in] inMat the input matrix /// \param [in] inVec the input vector /// \param [out] outVec the output vector /// \param [in] m the number of matrix rows and the output vector length /// \param [in] n the number of matrix 
columns and the input vector length /////////////////////////////////////////////////////////////////////////////// __global__ void matVecMultSharedMem( const float* inMat, const float* inVec, float* outVec, const unsigned int m, const unsigned int n) { // load input vector into shared memory for each block extern __shared__ float inVecSharedMem[]; const unsigned int localIdx = threadIdx.x; // input vector could be larger than the block size, // so we need to figure out which elements each thread // is responsible for copying over to shared memory const unsigned int elementsToCopy = n/blockDim.x + 1; const unsigned int startElement = localIdx*elementsToCopy; for (unsigned int i = 0; i < elementsToCopy; ++i) { unsigned int dataIdx = startElement + i; if (dataIdx < n) { inVecSharedMem[dataIdx] = inVec[dataIdx]; } } __syncthreads(); // after all data is loaded, perform multiplication using vector in shared memory performMatVecMult(inMat, inVecSharedMem, outVec, m, n); } /////////////////////////////////////////////////////////////////////////////// /// \brief initialize input data on the host /// /// \param [out] mat the input matrix /// \param [out] vec the input vector /// \param [in] m the number of matrix rows /// \param [in] n the number of matrix columns and the input vector length /////////////////////////////////////////////////////////////////////////////// void initializeInputData( float* mat, float* vec, const unsigned int m, const unsigned int n) { for (unsigned int i = 0; i < m; ++i) { for (unsigned int j = 0; j < n; ++j) { const unsigned int matIdx = i*n + j; mat[matIdx] = matIdx*0.01f; } vec[i] = i*0.1f; } } /////////////////////////////////////////////////////////////////////////////// /// \brief launch a kernel to perform matrix vector multiplication /// /// \param [in] kernel the kernel index (i.e., memory type) to use /// \param [in] blockSize the number of threads per block to use /// \param [in] inMat the input matrix (on the device) /// \param [in] inVec the input vector (on the device) /// \param [out] outVec the output vector (on the device) /// \param [in] m the number of matrix rows and the output vector length /// \param [in] n the number of matrix columns and the input vector length /// /// \returns the kernel execution time (in ms) /////////////////////////////////////////////////////////////////////////////// float launchKernel( const unsigned int kernel, const unsigned int blockSize, const float* inMat, const float* inVec, float* outVec, const unsigned int m, const unsigned int n) { const unsigned int numBlocks = m/blockSize; // start clock and launch kernel auto start = std::chrono::high_resolution_clock::now(); switch (kernel) { case 0: hipLaunchKernelGGL(( matVecMultGlobalMem), dim3(numBlocks), dim3(blockSize), 0, 0, inMat, inVec, outVec, m, n); break; case 1: hipLaunchKernelGGL(( matVecMultConstantMem), dim3(numBlocks), dim3(blockSize), 0, 0, inMat, outVec, m, n); break; case 2: hipLaunchKernelGGL(( matVecMultSharedMem), dim3(numBlocks), dim3(blockSize), n*sizeof(float), 0, inMat, inVec, outVec, m, n); break; default: printf("Invalid kernel index: %d\n", kernel); } // wait for GPU kernel to finish hipDeviceSynchronize(); // calculate execution time in ms auto stop = std::chrono::high_resolution_clock::now(); std::chrono::duration<float> duration(stop - start); return duration.count()*1000.0f; } /////////////////////////////////////////////////////////////////////////////// /// \brief allocate device memory and run a kernel to perform matrix vector /// 
multiplication /// /// \param [in] kernel the kernel index (i.e., memory type) to use /// \param [in] kernel the kernel index (i.e., memory type) to use /// \param [in] blockSize the number of threads per block to use /// \param [in] inMat the input matrix (on the host) /// \param [in] inVec the input vector (on the host) /// \param [out] outVec the output vector (on the host) /// \param [in] m the number of matrix rows and the output vector length /// \param [in] n the number of matrix columns and the input vector length /// /// \returns the kernel execution time (in ms) not including data transfer /////////////////////////////////////////////////////////////////////////////// float runTimingTest( const unsigned int kernel, const unsigned int blockSize, const float* inMat, const float* inVec, float* outVec, const unsigned int m, const unsigned int n) { // allocate device memory float* d_inMat; float* d_inVec; float* d_outVec; const unsigned int matrixElements = m*n; const unsigned int matrixBytes = matrixElements*sizeof(float); const unsigned int vectorBytes = n*sizeof(float); hipMalloc((void**)&d_inMat, matrixBytes); hipMalloc((void**)&d_inVec, vectorBytes); hipMalloc((void**)&d_outVec, vectorBytes); // copy input data to device hipMemcpy(d_inMat, inMat, matrixBytes, hipMemcpyHostToDevice); hipMemcpy(d_inVec, inVec, vectorBytes, hipMemcpyHostToDevice); float ms = launchKernel(kernel, blockSize, d_inMat, d_inVec, d_outVec, m, n); // copy output data to host hipMemcpy(outVec, d_outVec, vectorBytes, hipMemcpyDeviceToHost); // free device memory hipFree(d_inMat); hipFree(d_inVec); hipFree(d_outVec); return ms; } int main(int argc, char** argv) { // configure run unsigned int dataSize = 512; unsigned int blockSize = 256; if (argc > 1) { dataSize = atoi(argv[1]); } if (argc > 2) { blockSize = atoi(argv[2]); } if (dataSize > MAX_SIZE) { dataSize = MAX_SIZE; printf("Warning: data size exceeds maximum limit. Setting to %d.\n", dataSize); } // allocate and initialize host memory const unsigned int matrixBytes = dataSize*dataSize*sizeof(float); const unsigned int vectorBytes = dataSize*sizeof(float); float* inMat = (float*)malloc(matrixBytes); float* inVec = (float*)malloc(vectorBytes); float* outVec = (float*)malloc(vectorBytes); initializeInputData(inMat, inVec, dataSize, dataSize); // initialize input vector in constant memory for later hipMemcpyToSymbol(inVecConstantMem, inVec, vectorBytes); // dummy executions to avoid startup performance hit for (unsigned int kernel = 0; kernel < 3; ++kernel) { runTimingTest(kernel, blockSize, inMat, inVec, outVec, dataSize, dataSize); } // run timing comparisons for (unsigned int kernel = 0; kernel < 3; ++kernel) { switch (kernel) { case 0: printf("Running global memory kernel: "); break; case 1: printf("Running constant memory kernel: "); break; case 2: printf("Running shared memory kernel "); break; default: printf("Invalid kernel index: %d\n", kernel); } float ms = runTimingTest(kernel, blockSize, inMat, inVec, outVec, dataSize, dataSize); printf("Kernel took %.6f ms to run\n", ms); // print output of kernel for (unsigned int i = 0; i < dataSize; ++i) { //printf("outVec[%d] = %f\n", i, outVec[i]); } } // free host memory free(inMat); free(inVec); free(outVec); }
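A small aside on the launch configuration in launchKernel() above: numBlocks = m/blockSize uses truncating integer division, so when m is not a multiple of blockSize the trailing rows get no thread. Since the kernels already guard with `if (outIdx < m)`, a rounded-up grid would be safe. The standalone sketch below shows the two block counts for illustrative sizes.

#include <cstdio>

int main() {
    unsigned int m = 1000, blockSize = 256;                    // illustrative sizes
    unsigned int truncated = m / blockSize;                    // 3 blocks -> only 768 rows covered
    unsigned int roundedUp = (m + blockSize - 1) / blockSize;  // 4 blocks -> all 1000 rows covered
    printf("truncated=%u roundedUp=%u\n", truncated, roundedUp);
    return 0;
}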
a89b01506bdebb745e2b295db7cf427ee9ed02c4.cu
///////////////////////// // matrixVecMult.cu // // Andrew Krepps // // Module 6 Assignment // // 3/12/2018 // ///////////////////////// #include <chrono> #include <stdio.h> #include <stdlib.h> #define MAX_SIZE 8192 /////////////////////////////////////////////////////////////////////////////// /// \brief perform matrix vector multiplication for a single output element /// /// \param [in] inMat the input matrix /// \param [in] inVec the input vector /// \param [out] outVec the output vector /// \param [in] m the number of matrix rows and the output vector length /// \param [in] n the number of matrix columns and the input vector length /////////////////////////////////////////////////////////////////////////////// __device__ void performMatVecMult( const float* inMat, const float* inVec, float* outVec, const unsigned int m, const unsigned int n) { const unsigned int outIdx = blockIdx.x*blockDim.x + threadIdx.x; if (outIdx < m) { // intermediate results are stored in registers // before being written back to the output float sum = 0.0f; unsigned int matRowStart = outIdx*n; for (unsigned int i = 0; i < n; i++) { unsigned int matIdx = matRowStart + i; sum += inMat[matIdx]*inVec[i]; } outVec[outIdx] = sum; } } /////////////////////////////////////////////////////////////////////////////// /// \brief perform matrix vector multiplication using global memory /// /// \param [in] inMat the input matrix /// \param [in] inVec the input vector /// \param [out] outVec the output vector /// \param [in] m the number of matrix rows and the output vector length /// \param [in] n the number of matrix columns and the input vector length /////////////////////////////////////////////////////////////////////////////// __global__ void matVecMultGlobalMem( const float* inMat, const float* inVec, float* outVec, const unsigned int m, const unsigned int n) { // we're just using global memory, so directly perform multiplication performMatVecMult(inMat, inVec, outVec, m, n); } /////////////////////////////////////////////////////////////////////////////// // \brief constant memory for storing input vector /////////////////////////////////////////////////////////////////////////////// __constant__ float inVecConstantMem[MAX_SIZE]; /////////////////////////////////////////////////////////////////////////////// /// \brief perform matrix vector mutiplication using constant memory /// /// This assumes that the input vector has already been copied to constant /// memory using the symbol inVecConstantMem /// /// \param [in] inMat the input matrix /// \param [out] outVec the output vector /// \param [in] m the number of matrix rows and the output vector length /// \param [in] n the number of matrix columns and the input vector length /////////////////////////////////////////////////////////////////////////////// __global__ void matVecMultConstantMem( const float* inMat, float* outVec, const unsigned int m, const unsigned int n) { // perform the multiplication using input vector bound to constant memory performMatVecMult(inMat, inVecConstantMem, outVec, m, n); } /////////////////////////////////////////////////////////////////////////////// /// \brief perform matrix vector multiplication using shared memory /// /// \param [in] inMat the input matrix /// \param [in] inVec the input vector /// \param [out] outVec the output vector /// \param [in] m the number of matrix rows and the output vector length /// \param [in] n the number of matrix columns and the input vector length 
/////////////////////////////////////////////////////////////////////////////// __global__ void matVecMultSharedMem( const float* inMat, const float* inVec, float* outVec, const unsigned int m, const unsigned int n) { // load input vector into shared memory for each block extern __shared__ float inVecSharedMem[]; const unsigned int localIdx = threadIdx.x; // input vector could be larger than the block size, // so we need to figure out which elements each thread // is responsible for copying over to shared memory const unsigned int elementsToCopy = n/blockDim.x + 1; const unsigned int startElement = localIdx*elementsToCopy; for (unsigned int i = 0; i < elementsToCopy; ++i) { unsigned int dataIdx = startElement + i; if (dataIdx < n) { inVecSharedMem[dataIdx] = inVec[dataIdx]; } } __syncthreads(); // after all data is loaded, perform multiplication using vector in shared memory performMatVecMult(inMat, inVecSharedMem, outVec, m, n); } /////////////////////////////////////////////////////////////////////////////// /// \brief initialize input data on the host /// /// \param [out] mat the input matrix /// \param [out] vec the input vector /// \param [in] m the number of matrix rows /// \param [in] n the number of matrix columns and the input vector length /////////////////////////////////////////////////////////////////////////////// void initializeInputData( float* mat, float* vec, const unsigned int m, const unsigned int n) { for (unsigned int i = 0; i < m; ++i) { for (unsigned int j = 0; j < n; ++j) { const unsigned int matIdx = i*n + j; mat[matIdx] = matIdx*0.01f; } vec[i] = i*0.1f; } } /////////////////////////////////////////////////////////////////////////////// /// \brief launch a kernel to perform matrix vector multiplication /// /// \param [in] kernel the kernel index (i.e., memory type) to use /// \param [in] blockSize the number of threads per block to use /// \param [in] inMat the input matrix (on the device) /// \param [in] inVec the input vector (on the device) /// \param [out] outVec the output vector (on the device) /// \param [in] m the number of matrix rows and the output vector length /// \param [in] n the number of matrix columns and the input vector length /// /// \returns the kernel execution time (in ms) /////////////////////////////////////////////////////////////////////////////// float launchKernel( const unsigned int kernel, const unsigned int blockSize, const float* inMat, const float* inVec, float* outVec, const unsigned int m, const unsigned int n) { const unsigned int numBlocks = m/blockSize; // start clock and launch kernel auto start = std::chrono::high_resolution_clock::now(); switch (kernel) { case 0: matVecMultGlobalMem<<<numBlocks, blockSize>>>(inMat, inVec, outVec, m, n); break; case 1: matVecMultConstantMem<<<numBlocks, blockSize>>>(inMat, outVec, m, n); break; case 2: matVecMultSharedMem<<<numBlocks, blockSize, n*sizeof(float)>>>(inMat, inVec, outVec, m, n); break; default: printf("Invalid kernel index: %d\n", kernel); } // wait for GPU kernel to finish cudaThreadSynchronize(); // calculate execution time in ms auto stop = std::chrono::high_resolution_clock::now(); std::chrono::duration<float> duration(stop - start); return duration.count()*1000.0f; } /////////////////////////////////////////////////////////////////////////////// /// \brief allocate device memory and run a kernel to perform matrix vector /// multiplication /// /// \param [in] kernel the kernel index (i.e., memory type) to use /// \param [in] kernel the kernel index (i.e., memory type) to 
use /// \param [in] blockSize the number of threads per block to use /// \param [in] inMat the input matrix (on the host) /// \param [in] inVec the input vector (on the host) /// \param [out] outVec the output vector (on the host) /// \param [in] m the number of matrix rows and the output vector length /// \param [in] n the number of matrix columns and the input vector length /// /// \returns the kernel execution time (in ms) not including data transfer /////////////////////////////////////////////////////////////////////////////// float runTimingTest( const unsigned int kernel, const unsigned int blockSize, const float* inMat, const float* inVec, float* outVec, const unsigned int m, const unsigned int n) { // allocate device memory float* d_inMat; float* d_inVec; float* d_outVec; const unsigned int matrixElements = m*n; const unsigned int matrixBytes = matrixElements*sizeof(float); const unsigned int vectorBytes = n*sizeof(float); cudaMalloc((void**)&d_inMat, matrixBytes); cudaMalloc((void**)&d_inVec, vectorBytes); cudaMalloc((void**)&d_outVec, vectorBytes); // copy input data to device cudaMemcpy(d_inMat, inMat, matrixBytes, cudaMemcpyHostToDevice); cudaMemcpy(d_inVec, inVec, vectorBytes, cudaMemcpyHostToDevice); float ms = launchKernel(kernel, blockSize, d_inMat, d_inVec, d_outVec, m, n); // copy output data to host cudaMemcpy(outVec, d_outVec, vectorBytes, cudaMemcpyDeviceToHost); // free device memory cudaFree(d_inMat); cudaFree(d_inVec); cudaFree(d_outVec); return ms; } int main(int argc, char** argv) { // configure run unsigned int dataSize = 512; unsigned int blockSize = 256; if (argc > 1) { dataSize = atoi(argv[1]); } if (argc > 2) { blockSize = atoi(argv[2]); } if (dataSize > MAX_SIZE) { dataSize = MAX_SIZE; printf("Warning: data size exceeds maximum limit. Setting to %d.\n", dataSize); } // allocate and initialize host memory const unsigned int matrixBytes = dataSize*dataSize*sizeof(float); const unsigned int vectorBytes = dataSize*sizeof(float); float* inMat = (float*)malloc(matrixBytes); float* inVec = (float*)malloc(vectorBytes); float* outVec = (float*)malloc(vectorBytes); initializeInputData(inMat, inVec, dataSize, dataSize); // initialize input vector in constant memory for later cudaMemcpyToSymbol(inVecConstantMem, inVec, vectorBytes); // dummy executions to avoid startup performance hit for (unsigned int kernel = 0; kernel < 3; ++kernel) { runTimingTest(kernel, blockSize, inMat, inVec, outVec, dataSize, dataSize); } // run timing comparisons for (unsigned int kernel = 0; kernel < 3; ++kernel) { switch (kernel) { case 0: printf("Running global memory kernel: "); break; case 1: printf("Running constant memory kernel: "); break; case 2: printf("Running shared memory kernel "); break; default: printf("Invalid kernel index: %d\n", kernel); } float ms = runTimingTest(kernel, blockSize, inMat, inVec, outVec, dataSize, dataSize); printf("Kernel took %.6f ms to run\n", ms); // print output of kernel for (unsigned int i = 0; i < dataSize; ++i) { //printf("outVec[%d] = %f\n", i, outVec[i]); } } // free host memory free(inMat); free(inVec); free(outVec); }
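One caveat worth noting: launchKernel above sizes the grid as m/blockSize, which covers every output row only when m is a multiple of blockSize (true for the default 512/256 configuration). The sketch below is a hypothetical variant, not part of the original source, that launches the shared-memory kernel with ceiling division and times it with CUDA events instead of std::chrono; it assumes the matVecMultSharedMem kernel defined above is visible in the same translation unit.

#include <cuda_runtime.h>

// Kernel defined in the file above; assumed visible in this translation unit.
__global__ void matVecMultSharedMem(const float* inMat, const float* inVec,
                                    float* outVec, unsigned int m, unsigned int n);

// Hypothetical launcher: ceiling-division grid plus CUDA-event timing.
// The third launch parameter is the dynamic shared-memory size consumed by
// inVecSharedMem (one float per input-vector element).
float launchSharedMemCeil(unsigned int blockSize,
                          const float* d_inMat, const float* d_inVec,
                          float* d_outVec, unsigned int m, unsigned int n)
{
    const unsigned int numBlocks = (m + blockSize - 1)/blockSize;

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    matVecMultSharedMem<<<numBlocks, blockSize, n*sizeof(float)>>>(
        d_inMat, d_inVec, d_outVec, m, n);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;
}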
d2c2579d4374e947b85120530afe1baaa8629d9e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include "cuda_utils.h" #include "distance/distance.h" #include "random/rng.h" #include "test_utils.h" namespace MLCommon { namespace Distance { template <typename Type> __global__ void naiveDistanceAdjKernel(bool *dist, const Type *x, const Type *y, int m, int n, int k, Type eps) { int midx = threadIdx.x + blockIdx.x * blockDim.x; int nidx = threadIdx.y + blockIdx.y * blockDim.y; if (midx >= m || nidx >= n) return; Type acc = Type(0); for (int i = 0; i < k; ++i) { auto diff = x[i + midx * k] - y[i + nidx * k]; acc += diff * diff; } dist[midx * n + nidx] = acc <= eps; } template <typename Type> void naiveDistanceAdj(bool *dist, const Type *x, const Type *y, int m, int n, int k, Type eps) { static const dim3 TPB(16, 32, 1); dim3 nblks(ceildiv(m, (int)TPB.x), ceildiv(n, (int)TPB.y), 1); hipLaunchKernelGGL(( naiveDistanceAdjKernel<Type>), dim3(nblks), dim3(TPB), 0, 0, dist, x, y, m, n, k, eps); CUDA_CHECK(hipPeekAtLastError()); } template <typename T> struct DistanceAdjInputs { T eps; int m, n, k; unsigned long long int seed; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const DistanceAdjInputs<T> &dims) { return os; } template <typename T> struct InParams { T threshold; }; template <typename T> class DistanceAdjTest : public ::testing::TestWithParam<DistanceAdjInputs<T>> { public: void SetUp() override { params = ::testing::TestWithParam<DistanceAdjInputs<T>>::GetParam(); Random::Rng<T> r(params.seed); int m = params.m; int n = params.n; int k = params.k; allocate(x, m * k); allocate(y, n * k); allocate(dist_ref, m * n); allocate(dist, m * n); r.uniform(x, m * k, T(-1.0), T(1.0)); r.uniform(y, n * k, T(-1.0), T(1.0)); InParams<T> in_params = {params.eps}; naiveDistanceAdj(dist_ref, x, y, m, n, k, params.eps); char *workspace = nullptr; size_t worksize = 0; typedef cutlass::Shape<8, 128, 128> OutputTile_t; distance<T, T, bool, InParams<T>, OutputTile_t>( x, y, dist, m, n, k, in_params, EucExpandedL2, nullptr, worksize); if (worksize != 0) { allocate(workspace, worksize); } auto fin_op = [] __device__(T d_val, int g_d_idx, const InParams<T> &in_params) { return d_val <= in_params.threshold; }; distance<T, T, bool, InParams<T>, OutputTile_t>( x, y, dist, m, n, k, in_params, EucExpandedL2, workspace, worksize, fin_op); CUDA_CHECK(hipFree(workspace)); } void TearDown() override { CUDA_CHECK(hipFree(x)); CUDA_CHECK(hipFree(y)); CUDA_CHECK(hipFree(dist_ref)); CUDA_CHECK(hipFree(dist)); } protected: DistanceAdjInputs<T> params; T *x, *y; bool *dist_ref, *dist; }; const std::vector<DistanceAdjInputs<float>> inputsf = { {0.01f, 1024, 1024, 32, 1234ULL}, {0.1f, 1024, 1024, 32, 1234ULL}, {1.0f, 1024, 1024, 32, 1234ULL}, {10.0f, 1024, 1024, 32, 1234ULL}}; typedef DistanceAdjTest<float> DistanceAdjTestF; TEST_P(DistanceAdjTestF, Result) { ASSERT_TRUE(devArrMatch(dist_ref, dist, params.m, 
params.n, Compare<bool>())); } INSTANTIATE_TEST_CASE_P(DistanceAdjTests, DistanceAdjTestF, ::testing::ValuesIn(inputsf)); const std::vector<DistanceAdjInputs<double>> inputsd = { {0.01, 1024, 1024, 32, 1234ULL}, {0.1, 1024, 1024, 32, 1234ULL}, {1.0, 1024, 1024, 32, 1234ULL}, {10.0, 1024, 1024, 32, 1234ULL}}; typedef DistanceAdjTest<double> DistanceAdjTestD; TEST_P(DistanceAdjTestD, Result) { ASSERT_TRUE(devArrMatch(dist_ref, dist, params.m, params.n, Compare<bool>())); } INSTANTIATE_TEST_CASE_P(DistanceAdjTests, DistanceAdjTestD, ::testing::ValuesIn(inputsd)); } // end namespace Distance } // end namespace MLCommon
d2c2579d4374e947b85120530afe1baaa8629d9e.cu
/* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include "cuda_utils.h" #include "distance/distance.h" #include "random/rng.h" #include "test_utils.h" namespace MLCommon { namespace Distance { template <typename Type> __global__ void naiveDistanceAdjKernel(bool *dist, const Type *x, const Type *y, int m, int n, int k, Type eps) { int midx = threadIdx.x + blockIdx.x * blockDim.x; int nidx = threadIdx.y + blockIdx.y * blockDim.y; if (midx >= m || nidx >= n) return; Type acc = Type(0); for (int i = 0; i < k; ++i) { auto diff = x[i + midx * k] - y[i + nidx * k]; acc += diff * diff; } dist[midx * n + nidx] = acc <= eps; } template <typename Type> void naiveDistanceAdj(bool *dist, const Type *x, const Type *y, int m, int n, int k, Type eps) { static const dim3 TPB(16, 32, 1); dim3 nblks(ceildiv(m, (int)TPB.x), ceildiv(n, (int)TPB.y), 1); naiveDistanceAdjKernel<Type><<<nblks, TPB>>>(dist, x, y, m, n, k, eps); CUDA_CHECK(cudaPeekAtLastError()); } template <typename T> struct DistanceAdjInputs { T eps; int m, n, k; unsigned long long int seed; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const DistanceAdjInputs<T> &dims) { return os; } template <typename T> struct InParams { T threshold; }; template <typename T> class DistanceAdjTest : public ::testing::TestWithParam<DistanceAdjInputs<T>> { public: void SetUp() override { params = ::testing::TestWithParam<DistanceAdjInputs<T>>::GetParam(); Random::Rng<T> r(params.seed); int m = params.m; int n = params.n; int k = params.k; allocate(x, m * k); allocate(y, n * k); allocate(dist_ref, m * n); allocate(dist, m * n); r.uniform(x, m * k, T(-1.0), T(1.0)); r.uniform(y, n * k, T(-1.0), T(1.0)); InParams<T> in_params = {params.eps}; naiveDistanceAdj(dist_ref, x, y, m, n, k, params.eps); char *workspace = nullptr; size_t worksize = 0; typedef cutlass::Shape<8, 128, 128> OutputTile_t; distance<T, T, bool, InParams<T>, OutputTile_t>( x, y, dist, m, n, k, in_params, EucExpandedL2, nullptr, worksize); if (worksize != 0) { allocate(workspace, worksize); } auto fin_op = [] __device__(T d_val, int g_d_idx, const InParams<T> &in_params) { return d_val <= in_params.threshold; }; distance<T, T, bool, InParams<T>, OutputTile_t>( x, y, dist, m, n, k, in_params, EucExpandedL2, workspace, worksize, fin_op); CUDA_CHECK(cudaFree(workspace)); } void TearDown() override { CUDA_CHECK(cudaFree(x)); CUDA_CHECK(cudaFree(y)); CUDA_CHECK(cudaFree(dist_ref)); CUDA_CHECK(cudaFree(dist)); } protected: DistanceAdjInputs<T> params; T *x, *y; bool *dist_ref, *dist; }; const std::vector<DistanceAdjInputs<float>> inputsf = { {0.01f, 1024, 1024, 32, 1234ULL}, {0.1f, 1024, 1024, 32, 1234ULL}, {1.0f, 1024, 1024, 32, 1234ULL}, {10.0f, 1024, 1024, 32, 1234ULL}}; typedef DistanceAdjTest<float> DistanceAdjTestF; TEST_P(DistanceAdjTestF, Result) { ASSERT_TRUE(devArrMatch(dist_ref, dist, params.m, params.n, Compare<bool>())); } INSTANTIATE_TEST_CASE_P(DistanceAdjTests, DistanceAdjTestF, ::testing::ValuesIn(inputsf)); 
const std::vector<DistanceAdjInputs<double>> inputsd = { {0.01, 1024, 1024, 32, 1234ULL}, {0.1, 1024, 1024, 32, 1234ULL}, {1.0, 1024, 1024, 32, 1234ULL}, {10.0, 1024, 1024, 32, 1234ULL}}; typedef DistanceAdjTest<double> DistanceAdjTestD; TEST_P(DistanceAdjTestD, Result) { ASSERT_TRUE(devArrMatch(dist_ref, dist, params.m, params.n, Compare<bool>())); } INSTANTIATE_TEST_CASE_P(DistanceAdjTests, DistanceAdjTestD, ::testing::ValuesIn(inputsd)); } // end namespace Distance } // end namespace MLCommon
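The naiveDistanceAdj launcher relies on MLCommon's ceildiv helper, which is not shown in this excerpt; the sketch below spells out its assumed behavior and the resulting grid shape for the 1024 x 1024 test cases.

#include <cuda_runtime.h>

// Assumed behavior of MLCommon::ceildiv (not shown in this excerpt):
// integer ceiling division.
constexpr int ceildivSketch(int a, int b) { return (a + b - 1)/b; }

// Grid shape used by naiveDistanceAdj: one thread per (midx, nidx) element of
// the m x n adjacency matrix, in 16 x 32 blocks.
inline dim3 adjacencyGrid(int m, int n)
{
    const dim3 TPB(16, 32, 1);
    return dim3(ceildivSketch(m, (int)TPB.x), ceildivSketch(n, (int)TPB.y), 1);
}
// For the m = n = 1024 test cases this gives a 64 x 32 x 1 grid; the bounds
// check in naiveDistanceAdjKernel discards any excess threads.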
2d390511a0edf37618711a166d7e5d4d7b691345.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <typeinfo> #include <random> #include <stdint.h> #include <rocblas.h> #define DEBUG #include <gemm/dispatch.h> #include <gemm/epilogue_function.h> #include "util/matrix.h" #include "util/timer.h" using namespace cutlass; int main(int argc, const char **argv) { int m = 10240; int k = 4096; int n = 4096; float alpha = 1.0; float beta = 0.0; static const matrix_transform_t::kind_t TransformA = matrix_transform_t::NonTranspose; static const matrix_transform_t::kind_t TransformB = matrix_transform_t::NonTranspose; typedef float value_t; typedef float accum_t; int g_timing_iterations = 10; matrix<value_t> A(m, k); matrix<value_t> B(k, n); matrix<accum_t> C(m, n); matrix<accum_t> C2(m, n); A.random(); B.random(); C.fill_ramp(0,0); C2.fill_ramp(0,0); A.sync_device(); B.sync_device(); C.sync_device(); C2.sync_device(); hipblasHandle_t g_cublas_handle; hipblasCreate(&g_cublas_handle); gpu_timer timer; for (int i = 0; i < g_timing_iterations+2; i++) { if (i == 2) timer.start(); CUDA_PERROR(hipblasSgemm( g_cublas_handle, (hipblasOperation_t) TransformA, (hipblasOperation_t) TransformB, m, n, k, &alpha, A.d_data(), m, B.d_data(), k, &beta, C.d_data(), m)); } timer.stop(); int64_t num_flops = (2 * int64_t(m) * int64_t(n) * int64_t(k)) + (2 * int64_t(m) * int64_t(n)); double tcublas = timer.elapsed_millis() / g_timing_iterations; double cublas_flops = double(num_flops) / tcublas / 1.0e6; typedef gemm::blas_scaled_epilogue<float> epilogue_op_t; for (int i = 0; i < g_timing_iterations+2; i++) { if (i == 2) timer.start(); gemm::dispatch<epilogue_op_t>( m, n, k, alpha, beta, A.d_data(), B.d_data(), C2.d_data() ); } timer.stop(); double tcutlass = timer.elapsed_millis() / g_timing_iterations; double cutlass_flops = double(num_flops) / tcutlass / 1.0e6; printf("CUBLAS: %.2f Gflops, CUTLASS: %.2f Gflops\n", cublas_flops, cutlass_flops); C.sync_host(); C2.sync_host(); double err = 0; for (int i=0; i<n; i++) { for (int j=0; j<m; j++) { err += fabs(C.get(i,j) - C2.get(i,j)); } } printf("error: %lf\n", err/n/m); hipblasDestroy(g_cublas_handle); }
2d390511a0edf37618711a166d7e5d4d7b691345.cu
#include <iostream> #include <typeinfo> #include <random> #include <stdint.h> #include <cublas_v2.h> #define DEBUG #include <gemm/dispatch.h> #include <gemm/epilogue_function.h> #include "util/matrix.h" #include "util/timer.h" using namespace cutlass; int main(int argc, const char **argv) { int m = 10240; int k = 4096; int n = 4096; float alpha = 1.0; float beta = 0.0; static const matrix_transform_t::kind_t TransformA = matrix_transform_t::NonTranspose; static const matrix_transform_t::kind_t TransformB = matrix_transform_t::NonTranspose; typedef float value_t; typedef float accum_t; int g_timing_iterations = 10; matrix<value_t> A(m, k); matrix<value_t> B(k, n); matrix<accum_t> C(m, n); matrix<accum_t> C2(m, n); A.random(); B.random(); C.fill_ramp(0,0); C2.fill_ramp(0,0); A.sync_device(); B.sync_device(); C.sync_device(); C2.sync_device(); cublasHandle_t g_cublas_handle; cublasCreate(&g_cublas_handle); gpu_timer timer; for (int i = 0; i < g_timing_iterations+2; i++) { if (i == 2) timer.start(); CUDA_PERROR(cublasSgemm( g_cublas_handle, (cublasOperation_t) TransformA, (cublasOperation_t) TransformB, m, n, k, &alpha, A.d_data(), m, B.d_data(), k, &beta, C.d_data(), m)); } timer.stop(); int64_t num_flops = (2 * int64_t(m) * int64_t(n) * int64_t(k)) + (2 * int64_t(m) * int64_t(n)); double tcublas = timer.elapsed_millis() / g_timing_iterations; double cublas_flops = double(num_flops) / tcublas / 1.0e6; typedef gemm::blas_scaled_epilogue<float> epilogue_op_t; for (int i = 0; i < g_timing_iterations+2; i++) { if (i == 2) timer.start(); gemm::dispatch<epilogue_op_t>( m, n, k, alpha, beta, A.d_data(), B.d_data(), C2.d_data() ); } timer.stop(); double tcutlass = timer.elapsed_millis() / g_timing_iterations; double cutlass_flops = double(num_flops) / tcutlass / 1.0e6; printf("CUBLAS: %.2f Gflops, CUTLASS: %.2f Gflops\n", cublas_flops, cutlass_flops); C.sync_host(); C2.sync_host(); double err = 0; for (int i=0; i<n; i++) { for (int j=0; j<m; j++) { err += fabs(C.get(i,j) - C2.get(i,j)); } } printf("error: %lf\n", err/n/m); cublasDestroy(g_cublas_handle); }
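Both the cuBLAS and CUTLASS measurements convert an averaged elapsed time into Gflop/s with the same arithmetic; the helper below is a hypothetical restatement of that calculation, not part of the original file.

#include <cstdint>

// Hypothetical restatement of the Gflop/s arithmetic used above: for
// C = alpha*A*B + beta*C the multiply-adds contribute 2*m*n*k flops and the
// scaling/accumulation of C roughly 2*m*n more.
double gemmGflops(int64_t m, int64_t n, int64_t k, double elapsedMs)
{
    const int64_t flops = 2*m*n*k + 2*m*n;
    // flops / (elapsedMs/1000) is flop/s; dividing by 1e9 gives Gflop/s,
    // which collapses to flops / elapsedMs / 1e6.
    return double(flops)/elapsedMs/1.0e6;
}
// Example: m = 10240, n = k = 4096 at an average of 10 ms per GEMM comes out
// to roughly 34,400 Gflop/s.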
ca0e8c6fa7af99e6ca4cb888b68ef9d455295a8b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "config.h" #include "filterCUDA.h" #include <time.h> #define MAX(a, b) a > b ? a : b #define MIN(a, b) a < b ? a : b __global__ void kernel_box_filter(float *estColor, float *inputColor, int xres, int yres, float *time) { clock_t start, end; double result; start = clock(); // running time evaluation start const int cx = blockDim.x * blockIdx.x + threadIdx.x; const int cy = blockDim.y * blockIdx.y + threadIdx.y; if (cx >= xres || cy >= yres) { return; } int index = cy * xres + cx; int sampleCount = 0; float color[3] = { 0.0f, }; int sx = MAX(cx - KernelRadius, 0); // adaptively controll the size of kernel entirely 4 cases int sy = MAX(cy - KernelRadius, 0); int ex = MIN(cx + KernelRadius, xres - 1); int ey = MIN(cy + KernelRadius, yres - 1); for (int ny = sy; ny <= ey; ++ny) { // kernel for (int nx = sx; nx <= ex; ++nx) { color[0] += inputColor[3 * (xres * ny + nx) + 0]; color[1] += inputColor[3 * (xres * ny + nx) + 1]; color[2] += inputColor[3 * (xres * ny + nx) + 2]; sampleCount++; } } estColor[3 * index + 0] = 1.f / (float)sampleCount * color[0]; estColor[3 * index + 1] = 1.f / (float)sampleCount * color[1]; estColor[3 * index + 2] = 1.f / (float)sampleCount * color[2]; end = clock(); result = (float)(end - start); time[1] = (result) / CLOCKS_PER_SEC; } extern "C" void BoxFiltering(float *outputColor, float* inputColor, int xres, int yres, float *time) { float invN = (1.0f / ((float)KernelSize * KernelSize)); float *estColor = new float[yres * xres * 3]; // estColor = sum(inputColor * 1/ N) // Cuda mem allocation // Host mem cpy float *d_input_color, *d_output_color; const int memSize = sizeof(float) * 3 * xres * yres; hipMalloc((void **)&d_input_color, memSize); hipMalloc((void **)&d_output_color, memSize); hipMemcpy(&d_input_color[0], &inputColor[0], memSize, hipMemcpyHostToDevice); // kernel function // for loop => parallel execution const int blockDim = 16; dim3 block(blockDim, blockDim); dim3 grid((xres + block.x - 1) / block.x, (yres + block.y - 1) / block.y); hipLaunchKernelGGL(( kernel_box_filter), dim3(grid), dim3(block), 0, 0, d_output_color, d_input_color, xres, yres, time); hipMemcpy(&outputColor[0], &d_output_color[0], memSize, hipMemcpyDeviceToHost); hipFree(d_input_color); hipFree(d_output_color); }
ca0e8c6fa7af99e6ca4cb888b68ef9d455295a8b.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "config.h" #include "filterCUDA.h" #include <time.h> #define MAX(a, b) a > b ? a : b #define MIN(a, b) a < b ? a : b __global__ void kernel_box_filter(float *estColor, float *inputColor, int xres, int yres, float *time) { clock_t start, end; double result; start = clock(); // running time evaluation start const int cx = blockDim.x * blockIdx.x + threadIdx.x; const int cy = blockDim.y * blockIdx.y + threadIdx.y; if (cx >= xres || cy >= yres) { return; } int index = cy * xres + cx; int sampleCount = 0; float color[3] = { 0.0f, }; int sx = MAX(cx - KernelRadius, 0); // adaptively controll the size of kernel entirely 4 cases int sy = MAX(cy - KernelRadius, 0); int ex = MIN(cx + KernelRadius, xres - 1); int ey = MIN(cy + KernelRadius, yres - 1); for (int ny = sy; ny <= ey; ++ny) { // kernel for (int nx = sx; nx <= ex; ++nx) { color[0] += inputColor[3 * (xres * ny + nx) + 0]; color[1] += inputColor[3 * (xres * ny + nx) + 1]; color[2] += inputColor[3 * (xres * ny + nx) + 2]; sampleCount++; } } estColor[3 * index + 0] = 1.f / (float)sampleCount * color[0]; estColor[3 * index + 1] = 1.f / (float)sampleCount * color[1]; estColor[3 * index + 2] = 1.f / (float)sampleCount * color[2]; end = clock(); result = (float)(end - start); time[1] = (result) / CLOCKS_PER_SEC; } extern "C" void BoxFiltering(float *outputColor, float* inputColor, int xres, int yres, float *time) { float invN = (1.0f / ((float)KernelSize * KernelSize)); float *estColor = new float[yres * xres * 3]; // estColor = sum(inputColor * 1/ N) // Cuda mem allocation // Host mem cpy float *d_input_color, *d_output_color; const int memSize = sizeof(float) * 3 * xres * yres; cudaMalloc((void **)&d_input_color, memSize); cudaMalloc((void **)&d_output_color, memSize); cudaMemcpy(&d_input_color[0], &inputColor[0], memSize, cudaMemcpyHostToDevice); // kernel function // for loop => parallel execution const int blockDim = 16; dim3 block(blockDim, blockDim); dim3 grid((xres + block.x - 1) / block.x, (yres + block.y - 1) / block.y); kernel_box_filter<<<grid, block>>>(d_output_color, d_input_color, xres, yres, time); cudaMemcpy(&outputColor[0], &d_output_color[0], memSize, cudaMemcpyDeviceToHost); cudaFree(d_input_color); cudaFree(d_output_color); }
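For checking the GPU output, a plain host-side version of the same clamped-window average can serve as a reference; boxFilterReference below is a hypothetical helper (not in the original source) that mirrors kernel_box_filter, including the normalization by the number of samples actually visited at image borders.

#include <algorithm>

// Hypothetical host-side reference mirroring kernel_box_filter: average the
// RGB samples in a (2*kernelRadius+1)^2 window clipped to the image, so border
// pixels are normalized by the number of samples actually visited.
void boxFilterReference(float* outColor, const float* inColor,
                        int xres, int yres, int kernelRadius)
{
    for (int cy = 0; cy < yres; ++cy) {
        for (int cx = 0; cx < xres; ++cx) {
            const int sx = std::max(cx - kernelRadius, 0);
            const int sy = std::max(cy - kernelRadius, 0);
            const int ex = std::min(cx + kernelRadius, xres - 1);
            const int ey = std::min(cy + kernelRadius, yres - 1);
            float sum[3] = {0.0f, 0.0f, 0.0f};
            int sampleCount = 0;
            for (int ny = sy; ny <= ey; ++ny) {
                for (int nx = sx; nx <= ex; ++nx) {
                    const int src = 3*(xres*ny + nx);
                    sum[0] += inColor[src + 0];
                    sum[1] += inColor[src + 1];
                    sum[2] += inColor[src + 2];
                    ++sampleCount;
                }
            }
            const int dst = 3*(xres*cy + cx);
            outColor[dst + 0] = sum[0]/sampleCount;
            outColor[dst + 1] = sum[1]/sampleCount;
            outColor[dst + 2] = sum[2]/sampleCount;
        }
    }
}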
d376158418e26f0fd7f3ecc92e0aa905448c1964.hip
// !!! This is a file automatically generated by hipify!!! /** FNELEM-GPU GPU MATRIX INVERSION Performs matrix inversion using Gauss Jordan algorithm. Based on: https://github.com/ZhengzhongSun/Matrix-Inversion-with-CUDA @package fnelem.math @author ppizarror @date 19/11/2018 @license MIT License Copyright (c) 2018 Pablo Pizarro R. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ // Library imports #include <hip/hip_runtime.h> #include <stdio.h> #include <iostream> // Constants #define __MATRIX_INVERSION_CUDA_BLOCKSIZE 8 /** * NODIAG normalize diagonal matrix (CUDA). * * @param A Matrix * @param I Matrix * @param n Dimension * @param i Position */ __global__ void nodiag_normalize(double *A, double *I, int n, int i) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < n && y < n) if (x == i && x != y) { I[x * n + y] /= A[i * n + i]; A[x * n + y] /= A[i * n + i]; } } /** * DIAG normalize diagonal matrix (CUDA). * * @param A Matrix * @param I Matrix * @param n Dimension * @param i Position */ __global__ void diag_normalize(double *A, double *I, int n, int i) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < n && y < n) if (x == y && x == i) { I[x * n + y] /= A[i * n + i]; A[x * n + y] /= A[i * n + i]; } } /** * Performs Gauss Jordan algorithm (CUDA). * * @param A Matrix * @param I Matrix * @param n Dimension * @param i Position */ __global__ void gaussjordan(double *A, double *I, int n, int i) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < n && y < n) { if (x != i) { I[x * n + y] -= I[i * n + y] * A[x * n + i]; if (y != i) { A[x * n + y] -= A[i * n + y] * A[x * n + i]; } } } } /** * Set zero on matrix (CUDA). * * @param A Matrix * @param I Matrix * @param n Dimension * @param i Position */ __global__ void set_zero(double *A, int n, int i) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < n && y < n) { if (x != i) { if (y == i) { A[x * n + y] = 0; } } } } /** * Matrix inversion, uses CUDA. 
* * @param feMatrix Matrix to inverse * @return Inverse matrix */ FEMatrix *matrix_inverse_cuda(FEMatrix *feMatrix) { // Get matrix double *matrix = feMatrix->get_array(); // Get matrix dimension int *matDim = feMatrix->size(); int n; if (matDim[0] == matDim[1]) { n = matDim[0]; } else { throw std::logic_error("Matrix to inverse is not square"); } // Inverse matrix CPU double *iMatrix = new double[n * n]; // Create auxiliar matrices double *d_A, *I, *dI; // Time of computation float time; // Create CUDA error handlers hipError_t err; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // Matrix memory size int ddsize = n * n * sizeof(double); // Creates blocks dim3 threadsPerBlock(__MATRIX_INVERSION_CUDA_BLOCKSIZE, __MATRIX_INVERSION_CUDA_BLOCKSIZE); dim3 numBlocks((n + __MATRIX_INVERSION_CUDA_BLOCKSIZE - 1) / __MATRIX_INVERSION_CUDA_BLOCKSIZE, (n + __MATRIX_INVERSION_CUDA_BLOCKSIZE - 1) / __MATRIX_INVERSION_CUDA_BLOCKSIZE); // Memory allocation err = hipMalloc((void **) &d_A, ddsize); if (err != hipSuccess) { std::cout << hipGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << std::endl; } err = hipMalloc((void **) &dI, ddsize); if (err != hipSuccess) { std::cout << hipGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << std::endl; } // Creates identify matrix I = new double[n * n]; for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { if (i == j) I[i * n + i] = 1.0; else I[i * n + j] = 0.0; } } // Copy data from CPU to GPU err = hipMemcpy(d_A, matrix, ddsize, hipMemcpyHostToDevice); if (err != hipSuccess) { std::cout << hipGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << std::endl; } err = hipMemcpy(dI, I, ddsize, hipMemcpyHostToDevice); if (err != hipSuccess) { std::cout << hipGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << std::endl; } // Timer start hipEventRecord(start, 0); // L^(-1) for (int i = 0; i < n; i++) { nodiag_normalize << < numBlocks, threadsPerBlock >> > (d_A, dI, n, i); diag_normalize << < numBlocks, threadsPerBlock >> > (d_A, dI, n, i); gaussjordan << < numBlocks, threadsPerBlock >> > (d_A, dI, n, i); set_zero << < numBlocks, threadsPerBlock >> > (d_A, n, i); } // Record cuda events hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); hipEventDestroy(start); hipEventDestroy(stop); // Copy data from GPU to CPU err = hipMemcpy(iMatrix, dI, ddsize, hipMemcpyDeviceToHost); if (err != hipSuccess) { std::cout << hipGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << std::endl; } err = hipMemcpy(I, d_A, ddsize, hipMemcpyDeviceToHost); if (err != hipSuccess) { std::cout << hipGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << std::endl; } std::cout << "[CUDA] Matrix inversion time: " << time << "ms\n" << std::endl; // Free memory hipFree(d_A); hipFree(dI); delete[] I; // Generate matrix return new FEMatrix(n, n, iMatrix); }
d376158418e26f0fd7f3ecc92e0aa905448c1964.cu
/** FNELEM-GPU GPU MATRIX INVERSION Performs matrix inversion using Gauss Jordan algorithm. Based on: https://github.com/ZhengzhongSun/Matrix-Inversion-with-CUDA @package fnelem.math @author ppizarror @date 19/11/2018 @license MIT License Copyright (c) 2018 Pablo Pizarro R. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ // Library imports #include <cuda.h> #include <stdio.h> #include <iostream> // Constants #define __MATRIX_INVERSION_CUDA_BLOCKSIZE 8 /** * NODIAG normalize diagonal matrix (CUDA). * * @param A Matrix * @param I Matrix * @param n Dimension * @param i Position */ __global__ void nodiag_normalize(double *A, double *I, int n, int i) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < n && y < n) if (x == i && x != y) { I[x * n + y] /= A[i * n + i]; A[x * n + y] /= A[i * n + i]; } } /** * DIAG normalize diagonal matrix (CUDA). * * @param A Matrix * @param I Matrix * @param n Dimension * @param i Position */ __global__ void diag_normalize(double *A, double *I, int n, int i) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < n && y < n) if (x == y && x == i) { I[x * n + y] /= A[i * n + i]; A[x * n + y] /= A[i * n + i]; } } /** * Performs Gauss Jordan algorithm (CUDA). * * @param A Matrix * @param I Matrix * @param n Dimension * @param i Position */ __global__ void gaussjordan(double *A, double *I, int n, int i) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < n && y < n) { if (x != i) { I[x * n + y] -= I[i * n + y] * A[x * n + i]; if (y != i) { A[x * n + y] -= A[i * n + y] * A[x * n + i]; } } } } /** * Set zero on matrix (CUDA). * * @param A Matrix * @param I Matrix * @param n Dimension * @param i Position */ __global__ void set_zero(double *A, int n, int i) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < n && y < n) { if (x != i) { if (y == i) { A[x * n + y] = 0; } } } } /** * Matrix inversion, uses CUDA. 
* * @param feMatrix Matrix to inverse * @return Inverse matrix */ FEMatrix *matrix_inverse_cuda(FEMatrix *feMatrix) { // Get matrix double *matrix = feMatrix->get_array(); // Get matrix dimension int *matDim = feMatrix->size(); int n; if (matDim[0] == matDim[1]) { n = matDim[0]; } else { throw std::logic_error("Matrix to inverse is not square"); } // Inverse matrix CPU double *iMatrix = new double[n * n]; // Create auxiliar matrices double *d_A, *I, *dI; // Time of computation float time; // Create CUDA error handlers cudaError_t err; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // Matrix memory size int ddsize = n * n * sizeof(double); // Creates blocks dim3 threadsPerBlock(__MATRIX_INVERSION_CUDA_BLOCKSIZE, __MATRIX_INVERSION_CUDA_BLOCKSIZE); dim3 numBlocks((n + __MATRIX_INVERSION_CUDA_BLOCKSIZE - 1) / __MATRIX_INVERSION_CUDA_BLOCKSIZE, (n + __MATRIX_INVERSION_CUDA_BLOCKSIZE - 1) / __MATRIX_INVERSION_CUDA_BLOCKSIZE); // Memory allocation err = cudaMalloc((void **) &d_A, ddsize); if (err != cudaSuccess) { std::cout << cudaGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << std::endl; } err = cudaMalloc((void **) &dI, ddsize); if (err != cudaSuccess) { std::cout << cudaGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << std::endl; } // Creates identify matrix I = new double[n * n]; for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { if (i == j) I[i * n + i] = 1.0; else I[i * n + j] = 0.0; } } // Copy data from CPU to GPU err = cudaMemcpy(d_A, matrix, ddsize, cudaMemcpyHostToDevice); if (err != cudaSuccess) { std::cout << cudaGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << std::endl; } err = cudaMemcpy(dI, I, ddsize, cudaMemcpyHostToDevice); if (err != cudaSuccess) { std::cout << cudaGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << std::endl; } // Timer start cudaEventRecord(start, 0); // L^(-1) for (int i = 0; i < n; i++) { nodiag_normalize << < numBlocks, threadsPerBlock >> > (d_A, dI, n, i); diag_normalize << < numBlocks, threadsPerBlock >> > (d_A, dI, n, i); gaussjordan << < numBlocks, threadsPerBlock >> > (d_A, dI, n, i); set_zero << < numBlocks, threadsPerBlock >> > (d_A, n, i); } // Record cuda events cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); // Copy data from GPU to CPU err = cudaMemcpy(iMatrix, dI, ddsize, cudaMemcpyDeviceToHost); if (err != cudaSuccess) { std::cout << cudaGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << std::endl; } err = cudaMemcpy(I, d_A, ddsize, cudaMemcpyDeviceToHost); if (err != cudaSuccess) { std::cout << cudaGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << std::endl; } std::cout << "[CUDA] Matrix inversion time: " << time << "ms\n" << std::endl; // Free memory cudaFree(d_A); cudaFree(dI); delete[] I; // Generate matrix return new FEMatrix(n, n, iMatrix); }
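Since Gauss-Jordan elimination can lose accuracy on ill-conditioned inputs, a quick sanity check is to multiply the original matrix by the returned inverse and compare against the identity; checkInverse below is a hypothetical helper (not part of the original file) for the row-major n x n layout used by the kernels above.

#include <cmath>

// Hypothetical check: multiply the original matrix by the inverse returned from
// matrix_inverse_cuda and compare against the identity (row-major n x n layout,
// i*n + j indexing as in the kernels above).
bool checkInverse(const double* A, const double* Ainv, int n, double tol = 1e-6)
{
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
            double sum = 0.0;
            for (int l = 0; l < n; ++l) {
                sum += A[i*n + l]*Ainv[l*n + j];
            }
            const double expected = (i == j) ? 1.0 : 0.0;
            if (std::fabs(sum - expected) > tol) {
                return false;
            }
        }
    }
    return true;
}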
85aecd8c7ffee95c890d91f1625fda61c43ec302.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "gemm.hip" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *A = NULL; hipMalloc(&A, XSIZE*YSIZE); float *B = NULL; hipMalloc(&B, XSIZE*YSIZE); float *C = NULL; hipMalloc(&C, XSIZE*YSIZE); int m = 2; int n = XSIZE*YSIZE; int k = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( gemm), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,m,n,k); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( gemm), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,m,n,k); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( gemm), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,m,n,k); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
85aecd8c7ffee95c890d91f1625fda61c43ec302.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "gemm.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *A = NULL; cudaMalloc(&A, XSIZE*YSIZE); float *B = NULL; cudaMalloc(&B, XSIZE*YSIZE); float *C = NULL; cudaMalloc(&C, XSIZE*YSIZE); int m = 2; int n = XSIZE*YSIZE; int k = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); gemm<<<gridBlock,threadBlock>>>(A,B,C,m,n,k); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { gemm<<<gridBlock,threadBlock>>>(A,B,C,m,n,k); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { gemm<<<gridBlock,threadBlock>>>(A,B,C,m,n,k); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
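Because the timed loop above reads steady_clock right after enqueuing 1000 asynchronous launches, it can under-report kernel time; the helper below is a hypothetical variant (timeKernelMs is not part of the original source) that synchronizes the device before each timestamp so the measurement covers completed work.

#include <cuda_runtime.h>
#include <chrono>

// Hypothetical timing helper: synchronize before both timestamps so the
// measurement covers completed kernel work, then return the average time per
// launch in milliseconds.
template <typename LaunchFn>
float timeKernelMs(LaunchFn launch, int warmupIters = 10, int timedIters = 1000)
{
    for (int i = 0; i < warmupIters; ++i) {
        launch();
    }
    cudaDeviceSynchronize();
    const auto start = std::chrono::steady_clock::now();
    for (int i = 0; i < timedIters; ++i) {
        launch();
    }
    cudaDeviceSynchronize();
    const auto stop = std::chrono::steady_clock::now();
    const std::chrono::duration<float, std::milli> elapsed = stop - start;
    return elapsed.count()/timedIters;
}
// Example use, with gridBlock, threadBlock, and the gemm arguments as in main:
//   float ms = timeKernelMs([&] { gemm<<<gridBlock, threadBlock>>>(A, B, C, m, n, k); });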
3908f8484391cdc8bc88a919dbe34828ffa29e62.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include<iostream> #include <windows.h> #define GF 256 #define PP 301 using namespace std; //__global__CUDA_squareGPU //---------------------------------------------- // int *Log = new int[256]; int *ALog = new int[256]; void FillLogArrays(void) { ALog[0] = 1; Log[0] = 1 - GF; for (int i = 1; i<GF; i++) { ALog[i] = ALog[i - 1] * 2; if (ALog[i] >= GF) ALog[i] ^= PP; Log[ALog[i]] = i; } } //CPU---------------------------------------------------------- int cpu_modProduct(int A, int B) { if ((A == 0) || (B == 0)) return (0); else { return (ALog[(Log[A] + Log[B]) % (GF - 1)]); } } int cpu_modPower(int x, int a) { if (a == 0)return 1; else return cpu_modProduct(x, cpu_modPower(x, a - 1));//x*power(x,a-1); } int cpu_modQuotient(int A, int B)// namely A divided by B { if (B == 0) return (1 - GF); else if (A == 0) return (0); else { return (ALog[(Log[A] - Log[B] + (GF - 1)) % (GF - 1)]); } } int cpu_modSum(int A, int B) { return (A ^ B); } int cpu_modDifference(int A, int B) { return (A ^ B); } //---------------------------------------------------------------- __device__ int modProduct(int A, int B, int * gpu_Log, int * gpu_ALog) { if ((A == 0) || (B == 0)) return (0); else { return (gpu_ALog[((gpu_Log[A] + gpu_Log[B]) % (GF - 1))]); } } __device__ int modPower(int x, int a, int * gpu_Log, int * gpu_ALog) { int test = x; for (int i = 1; i <a; i++) { test = modProduct(x, test, gpu_Log, gpu_ALog); } return test; } __device__ int modQuotient(int A, int B, int * gpu_Log, int * gpu_ALog)// namely A divided by B { if (B == 0) return (1 - GF); else if (A == 0) return (0); else { return (gpu_ALog[(gpu_Log[A] - gpu_Log[B] + (GF - 1)) % (GF - 1)]); } } __device__ int modSum(int A, int B) { return (A ^ B); } __device__ int modDifference(int A, int B) { return (A ^ B); } /*__device__ int datarandom(){ }*/ __global__ void gpu_sharing(int k, int n, int dataDim, int *gpu_dataA, int * gpu_share, int *gpu_Log, int *gpu_ALog, int *gpu_rand) { int block_id = blockIdx.x;//block int thread_id = threadIdx.x; //thread int index = block_id *blockDim.x + thread_id; int *ans = new int[n]; for (int i = 0; i < n; i++) { ans[i] = 0; } //------------------------------------------------------------------------ /* hiprandState_t state; hiprand_init(0, index, 0, &state); for (int i = 0; i < k - 1; i++) { //k-1k-1P randCoefficient[i] = hiprand(&state) % 256; }*/ //------------------------------------------------------------------------ for (int h = 0; h < n; h++) { //n for (int j = 0; j < k - 1; j++) { //*X ans[h] = modSum(ans[h], modProduct(gpu_rand[j], modPower(h + 1, j + 1, gpu_Log, gpu_ALog), gpu_Log, gpu_ALog));//pow(h+1,j+1)--->h+1= j+1= } gpu_share[h*dataDim + index] = modSum(gpu_dataA[index], ans[h]); //gpu_dataA[index]; //= ans[h]; } free(ans); //gpu_share[index] = gpu_dataB[index]; //gpu_share[index + dataDim] = gpu_share[index]; //gpu_share[index + 2*dataDim] = gpu_share[index]; //gpu_share[index + 4* dataDim] = gpu_share[index]; } __global__ void gpu_desharing(int k, int n, int dataDim, int *gpu_dataB, int * gpu_share, int *gpu_Log, int *gpu_ALog) { int block_id = blockIdx.x;//block int thread_id = threadIdx.x; //thread int index = block_id *blockDim.x + thread_id; float L_end = 0; int L_Product = 1; for (int h = 0; h < k; h++) { //K L_Product = 1; for (int j = 0; j < k; j++) { if (h 
== j)continue; L_Product = modProduct(L_Product, modQuotient(j + 1, modDifference(j + 1, h + 1), gpu_Log, gpu_ALog), gpu_Log, gpu_ALog); } L_Product = modProduct(gpu_share[index + h*dataDim], L_Product, gpu_Log, gpu_ALog); L_end = modSum(L_end, L_Product); } gpu_dataB[index] = L_end; } int main() { //threadnum blocknum ---------------------------------------- int blocknum, threadnum; int k, n; int i, j; cout << "threadnum :"; cin >> threadnum; cout << endl; cout << "blocknum :"; cin >> blocknum; cout << endl; cout << "k = "; cin >> k; cout << endl; cout << "n = "; cin >> n; cout << endl; int dataDim = blocknum * threadnum; cout << ":" << dataDim << endl; //----------------------------------------------------------- int *gpu_Log; int *gpu_ALog; // int * gpu_k; // int * gpu_n; int * gpu_dataA; //int * share; int * gpu_share; int *gpu_dataB; int *gpu_rand; //----------------------------------------------------------------- int *data = (int*)malloc(sizeof(int)*(dataDim)); int *dataB = (int*)malloc(sizeof(int)*(dataDim)); int *randCoefficient = (int*)malloc(sizeof(int)*k); // int *data2 = (int*) malloc (sizeof(int)*dataDim); int *share = (int*)malloc(sizeof(int)*n*dataDim); //share = new int[n*dataDim]; // for (i = 0; i < n; i++) // share[i] = new int [dataDim]; //----------------------------------------------------------------- srand(time(NULL)); FillLogArrays(); for (i = 0; i < dataDim; i++) { data[i] = (rand() % 256) + 1; } for (i = 0; i < 2; i++) { cout<<"test data["<<i<<"]:"<<data[i]<<endl; } cout<<endl<< "2:-------------------------"<<endl; for (i = dataDim - 1; i >dataDim - 3; i--) { cout << "test data[" << i << "]:" << data[i] << endl; } cout << endl; for (int x = 0; x < k - 1; x++) { randCoefficient[x] = (rand() % 256) + 1; } //GPU----------------------------------------------------------------- hipMalloc((int**)&gpu_Log, sizeof(int)*GF); hipMalloc((int**)&gpu_ALog, sizeof(int)*GF); hipMalloc((int**)&gpu_dataA, sizeof(int)*dataDim); hipMalloc((int**)&gpu_share, sizeof(int)*n*dataDim); hipMalloc((int**)&gpu_rand, sizeof(int)*k); cout << ":" << sizeof(int)*k + sizeof(int)*n*dataDim + sizeof(int)*dataDim + sizeof(int)*GF + sizeof(int)*GF << endl; //----------------------------------------------------------------------- /*float time__tran, time__tran2,costime1,costime2; hipEvent_t start_tran, stop_tran, start_tran2, stop_tran2,time_start,time_end,time_start2, time_end2; hipEventCreate(&start_tran); hipEventCreate(&stop_tran); hipEventCreate(&start_tran2); hipEventCreate(&stop_tran2); hipEventCreate(&time_start); hipEventCreate(&time_end); hipEventCreate(&time_start2); hipEventCreate(&time_end2); hipEventRecord(start_tran, 0);*/ /* for (i = 0; i < n; i++) { for (int j = 0; j < 5; j++) { //printf("GPU_share[%2d]: %d \n ", j, share[i * dataDim + j]);//, Log[j], ALog[j] cout << "TEST GPU_share[" << j << "]" << share[i * dataDim + j] << endl; } printf("\n"); }*/ //GPUCPUGPU------------------------------------------------------------------------------ hipMemcpy(gpu_Log, Log, sizeof(int)*GF, hipMemcpyHostToDevice); hipMemcpy(gpu_ALog, ALog, sizeof(int)*GF, hipMemcpyHostToDevice); hipMemcpy(gpu_dataA, data, sizeof(int)*dataDim, hipMemcpyHostToDevice); //hipMemcpy(gpu_dataB, dataB, sizeof(int)*dataDim, hipMemcpyHostToDevice); hipMemcpy(gpu_rand, randCoefficient, sizeof(int)*k, hipMemcpyHostToDevice); hipMemcpy(gpu_share, share, sizeof(int)*n*dataDim, hipMemcpyHostToDevice); /*hipEventRecord(stop_tran, 0); hipEventSynchronize(stop_tran); hipEventElapsedTime(&time__tran, start_tran, stop_tran);*/ /* 
free(data); free(randCoefficient); free(ALog); free(Log);*/ //GPU---------------------------------------------------------------------------- gpu_sharing << <blocknum, threadnum, 0 >> >(k, n, dataDim, gpu_dataA, gpu_share, gpu_Log, gpu_ALog, gpu_rand); hipMemcpy(share, gpu_share, sizeof(int)*n*dataDim, hipMemcpyDeviceToHost); hipFree(gpu_dataA); hipFree(gpu_rand); //GPU---------------------------------------------------------------------------- hipMalloc((int**)&gpu_dataB, sizeof(int)*dataDim); gpu_desharing << <blocknum, threadnum, 0 >> >(k, n, dataDim, gpu_dataB, gpu_share, gpu_Log, gpu_ALog); hipDeviceSynchronize();// //GPUCPU-------------------------------------------------------------------------------------- // for (i=0; i<n; i++) //GPUGPUCPU------------------------------------------------------------------------------ hipMemcpy(dataB, gpu_dataB, sizeof(int)*dataDim, hipMemcpyDeviceToHost); // hipMemcpy(data, gpu_dataA, sizeof(int)*dataDim, hipMemcpyDeviceToHost); // hipMemcpy(ALog, gpu_ALog, sizeof(int)*256, hipMemcpyDeviceToHost); //---------------------------------------------------------------------------------------------------------- /*hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time_ms, start, stop); hipEventDestroy(start); hipEventDestroy(stop); cout << ":" << elapsed_time_ms / 1000 << endl;*/ //--------------------------------------------------------------------- //printf("CPU %f \n", L_end);//GPU cout << "-------------------------"<<endl; for (i = 0; i < n; i++) { for (int j = 0; j < 2; j++) { cout << "GPU_share[" << j << "]" << share[i * dataDim + j]<<endl;//printf("GPU_share[%2d]: %d \n ", j, share[i * dataDim + j]);//, Log[j], ALog[j] } printf("\n"); } cout << endl << "2:-------------------------" << endl; for (i = 0; i < n; i++) { for (int j = dataDim-1; j >dataDim-3; j--) { //printf("GPU_share[%2d]: %d \n ", j, share[i * dataDim + j]);//, Log[j], ALog[j] cout << "GPU_share[" << j << "]" << share[i * dataDim + j] << endl; } printf("\n"); } cout << "----------------------" << endl; for (i = 0; i < 2; i++) { cout << "GPU_data[" << i << "]:" << dataB[i] << endl; } cout << endl << "2:-------------------------" << endl; for (i = dataDim-1; i >dataDim-3; i--) { cout << "GPU_data[" << i << "]:" << dataB[i] << endl; } //cout << ":" << elapsed_time_ms/1000 << endl; cout << "----------------------" << endl; hipFree(gpu_Log); hipFree(gpu_ALog); hipFree(gpu_dataB); hipFree(gpu_share); //CPU---------------------------------------------------------------------- //printf("CPU \n"); clock_t end2; clock_t start2; double costTime_encode = 0; double costTime_decode = 0; int* cpu_ans = new int[n]; int *L_end = new int[dataDim]; for (int i = 0; i < dataDim; i++) { L_end[i] = 0; } int L_Product = 1; for (int h = 0; h < dataDim; h++) { start2 = clock(); for (j = 0; j < n; j++) { cpu_ans[j] = 0; } for (i = 0; i < n; i++) { //n for (j = 0; j < k - 1; j++) { //*X cpu_ans[i] = cpu_modSum(cpu_ans[i], cpu_modProduct(randCoefficient[j], cpu_modPower(i + 1, j + 1)));//pow(i+1,j+1)--->i+1= j+1= } cpu_ans[i] = cpu_modSum(cpu_ans[i], data[h]); } end2 = clock(); costTime_encode = costTime_encode + ((double)(end2 - start2) / CLK_TCK); //Decover---------------------------------------------------------------------- start2 = clock(); L_Product = 1; for (i = 0; i < k; i++) { //K L_Product = 1; // for (j = 0; j < k; j++) { if (i == j)continue; L_Product = cpu_modProduct(L_Product, cpu_modQuotient(j + 1, cpu_modDifference(j + 1, i + 1))); } L_Product = 
cpu_modProduct(cpu_ans[i + 1 - 1], L_Product); L_end[h] = cpu_modSum(L_end[h], L_Product); } end2 = clock(); costTime_decode = costTime_decode + ((double)(end2 - start2) / CLK_TCK); } for (i = 0; i <2 ; i++) { cout <<"CPU_data[" << i << "]:" << L_end[i] << endl; } cout<<endl<<"Last 2 entries:-------------------------" << endl; for (i = dataDim - 1; i >dataDim - 3; i--) { cout << "CPU_data[" << i << "]:" << L_end[i] << endl; } cout << "Encryption total: " << costTime_encode << " s\n"; cout << "Decryption total: " << costTime_decode << " s\n"; /*for (i = 0; i < 10; i++) { cout << "Data[" << i << "] " << cpu_data[i] << "\n"; } for (i = 0; i < 10; i++) { cout << "Decode[" << i << "] " << L_end[i] << "\n"; }*/ system("pause"); }
3908f8484391cdc8bc88a919dbe34828ffa29e62.cu
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <curand.h>
#include <curand_kernel.h>
#include <iostream>
#include <windows.h>

#define GF 256
#define PP 301

using namespace std;

// Functions declared with __global__ are executed on the GPU.
//----------------------------------------------
// Encryption (share generation) log/antilog tables for GF(256)
int *Log = new int[256];
int *ALog = new int[256];

void FillLogArrays(void)
{
    ALog[0] = 1;
    Log[0] = 1 - GF;
    for (int i = 1; i < GF; i++) {
        ALog[i] = ALog[i - 1] * 2;
        if (ALog[i] >= GF) ALog[i] ^= PP;
        Log[ALog[i]] = i;
    }
}

// CPU (host) GF(256) helpers ----------------------------------------------------------
int cpu_modProduct(int A, int B)
{
    if ((A == 0) || (B == 0)) return (0);
    else return (ALog[(Log[A] + Log[B]) % (GF - 1)]);
}
int cpu_modPower(int x, int a)
{
    if (a == 0) return 1;
    else return cpu_modProduct(x, cpu_modPower(x, a - 1)); // x * power(x, a-1)
}
int cpu_modQuotient(int A, int B) // namely A divided by B
{
    if (B == 0) return (1 - GF);
    else if (A == 0) return (0);
    else return (ALog[(Log[A] - Log[B] + (GF - 1)) % (GF - 1)]);
}
int cpu_modSum(int A, int B) { return (A ^ B); }
int cpu_modDifference(int A, int B) { return (A ^ B); }

// GPU (device) GF(256) helpers ----------------------------------------------------------------
__device__ int modProduct(int A, int B, int *gpu_Log, int *gpu_ALog)
{
    if ((A == 0) || (B == 0)) return (0);
    else return (gpu_ALog[(gpu_Log[A] + gpu_Log[B]) % (GF - 1)]);
}
__device__ int modPower(int x, int a, int *gpu_Log, int *gpu_ALog)
{
    int test = x;
    for (int i = 1; i < a; i++) {
        test = modProduct(x, test, gpu_Log, gpu_ALog);
    }
    return test;
}
__device__ int modQuotient(int A, int B, int *gpu_Log, int *gpu_ALog) // namely A divided by B
{
    if (B == 0) return (1 - GF);
    else if (A == 0) return (0);
    else return (gpu_ALog[(gpu_Log[A] - gpu_Log[B] + (GF - 1)) % (GF - 1)]);
}
__device__ int modSum(int A, int B) { return (A ^ B); }
__device__ int modDifference(int A, int B) { return (A ^ B); }

/*__device__ int datarandom(){ }*/

// Each thread turns one data element into n shares.
__global__ void gpu_sharing(int k, int n, int dataDim, int *gpu_dataA, int *gpu_share, int *gpu_Log, int *gpu_ALog, int *gpu_rand)
{
    int block_id = blockIdx.x;   // current block index
    int thread_id = threadIdx.x; // current thread index
    int index = block_id * blockDim.x + thread_id;
    int *ans = new int[n];       // per-thread accumulators, allocated on the device heap
    for (int i = 0; i < n; i++) {
        ans[i] = 0;
    }
    // Random coefficients ------------------------------------------------------------------------
    /* curandState_t state;
    curand_init(0, index, 0, &state);
    for (int i = 0; i < k - 1; i++) {
        // loop k-1 times to generate k-1 random coefficients smaller than P
        randCoefficient[i] = curand(&state) % 256;
    }*/
    //------------------------------------------------------------------------
    for (int h = 0; h < n; h++) {
        // n shares; accumulate the polynomial value for share h
        for (int j = 0; j < k - 1; j++) {
            // coefficient * x^power, with base h+1 and exponent j+1
            ans[h] = modSum(ans[h], modProduct(gpu_rand[j], modPower(h + 1, j + 1, gpu_Log, gpu_ALog), gpu_Log, gpu_ALog));
        }
        gpu_share[h * dataDim + index] = modSum(gpu_dataA[index], ans[h]);
        //gpu_dataA[index]; //= ans[h];
    }
    delete[] ans;
    //gpu_share[index] = gpu_dataB[index];
    //gpu_share[index + dataDim] = gpu_share[index];
    //gpu_share[index + 2*dataDim] = gpu_share[index];
    //gpu_share[index + 4*dataDim] = gpu_share[index];
}

// Each thread reconstructs one data element from its first k shares.
__global__ void gpu_desharing(int k, int n, int dataDim, int *gpu_dataB, int *gpu_share, int *gpu_Log, int *gpu_ALog)
{
    int block_id = blockIdx.x;   // current block index
    int thread_id = threadIdx.x; // current thread index
    int index = block_id * blockDim.x + thread_id;
    int L_end = 0;
    int L_Product = 1;
    for (int h = 0; h < k; h++) {
        // reconstruct from k shares
        L_Product = 1;
        for (int j = 0; j < k; j++) {
            if (h == j) continue;
            L_Product = modProduct(L_Product, modQuotient(j + 1, modDifference(j + 1, h + 1), gpu_Log, gpu_ALog), gpu_Log, gpu_ALog);
        }
        L_Product = modProduct(gpu_share[index + h * dataDim], L_Product, gpu_Log, gpu_ALog);
        L_end = modSum(L_end, L_Product);
    }
    gpu_dataB[index] = L_end;
}

int main()
{
    // Read thread count, block count and the (k, n) threshold parameters ----------------------------------------
    int blocknum, threadnum;
    int k, n;
    int i, j;
    cout << "number of threads: "; cin >> threadnum; cout << endl;
    cout << "number of blocks: ";  cin >> blocknum;  cout << endl;
    cout << "k = "; cin >> k; cout << endl;
    cout << "n = "; cin >> n; cout << endl;
    int dataDim = blocknum * threadnum;
    cout << "number of data elements: " << dataDim << endl;

    // Declarations -----------------------------------------------------------
    int *gpu_Log;
    int *gpu_ALog;
    // int *gpu_k;
    // int *gpu_n;
    int *gpu_dataA;
    //int *share;
    int *gpu_share;
    int *gpu_dataB;
    int *gpu_rand;

    // Host memory -----------------------------------------------------------------
    int *data = (int*)malloc(sizeof(int) * dataDim);
    int *dataB = (int*)malloc(sizeof(int) * dataDim);
    int *randCoefficient = (int*)malloc(sizeof(int) * k);
    // int *data2 = (int*)malloc(sizeof(int) * dataDim);
    int *share = (int*)malloc(sizeof(int) * n * dataDim);
    //share = new int[n * dataDim];
    // for (i = 0; i < n; i++) share[i] = new int[dataDim];

    // Initialize data and parameters -----------------------------------------------------------------
    srand(time(NULL));
    FillLogArrays();
    for (i = 0; i < dataDim; i++) {
        data[i] = (rand() % 256) + 1;
    }
    for (i = 0; i < 2; i++) {
        cout << "test data[" << i << "]:" << data[i] << endl;
    }
    cout << endl << "last 2 entries:-------------------------" << endl;
    for (i = dataDim - 1; i > dataDim - 3; i--) {
        cout << "test data[" << i << "]:" << data[i] << endl;
    }
    cout << endl;
    for (int x = 0; x < k - 1; x++) {
        randCoefficient[x] = (rand() % 256) + 1;
    }

    // Device memory -----------------------------------------------------------------
    cudaMalloc((int**)&gpu_Log, sizeof(int) * GF);
    cudaMalloc((int**)&gpu_ALog, sizeof(int) * GF);
    cudaMalloc((int**)&gpu_dataA, sizeof(int) * dataDim);
    cudaMalloc((int**)&gpu_share, sizeof(int) * n * dataDim);
    cudaMalloc((int**)&gpu_rand, sizeof(int) * k);
    cout << "bytes allocated: " << sizeof(int)*k + sizeof(int)*n*dataDim + sizeof(int)*dataDim + sizeof(int)*GF + sizeof(int)*GF << endl;

    //-----------------------------------------------------------------------
    /*float time__tran, time__tran2, costime1, costime2;
    cudaEvent_t start_tran, stop_tran, start_tran2, stop_tran2, time_start, time_end, time_start2, time_end2;
    cudaEventCreate(&start_tran);  cudaEventCreate(&stop_tran);
    cudaEventCreate(&start_tran2); cudaEventCreate(&stop_tran2);
    cudaEventCreate(&time_start);  cudaEventCreate(&time_end);
    cudaEventCreate(&time_start2); cudaEventCreate(&time_end2);
    cudaEventRecord(start_tran, 0);*/
    /*for (i = 0; i < n; i++) {
        for (int j = 0; j < 5; j++) {
            //printf("GPU_share[%2d]: %d \n ", j, share[i * dataDim + j]); //, Log[j], ALog[j]
            cout << "TEST GPU_share[" << j << "]" << share[i * dataDim + j] << endl;
        }
        printf("\n");
    }*/

    // Copy encryption inputs from host to device ------------------------------------------------------------------------------
    cudaMemcpy(gpu_Log, Log, sizeof(int) * GF, cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_ALog, ALog, sizeof(int) * GF, cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_dataA, data, sizeof(int) * dataDim, cudaMemcpyHostToDevice);
    //cudaMemcpy(gpu_dataB, dataB, sizeof(int) * dataDim, cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_rand, randCoefficient, sizeof(int) * k, cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_share, share, sizeof(int) * n * dataDim, cudaMemcpyHostToDevice);
    /*cudaEventRecord(stop_tran, 0);
    cudaEventSynchronize(stop_tran);
    cudaEventElapsedTime(&time__tran, start_tran, stop_tran);*/
    /*free(data); free(randCoefficient); free(ALog); free(Log);*/

    // GPU encryption (share generation) ----------------------------------------------------------------------------
    gpu_sharing<<<blocknum, threadnum>>>(k, n, dataDim, gpu_dataA, gpu_share, gpu_Log, gpu_ALog, gpu_rand);
    cudaMemcpy(share, gpu_share, sizeof(int) * n * dataDim, cudaMemcpyDeviceToHost);
    cudaFree(gpu_dataA);
    cudaFree(gpu_rand);

    // GPU decryption (reconstruction) ----------------------------------------------------------------------------
    cudaMalloc((int**)&gpu_dataB, sizeof(int) * dataDim);
    gpu_desharing<<<blocknum, threadnum>>>(k, n, dataDim, gpu_dataB, gpu_share, gpu_Log, gpu_ALog);
    cudaDeviceSynchronize(); // synchronize

    // Copy reconstructed data from device to host ------------------------------------------------------------------------------
    cudaMemcpy(dataB, gpu_dataB, sizeof(int) * dataDim, cudaMemcpyDeviceToHost);
    // cudaMemcpy(data, gpu_dataA, sizeof(int) * dataDim, cudaMemcpyDeviceToHost);
    // cudaMemcpy(ALog, gpu_ALog, sizeof(int) * 256, cudaMemcpyDeviceToHost);

    //----------------------------------------------------------------------------------------------------------
    /*cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed_time_ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cout << "seconds: " << elapsed_time_ms / 1000 << endl;*/

    // Output ---------------------------------------------------------------------
    //printf("CPU %f \n", L_end); //GPU
    cout << "-------------------------" << endl;
    for (i = 0; i < n; i++) {
        for (int j = 0; j < 2; j++) {
            cout << "GPU_share[" << j << "]" << share[i * dataDim + j] << endl; //printf("GPU_share[%2d]: %d \n ", j, share[i * dataDim + j]); //, Log[j], ALog[j]
        }
        printf("\n");
    }
    cout << endl << "last 2 entries:-------------------------" << endl;
    for (i = 0; i < n; i++) {
        for (int j = dataDim - 1; j > dataDim - 3; j--) {
            //printf("GPU_share[%2d]: %d \n ", j, share[i * dataDim + j]); //, Log[j], ALog[j]
            cout << "GPU_share[" << j << "]" << share[i * dataDim + j] << endl;
        }
        printf("\n");
    }
    cout << "----------------------" << endl;
    for (i = 0; i < 2; i++) {
        cout << "GPU_data[" << i << "]:" << dataB[i] << endl;
    }
    cout << endl << "last 2 entries:-------------------------" << endl;
    for (i = dataDim - 1; i > dataDim - 3; i--) {
        cout << "GPU_data[" << i << "]:" << dataB[i] << endl;
    }
    //cout << "seconds: " << elapsed_time_ms / 1000 << endl;
    cout << "----------------------" << endl;
    cudaFree(gpu_Log);
    cudaFree(gpu_ALog);
    cudaFree(gpu_dataB);
    cudaFree(gpu_share);

    // CPU encryption/decryption (reference implementation and timing) ----------------------------------------------------------------------
    //printf("CPU encryption and decryption time\n");
    clock_t end2;
    clock_t start2;
    double costTime_encode = 0;
    double costTime_decode = 0;
    int *cpu_ans = new int[n];
    int *L_end = new int[dataDim];
    for (int i = 0; i < dataDim; i++) {
        L_end[i] = 0;
    }
    int L_Product = 1;
    for (int h = 0; h < dataDim; h++) {
        start2 = clock();
        for (j = 0; j < n; j++) {
            cpu_ans[j] = 0;
        }
        for (i = 0; i < n; i++) {
            // n shares; accumulate the polynomial value for share i
            for (j = 0; j < k - 1; j++) {
                // coefficient * x^power, with base i+1 and exponent j+1
                cpu_ans[i] = cpu_modSum(cpu_ans[i], cpu_modProduct(randCoefficient[j], cpu_modPower(i + 1, j + 1)));
            }
            cpu_ans[i] = cpu_modSum(cpu_ans[i], data[h]);
        }
        end2 = clock();
        costTime_encode = costTime_encode + ((double)(end2 - start2) / CLOCKS_PER_SEC);
        // Recover ----------------------------------------------------------------------
        start2 = clock();
        L_Product = 1;
        for (i = 0; i < k; i++) {
            // reconstruct from k shares
            L_Product = 1; // accumulated Lagrange basis product
            for (j = 0; j < k; j++) {
                if (i == j) continue;
                L_Product = cpu_modProduct(L_Product, cpu_modQuotient(j + 1, cpu_modDifference(j + 1, i + 1)));
            }
            L_Product = cpu_modProduct(cpu_ans[i], L_Product);
            L_end[h] = cpu_modSum(L_end[h], L_Product);
        }
        end2 = clock();
        costTime_decode = costTime_decode + ((double)(end2 - start2) / CLOCKS_PER_SEC);
    }
    for (i = 0; i < 2; i++) {
        cout << "CPU_data[" << i << "]:" << L_end[i] << endl;
    }
    cout << endl << "last 2 entries:-------------------------" << endl;
    for (i = dataDim - 1; i > dataDim - 3; i--) {
        cout << "CPU_data[" << i << "]:" << L_end[i] << endl;
    }
    cout << "encryption total: " << costTime_encode << " s\n";
    cout << "decryption total: " << costTime_decode << " s\n";
    /*for (i = 0; i < 10; i++) { cout << "Data[" << i << "] :" << cpu_data[i] << "\n"; }
    for (i = 0; i < 10; i++) { cout << "Decode[" << i << "] :" << L_end[i] << "\n"; }*/
    free(data);
    free(dataB);
    free(randCoefficient);
    free(share);
    delete[] cpu_ans;
    delete[] L_end;
    delete[] Log;
    delete[] ALog;
    system("pause");
    return 0;
}
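The two kernels above implement a (k, n) threshold scheme over GF(256): gpu_sharing evaluates, for each data element, a degree-(k-1) polynomial whose constant term is the data value at the points x = 1..n, and gpu_desharing recovers that constant term from the first k shares by Lagrange interpolation at x = 0. The following self-contained, host-only sketch walks through the same arithmetic for a single byte; it is an illustration under my own naming (logTab, gmul, gdiv, gpow, and the chosen k, n, secret, and coefficients), not part of the program above.

// gf256_threshold_demo.cpp — illustrative sketch only; every name here is mine.
#include <cstdio>
#include <cassert>

static int logTab[256], alogTab[256];
static const int kGF = 256;   // field size, matches GF above
static const int kPP = 301;   // reduction polynomial x^8+x^5+x^3+x^2+1, matches PP above

static void fillTables() {           // same construction as FillLogArrays()
    alogTab[0] = 1;
    logTab[0] = 1 - kGF;             // sentinel, never used (0 has no logarithm)
    for (int i = 1; i < kGF; i++) {
        alogTab[i] = alogTab[i - 1] * 2;
        if (alogTab[i] >= kGF) alogTab[i] ^= kPP;
        logTab[alogTab[i]] = i;
    }
}
static int gmul(int a, int b) {      // multiplication via log/antilog tables
    return (a == 0 || b == 0) ? 0 : alogTab[(logTab[a] + logTab[b]) % (kGF - 1)];
}
static int gdiv(int a, int b) {      // division; callers below never pass b == 0
    return (a == 0) ? 0 : alogTab[(logTab[a] - logTab[b] + (kGF - 1)) % (kGF - 1)];
}
static int gpow(int x, int a) {      // x^a by repeated multiplication
    int r = 1;
    while (a-- > 0) r = gmul(r, x);
    return r;
}

int main() {
    fillTables();
    const int k = 3, n = 5;
    const int secret = 0x5A;                 // the byte to protect
    int coeff[k - 1] = { 17, 200 };          // random in the real program (gpu_rand)
    int share[n];

    // Share generation: share[h] = secret XOR sum_j coeff[j] * (h+1)^(j+1),
    // i.e. evaluate a degree-(k-1) polynomial with constant term `secret` at x = h+1.
    for (int h = 0; h < n; h++) {
        int acc = secret;
        for (int j = 0; j < k - 1; j++)
            acc ^= gmul(coeff[j], gpow(h + 1, j + 1));
        share[h] = acc;
    }

    // Reconstruction from the first k shares: Lagrange interpolation at x = 0
    // (in characteristic 2 the usual signs disappear, so x_j - x_i is just XOR).
    int recovered = 0;
    for (int i = 0; i < k; i++) {
        int basis = 1;
        for (int j = 0; j < k; j++)
            if (j != i) basis = gmul(basis, gdiv(j + 1, (j + 1) ^ (i + 1)));
        recovered ^= gmul(share[i], basis);
    }
    printf("secret = %d, recovered = %d\n", secret, recovered);
    assert(recovered == secret);
    return 0;
}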
234c38cb82b5908c30f3473cf7ed3827f4ca8e7e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cmath>

// One thread per scalar multiplication: thread i handles
// matr1[row1][common] * matr2[common][column2] and accumulates it atomically.
__global__ void add(int n, float **matr1, float **matr2, float **resMatr)
{
    long i = blockIdx.x * blockDim.x + threadIdx.x;
    long total = n * n * n;
    if (i < total) {
        long number = i;
        long common = number % n;   // shared index of the dot product
        number = number / n;
        long column2 = number % n;  // column of the second matrix
        number = number / n;
        long row1 = number;         // row of the first matrix
        atomicAdd(&resMatr[row1][column2], matr1[row1][common] * matr2[common][column2]);
    }
}

int main(void)
{
    int N = 2;
    int total = N * N * N;  // number of scalar multiplications, i.e. threads needed

    // Allocate the matrices in unified memory: first the row-pointer arrays, then each row.
    float **matr1, **matr2, **resMatr;
    hipMallocManaged(&matr1, N * sizeof(float*));
    hipMallocManaged(&matr2, N * sizeof(float*));
    hipMallocManaged(&resMatr, N * sizeof(float*));
    for (int i = 0; i < N; i++) {
        hipMallocManaged(&(matr1[i]), N * sizeof(float));
        hipMallocManaged(&(matr2[i]), N * sizeof(float));
        hipMallocManaged(&(resMatr[i]), N * sizeof(float));
    }

    // Initialize the matrices on the host.
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            matr1[i][j] = 1.0f;
            matr2[i][j] = 2.0f;
            resMatr[i][j] = 0.0f;
        }
    }

    // The number of threads launched must be at least total, i.e. N^3.
    hipLaunchKernelGGL(add, dim3(1), dim3(32), 0, 0, N, matr1, matr2, resMatr);

    // Wait for the GPU to finish before accessing the result on the host.
    hipDeviceSynchronize();

    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            std::cout << resMatr[i][j] << "\n";
        }
    }

    for (int i = 0; i < N; i++) {
        hipFree(matr1[i]);
        hipFree(matr2[i]);
        hipFree(resMatr[i]);
    }
    hipFree(matr1);
    hipFree(matr2);
    hipFree(resMatr);

    return 0;
}

/*
//alternative versions
//realized too late that this hash function is not optimal. Now it works but needs a better hash function. I basically multiplied the complexity by a factor of n
//I wanted each thread to manage a single scalar multiplication, but at this point it would be easier to just let each thread manage one row and one column
//the idea remains the same, but the hash function is n^2 and not n^4
//actually it would be interesting to see which one is faster, I will develop them both and compare the results
__global__ void add(int n, float **matr1, float **matr2, float **resMatr)
{
    long total = n*n;
    if (idx < total) {
        long column2 = idx % n;
        long row1 = idx / n;
        for (int i = 0; i < n; i++) {
            resMatr[row1][column2] = resMatr[row1][column2] + (matr1[row1][i] * matr2[i][column2]);
        }
    }
}

__global__ void add(int n, float **matr1, float **matr2, float **resMatr)
{
    // this should work. Also it does not need to be synchronized since every cell of the output is managed by a single thread.
    // will compare the 2 functions ASAP
    long i = blockIdx.x*blockDim.x + threadIdx.x;
    long total = n*n*n*n;
    if (i < total) {
        long number = i;
        long column2 = number % n;
        number = number / n;
        long row2 = number % n;
        number = number / n;
        long column1 = number % n;
        number = number / n;
        long row1 = number;
        if (column1 == row2) atomicAdd(&resMatr[row1][column2], matr1[row1][column1] * matr2[row2][column2]);
    }
}
*/
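The first commented-out alternative above describes a kernel in which each thread computes one full output cell (an n^2 index space), so no atomics are needed, but it is left with placeholders (idx is never defined). A minimal completed sketch of that idea follows; the kernel name matMulCell and the suggested launch are my own assumptions rather than part of the original file, and since the body is plain device code it reads the same under HIP and CUDA.

// Hypothetical completed version of the "one thread per output cell" idea.
__global__ void matMulCell(int n, float **matr1, float **matr2, float **resMatr)
{
    long idx = blockIdx.x * blockDim.x + threadIdx.x;
    long total = (long)n * n;          // one thread per cell of the result
    if (idx < total) {
        long column2 = idx % n;        // column of the result (and of matr2)
        long row1 = idx / n;           // row of the result (and of matr1)
        float sum = 0.0f;
        for (int i = 0; i < n; i++) {  // dot product of row1 of matr1 with column2 of matr2
            sum += matr1[row1][i] * matr2[i][column2];
        }
        resMatr[row1][column2] = sum;  // single writer per cell, so no atomicAdd is needed
    }
}
// Possible launch (assumed, not in the original file):
// hipLaunchKernelGGL(matMulCell, dim3((N * N + 255) / 256), dim3(256), 0, 0, N, matr1, matr2, resMatr);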
234c38cb82b5908c30f3473cf7ed3827f4ca8e7e.cu
#include <iostream>
#include <cmath>

// One thread per scalar multiplication: thread i handles
// matr1[row1][common] * matr2[common][column2] and accumulates it atomically.
__global__ void add(int n, float **matr1, float **matr2, float **resMatr)
{
    long i = blockIdx.x * blockDim.x + threadIdx.x;
    long total = n * n * n;
    if (i < total) {
        long number = i;
        long common = number % n;   // shared index of the dot product
        number = number / n;
        long column2 = number % n;  // column of the second matrix
        number = number / n;
        long row1 = number;         // row of the first matrix
        atomicAdd(&resMatr[row1][column2], matr1[row1][common] * matr2[common][column2]);
    }
}

int main(void)
{
    int N = 2;
    int total = N * N * N;  // number of scalar multiplications, i.e. threads needed

    // Allocate the matrices in unified memory: first the row-pointer arrays, then each row.
    float **matr1, **matr2, **resMatr;
    cudaMallocManaged(&matr1, N * sizeof(float*));
    cudaMallocManaged(&matr2, N * sizeof(float*));
    cudaMallocManaged(&resMatr, N * sizeof(float*));
    for (int i = 0; i < N; i++) {
        cudaMallocManaged(&(matr1[i]), N * sizeof(float));
        cudaMallocManaged(&(matr2[i]), N * sizeof(float));
        cudaMallocManaged(&(resMatr[i]), N * sizeof(float));
    }

    // Initialize the matrices on the host.
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            matr1[i][j] = 1.0f;
            matr2[i][j] = 2.0f;
            resMatr[i][j] = 0.0f;
        }
    }

    // The number of threads launched must be at least total, i.e. N^3.
    add<<<1, 32>>>(N, matr1, matr2, resMatr);

    // Wait for the GPU to finish before accessing the result on the host.
    cudaDeviceSynchronize();

    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            std::cout << resMatr[i][j] << "\n";
        }
    }

    for (int i = 0; i < N; i++) {
        cudaFree(matr1[i]);
        cudaFree(matr2[i]);
        cudaFree(resMatr[i]);
    }
    cudaFree(matr1);
    cudaFree(matr2);
    cudaFree(resMatr);

    return 0;
}

/*
//alternative versions
//realized too late that this hash function is not optimal. Now it works but needs a better hash function. I basically multiplied the complexity by a factor of n
//I wanted each thread to manage a single scalar multiplication, but at this point it would be easier to just let each thread manage one row and one column
//the idea remains the same, but the hash function is n^2 and not n^4
//actually it would be interesting to see which one is faster, I will develop them both and compare the results
__global__ void add(int n, float **matr1, float **matr2, float **resMatr)
{
    long total = n*n;
    if (idx < total) {
        long column2 = idx % n;
        long row1 = idx / n;
        for (int i = 0; i < n; i++) {
            resMatr[row1][column2] = resMatr[row1][column2] + (matr1[row1][i] * matr2[i][column2]);
        }
    }
}

__global__ void add(int n, float **matr1, float **matr2, float **resMatr)
{
    // this should work. Also it does not need to be synchronized since every cell of the output is managed by a single thread.
    // will compare the 2 functions ASAP
    long i = blockIdx.x*blockDim.x + threadIdx.x;
    long total = n*n*n*n;
    if (i < total) {
        long number = i;
        long column2 = number % n;
        number = number / n;
        long row2 = number % n;
        number = number / n;
        long column1 = number % n;
        number = number / n;
        long row1 = number;
        if (column1 == row2) atomicAdd(&resMatr[row1][column2], matr1[row1][column1] * matr2[row2][column2]);
    }
}
*/
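The launch in main() hard-codes add<<<1, 32>>>, which only covers matrices up to N = 3 (N^3 <= 32); the comment above the launch already notes that the thread count should match total. Below is a hedged sketch of deriving the grid size from N instead; the helper name launchAdd and the 256-thread block size are my own choices, not something the original file defines, and it assumes the add() kernel shown above.

// Minimal sketch (assumption, not part of the original file): size the grid so that
// at least N^3 threads reach the `i < total` guard inside add().
void launchAdd(int N, float **matr1, float **matr2, float **resMatr)
{
    long total = (long)N * N * N;                 // one thread per scalar multiplication
    int threadsPerBlock = 256;
    int blocks = (int)((total + threadsPerBlock - 1) / threadsPerBlock);  // ceiling division
    add<<<blocks, threadsPerBlock>>>(N, matr1, matr2, resMatr);
    cudaDeviceSynchronize();                      // wait before reading resMatr on the host
}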