hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars) |
---|---|---|---|
13aa3769a2611ae4d3ba511ad6eb6fedd79c094d.hip | // !!! This is a file automatically generated by hipify!!!
#define FP double
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <math.h>
// A: n * p, B: p * m, C: n * m
__global__ void gpu_matrixmult(FP *a, FP *b, FP *c, int n, int p, int m, int TW) {
extern __shared__ FP bigarray[];
FP *atile = &bigarray[0], *btile = &bigarray[TW * TW];
int tx = threadIdx.x; int ty = threadIdx.y; FP cvalue = 0.;
// current thread is responsible for computing the entry at (row, col) of C
int col = tx + blockDim.x * blockIdx.x;
int row = ty + blockDim.y * blockIdx.y;
for (int tile = 0; tile < p / TW; tile++) {
int indexa = row * p + tile * TW + tx; // current thread caches this entry of A
int indexb = (tile * TW + ty) * m + col; // current thread caches this entry of B
atile[ty * TW + tx] = (tile * TW + tx < p && row < n) ? a[indexa] : 0.;
btile[ty * TW + tx] = (tile * TW + ty < p && col < m) ? b[indexb] : 0.;
__syncthreads(); // make sure tiles are loaded before all threads can use them
for (int k = 0; k < TW; k++) cvalue += atile[ty * TW + k] * btile[k * TW + tx];
__syncthreads(); // make sure we're done using the tiles before overwriting them
}
if (col < m && row < n) {
c[row * m + col] = cvalue;
}
}
int main(int argc, char *argv[]) {
int i, j; // loop counters
int gpucount = 0; // Count of available GPUs
int gpunum = 0; // Device number to use
int Grid_Dim_x = 1; // Grid dimension, x
int Grid_Dim_y = 1; // Grid dimension, y
int Block_Dim_x = 1; // Block dimension, x
int Block_Dim_y = 1; // Block dimension, y
int TW;
int n, p, m; // matrix dimension (A: n * p, B: p * m, C: n * m)
FP *a, *b, *c;
FP *dev_a, *dev_b, *dev_c;
int sizeA, sizeB, sizeC; // number of bytes in arrays
hipEvent_t start, stop; // using cuda events to measure time
float elapsed_time_ms; // which is applicable for asynchronous code also
hipError_t errorcode;
// -------------------- SET PARAMETERS AND DATA -----------------------
errorcode = hipGetDeviceCount(&gpucount);
if (errorcode == hipErrorNoDevice) {
printf("No GPUs are visible\n");
exit(-1);
}
else {
printf("Device count = %d\n", gpucount);
}
if (argc != 5) {
printf("Usage: matmul-1a <matrix dim n> <matrix dim p> <matrix dim m> <block dim> \n");
exit (-1);
}
n = atoi(argv[1]);
p = atoi(argv[2]);
m = atoi(argv[3]);
Block_Dim_x = atoi(argv[4]); // Square block
Block_Dim_y = Block_Dim_x;
TW = Block_Dim_x; // Simplest case: TW = Block_Dim_x = Block_Dim_y
if (Block_Dim_x * Block_Dim_y > 1024) {
printf("Error, too many threads in block\n");
exit (-1);
}
Grid_Dim_x = m / Block_Dim_x;
Grid_Dim_y = n / Block_Dim_y;
if (Grid_Dim_x * Block_Dim_x < m || Grid_Dim_y * Block_Dim_y < n) {
printf("Error, number of threads in x/y dimensions less than number of array elements\n");
exit(-1);
}
hipSetDevice(gpunum);
printf("Using device %d\n", gpunum);
printf("Matrix Dimension = A (%d, %d), B (%d, %d), C (%d, %d) \n", n, p, p, m, n, m);
printf("Block_Dim = (%d, %d), Grid_Dim = (%d, %d) \n", Block_Dim_x, Block_Dim_y, Grid_Dim_x, Grid_Dim_y);
dim3 Grid(Grid_Dim_x, Grid_Dim_y); // Grid structure
dim3 Block(Block_Dim_x, Block_Dim_y); // Block structure
sizeA = n * p * sizeof(FP);
sizeB = p * m * sizeof(FP);
sizeC = n * m * sizeof(FP);
a = (FP *) malloc(sizeA); // dynamically allocated memory for arrays on host
b = (FP *) malloc(sizeB);
c = (FP *) malloc(sizeC); // results from GPU
srand(12345);
for (i = 0; i < n; i++) {
for (j = 0; j < p; j++) {
a[i * p + j] = (FP) rand() / (FP) RAND_MAX;
}
}
for (i = 0; i < p; i++) {
for (j = 0; j < m; j++) {
b[i * m + j] = (FP) rand() / (FP) RAND_MAX;
}
}
// ------------- COMPUTATION DONE ON GPU ----------------------------
hipMalloc((void**) &dev_a, sizeA); // allocate memory on device
hipMalloc((void**) &dev_b, sizeB);
hipMalloc((void**) &dev_c, sizeC);
hipMemcpy(dev_a, a, sizeA, hipMemcpyHostToDevice); // copy from CPU to GPU
hipMemcpy(dev_b, b, sizeB, hipMemcpyHostToDevice);
hipEventCreate(&start); // instrument code to measure start time
hipEventCreate(&stop);
hipEventRecord(start, 0);
// hipEventSynchronize(start); // not needed
size_t Ns = 2 * TW * TW * sizeof(FP); // amount of shared memory
hipLaunchKernelGGL(( gpu_matrixmult), dim3(Grid), dim3(Block), Ns, 0, dev_a, dev_b, dev_c, n, p, m, TW);
hipEventRecord(stop, 0); // instrument code to measure end time
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time_ms, start, stop);
hipMemcpy(c, dev_c, sizeC, hipMemcpyDeviceToHost); // copy from GPU to CPU
printf("Time to calculate results on GPU: %f ms.\n", elapsed_time_ms); // exec. time
// ----------------------------- clean up ------------------------------
free(a);
free(b);
free(c);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
hipEventDestroy(start);
hipEventDestroy(stop);
return 0;
}
| 13aa3769a2611ae4d3ba511ad6eb6fedd79c094d.cu | #define FP double
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <math.h>
// A: n * p, B: p * m, C: n * m
__global__ void gpu_matrixmult(FP *a, FP *b, FP *c, int n, int p, int m, int TW) {
extern __shared__ FP bigarray[];
FP *atile = &bigarray[0], *btile = &bigarray[TW * TW];
int tx = threadIdx.x; int ty = threadIdx.y; FP cvalue = 0.;
// current thread is responsible for computing the entry at (row, col) of C
int col = tx + blockDim.x * blockIdx.x;
int row = ty + blockDim.y * blockIdx.y;
for (int tile = 0; tile < p / TW; tile++) {
int indexa = row * p + tile * TW + tx; // current thread caches this entry of A
int indexb = (tile * TW + ty) * m + col; // current thread caches this entry of B
atile[ty * TW + tx] = (tile * TW + tx < p && row < n) ? a[indexa] : 0.;
btile[ty * TW + tx] = (tile * TW + ty < p && col < m) ? b[indexb] : 0.;
__syncthreads(); // make sure tiles are loaded before all threads can use them
for (int k = 0; k < TW; k++) cvalue += atile[ty * TW + k] * btile[k * TW + tx];
__syncthreads(); // make sure we’re done using the tiles before overwriting them
}
if (col < m && row < n) {
c[row * m + col] = cvalue;
}
}
int main(int argc, char *argv[]) {
int i, j; // loop counters
int gpucount = 0; // Count of available GPUs
int gpunum = 0; // Device number to use
int Grid_Dim_x = 1; // Grid dimension, x
int Grid_Dim_y = 1; // Grid dimension, y
int Block_Dim_x = 1; // Block dimension, x
int Block_Dim_y = 1; // Block dimension, y
int TW;
int n, p, m; // matrix dimension (A: n * p, B: p * m, C: n * m)
FP *a, *b, *c;
FP *dev_a, *dev_b, *dev_c;
int sizeA, sizeB, sizeC; // number of bytes in arrays
cudaEvent_t start, stop; // using cuda events to measure time
float elapsed_time_ms; // which is applicable for asynchronous code also
cudaError_t errorcode;
// -------------------- SET PARAMETERS AND DATA -----------------------
errorcode = cudaGetDeviceCount(&gpucount);
if (errorcode == cudaErrorNoDevice) {
printf("No GPUs are visible\n");
exit(-1);
}
else {
printf("Device count = %d\n", gpucount);
}
if (argc != 5) {
printf("Usage: matmul-1a <matrix dim n> <matrix dim p> <matrix dim m> <block dim> \n");
exit (-1);
}
n = atoi(argv[1]);
p = atoi(argv[2]);
m = atoi(argv[3]);
Block_Dim_x = atoi(argv[4]); // Square block
Block_Dim_y = Block_Dim_x;
TW = Block_Dim_x; // Simplest case: TW = Block_Dim_x = Block_Dim_y
if (Block_Dim_x * Block_Dim_y > 1024) {
printf("Error, too many threads in block\n");
exit (-1);
}
Grid_Dim_x = m / Block_Dim_x;
Grid_Dim_y = n / Block_Dim_y;
if (Grid_Dim_x * Block_Dim_x < m || Grid_Dim_y * Block_Dim_y < n) {
printf("Error, number of threads in x/y dimensions less than number of array elements\n");
exit(-1);
}
cudaSetDevice(gpunum);
printf("Using device %d\n", gpunum);
printf("Matrix Dimension = A (%d, %d), B (%d, %d), C (%d, %d) \n", n, p, p, m, n, m);
printf("Block_Dim = (%d, %d), Grid_Dim = (%d, %d) \n", Block_Dim_x, Block_Dim_y, Grid_Dim_x, Grid_Dim_y);
dim3 Grid(Grid_Dim_x, Grid_Dim_y); // Grid structure
dim3 Block(Block_Dim_x, Block_Dim_y); // Block structure
sizeA = n * p * sizeof(FP);
sizeB = p * m * sizeof(FP);
sizeC = n * m * sizeof(FP);
a = (FP *) malloc(sizeA); // dynamically allocated memory for arrays on host
b = (FP *) malloc(sizeB);
c = (FP *) malloc(sizeC); // results from GPU
srand(12345);
for (i = 0; i < n; i++) {
for (j = 0; j < p; j++) {
a[i * p + j] = (FP) rand() / (FP) RAND_MAX;
}
}
for (i = 0; i < p; i++) {
for (j = 0; j < m; j++) {
b[i * m + j] = (FP) rand() / (FP) RAND_MAX;
}
}
// ------------- COMPUTATION DONE ON GPU ----------------------------
cudaMalloc((void**) &dev_a, sizeA); // allocate memory on device
cudaMalloc((void**) &dev_b, sizeB);
cudaMalloc((void**) &dev_c, sizeC);
cudaMemcpy(dev_a, a, sizeA, cudaMemcpyHostToDevice); // copy from CPU to GPU
cudaMemcpy(dev_b, b, sizeB, cudaMemcpyHostToDevice);
cudaEventCreate(&start); // instrument code to measure start time
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// cudaEventSynchronize(start); // not needed
size_t Ns = 2 * TW * TW * sizeof(FP); // amount of shared memory
gpu_matrixmult<<<Grid, Block, Ns>>>(dev_a, dev_b, dev_c, n, p, m, TW);
cudaEventRecord(stop, 0); // instrument code to measure end time
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time_ms, start, stop);
cudaMemcpy(c, dev_c, sizeC, cudaMemcpyDeviceToHost); // copy from GPU to CPU
printf("Time to calculate results on GPU: %f ms.\n", elapsed_time_ms); // exec. time
// ----------------------------- clean up ------------------------------
free(a);
free(b);
free(c);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
|
6ce23759c0eec0ca53a738691d54d60150a18ce6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// RUN: echo "GPU binary would be here" > %t
// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s \
// RUN: -fcuda-include-gpubinary %t -o - -x hip\
// RUN: | FileCheck -check-prefixes=CHECK,GNU %s
// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s \
// RUN: -fcuda-include-gpubinary %t -o - -x hip\
// RUN: | FileCheck -check-prefix=NEG %s
// RUN: %clang_cc1 -triple x86_64-pc-windows-msvc -emit-llvm %s \
// RUN: -aux-triple amdgcn-amd-amdhsa -fcuda-include-gpubinary \
// RUN: %t -o - -x hip\
// RUN: | FileCheck -check-prefixes=CHECK,MSVC %s
// RUN: %clang_cc1 -triple x86_64-pc-windows-msvc -emit-llvm %s \
// RUN: -aux-triple amdgcn-amd-amdhsa -fcuda-include-gpubinary \
// RUN: %t -o - -x hip\
// RUN: | FileCheck -check-prefix=NEG %s
#include "Inputs/cuda.h"
// Check kernel handles are emitted for non-MSVC target but not for MSVC target.
// GNU: @[[HCKERN:ckernel]] = constant void ()* @[[CSTUB:__device_stub__ckernel]], align 8
// GNU: @[[HNSKERN:_ZN2ns8nskernelEv]] = constant void ()* @[[NSSTUB:_ZN2ns23__device_stub__nskernelEv]], align 8
// GNU: @[[HTKERN:_Z10kernelfuncIiEvv]] = linkonce_odr constant void ()* @[[TSTUB:_Z25__device_stub__kernelfuncIiEvv]], comdat, align 8
// GNU: @[[HDKERN:_Z11kernel_declv]] = external constant void ()*, align 8
// MSVC: @[[HCKERN:ckernel]] = dso_local constant void ()* @[[CSTUB:__device_stub__ckernel]], align 8
// MSVC: @[[HNSKERN:"\?nskernel@ns@@YAXXZ.*"]] = dso_local constant void ()* @[[NSSTUB:"\?__device_stub__nskernel@ns@@YAXXZ"]], align 8
// MSVC: @[[HTKERN:"\?\?\$kernelfunc@H@@YAXXZ.*"]] = linkonce_odr dso_local constant void ()* @[[TSTUB:"\?\?\$__device_stub__kernelfunc@H@@YAXXZ.*"]], comdat, align 8
// MSVC: @[[HDKERN:"\?kernel_decl@@YAXXZ.*"]] = external dso_local constant void ()*, align 8
extern "C" __global__ void ckernel() {}
namespace ns {
__global__ void nskernel() {}
} // namespace ns
template<class T>
__global__ void kernelfunc() {}
__global__ void kernel_decl();
extern "C" void (*kernel_ptr)();
extern "C" void *void_ptr;
extern "C" void launch(void *kern);
// Device side kernel names
// CHECK: @[[CKERN:[0-9]*]] = {{.*}} c"ckernel\00"
// CHECK: @[[NSKERN:[0-9]*]] = {{.*}} c"_ZN2ns8nskernelEv\00"
// CHECK: @[[TKERN:[0-9]*]] = {{.*}} c"_Z10kernelfuncIiEvv\00"
// Non-template kernel stub functions
// CHECK: define{{.*}}@[[CSTUB]]
// CHECK: call{{.*}}@hipLaunchByPtr{{.*}}@[[HCKERN]]
// CHECK: define{{.*}}@[[NSSTUB]]
// CHECK: call{{.*}}@hipLaunchByPtr{{.*}}@[[HNSKERN]]
// Check kernel stub is called for triple chevron.
// CHECK-LABEL: define{{.*}}@fun1()
// CHECK: call void @[[CSTUB]]()
// CHECK: call void @[[NSSTUB]]()
// CHECK: call void @[[TSTUB]]()
// GNU: call void @[[DSTUB:_Z26__device_stub__kernel_declv]]()
// MSVC: call void @[[DSTUB:"\?__device_stub__kernel_decl@@YAXXZ"]]()
extern "C" void fun1(void) {
hipLaunchKernelGGL(( ckernel), dim3(1), dim3(1), 0, 0, );
hipLaunchKernelGGL(( ns::nskernel), dim3(1), dim3(1), 0, 0, );
hipLaunchKernelGGL(( kernelfunc<int>), dim3(1), dim3(1), 0, 0, );
hipLaunchKernelGGL(( kernel_decl), dim3(1), dim3(1), 0, 0, );
}
// Template kernel stub functions
// CHECK: define{{.*}}@[[TSTUB]]
// CHECK: call{{.*}}@hipLaunchByPtr{{.*}}@[[HTKERN]]
// Check declaration of stub function for external kernel.
// CHECK: declare{{.*}}@[[DSTUB]]
// Check kernel handle is used for passing the kernel as a function pointer.
// CHECK-LABEL: define{{.*}}@fun2()
// CHECK: call void @launch({{.*}}[[HCKERN]]
// CHECK: call void @launch({{.*}}[[HNSKERN]]
// CHECK: call void @launch({{.*}}[[HTKERN]]
// CHECK: call void @launch({{.*}}[[HDKERN]]
extern "C" void fun2() {
launch((void *)ckernel);
launch((void *)ns::nskernel);
launch((void *)kernelfunc<int>);
launch((void *)kernel_decl);
}
// Check kernel handle is used for assigning a kernel to a function pointer.
// CHECK-LABEL: define{{.*}}@fun3()
// CHECK: store void ()* bitcast (void ()** @[[HCKERN]] to void ()*), void ()** @kernel_ptr, align 8
// CHECK: store void ()* bitcast (void ()** @[[HCKERN]] to void ()*), void ()** @kernel_ptr, align 8
// CHECK: store i8* bitcast (void ()** @[[HCKERN]] to i8*), i8** @void_ptr, align 8
// CHECK: store i8* bitcast (void ()** @[[HCKERN]] to i8*), i8** @void_ptr, align 8
extern "C" void fun3() {
kernel_ptr = ckernel;
kernel_ptr = &ckernel;
void_ptr = (void *)ckernel;
void_ptr = (void *)&ckernel;
}
// Check kernel stub is loaded from kernel handle when function pointer is
// used with triple chevron.
// CHECK-LABEL: define{{.*}}@fun4()
// CHECK: store void ()* bitcast (void ()** @[[HCKERN]] to void ()*), void ()** @kernel_ptr
// CHECK: call noundef i32 @{{.*hipConfigureCall}}
// CHECK: %[[HANDLE:.*]] = load void ()*, void ()** @kernel_ptr, align 8
// CHECK: %[[CAST:.*]] = bitcast void ()* %[[HANDLE]] to void ()**
// CHECK: %[[STUB:.*]] = load void ()*, void ()** %[[CAST]], align 8
// CHECK: call void %[[STUB]]()
extern "C" void fun4() {
kernel_ptr = ckernel;
hipLaunchKernelGGL(( kernel_ptr), dim3(1),dim3(1), 0, 0, );
}
// Check kernel handle is passed to a function.
// CHECK-LABEL: define{{.*}}@fun5()
// CHECK: store void ()* bitcast (void ()** @[[HCKERN]] to void ()*), void ()** @kernel_ptr
// CHECK: %[[HANDLE:.*]] = load void ()*, void ()** @kernel_ptr, align 8
// CHECK: %[[CAST:.*]] = bitcast void ()* %[[HANDLE]] to i8*
// CHECK: call void @launch(i8* noundef %[[CAST]])
extern "C" void fun5() {
kernel_ptr = ckernel;
launch((void *)kernel_ptr);
}
// Check kernel handle is registered.
// CHECK-LABEL: define{{.*}}@__hip_register_globals
// CHECK: call{{.*}}@__hipRegisterFunction{{.*}}@[[HCKERN]]{{.*}}@[[CKERN]]
// CHECK: call{{.*}}@__hipRegisterFunction{{.*}}@[[HNSKERN]]{{.*}}@[[NSKERN]]
// CHECK: call{{.*}}@__hipRegisterFunction{{.*}}@[[HTKERN]]{{.*}}@[[TKERN]]
// NEG-NOT: call{{.*}}@__hipRegisterFunction{{.*}}__device_stub
// NEG-NOT: call{{.*}}@__hipRegisterFunction{{.*}}kernel_decl
| 6ce23759c0eec0ca53a738691d54d60150a18ce6.cu | // RUN: echo "GPU binary would be here" > %t
// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s \
// RUN: -fcuda-include-gpubinary %t -o - -x hip\
// RUN: | FileCheck -check-prefixes=CHECK,GNU %s
// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s \
// RUN: -fcuda-include-gpubinary %t -o - -x hip\
// RUN: | FileCheck -check-prefix=NEG %s
// RUN: %clang_cc1 -triple x86_64-pc-windows-msvc -emit-llvm %s \
// RUN: -aux-triple amdgcn-amd-amdhsa -fcuda-include-gpubinary \
// RUN: %t -o - -x hip\
// RUN: | FileCheck -check-prefixes=CHECK,MSVC %s
// RUN: %clang_cc1 -triple x86_64-pc-windows-msvc -emit-llvm %s \
// RUN: -aux-triple amdgcn-amd-amdhsa -fcuda-include-gpubinary \
// RUN: %t -o - -x hip\
// RUN: | FileCheck -check-prefix=NEG %s
#include "Inputs/cuda.h"
// Check kernel handles are emitted for non-MSVC target but not for MSVC target.
// GNU: @[[HCKERN:ckernel]] = constant void ()* @[[CSTUB:__device_stub__ckernel]], align 8
// GNU: @[[HNSKERN:_ZN2ns8nskernelEv]] = constant void ()* @[[NSSTUB:_ZN2ns23__device_stub__nskernelEv]], align 8
// GNU: @[[HTKERN:_Z10kernelfuncIiEvv]] = linkonce_odr constant void ()* @[[TSTUB:_Z25__device_stub__kernelfuncIiEvv]], comdat, align 8
// GNU: @[[HDKERN:_Z11kernel_declv]] = external constant void ()*, align 8
// MSVC: @[[HCKERN:ckernel]] = dso_local constant void ()* @[[CSTUB:__device_stub__ckernel]], align 8
// MSVC: @[[HNSKERN:"\?nskernel@ns@@YAXXZ.*"]] = dso_local constant void ()* @[[NSSTUB:"\?__device_stub__nskernel@ns@@YAXXZ"]], align 8
// MSVC: @[[HTKERN:"\?\?\$kernelfunc@H@@YAXXZ.*"]] = linkonce_odr dso_local constant void ()* @[[TSTUB:"\?\?\$__device_stub__kernelfunc@H@@YAXXZ.*"]], comdat, align 8
// MSVC: @[[HDKERN:"\?kernel_decl@@YAXXZ.*"]] = external dso_local constant void ()*, align 8
extern "C" __global__ void ckernel() {}
namespace ns {
__global__ void nskernel() {}
} // namespace ns
template<class T>
__global__ void kernelfunc() {}
__global__ void kernel_decl();
extern "C" void (*kernel_ptr)();
extern "C" void *void_ptr;
extern "C" void launch(void *kern);
// Device side kernel names
// CHECK: @[[CKERN:[0-9]*]] = {{.*}} c"ckernel\00"
// CHECK: @[[NSKERN:[0-9]*]] = {{.*}} c"_ZN2ns8nskernelEv\00"
// CHECK: @[[TKERN:[0-9]*]] = {{.*}} c"_Z10kernelfuncIiEvv\00"
// Non-template kernel stub functions
// CHECK: define{{.*}}@[[CSTUB]]
// CHECK: call{{.*}}@hipLaunchByPtr{{.*}}@[[HCKERN]]
// CHECK: define{{.*}}@[[NSSTUB]]
// CHECK: call{{.*}}@hipLaunchByPtr{{.*}}@[[HNSKERN]]
// Check kernel stub is called for triple chevron.
// CHECK-LABEL: define{{.*}}@fun1()
// CHECK: call void @[[CSTUB]]()
// CHECK: call void @[[NSSTUB]]()
// CHECK: call void @[[TSTUB]]()
// GNU: call void @[[DSTUB:_Z26__device_stub__kernel_declv]]()
// MSVC: call void @[[DSTUB:"\?__device_stub__kernel_decl@@YAXXZ"]]()
extern "C" void fun1(void) {
ckernel<<<1, 1>>>();
ns::nskernel<<<1, 1>>>();
kernelfunc<int><<<1, 1>>>();
kernel_decl<<<1, 1>>>();
}
// Template kernel stub functions
// CHECK: define{{.*}}@[[TSTUB]]
// CHECK: call{{.*}}@hipLaunchByPtr{{.*}}@[[HTKERN]]
// Check declaration of stub function for external kernel.
// CHECK: declare{{.*}}@[[DSTUB]]
// Check kernel handle is used for passing the kernel as a function pointer.
// CHECK-LABEL: define{{.*}}@fun2()
// CHECK: call void @launch({{.*}}[[HCKERN]]
// CHECK: call void @launch({{.*}}[[HNSKERN]]
// CHECK: call void @launch({{.*}}[[HTKERN]]
// CHECK: call void @launch({{.*}}[[HDKERN]]
extern "C" void fun2() {
launch((void *)ckernel);
launch((void *)ns::nskernel);
launch((void *)kernelfunc<int>);
launch((void *)kernel_decl);
}
// Check kernel handle is used for assigning a kernel to a function pointer.
// CHECK-LABEL: define{{.*}}@fun3()
// CHECK: store void ()* bitcast (void ()** @[[HCKERN]] to void ()*), void ()** @kernel_ptr, align 8
// CHECK: store void ()* bitcast (void ()** @[[HCKERN]] to void ()*), void ()** @kernel_ptr, align 8
// CHECK: store i8* bitcast (void ()** @[[HCKERN]] to i8*), i8** @void_ptr, align 8
// CHECK: store i8* bitcast (void ()** @[[HCKERN]] to i8*), i8** @void_ptr, align 8
extern "C" void fun3() {
kernel_ptr = ckernel;
kernel_ptr = &ckernel;
void_ptr = (void *)ckernel;
void_ptr = (void *)&ckernel;
}
// Check kernel stub is loaded from kernel handle when function pointer is
// used with triple chevron.
// CHECK-LABEL: define{{.*}}@fun4()
// CHECK: store void ()* bitcast (void ()** @[[HCKERN]] to void ()*), void ()** @kernel_ptr
// CHECK: call noundef i32 @{{.*hipConfigureCall}}
// CHECK: %[[HANDLE:.*]] = load void ()*, void ()** @kernel_ptr, align 8
// CHECK: %[[CAST:.*]] = bitcast void ()* %[[HANDLE]] to void ()**
// CHECK: %[[STUB:.*]] = load void ()*, void ()** %[[CAST]], align 8
// CHECK: call void %[[STUB]]()
extern "C" void fun4() {
kernel_ptr = ckernel;
kernel_ptr<<<1,1>>>();
}
// Check kernel handle is passed to a function.
// CHECK-LABEL: define{{.*}}@fun5()
// CHECK: store void ()* bitcast (void ()** @[[HCKERN]] to void ()*), void ()** @kernel_ptr
// CHECK: %[[HANDLE:.*]] = load void ()*, void ()** @kernel_ptr, align 8
// CHECK: %[[CAST:.*]] = bitcast void ()* %[[HANDLE]] to i8*
// CHECK: call void @launch(i8* noundef %[[CAST]])
extern "C" void fun5() {
kernel_ptr = ckernel;
launch((void *)kernel_ptr);
}
// Check kernel handle is registered.
// CHECK-LABEL: define{{.*}}@__hip_register_globals
// CHECK: call{{.*}}@__hipRegisterFunction{{.*}}@[[HCKERN]]{{.*}}@[[CKERN]]
// CHECK: call{{.*}}@__hipRegisterFunction{{.*}}@[[HNSKERN]]{{.*}}@[[NSKERN]]
// CHECK: call{{.*}}@__hipRegisterFunction{{.*}}@[[HTKERN]]{{.*}}@[[TKERN]]
// NEG-NOT: call{{.*}}@__hipRegisterFunction{{.*}}__device_stub
// NEG-NOT: call{{.*}}@__hipRegisterFunction{{.*}}kernel_decl
|
9e3457b4e8c541f7df218c362f8b7841a087b212.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2020 by XGBoost Contributors
*/
#include <thrust/unique.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/binary_search.h>
#include <thrust/transform_scan.h>
#include <thrust/execution_policy.h>
#include <memory>
#include <utility>
#include "xgboost/span.h"
#include "quantile.h"
#include "quantile.cuh"
#include "hist_util.h"
#include "device_helpers_hip.cuh"
#include "categorical.h"
#include "common.h"
namespace xgboost {
namespace common {
using WQSketch = HostSketchContainer::WQSketch;
using SketchEntry = WQSketch::Entry;
// Algorithm 4 in XGBoost's paper, using binary search to find i.
template <typename EntryIter>
__device__ SketchEntry BinarySearchQuery(EntryIter beg, EntryIter end, float rank) {
assert(end - beg >= 2);
rank *= 2;
auto front = *beg;
if (rank < front.rmin + front.rmax) {
return *beg;
}
auto back = *(end - 1);
if (rank >= back.rmin + back.rmax) {
return back;
}
auto search_begin = dh::MakeTransformIterator<float>(
beg, [=] __device__(SketchEntry const &entry) {
return entry.rmin + entry.rmax;
});
auto search_end = search_begin + (end - beg);
auto i =
thrust::upper_bound(thrust::seq, search_begin + 1, search_end - 1, rank) -
search_begin - 1;
if (rank < (*(beg + i)).RMinNext() + (*(beg + i + 1)).RMaxPrev()) {
return *(beg + i);
} else {
return *(beg + i + 1);
}
}
template <typename InEntry, typename ToSketchEntry>
void PruneImpl(int device,
common::Span<SketchContainer::OffsetT const> cuts_ptr,
Span<InEntry const> sorted_data,
Span<size_t const> columns_ptr_in, // could be ptr for data or cuts
Span<FeatureType const> feature_types,
Span<SketchEntry> out_cuts,
ToSketchEntry to_sketch_entry) {
dh::LaunchN(device, out_cuts.size(), [=] __device__(size_t idx) {
size_t column_id = dh::SegmentId(cuts_ptr, idx);
auto out_column = out_cuts.subspan(
cuts_ptr[column_id], cuts_ptr[column_id + 1] - cuts_ptr[column_id]);
auto in_column = sorted_data.subspan(columns_ptr_in[column_id],
columns_ptr_in[column_id + 1] -
columns_ptr_in[column_id]);
auto to = cuts_ptr[column_id + 1] - cuts_ptr[column_id];
idx -= cuts_ptr[column_id];
auto front = to_sketch_entry(0ul, in_column, column_id);
auto back = to_sketch_entry(in_column.size() - 1, in_column, column_id);
auto is_cat = IsCat(feature_types, column_id);
if (in_column.size() <= to || is_cat) {
// cut idx equals sample idx
out_column[idx] = to_sketch_entry(idx, in_column, column_id);
return;
}
// 1 thread for each output. See A.4 for detail.
auto d_out = out_column;
if (idx == 0) {
d_out.front() = front;
return;
}
if (idx == to - 1) {
d_out.back() = back;
return;
}
float w = back.rmin - front.rmax;
assert(w != 0);
auto budget = static_cast<float>(d_out.size());
assert(budget != 0);
auto q = ((static_cast<float>(idx) * w) / (static_cast<float>(to) - 1.0f) + front.rmax);
auto it = dh::MakeTransformIterator<SketchEntry>(
thrust::make_counting_iterator(0ul), [=] __device__(size_t idx) {
auto e = to_sketch_entry(idx, in_column, column_id);
return e;
});
d_out[idx] = BinarySearchQuery(it, it + in_column.size(), q);
});
}
template <typename T, typename U>
void CopyTo(Span<T> out, Span<U> src) {
CHECK_EQ(out.size(), src.size());
static_assert(std::is_same<std::remove_cv_t<T>, std::remove_cv_t<U>>::value, "");
dh::safe_cuda(hipMemcpyAsync(out.data(), src.data(),
out.size_bytes(),
hipMemcpyDefault));
}
// Compute the merge path.
common::Span<thrust::tuple<uint64_t, uint64_t>> MergePath(
Span<SketchEntry const> const &d_x, Span<bst_row_t const> const &x_ptr,
Span<SketchEntry const> const &d_y, Span<bst_row_t const> const &y_ptr,
Span<SketchEntry> out, Span<bst_row_t> out_ptr) {
auto x_merge_key_it = thrust::make_zip_iterator(thrust::make_tuple(
dh::MakeTransformIterator<bst_row_t>(
thrust::make_counting_iterator(0ul),
[=] __device__(size_t idx) { return dh::SegmentId(x_ptr, idx); }),
d_x.data()));
auto y_merge_key_it = thrust::make_zip_iterator(thrust::make_tuple(
dh::MakeTransformIterator<bst_row_t>(
thrust::make_counting_iterator(0ul),
[=] __device__(size_t idx) { return dh::SegmentId(y_ptr, idx); }),
d_y.data()));
using Tuple = thrust::tuple<uint64_t, uint64_t>;
thrust::constant_iterator<uint64_t> a_ind_iter(0ul);
thrust::constant_iterator<uint64_t> b_ind_iter(1ul);
auto place_holder = thrust::make_constant_iterator<uint64_t>(0u);
auto x_merge_val_it =
thrust::make_zip_iterator(thrust::make_tuple(a_ind_iter, place_holder));
auto y_merge_val_it =
thrust::make_zip_iterator(thrust::make_tuple(b_ind_iter, place_holder));
dh::XGBCachingDeviceAllocator<Tuple> alloc;
static_assert(sizeof(Tuple) == sizeof(SketchEntry), "");
// We reuse the memory for storing merge path.
common::Span<Tuple> merge_path{reinterpret_cast<Tuple *>(out.data()), out.size()};
// Determine the merge path, 0 if element is from x, 1 if it's from y.
thrust::merge_by_key(
thrust::hip::par(alloc), x_merge_key_it, x_merge_key_it + d_x.size(),
y_merge_key_it, y_merge_key_it + d_y.size(), x_merge_val_it,
y_merge_val_it, thrust::make_discard_iterator(), merge_path.data(),
[=] __device__(auto const &l, auto const &r) -> bool {
auto l_column_id = thrust::get<0>(l);
auto r_column_id = thrust::get<0>(r);
if (l_column_id == r_column_id) {
return thrust::get<1>(l).value < thrust::get<1>(r).value;
}
return l_column_id < r_column_id;
});
// Compute output ptr
auto transform_it =
thrust::make_zip_iterator(thrust::make_tuple(x_ptr.data(), y_ptr.data()));
thrust::transform(
thrust::hip::par(alloc), transform_it, transform_it + x_ptr.size(),
out_ptr.data(),
[] __device__(auto const& t) { return thrust::get<0>(t) + thrust::get<1>(t); });
// 0^th is the indicator, 1^th is placeholder
auto get_ind = []XGBOOST_DEVICE(Tuple const& t) { return thrust::get<0>(t); };
// 0^th is the counter for x, 1^th for y.
auto get_x = []XGBOOST_DEVICE(Tuple const &t) { return thrust::get<0>(t); };
auto get_y = []XGBOOST_DEVICE(Tuple const &t) { return thrust::get<1>(t); };
auto scan_key_it = dh::MakeTransformIterator<size_t>(
thrust::make_counting_iterator(0ul),
[=] __device__(size_t idx) { return dh::SegmentId(out_ptr, idx); });
auto scan_val_it = dh::MakeTransformIterator<Tuple>(
merge_path.data(), [=] __device__(Tuple const &t) -> Tuple {
auto ind = get_ind(t); // == 0 if element is from x
// x_counter, y_counter
return thrust::make_tuple<uint64_t, uint64_t>(!ind, ind);
});
// Compute the index for both x and y (which of the element in a and b are used in each
// comparison) by scanning the binary merge path. Take output [(x_0, y_0), (x_0, y_1),
// ...] as an example, the comparison between (x_0, y_0) adds 1 step in the merge path.
// Assuming y_0 is less than x_0 so this step is toward the end of y. After the
// comparison, index of y is incremented by 1 from y_0 to y_1, and at the same time, y_0
// is landed into output as the first element in merge result. The scan result is the
// subscript of x and y.
thrust::exclusive_scan_by_key(
thrust::hip::par(alloc), scan_key_it, scan_key_it + merge_path.size(),
scan_val_it, merge_path.data(),
thrust::make_tuple<uint64_t, uint64_t>(0ul, 0ul),
thrust::equal_to<size_t>{},
[=] __device__(Tuple const &l, Tuple const &r) -> Tuple {
return thrust::make_tuple(get_x(l) + get_x(r), get_y(l) + get_y(r));
});
return merge_path;
}
// Merge d_x and d_y into out. Because the final output depends on predicate (which
// summary does the output element come from) result by definition of merged rank. So we
// run it in 2 passes to obtain the merge path and then customize the standard merge
// algorithm.
void MergeImpl(int32_t device, Span<SketchEntry const> const &d_x,
Span<bst_row_t const> const &x_ptr,
Span<SketchEntry const> const &d_y,
Span<bst_row_t const> const &y_ptr,
Span<SketchEntry> out,
Span<bst_row_t> out_ptr) {
dh::safe_cuda(hipSetDevice(device));
CHECK_EQ(d_x.size() + d_y.size(), out.size());
CHECK_EQ(x_ptr.size(), out_ptr.size());
CHECK_EQ(y_ptr.size(), out_ptr.size());
auto d_merge_path = MergePath(d_x, x_ptr, d_y, y_ptr, out, out_ptr);
auto d_out = out;
dh::LaunchN(device, d_out.size(), [=] __device__(size_t idx) {
auto column_id = dh::SegmentId(out_ptr, idx);
idx -= out_ptr[column_id];
auto d_x_column =
d_x.subspan(x_ptr[column_id], x_ptr[column_id + 1] - x_ptr[column_id]);
auto d_y_column =
d_y.subspan(y_ptr[column_id], y_ptr[column_id + 1] - y_ptr[column_id]);
auto d_out_column = d_out.subspan(
out_ptr[column_id], out_ptr[column_id + 1] - out_ptr[column_id]);
auto d_path_column = d_merge_path.subspan(
out_ptr[column_id], out_ptr[column_id + 1] - out_ptr[column_id]);
uint64_t a_ind, b_ind;
thrust::tie(a_ind, b_ind) = d_path_column[idx];
// Handle empty column. If both columns are empty, we should not get this column_id
// as result of binary search.
assert((d_x_column.size() != 0) || (d_y_column.size() != 0));
if (d_x_column.size() == 0) {
d_out_column[idx] = d_y_column[b_ind];
return;
}
if (d_y_column.size() == 0) {
d_out_column[idx] = d_x_column[a_ind];
return;
}
// Handle trailing elements.
assert(a_ind <= d_x_column.size());
if (a_ind == d_x_column.size()) {
// Trailing elements are from y because there's no more x to land.
auto y_elem = d_y_column[b_ind];
d_out_column[idx] = SketchEntry(y_elem.rmin + d_x_column.back().RMinNext(),
y_elem.rmax + d_x_column.back().rmax,
y_elem.wmin, y_elem.value);
return;
}
auto x_elem = d_x_column[a_ind];
assert(b_ind <= d_y_column.size());
if (b_ind == d_y_column.size()) {
d_out_column[idx] = SketchEntry(x_elem.rmin + d_y_column.back().RMinNext(),
x_elem.rmax + d_y_column.back().rmax,
x_elem.wmin, x_elem.value);
return;
}
auto y_elem = d_y_column[b_ind];
/* Merge procedure. See A.3 merge operation eq (26) ~ (28). The trick to interpret
it is rewriting the symbols on both side of equality. Take eq (26) as an example:
Expand it according to definition of extended rank then rewrite it into:
If $k_i$ is the $i$ element in output and \textbf{comes from $D_1$}:
r_\bar{D}(k_i) = r_{\bar{D_1}}(k_i) + w_{\bar{{D_1}}}(k_i) +
[r_{\bar{D_2}}(x_i) + w_{\bar{D_2}}(x_i)]
Where $x_i$ is the largest element in $D_2$ that's less than $k_i$. $k_i$ can be
used in $D_1$ as-is, since $k_i \in D_1$. The other 2 equations can be applied
similarly with $k_i$ comes from different $D$. just use different symbol on
different source of summary.
*/
assert(idx < d_out_column.size());
if (x_elem.value == y_elem.value) {
d_out_column[idx] =
SketchEntry{x_elem.rmin + y_elem.rmin, x_elem.rmax + y_elem.rmax,
x_elem.wmin + y_elem.wmin, x_elem.value};
} else if (x_elem.value < y_elem.value) {
// elem from x is landed. yprev_min is the element in D_2 that's 1 rank less than
// x_elem if we put x_elem in D_2.
float yprev_min = b_ind == 0 ? 0.0f : d_y_column[b_ind - 1].RMinNext();
// rmin should be equal to x_elem.rmin + x_elem.wmin + yprev_min. But for
// implementation, the weight is stored in a separated field and we compute the
// extended definition on the fly when needed.
d_out_column[idx] =
SketchEntry{x_elem.rmin + yprev_min, x_elem.rmax + y_elem.RMaxPrev(),
x_elem.wmin, x_elem.value};
} else {
// elem from y is landed.
float xprev_min = a_ind == 0 ? 0.0f : d_x_column[a_ind - 1].RMinNext();
d_out_column[idx] =
SketchEntry{xprev_min + y_elem.rmin, x_elem.RMaxPrev() + y_elem.rmax,
y_elem.wmin, y_elem.value};
}
});
}
void SketchContainer::Push(Span<Entry const> entries, Span<size_t> columns_ptr,
common::Span<OffsetT> cuts_ptr,
size_t total_cuts, Span<float> weights) {
Span<SketchEntry> out;
dh::device_vector<SketchEntry> cuts;
bool first_window = this->Current().empty();
if (!first_window) {
cuts.resize(total_cuts);
out = dh::ToSpan(cuts);
} else {
this->Current().resize(total_cuts);
out = dh::ToSpan(this->Current());
}
auto ft = this->feature_types_.ConstDeviceSpan();
if (weights.empty()) {
auto to_sketch_entry = [] __device__(size_t sample_idx,
Span<Entry const> const &column,
size_t) {
float rmin = sample_idx;
float rmax = sample_idx + 1;
return SketchEntry{rmin, rmax, 1, column[sample_idx].fvalue};
}; // NOLINT
PruneImpl<Entry>(device_, cuts_ptr, entries, columns_ptr, ft, out,
to_sketch_entry);
} else {
auto to_sketch_entry = [weights, columns_ptr] __device__(
size_t sample_idx,
Span<Entry const> const &column,
size_t column_id) {
Span<float const> column_weights_scan =
weights.subspan(columns_ptr[column_id], column.size());
float rmin = sample_idx > 0 ? column_weights_scan[sample_idx - 1] : 0.0f;
float rmax = column_weights_scan[sample_idx];
float wmin = rmax - rmin;
wmin = wmin < 0 ? kRtEps : wmin; // GPU scan can generate floating error.
return SketchEntry{rmin, rmax, wmin, column[sample_idx].fvalue};
}; // NOLINT
PruneImpl<Entry>(device_, cuts_ptr, entries, columns_ptr, ft, out,
to_sketch_entry);
}
auto n_uniques = this->ScanInput(out, cuts_ptr);
if (!first_window) {
CHECK_EQ(this->columns_ptr_.Size(), cuts_ptr.size());
out = out.subspan(0, n_uniques);
this->Merge(cuts_ptr, out);
this->FixError();
} else {
this->Current().resize(n_uniques);
this->columns_ptr_.SetDevice(device_);
this->columns_ptr_.Resize(cuts_ptr.size());
auto d_cuts_ptr = this->columns_ptr_.DeviceSpan();
CopyTo(d_cuts_ptr, cuts_ptr);
}
}
size_t SketchContainer::ScanInput(Span<SketchEntry> entries, Span<OffsetT> d_columns_ptr_in) {
/* There are 2 types of duplication. First is duplicated feature values, which comes
* from user input data. Second is duplicated sketching entries, which is generated by
* pruning or merging. We preserve the first type and remove the second type.
*/
timer_.Start(__func__);
dh::safe_cuda(hipSetDevice(device_));
CHECK_EQ(d_columns_ptr_in.size(), num_columns_ + 1);
dh::XGBCachingDeviceAllocator<char> alloc;
auto key_it = dh::MakeTransformIterator<size_t>(
thrust::make_reverse_iterator(thrust::make_counting_iterator(entries.size())),
[=] __device__(size_t idx) {
return dh::SegmentId(d_columns_ptr_in, idx);
});
// Reverse scan to accumulate weights into first duplicated element on left.
auto val_it = thrust::make_reverse_iterator(dh::tend(entries));
thrust::inclusive_scan_by_key(
thrust::hip::par(alloc), key_it, key_it + entries.size(),
val_it, val_it,
thrust::equal_to<size_t>{},
[] __device__(SketchEntry const &r, SketchEntry const &l) {
// Only accumulate for the first type of duplication.
if (l.value - r.value == 0 && l.rmin - r.rmin != 0) {
auto w = l.wmin + r.wmin;
SketchEntry v{l.rmin, l.rmin + w, w, l.value};
return v;
}
return l;
});
auto d_columns_ptr_out = columns_ptr_b_.DeviceSpan();
// thrust unique_by_key preserves the first element.
auto n_uniques = dh::SegmentedUnique(
d_columns_ptr_in.data(),
d_columns_ptr_in.data() + d_columns_ptr_in.size(), entries.data(),
entries.data() + entries.size(), d_columns_ptr_out.data(), entries.data(),
detail::SketchUnique{});
CopyTo(d_columns_ptr_in, d_columns_ptr_out);
timer_.Stop(__func__);
return n_uniques;
}
size_t SketchContainer::Unique() {
timer_.Start(__func__);
dh::safe_cuda(hipSetDevice(device_));
this->columns_ptr_.SetDevice(device_);
Span<OffsetT> d_column_scan = this->columns_ptr_.DeviceSpan();
CHECK_EQ(d_column_scan.size(), num_columns_ + 1);
Span<SketchEntry> entries = dh::ToSpan(this->Current());
HostDeviceVector<OffsetT> scan_out(d_column_scan.size());
scan_out.SetDevice(device_);
auto d_scan_out = scan_out.DeviceSpan();
d_column_scan = this->columns_ptr_.DeviceSpan();
size_t n_uniques = dh::SegmentedUnique(
d_column_scan.data(), d_column_scan.data() + d_column_scan.size(),
entries.data(), entries.data() + entries.size(), scan_out.DevicePointer(),
entries.data(),
detail::SketchUnique{});
this->columns_ptr_.Copy(scan_out);
CHECK(!this->columns_ptr_.HostCanRead());
this->Current().resize(n_uniques);
timer_.Stop(__func__);
return n_uniques;
}
void SketchContainer::Prune(size_t to) {
timer_.Start(__func__);
dh::safe_cuda(hipSetDevice(device_));
OffsetT to_total = 0;
auto& h_columns_ptr = columns_ptr_b_.HostVector();
h_columns_ptr[0] = to_total;
auto const& h_feature_types = feature_types_.ConstHostSpan();
for (bst_feature_t i = 0; i < num_columns_; ++i) {
size_t length = this->Column(i).size();
length = ::min(length, to);
if (IsCat(h_feature_types, i)) {
length = this->Column(i).size();
}
to_total += length;
h_columns_ptr[i+1] = to_total;
}
this->Other().resize(to_total);
auto d_columns_ptr_in = this->columns_ptr_.ConstDeviceSpan();
auto d_columns_ptr_out = columns_ptr_b_.ConstDeviceSpan();
auto out = dh::ToSpan(this->Other());
auto in = dh::ToSpan(this->Current());
auto no_op = [] __device__(size_t sample_idx,
Span<SketchEntry const> const &entries,
size_t) { return entries[sample_idx]; }; // NOLINT
auto ft = this->feature_types_.ConstDeviceSpan();
PruneImpl<SketchEntry>(device_, d_columns_ptr_out, in, d_columns_ptr_in, ft,
out, no_op);
this->columns_ptr_.Copy(columns_ptr_b_);
this->Alternate();
this->Unique();
timer_.Stop(__func__);
}
void SketchContainer::Merge(Span<OffsetT const> d_that_columns_ptr,
Span<SketchEntry const> that) {
dh::safe_cuda(hipSetDevice(device_));
timer_.Start(__func__);
if (this->Current().size() == 0) {
CHECK_EQ(this->columns_ptr_.HostVector().back(), 0);
CHECK_EQ(this->columns_ptr_.HostVector().size(), d_that_columns_ptr.size());
CHECK_EQ(columns_ptr_.Size(), num_columns_ + 1);
thrust::copy(thrust::device, d_that_columns_ptr.data(),
d_that_columns_ptr.data() + d_that_columns_ptr.size(),
this->columns_ptr_.DevicePointer());
auto total = this->columns_ptr_.HostVector().back();
this->Current().resize(total);
CopyTo(dh::ToSpan(this->Current()), that);
timer_.Stop(__func__);
return;
}
this->Other().resize(this->Current().size() + that.size());
CHECK_EQ(d_that_columns_ptr.size(), this->columns_ptr_.Size());
MergeImpl(device_, this->Data(), this->ColumnsPtr(),
that, d_that_columns_ptr,
dh::ToSpan(this->Other()), columns_ptr_b_.DeviceSpan());
this->columns_ptr_.Copy(columns_ptr_b_);
CHECK_EQ(this->columns_ptr_.Size(), num_columns_ + 1);
this->Alternate();
timer_.Stop(__func__);
}
void SketchContainer::FixError() {
dh::safe_cuda(hipSetDevice(device_));
auto d_columns_ptr = this->columns_ptr_.ConstDeviceSpan();
auto in = dh::ToSpan(this->Current());
dh::LaunchN(device_, in.size(), [=] __device__(size_t idx) {
auto column_id = dh::SegmentId(d_columns_ptr, idx);
auto in_column = in.subspan(d_columns_ptr[column_id],
d_columns_ptr[column_id + 1] -
d_columns_ptr[column_id]);
idx -= d_columns_ptr[column_id];
float prev_rmin = idx == 0 ? 0.0f : in_column[idx-1].rmin;
if (in_column[idx].rmin < prev_rmin) {
in_column[idx].rmin = prev_rmin;
}
float prev_rmax = idx == 0 ? 0.0f : in_column[idx-1].rmax;
if (in_column[idx].rmax < prev_rmax) {
in_column[idx].rmax = prev_rmax;
}
float rmin_next = in_column[idx].RMinNext();
if (in_column[idx].rmax < rmin_next) {
in_column[idx].rmax = rmin_next;
}
});
}
void SketchContainer::AllReduce() {
dh::safe_cuda(hipSetDevice(device_));
auto world = rabit::GetWorldSize();
if (world == 1) {
return;
}
timer_.Start(__func__);
if (!reducer_) {
reducer_ = std::make_unique<dh::AllReducer>();
reducer_->Init(device_);
}
// Reduce the overhead on syncing.
size_t global_sum_rows = num_rows_;
rabit::Allreduce<rabit::op::Sum>(&global_sum_rows, 1);
size_t intermediate_num_cuts =
::min(global_sum_rows, static_cast<size_t>(num_bins_ * kFactor));
this->Prune(intermediate_num_cuts);
auto d_columns_ptr = this->columns_ptr_.ConstDeviceSpan();
CHECK_EQ(d_columns_ptr.size(), num_columns_ + 1);
size_t n = d_columns_ptr.size();
rabit::Allreduce<rabit::op::Max>(&n, 1);
CHECK_EQ(n, d_columns_ptr.size()) << "Number of columns differs across workers";
// Get the columns ptr from all workers
dh::device_vector<SketchContainer::OffsetT> gathered_ptrs;
gathered_ptrs.resize(d_columns_ptr.size() * world, 0);
size_t rank = rabit::GetRank();
auto offset = rank * d_columns_ptr.size();
thrust::copy(thrust::device, d_columns_ptr.data(), d_columns_ptr.data() + d_columns_ptr.size(),
gathered_ptrs.begin() + offset);
reducer_->AllReduceSum(gathered_ptrs.data().get(), gathered_ptrs.data().get(),
gathered_ptrs.size());
// Get the data from all workers.
std::vector<size_t> recv_lengths;
dh::caching_device_vector<char> recvbuf;
reducer_->AllGather(this->Current().data().get(),
dh::ToSpan(this->Current()).size_bytes(), &recv_lengths,
&recvbuf);
reducer_->Synchronize();
// Segment the received data.
auto s_recvbuf = dh::ToSpan(recvbuf);
std::vector<Span<SketchEntry>> allworkers;
offset = 0;
for (int32_t i = 0; i < world; ++i) {
size_t length_as_bytes = recv_lengths.at(i);
auto raw = s_recvbuf.subspan(offset, length_as_bytes);
auto sketch = Span<SketchEntry>(reinterpret_cast<SketchEntry *>(raw.data()),
length_as_bytes / sizeof(SketchEntry));
allworkers.emplace_back(sketch);
offset += length_as_bytes;
}
// Merge them into a new sketch.
SketchContainer new_sketch(this->feature_types_, num_bins_,
this->num_columns_, global_sum_rows,
this->device_);
for (size_t i = 0; i < allworkers.size(); ++i) {
auto worker = allworkers[i];
auto worker_ptr =
dh::ToSpan(gathered_ptrs)
.subspan(i * d_columns_ptr.size(), d_columns_ptr.size());
new_sketch.Merge(worker_ptr, worker);
new_sketch.FixError();
}
*this = std::move(new_sketch);
timer_.Stop(__func__);
}
void SketchContainer::MakeCuts(HistogramCuts* p_cuts) {
timer_.Start(__func__);
dh::safe_cuda(hipSetDevice(device_));
p_cuts->min_vals_.Resize(num_columns_);
// Sync between workers.
this->AllReduce();
// Prune to final number of bins.
this->Prune(num_bins_ + 1);
this->FixError();
// Set up inputs
auto d_in_columns_ptr = this->columns_ptr_.ConstDeviceSpan();
p_cuts->min_vals_.SetDevice(device_);
auto d_min_values = p_cuts->min_vals_.DeviceSpan();
auto in_cut_values = dh::ToSpan(this->Current());
// Set up output ptr
p_cuts->cut_ptrs_.SetDevice(device_);
auto& h_out_columns_ptr = p_cuts->cut_ptrs_.HostVector();
h_out_columns_ptr.clear();
h_out_columns_ptr.push_back(0);
auto const& h_feature_types = this->feature_types_.ConstHostSpan();
for (bst_feature_t i = 0; i < num_columns_; ++i) {
size_t column_size = ::max(static_cast<size_t>(1ul),
this->Column(i).size());
if (IsCat(h_feature_types, i)) {
h_out_columns_ptr.push_back(static_cast<size_t>(column_size));
} else {
h_out_columns_ptr.push_back(::min(static_cast<size_t>(column_size),
static_cast<size_t>(num_bins_)));
}
}
std::partial_sum(h_out_columns_ptr.begin(), h_out_columns_ptr.end(),
h_out_columns_ptr.begin());
auto d_out_columns_ptr = p_cuts->cut_ptrs_.ConstDeviceSpan();
// Set up output cuts
size_t total_bins = h_out_columns_ptr.back();
p_cuts->cut_values_.SetDevice(device_);
p_cuts->cut_values_.Resize(total_bins);
auto out_cut_values = p_cuts->cut_values_.DeviceSpan();
auto d_ft = feature_types_.ConstDeviceSpan();
dh::LaunchN(0, total_bins, [=] __device__(size_t idx) {
auto column_id = dh::SegmentId(d_out_columns_ptr, idx);
auto in_column = in_cut_values.subspan(d_in_columns_ptr[column_id],
d_in_columns_ptr[column_id + 1] -
d_in_columns_ptr[column_id]);
auto out_column = out_cut_values.subspan(d_out_columns_ptr[column_id],
d_out_columns_ptr[column_id + 1] -
d_out_columns_ptr[column_id]);
idx -= d_out_columns_ptr[column_id];
if (in_column.size() == 0) {
// If the column is empty, we push a dummy value. It won't affect training as the
// column is empty, trees cannot split on it. This is just to be consistent with
// rest of the library.
if (idx == 0) {
d_min_values[column_id] = kRtEps;
out_column[0] = kRtEps;
assert(out_column.size() == 1);
}
return;
}
if (idx == 0 && !IsCat(d_ft, column_id)) {
auto mval = in_column[idx].value;
d_min_values[column_id] = mval - (fabs(mval) + 1e-5);
}
if (IsCat(d_ft, column_id)) {
assert(out_column.size() == in_column.size());
out_column[idx] = in_column[idx].value;
return;
}
// Last thread is responsible for setting a value that's greater than other cuts.
if (idx == out_column.size() - 1) {
const bst_float cpt = in_column.back().value;
// this must be bigger than last value in a scale
const bst_float last = cpt + (fabs(cpt) + 1e-5);
out_column[idx] = last;
return;
}
assert(idx+1 < in_column.size());
out_column[idx] = in_column[idx+1].value;
});
timer_.Stop(__func__);
}
} // namespace common
} // namespace xgboost
| 9e3457b4e8c541f7df218c362f8b7841a087b212.cu | /*!
* Copyright 2020 by XGBoost Contributors
*/
#include <thrust/unique.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/binary_search.h>
#include <thrust/transform_scan.h>
#include <thrust/execution_policy.h>
#include <memory>
#include <utility>
#include "xgboost/span.h"
#include "quantile.h"
#include "quantile.cuh"
#include "hist_util.h"
#include "device_helpers.cuh"
#include "categorical.h"
#include "common.h"
namespace xgboost {
namespace common {
using WQSketch = HostSketchContainer::WQSketch;
using SketchEntry = WQSketch::Entry;
// Algorithm 4 in XGBoost's paper, using binary search to find i.
template <typename EntryIter>
__device__ SketchEntry BinarySearchQuery(EntryIter beg, EntryIter end, float rank) {
assert(end - beg >= 2);
rank *= 2;
auto front = *beg;
if (rank < front.rmin + front.rmax) {
return *beg;
}
auto back = *(end - 1);
if (rank >= back.rmin + back.rmax) {
return back;
}
auto search_begin = dh::MakeTransformIterator<float>(
beg, [=] __device__(SketchEntry const &entry) {
return entry.rmin + entry.rmax;
});
auto search_end = search_begin + (end - beg);
auto i =
thrust::upper_bound(thrust::seq, search_begin + 1, search_end - 1, rank) -
search_begin - 1;
if (rank < (*(beg + i)).RMinNext() + (*(beg + i + 1)).RMaxPrev()) {
return *(beg + i);
} else {
return *(beg + i + 1);
}
}
template <typename InEntry, typename ToSketchEntry>
void PruneImpl(int device,
common::Span<SketchContainer::OffsetT const> cuts_ptr,
Span<InEntry const> sorted_data,
Span<size_t const> columns_ptr_in, // could be ptr for data or cuts
Span<FeatureType const> feature_types,
Span<SketchEntry> out_cuts,
ToSketchEntry to_sketch_entry) {
dh::LaunchN(device, out_cuts.size(), [=] __device__(size_t idx) {
size_t column_id = dh::SegmentId(cuts_ptr, idx);
auto out_column = out_cuts.subspan(
cuts_ptr[column_id], cuts_ptr[column_id + 1] - cuts_ptr[column_id]);
auto in_column = sorted_data.subspan(columns_ptr_in[column_id],
columns_ptr_in[column_id + 1] -
columns_ptr_in[column_id]);
auto to = cuts_ptr[column_id + 1] - cuts_ptr[column_id];
idx -= cuts_ptr[column_id];
auto front = to_sketch_entry(0ul, in_column, column_id);
auto back = to_sketch_entry(in_column.size() - 1, in_column, column_id);
auto is_cat = IsCat(feature_types, column_id);
if (in_column.size() <= to || is_cat) {
// cut idx equals sample idx
out_column[idx] = to_sketch_entry(idx, in_column, column_id);
return;
}
// 1 thread for each output. See A.4 for detail.
auto d_out = out_column;
if (idx == 0) {
d_out.front() = front;
return;
}
if (idx == to - 1) {
d_out.back() = back;
return;
}
float w = back.rmin - front.rmax;
assert(w != 0);
auto budget = static_cast<float>(d_out.size());
assert(budget != 0);
auto q = ((static_cast<float>(idx) * w) / (static_cast<float>(to) - 1.0f) + front.rmax);
auto it = dh::MakeTransformIterator<SketchEntry>(
thrust::make_counting_iterator(0ul), [=] __device__(size_t idx) {
auto e = to_sketch_entry(idx, in_column, column_id);
return e;
});
d_out[idx] = BinarySearchQuery(it, it + in_column.size(), q);
});
}
template <typename T, typename U>
void CopyTo(Span<T> out, Span<U> src) {
CHECK_EQ(out.size(), src.size());
static_assert(std::is_same<std::remove_cv_t<T>, std::remove_cv_t<U>>::value, "");
dh::safe_cuda(cudaMemcpyAsync(out.data(), src.data(),
out.size_bytes(),
cudaMemcpyDefault));
}
// Compute the merge path.
common::Span<thrust::tuple<uint64_t, uint64_t>> MergePath(
Span<SketchEntry const> const &d_x, Span<bst_row_t const> const &x_ptr,
Span<SketchEntry const> const &d_y, Span<bst_row_t const> const &y_ptr,
Span<SketchEntry> out, Span<bst_row_t> out_ptr) {
auto x_merge_key_it = thrust::make_zip_iterator(thrust::make_tuple(
dh::MakeTransformIterator<bst_row_t>(
thrust::make_counting_iterator(0ul),
[=] __device__(size_t idx) { return dh::SegmentId(x_ptr, idx); }),
d_x.data()));
auto y_merge_key_it = thrust::make_zip_iterator(thrust::make_tuple(
dh::MakeTransformIterator<bst_row_t>(
thrust::make_counting_iterator(0ul),
[=] __device__(size_t idx) { return dh::SegmentId(y_ptr, idx); }),
d_y.data()));
using Tuple = thrust::tuple<uint64_t, uint64_t>;
thrust::constant_iterator<uint64_t> a_ind_iter(0ul);
thrust::constant_iterator<uint64_t> b_ind_iter(1ul);
auto place_holder = thrust::make_constant_iterator<uint64_t>(0u);
auto x_merge_val_it =
thrust::make_zip_iterator(thrust::make_tuple(a_ind_iter, place_holder));
auto y_merge_val_it =
thrust::make_zip_iterator(thrust::make_tuple(b_ind_iter, place_holder));
dh::XGBCachingDeviceAllocator<Tuple> alloc;
static_assert(sizeof(Tuple) == sizeof(SketchEntry), "");
// We reuse the memory for storing merge path.
common::Span<Tuple> merge_path{reinterpret_cast<Tuple *>(out.data()), out.size()};
// Determine the merge path, 0 if element is from x, 1 if it's from y.
thrust::merge_by_key(
thrust::cuda::par(alloc), x_merge_key_it, x_merge_key_it + d_x.size(),
y_merge_key_it, y_merge_key_it + d_y.size(), x_merge_val_it,
y_merge_val_it, thrust::make_discard_iterator(), merge_path.data(),
[=] __device__(auto const &l, auto const &r) -> bool {
auto l_column_id = thrust::get<0>(l);
auto r_column_id = thrust::get<0>(r);
if (l_column_id == r_column_id) {
return thrust::get<1>(l).value < thrust::get<1>(r).value;
}
return l_column_id < r_column_id;
});
// Compute output ptr
auto transform_it =
thrust::make_zip_iterator(thrust::make_tuple(x_ptr.data(), y_ptr.data()));
thrust::transform(
thrust::cuda::par(alloc), transform_it, transform_it + x_ptr.size(),
out_ptr.data(),
[] __device__(auto const& t) { return thrust::get<0>(t) + thrust::get<1>(t); });
// 0^th is the indicator, 1^th is placeholder
auto get_ind = []XGBOOST_DEVICE(Tuple const& t) { return thrust::get<0>(t); };
// 0^th is the counter for x, 1^th for y.
auto get_x = []XGBOOST_DEVICE(Tuple const &t) { return thrust::get<0>(t); };
auto get_y = []XGBOOST_DEVICE(Tuple const &t) { return thrust::get<1>(t); };
auto scan_key_it = dh::MakeTransformIterator<size_t>(
thrust::make_counting_iterator(0ul),
[=] __device__(size_t idx) { return dh::SegmentId(out_ptr, idx); });
auto scan_val_it = dh::MakeTransformIterator<Tuple>(
merge_path.data(), [=] __device__(Tuple const &t) -> Tuple {
auto ind = get_ind(t); // == 0 if element is from x
// x_counter, y_counter
return thrust::make_tuple<uint64_t, uint64_t>(!ind, ind);
});
// Compute the index for both x and y (which of the element in a and b are used in each
// comparison) by scanning the binary merge path. Take output [(x_0, y_0), (x_0, y_1),
// ...] as an example, the comparison between (x_0, y_0) adds 1 step in the merge path.
// Assuming y_0 is less than x_0 so this step is toward the end of y. After the
// comparison, index of y is incremented by 1 from y_0 to y_1, and at the same time, y_0
// is landed into output as the first element in merge result. The scan result is the
// subscript of x and y.
thrust::exclusive_scan_by_key(
thrust::cuda::par(alloc), scan_key_it, scan_key_it + merge_path.size(),
scan_val_it, merge_path.data(),
thrust::make_tuple<uint64_t, uint64_t>(0ul, 0ul),
thrust::equal_to<size_t>{},
[=] __device__(Tuple const &l, Tuple const &r) -> Tuple {
return thrust::make_tuple(get_x(l) + get_x(r), get_y(l) + get_y(r));
});
return merge_path;
}
// Merge d_x and d_y into out. Because the final output depends on predicate (which
// summary does the output element come from) result by definition of merged rank. So we
// run it in 2 passes to obtain the merge path and then customize the standard merge
// algorithm.
void MergeImpl(int32_t device, Span<SketchEntry const> const &d_x,
Span<bst_row_t const> const &x_ptr,
Span<SketchEntry const> const &d_y,
Span<bst_row_t const> const &y_ptr,
Span<SketchEntry> out,
Span<bst_row_t> out_ptr) {
dh::safe_cuda(cudaSetDevice(device));
CHECK_EQ(d_x.size() + d_y.size(), out.size());
CHECK_EQ(x_ptr.size(), out_ptr.size());
CHECK_EQ(y_ptr.size(), out_ptr.size());
auto d_merge_path = MergePath(d_x, x_ptr, d_y, y_ptr, out, out_ptr);
auto d_out = out;
dh::LaunchN(device, d_out.size(), [=] __device__(size_t idx) {
auto column_id = dh::SegmentId(out_ptr, idx);
idx -= out_ptr[column_id];
auto d_x_column =
d_x.subspan(x_ptr[column_id], x_ptr[column_id + 1] - x_ptr[column_id]);
auto d_y_column =
d_y.subspan(y_ptr[column_id], y_ptr[column_id + 1] - y_ptr[column_id]);
auto d_out_column = d_out.subspan(
out_ptr[column_id], out_ptr[column_id + 1] - out_ptr[column_id]);
auto d_path_column = d_merge_path.subspan(
out_ptr[column_id], out_ptr[column_id + 1] - out_ptr[column_id]);
uint64_t a_ind, b_ind;
thrust::tie(a_ind, b_ind) = d_path_column[idx];
// Handle empty column. If both columns are empty, we should not get this column_id
// as result of binary search.
assert((d_x_column.size() != 0) || (d_y_column.size() != 0));
if (d_x_column.size() == 0) {
d_out_column[idx] = d_y_column[b_ind];
return;
}
if (d_y_column.size() == 0) {
d_out_column[idx] = d_x_column[a_ind];
return;
}
// Handle trailing elements.
assert(a_ind <= d_x_column.size());
if (a_ind == d_x_column.size()) {
// Trailing elements are from y because there's no more x to land.
auto y_elem = d_y_column[b_ind];
d_out_column[idx] = SketchEntry(y_elem.rmin + d_x_column.back().RMinNext(),
y_elem.rmax + d_x_column.back().rmax,
y_elem.wmin, y_elem.value);
return;
}
auto x_elem = d_x_column[a_ind];
assert(b_ind <= d_y_column.size());
if (b_ind == d_y_column.size()) {
d_out_column[idx] = SketchEntry(x_elem.rmin + d_y_column.back().RMinNext(),
x_elem.rmax + d_y_column.back().rmax,
x_elem.wmin, x_elem.value);
return;
}
auto y_elem = d_y_column[b_ind];
/* Merge procedure. See A.3 merge operation eq (26) ~ (28). The trick to interpret
it is rewriting the symbols on both side of equality. Take eq (26) as an example:
Expand it according to definition of extended rank then rewrite it into:
If $k_i$ is the $i$ element in output and \textbf{comes from $D_1$}:
r_\bar{D}(k_i) = r_{\bar{D_1}}(k_i) + w_{\bar{{D_1}}}(k_i) +
[r_{\bar{D_2}}(x_i) + w_{\bar{D_2}}(x_i)]
Where $x_i$ is the largest element in $D_2$ that's less than $k_i$. $k_i$ can be
used in $D_1$ as-is, since $k_i \in D_1$. The other 2 equations can be applied
similarly with $k_i$ comes from different $D$. just use different symbol on
different source of summary.
*/
assert(idx < d_out_column.size());
if (x_elem.value == y_elem.value) {
d_out_column[idx] =
SketchEntry{x_elem.rmin + y_elem.rmin, x_elem.rmax + y_elem.rmax,
x_elem.wmin + y_elem.wmin, x_elem.value};
} else if (x_elem.value < y_elem.value) {
// elem from x is landed. yprev_min is the element in D_2 that's 1 rank less than
// x_elem if we put x_elem in D_2.
float yprev_min = b_ind == 0 ? 0.0f : d_y_column[b_ind - 1].RMinNext();
// rmin should be equal to x_elem.rmin + x_elem.wmin + yprev_min. But for
// implementation, the weight is stored in a separated field and we compute the
// extended definition on the fly when needed.
d_out_column[idx] =
SketchEntry{x_elem.rmin + yprev_min, x_elem.rmax + y_elem.RMaxPrev(),
x_elem.wmin, x_elem.value};
} else {
// elem from y is landed.
float xprev_min = a_ind == 0 ? 0.0f : d_x_column[a_ind - 1].RMinNext();
d_out_column[idx] =
SketchEntry{xprev_min + y_elem.rmin, x_elem.RMaxPrev() + y_elem.rmax,
y_elem.wmin, y_elem.value};
}
});
}
void SketchContainer::Push(Span<Entry const> entries, Span<size_t> columns_ptr,
common::Span<OffsetT> cuts_ptr,
size_t total_cuts, Span<float> weights) {
Span<SketchEntry> out;
dh::device_vector<SketchEntry> cuts;
bool first_window = this->Current().empty();
if (!first_window) {
cuts.resize(total_cuts);
out = dh::ToSpan(cuts);
} else {
this->Current().resize(total_cuts);
out = dh::ToSpan(this->Current());
}
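  // First batch: prune directly into Current(). Later batches: prune into the temporary
  // buffer and merge it with the existing summary further below.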
auto ft = this->feature_types_.ConstDeviceSpan();
if (weights.empty()) {
auto to_sketch_entry = [] __device__(size_t sample_idx,
Span<Entry const> const &column,
size_t) {
float rmin = sample_idx;
float rmax = sample_idx + 1;
return SketchEntry{rmin, rmax, 1, column[sample_idx].fvalue};
}; // NOLINT
PruneImpl<Entry>(device_, cuts_ptr, entries, columns_ptr, ft, out,
to_sketch_entry);
} else {
auto to_sketch_entry = [weights, columns_ptr] __device__(
size_t sample_idx,
Span<Entry const> const &column,
size_t column_id) {
Span<float const> column_weights_scan =
weights.subspan(columns_ptr[column_id], column.size());
float rmin = sample_idx > 0 ? column_weights_scan[sample_idx - 1] : 0.0f;
float rmax = column_weights_scan[sample_idx];
float wmin = rmax - rmin;
wmin = wmin < 0 ? kRtEps : wmin; // GPU scan can generate floating error.
return SketchEntry{rmin, rmax, wmin, column[sample_idx].fvalue};
}; // NOLINT
PruneImpl<Entry>(device_, cuts_ptr, entries, columns_ptr, ft, out,
to_sketch_entry);
}
auto n_uniques = this->ScanInput(out, cuts_ptr);
if (!first_window) {
CHECK_EQ(this->columns_ptr_.Size(), cuts_ptr.size());
out = out.subspan(0, n_uniques);
this->Merge(cuts_ptr, out);
this->FixError();
} else {
this->Current().resize(n_uniques);
this->columns_ptr_.SetDevice(device_);
this->columns_ptr_.Resize(cuts_ptr.size());
auto d_cuts_ptr = this->columns_ptr_.DeviceSpan();
CopyTo(d_cuts_ptr, cuts_ptr);
}
}
size_t SketchContainer::ScanInput(Span<SketchEntry> entries, Span<OffsetT> d_columns_ptr_in) {
  /* There are 2 types of duplication. The first is duplicated feature values, which come
   * from the user input data. The second is duplicated sketching entries, which are
   * generated by pruning or merging. We preserve the first type and remove the second.
   */
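  // The reverse scan below folds the weights of repeated values into the leftmost entry of
  // each run, so nothing is lost when SegmentedUnique later keeps only that first entry.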
timer_.Start(__func__);
dh::safe_cuda(cudaSetDevice(device_));
CHECK_EQ(d_columns_ptr_in.size(), num_columns_ + 1);
dh::XGBCachingDeviceAllocator<char> alloc;
auto key_it = dh::MakeTransformIterator<size_t>(
thrust::make_reverse_iterator(thrust::make_counting_iterator(entries.size())),
[=] __device__(size_t idx) {
return dh::SegmentId(d_columns_ptr_in, idx);
});
// Reverse scan to accumulate weights into first duplicated element on left.
auto val_it = thrust::make_reverse_iterator(dh::tend(entries));
thrust::inclusive_scan_by_key(
thrust::cuda::par(alloc), key_it, key_it + entries.size(),
val_it, val_it,
thrust::equal_to<size_t>{},
[] __device__(SketchEntry const &r, SketchEntry const &l) {
// Only accumulate for the first type of duplication.
if (l.value - r.value == 0 && l.rmin - r.rmin != 0) {
auto w = l.wmin + r.wmin;
SketchEntry v{l.rmin, l.rmin + w, w, l.value};
return v;
}
return l;
});
auto d_columns_ptr_out = columns_ptr_b_.DeviceSpan();
// thrust unique_by_key preserves the first element.
auto n_uniques = dh::SegmentedUnique(
d_columns_ptr_in.data(),
d_columns_ptr_in.data() + d_columns_ptr_in.size(), entries.data(),
entries.data() + entries.size(), d_columns_ptr_out.data(), entries.data(),
detail::SketchUnique{});
CopyTo(d_columns_ptr_in, d_columns_ptr_out);
timer_.Stop(__func__);
return n_uniques;
}
size_t SketchContainer::Unique() {
timer_.Start(__func__);
dh::safe_cuda(cudaSetDevice(device_));
this->columns_ptr_.SetDevice(device_);
Span<OffsetT> d_column_scan = this->columns_ptr_.DeviceSpan();
CHECK_EQ(d_column_scan.size(), num_columns_ + 1);
Span<SketchEntry> entries = dh::ToSpan(this->Current());
HostDeviceVector<OffsetT> scan_out(d_column_scan.size());
scan_out.SetDevice(device_);
auto d_scan_out = scan_out.DeviceSpan();
d_column_scan = this->columns_ptr_.DeviceSpan();
size_t n_uniques = dh::SegmentedUnique(
d_column_scan.data(), d_column_scan.data() + d_column_scan.size(),
entries.data(), entries.data() + entries.size(), scan_out.DevicePointer(),
entries.data(),
detail::SketchUnique{});
this->columns_ptr_.Copy(scan_out);
CHECK(!this->columns_ptr_.HostCanRead());
this->Current().resize(n_uniques);
timer_.Stop(__func__);
return n_uniques;
}
void SketchContainer::Prune(size_t to) {
timer_.Start(__func__);
dh::safe_cuda(cudaSetDevice(device_));
OffsetT to_total = 0;
auto& h_columns_ptr = columns_ptr_b_.HostVector();
h_columns_ptr[0] = to_total;
auto const& h_feature_types = feature_types_.ConstHostSpan();
for (bst_feature_t i = 0; i < num_columns_; ++i) {
size_t length = this->Column(i).size();
length = std::min(length, to);
if (IsCat(h_feature_types, i)) {
length = this->Column(i).size();
}
to_total += length;
h_columns_ptr[i+1] = to_total;
}
this->Other().resize(to_total);
auto d_columns_ptr_in = this->columns_ptr_.ConstDeviceSpan();
auto d_columns_ptr_out = columns_ptr_b_.ConstDeviceSpan();
auto out = dh::ToSpan(this->Other());
auto in = dh::ToSpan(this->Current());
auto no_op = [] __device__(size_t sample_idx,
Span<SketchEntry const> const &entries,
size_t) { return entries[sample_idx]; }; // NOLINT
auto ft = this->feature_types_.ConstDeviceSpan();
PruneImpl<SketchEntry>(device_, d_columns_ptr_out, in, d_columns_ptr_in, ft,
out, no_op);
this->columns_ptr_.Copy(columns_ptr_b_);
this->Alternate();
this->Unique();
timer_.Stop(__func__);
}
void SketchContainer::Merge(Span<OffsetT const> d_that_columns_ptr,
Span<SketchEntry const> that) {
dh::safe_cuda(cudaSetDevice(device_));
timer_.Start(__func__);
if (this->Current().size() == 0) {
CHECK_EQ(this->columns_ptr_.HostVector().back(), 0);
CHECK_EQ(this->columns_ptr_.HostVector().size(), d_that_columns_ptr.size());
CHECK_EQ(columns_ptr_.Size(), num_columns_ + 1);
thrust::copy(thrust::device, d_that_columns_ptr.data(),
d_that_columns_ptr.data() + d_that_columns_ptr.size(),
this->columns_ptr_.DevicePointer());
auto total = this->columns_ptr_.HostVector().back();
this->Current().resize(total);
CopyTo(dh::ToSpan(this->Current()), that);
timer_.Stop(__func__);
return;
}
this->Other().resize(this->Current().size() + that.size());
CHECK_EQ(d_that_columns_ptr.size(), this->columns_ptr_.Size());
MergeImpl(device_, this->Data(), this->ColumnsPtr(),
that, d_that_columns_ptr,
dh::ToSpan(this->Other()), columns_ptr_b_.DeviceSpan());
this->columns_ptr_.Copy(columns_ptr_b_);
CHECK_EQ(this->columns_ptr_.Size(), num_columns_ + 1);
this->Alternate();
timer_.Stop(__func__);
}
void SketchContainer::FixError() {
dh::safe_cuda(cudaSetDevice(device_));
auto d_columns_ptr = this->columns_ptr_.ConstDeviceSpan();
auto in = dh::ToSpan(this->Current());
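  // Repair small floating-point violations left by merging: make rmin/rmax non-decreasing
  // within each column and keep rmax at least RMinNext of the same entry.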
dh::LaunchN(device_, in.size(), [=] __device__(size_t idx) {
auto column_id = dh::SegmentId(d_columns_ptr, idx);
auto in_column = in.subspan(d_columns_ptr[column_id],
d_columns_ptr[column_id + 1] -
d_columns_ptr[column_id]);
idx -= d_columns_ptr[column_id];
float prev_rmin = idx == 0 ? 0.0f : in_column[idx-1].rmin;
if (in_column[idx].rmin < prev_rmin) {
in_column[idx].rmin = prev_rmin;
}
float prev_rmax = idx == 0 ? 0.0f : in_column[idx-1].rmax;
if (in_column[idx].rmax < prev_rmax) {
in_column[idx].rmax = prev_rmax;
}
float rmin_next = in_column[idx].RMinNext();
if (in_column[idx].rmax < rmin_next) {
in_column[idx].rmax = rmin_next;
}
});
}
void SketchContainer::AllReduce() {
dh::safe_cuda(cudaSetDevice(device_));
auto world = rabit::GetWorldSize();
if (world == 1) {
return;
}
timer_.Start(__func__);
if (!reducer_) {
reducer_ = std::make_unique<dh::AllReducer>();
reducer_->Init(device_);
}
// Reduce the overhead on syncing.
size_t global_sum_rows = num_rows_;
rabit::Allreduce<rabit::op::Sum>(&global_sum_rows, 1);
size_t intermediate_num_cuts =
std::min(global_sum_rows, static_cast<size_t>(num_bins_ * kFactor));
this->Prune(intermediate_num_cuts);
auto d_columns_ptr = this->columns_ptr_.ConstDeviceSpan();
CHECK_EQ(d_columns_ptr.size(), num_columns_ + 1);
size_t n = d_columns_ptr.size();
rabit::Allreduce<rabit::op::Max>(&n, 1);
CHECK_EQ(n, d_columns_ptr.size()) << "Number of columns differs across workers";
// Get the columns ptr from all workers
dh::device_vector<SketchContainer::OffsetT> gathered_ptrs;
gathered_ptrs.resize(d_columns_ptr.size() * world, 0);
size_t rank = rabit::GetRank();
auto offset = rank * d_columns_ptr.size();
thrust::copy(thrust::device, d_columns_ptr.data(), d_columns_ptr.data() + d_columns_ptr.size(),
gathered_ptrs.begin() + offset);
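  // Each worker writes its own column pointers at its rank offset; the sum below therefore
  // concatenates the pointers from all workers (the other slots start at zero).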
reducer_->AllReduceSum(gathered_ptrs.data().get(), gathered_ptrs.data().get(),
gathered_ptrs.size());
// Get the data from all workers.
std::vector<size_t> recv_lengths;
dh::caching_device_vector<char> recvbuf;
reducer_->AllGather(this->Current().data().get(),
dh::ToSpan(this->Current()).size_bytes(), &recv_lengths,
&recvbuf);
reducer_->Synchronize();
// Segment the received data.
auto s_recvbuf = dh::ToSpan(recvbuf);
std::vector<Span<SketchEntry>> allworkers;
offset = 0;
for (int32_t i = 0; i < world; ++i) {
size_t length_as_bytes = recv_lengths.at(i);
auto raw = s_recvbuf.subspan(offset, length_as_bytes);
auto sketch = Span<SketchEntry>(reinterpret_cast<SketchEntry *>(raw.data()),
length_as_bytes / sizeof(SketchEntry));
allworkers.emplace_back(sketch);
offset += length_as_bytes;
}
// Merge them into a new sketch.
SketchContainer new_sketch(this->feature_types_, num_bins_,
this->num_columns_, global_sum_rows,
this->device_);
for (size_t i = 0; i < allworkers.size(); ++i) {
auto worker = allworkers[i];
auto worker_ptr =
dh::ToSpan(gathered_ptrs)
.subspan(i * d_columns_ptr.size(), d_columns_ptr.size());
new_sketch.Merge(worker_ptr, worker);
new_sketch.FixError();
}
*this = std::move(new_sketch);
timer_.Stop(__func__);
}
void SketchContainer::MakeCuts(HistogramCuts* p_cuts) {
timer_.Start(__func__);
dh::safe_cuda(cudaSetDevice(device_));
p_cuts->min_vals_.Resize(num_columns_);
// Sync between workers.
this->AllReduce();
// Prune to final number of bins.
this->Prune(num_bins_ + 1);
this->FixError();
// Set up inputs
auto d_in_columns_ptr = this->columns_ptr_.ConstDeviceSpan();
p_cuts->min_vals_.SetDevice(device_);
auto d_min_values = p_cuts->min_vals_.DeviceSpan();
auto in_cut_values = dh::ToSpan(this->Current());
// Set up output ptr
p_cuts->cut_ptrs_.SetDevice(device_);
auto& h_out_columns_ptr = p_cuts->cut_ptrs_.HostVector();
h_out_columns_ptr.clear();
h_out_columns_ptr.push_back(0);
auto const& h_feature_types = this->feature_types_.ConstHostSpan();
for (bst_feature_t i = 0; i < num_columns_; ++i) {
size_t column_size = std::max(static_cast<size_t>(1ul),
this->Column(i).size());
if (IsCat(h_feature_types, i)) {
h_out_columns_ptr.push_back(static_cast<size_t>(column_size));
} else {
h_out_columns_ptr.push_back(std::min(static_cast<size_t>(column_size),
static_cast<size_t>(num_bins_)));
}
}
std::partial_sum(h_out_columns_ptr.begin(), h_out_columns_ptr.end(),
h_out_columns_ptr.begin());
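  // After the prefix sum, h_out_columns_ptr[i] is the output offset of column i and
  // back() is the total number of cut values.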
auto d_out_columns_ptr = p_cuts->cut_ptrs_.ConstDeviceSpan();
// Set up output cuts
size_t total_bins = h_out_columns_ptr.back();
p_cuts->cut_values_.SetDevice(device_);
p_cuts->cut_values_.Resize(total_bins);
auto out_cut_values = p_cuts->cut_values_.DeviceSpan();
auto d_ft = feature_types_.ConstDeviceSpan();
dh::LaunchN(0, total_bins, [=] __device__(size_t idx) {
auto column_id = dh::SegmentId(d_out_columns_ptr, idx);
auto in_column = in_cut_values.subspan(d_in_columns_ptr[column_id],
d_in_columns_ptr[column_id + 1] -
d_in_columns_ptr[column_id]);
auto out_column = out_cut_values.subspan(d_out_columns_ptr[column_id],
d_out_columns_ptr[column_id + 1] -
d_out_columns_ptr[column_id]);
idx -= d_out_columns_ptr[column_id];
if (in_column.size() == 0) {
      // If the column is empty, we push a dummy value. It won't affect training since the
      // column is empty and trees cannot split on it. This is just to be consistent with
      // the rest of the library.
if (idx == 0) {
d_min_values[column_id] = kRtEps;
out_column[0] = kRtEps;
assert(out_column.size() == 1);
}
return;
}
if (idx == 0 && !IsCat(d_ft, column_id)) {
auto mval = in_column[idx].value;
d_min_values[column_id] = mval - (fabs(mval) + 1e-5);
}
if (IsCat(d_ft, column_id)) {
assert(out_column.size() == in_column.size());
out_column[idx] = in_column[idx].value;
return;
}
// Last thread is responsible for setting a value that's greater than other cuts.
if (idx == out_column.size() - 1) {
const bst_float cpt = in_column.back().value;
// this must be bigger than last value in a scale
const bst_float last = cpt + (fabs(cpt) + 1e-5);
out_column[idx] = last;
return;
}
assert(idx+1 < in_column.size());
out_column[idx] = in_column[idx+1].value;
});
timer_.Stop(__func__);
}
} // namespace common
} // namespace xgboost
|
e3bf1df327a05133755d74dbce4fe972ca33e06c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <vector>
#include "dali/operators/generic/one_hot.h"
#include "dali/operators/generic/one_hot.cuh"
namespace dali {
class OneHotGPU : public OneHot<GPUBackend> {
public:
explicit OneHotGPU(const OpSpec &spec) : OneHot<GPUBackend>(spec) {
scratch_mem_.set_type(TypeTable::GetTypeInfo(DALI_UINT8));
}
~OneHotGPU() override = default;
USE_OPERATOR_MEMBERS();
protected:
void RunImpl(workspace_t<GPUBackend> &ws) override;
bool SetupImpl(std::vector<OutputDesc> &output_desc, const workspace_t<GPUBackend> &ws) override;
template<typename OutputType, typename InputType>
void RunImplTyped(workspace_t<GPUBackend> &ws, int placement_axis);
private:
std::vector<detail::SampleDesc> sample_descs_;
Tensor<GPUBackend> scratch_mem_;
int recent_n_samples_ = 0;
};
bool OneHotGPU::SetupImpl(std::vector<OutputDesc> &output_desc, const workspace_t<GPUBackend> &ws) {
const auto &input = ws.template InputRef<GPUBackend>(0);
int num_samples = input.shape().num_samples();
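  // One SampleDesc per sample lives in the scratch buffer; reallocate only when the batch
  // size changes.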
if (num_samples != recent_n_samples_) {
recent_n_samples_ = num_samples;
int64_t samples_size = num_samples * sizeof(detail::SampleDesc);
scratch_mem_.Resize({samples_size});
}
sample_descs_.clear();
sample_descs_.reserve(num_samples);
return OneHot<GPUBackend>::SetupImpl(output_desc, ws);
}
void OneHotGPU::RunImpl(workspace_t<GPUBackend> &ws) {
const auto &input = ws.InputRef<GPUBackend>(0);
auto &output = ws.OutputRef<GPUBackend>(0);
int output_sample_dim = output.shape().sample_dim();
int placement_axis = get_placement_axis(output_sample_dim);
output.SetLayout(GetOutputLayout(ws, placement_axis, output_sample_dim));
TYPE_SWITCH(input.type().id(), type2id, InputType, ONE_HOT_TYPES, (
TYPE_SWITCH(output_type_, type2id, OutputType, ONE_HOT_TYPES, (
RunImplTyped<OutputType, InputType>(ws, placement_axis);
), DALI_FAIL(make_string("Unsupported output type: ", output_type_)); ); // NOLINT
), DALI_FAIL(make_string("Unsupported input type: ", input.type().id())); ); // NOLINT
}
template <typename OutputType, typename InputType>
void OneHotGPU::RunImplTyped(workspace_t<GPUBackend> &ws, int axis) {
const auto &input = ws.InputRef<GPUBackend>(0);
auto &output = ws.OutputRef<GPUBackend>(0);
int num_samples = input.shape().num_samples();
uint64_t max_out_vol = 1;
const auto &shape = output.shape();
for (int sample_id = 0; sample_id < num_samples; ++sample_id) {
detail::SampleDesc sample;
auto output_shape = shape.tensor_shape_span(sample_id);
auto outer_vol = volume(output_shape.begin(), output_shape.begin() + axis);
sample.inner_vol = volume(output_shape.begin() + axis + 1, output_shape.end());
sample.inner_vol_classes = sample.inner_vol * num_classes_;
sample.output_vol = outer_vol * sample.inner_vol_classes;
sample.out = output.mutable_tensor<OutputType>(sample_id);
sample.in = input.tensor<InputType>(sample_id);
sample_descs_.push_back(sample);
max_out_vol = ::max(max_out_vol, sample.output_vol);
}
auto stream = ws.stream();
scratch_mem_.Copy(sample_descs_, stream);
const auto *scratch_mem_gpu = scratch_mem_.data<detail::SampleDesc>();
const int block = 256;
auto grid = detail::gridHelper(max_out_vol, num_samples, block);
hipLaunchKernelGGL(( detail::PopulateOneHot<OutputType, InputType>), dim3(grid), dim3(block), 0, stream,
on_value_, off_value_, scratch_mem_gpu);
}
DALI_REGISTER_OPERATOR(OneHot, OneHotGPU, GPU);
} // namespace dali
| e3bf1df327a05133755d74dbce4fe972ca33e06c.cu | // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <vector>
#include "dali/operators/generic/one_hot.h"
#include "dali/operators/generic/one_hot.cuh"
namespace dali {
class OneHotGPU : public OneHot<GPUBackend> {
public:
explicit OneHotGPU(const OpSpec &spec) : OneHot<GPUBackend>(spec) {
scratch_mem_.set_type(TypeTable::GetTypeInfo(DALI_UINT8));
}
~OneHotGPU() override = default;
USE_OPERATOR_MEMBERS();
protected:
void RunImpl(workspace_t<GPUBackend> &ws) override;
bool SetupImpl(std::vector<OutputDesc> &output_desc, const workspace_t<GPUBackend> &ws) override;
template<typename OutputType, typename InputType>
void RunImplTyped(workspace_t<GPUBackend> &ws, int placement_axis);
private:
std::vector<detail::SampleDesc> sample_descs_;
Tensor<GPUBackend> scratch_mem_;
int recent_n_samples_ = 0;
};
bool OneHotGPU::SetupImpl(std::vector<OutputDesc> &output_desc, const workspace_t<GPUBackend> &ws) {
const auto &input = ws.template InputRef<GPUBackend>(0);
int num_samples = input.shape().num_samples();
if (num_samples != recent_n_samples_) {
recent_n_samples_ = num_samples;
int64_t samples_size = num_samples * sizeof(detail::SampleDesc);
scratch_mem_.Resize({samples_size});
}
sample_descs_.clear();
sample_descs_.reserve(num_samples);
return OneHot<GPUBackend>::SetupImpl(output_desc, ws);
}
void OneHotGPU::RunImpl(workspace_t<GPUBackend> &ws) {
const auto &input = ws.InputRef<GPUBackend>(0);
auto &output = ws.OutputRef<GPUBackend>(0);
int output_sample_dim = output.shape().sample_dim();
int placement_axis = get_placement_axis(output_sample_dim);
output.SetLayout(GetOutputLayout(ws, placement_axis, output_sample_dim));
TYPE_SWITCH(input.type().id(), type2id, InputType, ONE_HOT_TYPES, (
TYPE_SWITCH(output_type_, type2id, OutputType, ONE_HOT_TYPES, (
RunImplTyped<OutputType, InputType>(ws, placement_axis);
), DALI_FAIL(make_string("Unsupported output type: ", output_type_)); ); // NOLINT
), DALI_FAIL(make_string("Unsupported input type: ", input.type().id())); ); // NOLINT
}
template <typename OutputType, typename InputType>
void OneHotGPU::RunImplTyped(workspace_t<GPUBackend> &ws, int axis) {
const auto &input = ws.InputRef<GPUBackend>(0);
auto &output = ws.OutputRef<GPUBackend>(0);
int num_samples = input.shape().num_samples();
uint64_t max_out_vol = 1;
const auto &shape = output.shape();
for (int sample_id = 0; sample_id < num_samples; ++sample_id) {
detail::SampleDesc sample;
auto output_shape = shape.tensor_shape_span(sample_id);
auto outer_vol = volume(output_shape.begin(), output_shape.begin() + axis);
sample.inner_vol = volume(output_shape.begin() + axis + 1, output_shape.end());
sample.inner_vol_classes = sample.inner_vol * num_classes_;
sample.output_vol = outer_vol * sample.inner_vol_classes;
sample.out = output.mutable_tensor<OutputType>(sample_id);
sample.in = input.tensor<InputType>(sample_id);
sample_descs_.push_back(sample);
max_out_vol = std::max(max_out_vol, sample.output_vol);
}
auto stream = ws.stream();
scratch_mem_.Copy(sample_descs_, stream);
const auto *scratch_mem_gpu = scratch_mem_.data<detail::SampleDesc>();
const int block = 256;
auto grid = detail::gridHelper(max_out_vol, num_samples, block);
detail::PopulateOneHot<OutputType, InputType><<<grid, block, 0, stream>>>(
on_value_, off_value_, scratch_mem_gpu);
}
DALI_REGISTER_OPERATOR(OneHot, OneHotGPU, GPU);
} // namespace dali
|
d914a60c54487956b3ae0cc4afab113cbc4720df.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
int main() {
int c;
hipGetDeviceCount(&c);
printf("Total device %d\n",c);
int i;
hipDeviceProp_t deviceProp;
for(i=0; i<c; i++){
hipGetDeviceProperties(&deviceProp, i);
printf("Device %d has compute capability %d.%d.\n",
i, deviceProp.major, deviceProp.minor);
}
}
| d914a60c54487956b3ae0cc4afab113cbc4720df.cu | #include <stdio.h>
int main() {
int c;
cudaGetDeviceCount(&c);
printf("Total device %d\n",c);
int i;
cudaDeviceProp deviceProp;
for(i=0; i<c; i++){
cudaGetDeviceProperties(&deviceProp, i);
printf("Device %d has compute capability %d.%d.\n",
i, deviceProp.major, deviceProp.minor);
}
}
|
1cdd1ca7ec4276aa48de4a1fae0862352806140b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#define MAX 32678
void ms(int a[],int l,int m,int h);
void part(int a[],int l,int h);
int checkError(int a[], int c[], int n);
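// Each block sorts its own contiguous chunk of n elements with insertion sort; the kernel
// is launched with a single thread per block, so blocks are the only source of parallelism.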
__global__ void insert(int *a, int n)
{
int i = blockIdx.x * n;
int c,d,p,t,size;
size = (blockIdx.x+1)*n;
for(c=i;c<size;c++)
{
d=c;
while(d>i&&a[d]<a[d-1])
{
t=a[d];
a[d]=a[d-1];
a[d-1]=t;
d--;
}
}
}
int main()
{
int i,p,k,r;
int num_elem, num_bytes;
int *device_aay, *host_aay, *checkaay;
double res_time[45];
p=0;
FILE *insertcuda;
int block_size;
for(k=1; k<15; k++)
{
num_elem=pow(2,k);
//computing the size in bytes
num_bytes=num_elem * sizeof(int);
//malloc host_aay
host_aay=(int*)malloc(num_bytes);
checkaay=(int*)malloc(num_bytes);
//hipMalloc device aay
hipMalloc((void**)&device_aay,num_bytes);
//initialising host aay
for (i=0;i<num_elem;i++)
{
host_aay[i]=rand()%num_elem;
checkaay[i]=host_aay[i];
}
block_size=8;
hipMemcpy(device_aay,host_aay,num_bytes,hipMemcpyHostToDevice);
hipEvent_t start_insert, stop_insert, start_merge, stop_merge;
hipEventCreate(&start_insert);
hipEventCreate(&start_merge);
hipEventCreate(&stop_insert);
hipEventCreate(&stop_merge);
hipEventRecord(start_insert,0);
for (r = 0; r < 1000; r++)
{
hipLaunchKernelGGL(( insert), dim3(block_size),dim3(1), 0, 0, device_aay,num_elem/block_size);
}
hipEventRecord(stop_insert,0);
hipEventSynchronize(stop_insert);
float elapsedTimeInsert;
hipEventElapsedTime(&elapsedTimeInsert, start_insert,stop_insert);
hipMemcpy(host_aay,device_aay,num_bytes,hipMemcpyDeviceToHost);
hipEventRecord(start_merge,0);
part(host_aay,0,num_elem-1);
hipEventRecord(stop_merge,0);
hipEventSynchronize(stop_merge);
float elapsedTimeMerge;
hipEventElapsedTime(&elapsedTimeMerge, start_merge,stop_merge);
part(checkaay,0,num_elem-1);
/*printf("\n\n");
printf ("Time for the insertion sort: %f ms\n", elapsedTimeInsert);
printf ("Time for the merge sort: %f ms\n", elapsedTimeMerge);
printf("\n\n");*/
/*missorted = checkError(host_aay,checkaay,num_elem);
if (missorted != 0) printf("%d missorted numbers\n",missorted);*/
res_time[p]= num_elem;
res_time[p+1]=elapsedTimeInsert;
res_time[p+2]=elapsedTimeMerge;
p=p+3;
//deallocate memory
free(host_aay);
free(checkaay);
hipFree(device_aay);
}
insertcuda=fopen("insertcuda.csv","w");
if(!insertcuda)
{
printf("file opening failed");
fclose(insertcuda);
}
/* Calculation Of time */
for(p=0;p<45;p=p+3)
{
fprintf(insertcuda,"n=%f,insert=%f,merge=%f \n ",res_time[p],res_time[p+1],res_time[p+2]);
}
fclose(insertcuda);
return 0;
}
void part(int a[],int l,int h){
int m;
if(l<h){
m=(l+h)/2;
part(a,l,m);
part(a,m+1,h);
ms(a,l,m,h);
}
}
// Merge the two sorted runs a[l..m] and a[m+1..h] through the scratch array temp.
void ms(int a[],int l,int m,int h){
int i,k,l1,m1,temp[MAX];
l1=l;
i=l;
m1=m+1;
while((l1<=m)&&(m1<=h)){
if(a[l1]<=a[m1]){
temp[i]=a[l1];
l1++;
}
else{
temp[i]=a[m1];
m1++;
}
i++;
}
if(l1>m){
for(k=m1;k<=h;k++){
temp[i]=a[k];
i++;
}
}
else{
for(k=l1;k<=m;k++){
temp[i]=a[k];
i++;
}
}
for(k=l;k<=h;k++){
a[k]=temp[k];
}
}
int checkError(int a[], int c[], int n) {
int result = 0;
for (int i=0; i<n; i++) {
if (a[i] != c[i]) {
result++;
}
}
return result;
}
| 1cdd1ca7ec4276aa48de4a1fae0862352806140b.cu | #include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#define MAX 32678
void ms(int a[],int l,int m,int h);
void part(int a[],int l,int h);
int checkError(int a[], int c[], int n);
__global__ void insert(int *a, int n)
{
int i = blockIdx.x * n;
int c,d,p,t,size;
size = (blockIdx.x+1)*n;
for(c=i;c<size;c++)
{
d=c;
while(d>i&&a[d]<a[d-1])
{
t=a[d];
a[d]=a[d-1];
a[d-1]=t;
d--;
}
}
}
int main()
{
int i,p,k,r;
int num_elem, num_bytes;
int *device_aay, *host_aay, *checkaay;
double res_time[45];
p=0;
FILE *insertcuda;
int block_size;
for(k=1; k<15; k++)
{
num_elem=pow(2,k);
//computing the size in bytes
num_bytes=num_elem * sizeof(int);
//malloc host_aay
host_aay=(int*)malloc(num_bytes);
checkaay=(int*)malloc(num_bytes);
//cudaMalloc device aay
cudaMalloc((void**)&device_aay,num_bytes);
//initialising host aay
for (i=0;i<num_elem;i++)
{
host_aay[i]=rand()%num_elem;
checkaay[i]=host_aay[i];
}
block_size=8;
cudaMemcpy(device_aay,host_aay,num_bytes,cudaMemcpyHostToDevice);
cudaEvent_t start_insert, stop_insert, start_merge, stop_merge;
cudaEventCreate(&start_insert);
cudaEventCreate(&start_merge);
cudaEventCreate(&stop_insert);
cudaEventCreate(&stop_merge);
cudaEventRecord(start_insert,0);
for (r = 0; r < 1000; r++)
{
insert<<<block_size,1>>>(device_aay,num_elem/block_size);
}
cudaEventRecord(stop_insert,0);
cudaEventSynchronize(stop_insert);
float elapsedTimeInsert;
cudaEventElapsedTime(&elapsedTimeInsert, start_insert,stop_insert);
cudaMemcpy(host_aay,device_aay,num_bytes,cudaMemcpyDeviceToHost);
cudaEventRecord(start_merge,0);
part(host_aay,0,num_elem-1);
cudaEventRecord(stop_merge,0);
cudaEventSynchronize(stop_merge);
float elapsedTimeMerge;
cudaEventElapsedTime(&elapsedTimeMerge, start_merge,stop_merge);
part(checkaay,0,num_elem-1);
/*printf("\n\n");
printf ("Time for the insertion sort: %f ms\n", elapsedTimeInsert);
printf ("Time for the merge sort: %f ms\n", elapsedTimeMerge);
printf("\n\n");*/
/*missorted = checkError(host_aay,checkaay,num_elem);
if (missorted != 0) printf("%d missorted numbers\n",missorted);*/
res_time[p]= num_elem;
res_time[p+1]=elapsedTimeInsert;
res_time[p+2]=elapsedTimeMerge;
p=p+3;
//deallocate memory
free(host_aay);
free(checkaay);
cudaFree(device_aay);
}
insertcuda=fopen("insertcuda.csv","w");
if(!insertcuda)
{
printf("file opening failed");
fclose(insertcuda);
}
/* Calculation Of time */
for(p=0;p<45;p=p+3)
{
fprintf(insertcuda,"n=%f,insert=%f,merge=%f \n ",res_time[p],res_time[p+1],res_time[p+2]);
}
fclose(insertcuda);
return 0;
}
void part(int a[],int l,int h){
int m;
if(l<h){
m=(l+h)/2;
part(a,l,m);
part(a,m+1,h);
ms(a,l,m,h);
}
}
// Merge the two sorted runs a[l..m] and a[m+1..h] through the scratch array temp.
void ms(int a[],int l,int m,int h){
int i,k,l1,m1,temp[MAX];
l1=l;
i=l;
m1=m+1;
while((l1<=m)&&(m1<=h)){
if(a[l1]<=a[m1]){
temp[i]=a[l1];
l1++;
}
else{
temp[i]=a[m1];
m1++;
}
i++;
}
if(l1>m){
for(k=m1;k<=h;k++){
temp[i]=a[k];
i++;
}
}
else{
for(k=l1;k<=m;k++){
temp[i]=a[k];
i++;
}
}
for(k=l;k<=h;k++){
a[k]=temp[k];
}
}
int checkError(int a[], int c[], int n) {
int result = 0;
for (int i=0; i<n; i++) {
if (a[i] != c[i]) {
result++;
}
}
return result;
}
|
36b48a77ea0acd71fade84cecfe792b237312646.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void NegativeCorrelationForwardSumKernel( float* inputPtr, float* outputPtr, int thisLayerSize )
{
// j: current layer neuron id
	int j = blockDim.x * blockIdx.y * gridDim.x	//rows preceding current row in grid
		+ blockDim.x * blockIdx.x	//blocks preceding current block
+ threadIdx.x;
if (j < thisLayerSize)
{
outputPtr[j] += inputPtr[j];
}
} | 36b48a77ea0acd71fade84cecfe792b237312646.cu | #include "includes.h"
__global__ void NegativeCorrelationForwardSumKernel( float* inputPtr, float* outputPtr, int thisLayerSize )
{
// j: current layer neuron id
	int j = blockDim.x * blockIdx.y * gridDim.x	//rows preceding current row in grid
		+ blockDim.x * blockIdx.x	//blocks preceding current block
+ threadIdx.x;
if (j < thisLayerSize)
{
outputPtr[j] += inputPtr[j];
}
} |
62a88fcf8dafa7221ff3eec6d9a72c7cb8a30ddc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layers/neuron_layer.hpp"
#include "caffe/layers/prelu_layer.hpp"
namespace caffe {
// CUDA kernel for forward
template <typename Dtype>
__global__ void PReLUForward(const int n, const int channels, const int dim,
const Dtype* in, Dtype* out, const Dtype* slope_data,
const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];
}
}
// CUDA kernel for bottom backward
template <typename Dtype>
__global__ void PReLUBackward(const int n, const int channels, const int dim,
const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff,
const Dtype* slope_data, const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
out_diff[index] = in_diff[index] * ((in_data[index] > 0)
+ (in_data[index] <= 0) * slope_data[c]);
}
}
// CUDA kernel for element-wise parameter backward
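// Each thread accumulates the slope gradient over the batch (rows) for one channel*dim
// position; the remaining reduction over the spatial dimension happens in Backward_gpu
// via caffe_gpu_gemv / caffe_gpu_dot.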
template <typename Dtype>
__global__ void PReLUParamBackward(const int n,
const int rows, const int rowPitch, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0);
for ( int k = 1; k < rows; k++ ) {
out_diff[index] += in_diff[index + k*rowPitch]
* in_data[index + k*rowPitch] * (in_data[index + k*rowPitch] <= 0);
}
}
}
template <typename Dtype>
void PReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
const int div_factor = channel_shared_ ? channels : 1;
// For in-place computation
if (top[0] == bottom[0]) {
caffe_copy(count, bottom_data, bottom_memory_.mutable_gpu_data());
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( PReLUForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, channels, dim, bottom_data, top_data, slope_data, div_factor);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
void PReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
const int count = bottom[0]->count();
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
// For in-place computation
if (top[0] == bottom[0]) {
bottom_data = bottom_memory_.gpu_data();
}
// Propagate to param
  // Since writing the bottom diff would affect the top diff when the top and bottom blobs
  // are identical (in-place computation), we first compute the param backward pass to
  // keep top_diff unchanged.
if (this->param_propagate_down_[0]) {
Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff();
int cdim = channels * dim;
// compute element-wise diff
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( PReLUParamBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(cdim)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
cdim, bottom[0]->num(), top[0]->offset(1), top_diff ,
bottom_data ,
backward_buff_.mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
if (channel_shared_) {
Dtype dsum;
caffe_gpu_dot<Dtype>(channels * dim, backward_buff_.gpu_diff(),
multiplier_.gpu_data(), &dsum);
caffe_gpu_add_scalar(this->blobs_[0]->count(), Dtype(dsum), slope_diff);
} else {
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, dim, 1.,
backward_buff_.gpu_diff(), multiplier_.gpu_data(), 1.,
slope_diff);
}
}
// Propagate to bottom
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
int div_factor = channel_shared_ ? channels : 1;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( PReLUBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, channels, dim, top_diff, bottom_data, bottom_diff, slope_data,
div_factor);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(PReLULayer);
} // namespace caffe
| 62a88fcf8dafa7221ff3eec6d9a72c7cb8a30ddc.cu | #include <algorithm>
#include <vector>
#include "caffe/layers/neuron_layer.hpp"
#include "caffe/layers/prelu_layer.hpp"
namespace caffe {
// CUDA kernel for forward
template <typename Dtype>
__global__ void PReLUForward(const int n, const int channels, const int dim,
const Dtype* in, Dtype* out, const Dtype* slope_data,
const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];
}
}
// CUDA kernel for bottom backward
template <typename Dtype>
__global__ void PReLUBackward(const int n, const int channels, const int dim,
const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff,
const Dtype* slope_data, const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
out_diff[index] = in_diff[index] * ((in_data[index] > 0)
+ (in_data[index] <= 0) * slope_data[c]);
}
}
// CUDA kernel for element-wise parameter backward
template <typename Dtype>
__global__ void PReLUParamBackward(const int n,
const int rows, const int rowPitch, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0);
for ( int k = 1; k < rows; k++ ) {
out_diff[index] += in_diff[index + k*rowPitch]
* in_data[index + k*rowPitch] * (in_data[index + k*rowPitch] <= 0);
}
}
}
template <typename Dtype>
void PReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
const int div_factor = channel_shared_ ? channels : 1;
// For in-place computation
if (top[0] == bottom[0]) {
caffe_copy(count, bottom_data, bottom_memory_.mutable_gpu_data());
}
// NOLINT_NEXT_LINE(whitespace/operators)
PReLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, channels, dim, bottom_data, top_data, slope_data, div_factor);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
void PReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
const int count = bottom[0]->count();
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
// For in-place computation
if (top[0] == bottom[0]) {
bottom_data = bottom_memory_.gpu_data();
}
// Propagate to param
  // Since writing the bottom diff would affect the top diff when the top and bottom blobs
  // are identical (in-place computation), we first compute the param backward pass to
  // keep top_diff unchanged.
if (this->param_propagate_down_[0]) {
Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff();
int cdim = channels * dim;
// compute element-wise diff
// NOLINT_NEXT_LINE(whitespace/operators)
PReLUParamBackward<Dtype><<<CAFFE_GET_BLOCKS(cdim),
CAFFE_CUDA_NUM_THREADS>>>(
cdim, bottom[0]->num(), top[0]->offset(1), top_diff ,
bottom_data ,
backward_buff_.mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
if (channel_shared_) {
Dtype dsum;
caffe_gpu_dot<Dtype>(channels * dim, backward_buff_.gpu_diff(),
multiplier_.gpu_data(), &dsum);
caffe_gpu_add_scalar(this->blobs_[0]->count(), Dtype(dsum), slope_diff);
} else {
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, dim, 1.,
backward_buff_.gpu_diff(), multiplier_.gpu_data(), 1.,
slope_diff);
}
}
// Propagate to bottom
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
int div_factor = channel_shared_ ? channels : 1;
// NOLINT_NEXT_LINE(whitespace/operators)
PReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
count, channels, dim, top_diff, bottom_data, bottom_diff, slope_data,
div_factor);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(PReLULayer);
} // namespace caffe
|
fe921f0746c5d9036dcff060a3b30f9a2bb496aa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2013-2018 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
// and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/** \file add_pw_ekin.cu
*
* \brief CUDA kernel for the hphi update.
*/
#include "gpu/cuda_common.hpp"
#include "gpu/acc_runtime.hpp"
__global__ void add_pw_ekin_gpu_kernel(int num_gvec__,
double alpha__,
double const* pw_ekin__,
acc_complex_double_t const* phi__,
acc_complex_double_t const* vphi__,
acc_complex_double_t* hphi__)
{
int ig = blockIdx.x * blockDim.x + threadIdx.x;
if (ig < num_gvec__) {
acc_complex_double_t z1 = accCadd(vphi__[ig], make_accDoubleComplex(alpha__ * pw_ekin__[ig] * phi__[ig].x,
alpha__ * pw_ekin__[ig] * phi__[ig].y));
hphi__[ig] = accCadd(hphi__[ig], z1);
}
}
/// Update the hphi wave functions.
/** The following operation is performed:
* hphi[ig] += (alpha * pw_ekin[ig] * phi[ig] + vphi[ig])
*/
extern "C" void add_pw_ekin_gpu(int num_gvec__,
double alpha__,
double const* pw_ekin__,
acc_complex_double_t const* phi__,
acc_complex_double_t const* vphi__,
acc_complex_double_t* hphi__)
{
dim3 grid_t(64);
dim3 grid_b(num_blocks(num_gvec__, grid_t.x));
accLaunchKernel((add_pw_ekin_gpu_kernel), dim3(grid_b), dim3(grid_t), 0, 0,
num_gvec__,
alpha__,
pw_ekin__,
phi__,
vphi__,
hphi__
);
}
| fe921f0746c5d9036dcff060a3b30f9a2bb496aa.cu | // Copyright (c) 2013-2018 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
// and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/** \file add_pw_ekin.cu
*
* \brief CUDA kernel for the hphi update.
*/
#include "gpu/cuda_common.hpp"
#include "gpu/acc_runtime.hpp"
__global__ void add_pw_ekin_gpu_kernel(int num_gvec__,
double alpha__,
double const* pw_ekin__,
acc_complex_double_t const* phi__,
acc_complex_double_t const* vphi__,
acc_complex_double_t* hphi__)
{
int ig = blockIdx.x * blockDim.x + threadIdx.x;
if (ig < num_gvec__) {
acc_complex_double_t z1 = accCadd(vphi__[ig], make_accDoubleComplex(alpha__ * pw_ekin__[ig] * phi__[ig].x,
alpha__ * pw_ekin__[ig] * phi__[ig].y));
hphi__[ig] = accCadd(hphi__[ig], z1);
}
}
/// Update the hphi wave functions.
/** The following operation is performed:
* hphi[ig] += (alpha * pw_ekin[ig] * phi[ig] + vphi[ig])
*/
extern "C" void add_pw_ekin_gpu(int num_gvec__,
double alpha__,
double const* pw_ekin__,
acc_complex_double_t const* phi__,
acc_complex_double_t const* vphi__,
acc_complex_double_t* hphi__)
{
dim3 grid_t(64);
dim3 grid_b(num_blocks(num_gvec__, grid_t.x));
accLaunchKernel((add_pw_ekin_gpu_kernel), dim3(grid_b), dim3(grid_t), 0, 0,
num_gvec__,
alpha__,
pw_ekin__,
phi__,
vphi__,
hphi__
);
}
|
8f1195d57da2b05dcf25cb4006ae4ba4e5e0a11c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// RUN: %clang_cc1 -no-opaque-pointers %s -triple nvptx-unknown-unknown -fcuda-is-device -emit-llvm -o - | FileCheck %s
#include "Inputs/cuda.h"
#define MAX_THREADS_PER_BLOCK 256
#define MIN_BLOCKS_PER_MP 2
// Test both max threads per block and Min cta per sm.
extern "C" {
__global__ void
__launch_bounds__( MAX_THREADS_PER_BLOCK, MIN_BLOCKS_PER_MP )
Kernel1()
{
}
}
// CHECK: !{{[0-9]+}} = !{void ()* @Kernel1, !"maxntidx", i32 256}
// CHECK: !{{[0-9]+}} = !{void ()* @Kernel1, !"minctasm", i32 2}
// Test only max threads per block. Min cta per sm defaults to 0, and
// CodeGen doesn't output a zero value for minctasm.
extern "C" {
__global__ void
__launch_bounds__( MAX_THREADS_PER_BLOCK )
Kernel2()
{
}
}
// CHECK: !{{[0-9]+}} = !{void ()* @Kernel2, !"maxntidx", i32 256}
template <int max_threads_per_block>
__global__ void
__launch_bounds__(max_threads_per_block)
Kernel3()
{
}
template __global__ void Kernel3<MAX_THREADS_PER_BLOCK>();
// CHECK: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel3{{.*}}, !"maxntidx", i32 256}
template <int max_threads_per_block, int min_blocks_per_mp>
__global__ void
__launch_bounds__(max_threads_per_block, min_blocks_per_mp)
Kernel4()
{
}
template __global__ void Kernel4<MAX_THREADS_PER_BLOCK, MIN_BLOCKS_PER_MP>();
// CHECK: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel4{{.*}}, !"maxntidx", i32 256}
// CHECK: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel4{{.*}}, !"minctasm", i32 2}
const int constint = 100;
template <int max_threads_per_block, int min_blocks_per_mp>
__global__ void
__launch_bounds__(max_threads_per_block + constint,
min_blocks_per_mp + max_threads_per_block)
Kernel5()
{
}
template __global__ void Kernel5<MAX_THREADS_PER_BLOCK, MIN_BLOCKS_PER_MP>();
// CHECK: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel5{{.*}}, !"maxntidx", i32 356}
// CHECK: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel5{{.*}}, !"minctasm", i32 258}
// Make sure we don't emit negative launch bounds values.
__global__ void
__launch_bounds__( -MAX_THREADS_PER_BLOCK, MIN_BLOCKS_PER_MP )
Kernel6()
{
}
// CHECK-NOT: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel6{{.*}}, !"maxntidx",
// CHECK: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel6{{.*}}, !"minctasm",
__global__ void
__launch_bounds__( MAX_THREADS_PER_BLOCK, -MIN_BLOCKS_PER_MP )
Kernel7()
{
}
// CHECK: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel7{{.*}}, !"maxntidx",
// CHECK-NOT: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel7{{.*}}, !"minctasm",
const char constchar = 12;
__global__ void __launch_bounds__(constint, constchar) Kernel8() {}
// CHECK: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel8{{.*}}, !"maxntidx", i32 100
// CHECK: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel8{{.*}}, !"minctasm", i32 12
| 8f1195d57da2b05dcf25cb4006ae4ba4e5e0a11c.cu | // RUN: %clang_cc1 -no-opaque-pointers %s -triple nvptx-unknown-unknown -fcuda-is-device -emit-llvm -o - | FileCheck %s
#include "Inputs/cuda.h"
#define MAX_THREADS_PER_BLOCK 256
#define MIN_BLOCKS_PER_MP 2
// Test both max threads per block and Min cta per sm.
extern "C" {
__global__ void
__launch_bounds__( MAX_THREADS_PER_BLOCK, MIN_BLOCKS_PER_MP )
Kernel1()
{
}
}
// CHECK: !{{[0-9]+}} = !{void ()* @Kernel1, !"maxntidx", i32 256}
// CHECK: !{{[0-9]+}} = !{void ()* @Kernel1, !"minctasm", i32 2}
// Test only max threads per block. Min cta per sm defaults to 0, and
// CodeGen doesn't output a zero value for minctasm.
extern "C" {
__global__ void
__launch_bounds__( MAX_THREADS_PER_BLOCK )
Kernel2()
{
}
}
// CHECK: !{{[0-9]+}} = !{void ()* @Kernel2, !"maxntidx", i32 256}
template <int max_threads_per_block>
__global__ void
__launch_bounds__(max_threads_per_block)
Kernel3()
{
}
template __global__ void Kernel3<MAX_THREADS_PER_BLOCK>();
// CHECK: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel3{{.*}}, !"maxntidx", i32 256}
template <int max_threads_per_block, int min_blocks_per_mp>
__global__ void
__launch_bounds__(max_threads_per_block, min_blocks_per_mp)
Kernel4()
{
}
template __global__ void Kernel4<MAX_THREADS_PER_BLOCK, MIN_BLOCKS_PER_MP>();
// CHECK: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel4{{.*}}, !"maxntidx", i32 256}
// CHECK: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel4{{.*}}, !"minctasm", i32 2}
const int constint = 100;
template <int max_threads_per_block, int min_blocks_per_mp>
__global__ void
__launch_bounds__(max_threads_per_block + constint,
min_blocks_per_mp + max_threads_per_block)
Kernel5()
{
}
template __global__ void Kernel5<MAX_THREADS_PER_BLOCK, MIN_BLOCKS_PER_MP>();
// CHECK: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel5{{.*}}, !"maxntidx", i32 356}
// CHECK: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel5{{.*}}, !"minctasm", i32 258}
// Make sure we don't emit negative launch bounds values.
__global__ void
__launch_bounds__( -MAX_THREADS_PER_BLOCK, MIN_BLOCKS_PER_MP )
Kernel6()
{
}
// CHECK-NOT: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel6{{.*}}, !"maxntidx",
// CHECK: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel6{{.*}}, !"minctasm",
__global__ void
__launch_bounds__( MAX_THREADS_PER_BLOCK, -MIN_BLOCKS_PER_MP )
Kernel7()
{
}
// CHECK: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel7{{.*}}, !"maxntidx",
// CHECK-NOT: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel7{{.*}}, !"minctasm",
const char constchar = 12;
__global__ void __launch_bounds__(constint, constchar) Kernel8() {}
// CHECK: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel8{{.*}}, !"maxntidx", i32 100
// CHECK: !{{[0-9]+}} = !{void ()* @{{.*}}Kernel8{{.*}}, !"minctasm", i32 12
|
3f8535c21a146543e7efb455d021cfa5e5004824.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <iostream>
#include "papi_wrapper.hpp"
#define TILE_W 32
#define Width 512
#define Element Width*Width
using namespace std;
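// Note: despite the TILE_W name this is the naive kernel: each thread computes one output
// element directly from global memory, and TILE_W only sets the thread-block edge (no
// shared-memory tiling).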
__global__
void MatrixMulKernel(int* Md, int* Nd, int* Pd)
{
//Block Index
int by = blockIdx.y;
int bx = blockIdx.x;
//Thread Index
int ty = threadIdx.y; //Row
int tx = threadIdx.x; //Col
int Row = by*TILE_W + ty;
int Col = bx*TILE_W + tx;
if((Row<Width) && (Col<Width))
{
//Pvalue is used to store the element of the matrix
//That is computed by the thread
int Pvalue = 0;
for(int k=0 ; k<Width ; ++k)
Pvalue += Md[Row*Width+k]*Nd[k*Width+Col];
Pd[Row*Width + Col] = Pvalue;
}
}
int main()
{
cout << "----------------------------------------------Start" << endl;
cout << "This is Normal Matrix Multiplication version" << endl;
cout << "---------------------------------------------------" << endl;
cout << "Grid Dimension : " << Width/TILE_W << "x" << Width/TILE_W << endl;
cout << "Block Dimension : " << TILE_W << "x" << TILE_W << endl;
cout << "Dimension : " << Width << "x" << Width << endl;
cout << "Total Elements : " << Element << endl;
cout << "---------------------------------------------------" << endl;
hipSetDevice(1);
//Variables for Time
hipEvent_t start, end;
hipEventCreate(&start);
hipEventCreate(&end);
PAPIWrapper papi_ctrl;
//papi_ctrl.AddEvent(2, strdup("cuda:::device:1:inst_executed"), strdup("cuda:::device:1:gld_inst_32bit"));
papi_ctrl.AddEvent(2, strdup("cuda:::device:1:inst_executed"), strdup("cuda:::device:1:uncached_global_load_transaction"));
int size = Element*sizeof(int);
int* M = (int*)malloc(size);
int* N = (int*)malloc(size);
int* P = (int*)malloc(size);
int* Temp_sum_array = (int*)malloc(size);
int* Md;
int* Nd;
int* Pd;
srand(time(0));
for(int i=0 ; i<Element ; i++)
{
M[i] = rand()%100;
N[i] = rand()%100;
P[i] = 0;
}
hipEventRecord(start, 0);
//CPU Matrix Multiplication
int Temp_sum = 0;
for(int row=0 ; row<Width ; row++)
{
for(int col=0 ; col<Width ; col++)
{
Temp_sum = 0;
for(int n=0 ; n<Width ; n++)
{
Temp_sum += M[row*Width+n]*N[n*Width+col];
}
Temp_sum_array[row*Width+col] = Temp_sum;
}
}
hipEventRecord(end, 0);
hipEventSynchronize(end);
float CPU_time;
hipEventElapsedTime(&CPU_time, start, end);
cout << "Matrix Multiplication by CPU : " << CPU_time/1000 << 's' << endl;
//Finish
/////////////////////////////////////////////////
//////// CUDA //////////
/////////////////////////////////////////////////
hipEventRecord(start, 0);
hipMalloc((void**)&Md, size);
hipMemcpy(Md, M, size, hipMemcpyHostToDevice);
hipMalloc((void**)&Nd, size);
hipMemcpy(Nd, N, size, hipMemcpyHostToDevice);
hipMalloc((void**)&Pd, size);
hipEventRecord(end, 0);
hipEventSynchronize(end);
float Memory_time;
hipEventElapsedTime(&Memory_time, start, end);
cout << "Time of Processing Memory : " << Memory_time/1000 << 's' << endl;
hipEventRecord(start, 0);
papi_ctrl.Start();
dim3 dimGrid(Width/TILE_W, Width/TILE_W);
dim3 dimBlock(TILE_W, TILE_W);
hipLaunchKernelGGL(( MatrixMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, Md, Nd, Pd);
hipEventRecord(end, 0);
hipEventSynchronize(end);
float GPU_time;
hipEventElapsedTime(&GPU_time, start, end);
hipMemcpy(P, Pd, size, hipMemcpyDeviceToHost);
papi_ctrl.Stop();
cout << "Matrix Multiplication by GPU : " << GPU_time/1000 << 's' << endl;
cout << "---------------------------------------------------" << endl;
//Print CPU Result
//cout << "CPU Result :" << endl;
//for(int i=0 ; i<Element ; i++)
// cout << Temp_sum_array[i] << ", ";
//cout << endl;
//Print GPU Result
//cout << "GPU Result :" << endl;
//for(int i=0 ; i<Element ; i++)
// cout << P[i] << ", ";
//cout << endl;
//Check Multiplication Result
int check_flag = 0;
for(int i=0 ; i<Element ; i++)
if(Temp_sum_array[i] != P[i])
{
cout << "Wrong Point at : " << i << endl;
cout << "CPU Results is : " << Temp_sum_array[i] << endl;
cout << "GPU Results is : " << P[i] << endl;
check_flag = 1;
break;
}
if(check_flag == 1)
cout << "Wrong Result" << endl;
else if(check_flag == 0)
cout << "Correct Result" << endl;
//Finish
//Compare CPU_time and GPU_time
if(CPU_time > GPU_time)
{
cout << "GPU is faster" << endl;
float SpeedUp = CPU_time/GPU_time;
cout << "Speedup : " << SpeedUp << "x" << endl;
}
else
cout << "CPU is faster" << endl;
//Finish
cout << "------------------------------------------------End" << endl;
free(M);
free(N);
free(P);
free(Temp_sum_array);
/*
hipFree(Md);
hipFree(Nd);
hipFree(Pd);
hipEventDestroy(start);
hipEventDestroy(end);
*/
return EXIT_SUCCESS;
}
| 3f8535c21a146543e7efb455d021cfa5e5004824.cu | #include <cstdio>
#include <cstdlib>
#include <ctime>
#include <iostream>
#include "papi_wrapper.hpp"
#define TILE_W 32
#define Width 512
#define Element Width*Width
using namespace std;
__global__
void MatrixMulKernel(int* Md, int* Nd, int* Pd)
{
//Block Index
int by = blockIdx.y;
int bx = blockIdx.x;
//Thread Index
int ty = threadIdx.y; //Row
int tx = threadIdx.x; //Col
int Row = by*TILE_W + ty;
int Col = bx*TILE_W + tx;
if((Row<Width) && (Col<Width))
{
//Pvalue is used to store the element of the matrix
//That is computed by the thread
int Pvalue = 0;
for(int k=0 ; k<Width ; ++k)
Pvalue += Md[Row*Width+k]*Nd[k*Width+Col];
Pd[Row*Width + Col] = Pvalue;
}
}
int main()
{
cout << "----------------------------------------------Start" << endl;
cout << "This is Normal Matrix Multiplication version" << endl;
cout << "---------------------------------------------------" << endl;
cout << "Grid Dimension : " << Width/TILE_W << "x" << Width/TILE_W << endl;
cout << "Block Dimension : " << TILE_W << "x" << TILE_W << endl;
cout << "Dimension : " << Width << "x" << Width << endl;
cout << "Total Elements : " << Element << endl;
cout << "---------------------------------------------------" << endl;
cudaSetDevice(1);
//Variables for Time
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
PAPIWrapper papi_ctrl;
//papi_ctrl.AddEvent(2, strdup("cuda:::device:1:inst_executed"), strdup("cuda:::device:1:gld_inst_32bit"));
papi_ctrl.AddEvent(2, strdup("cuda:::device:1:inst_executed"), strdup("cuda:::device:1:uncached_global_load_transaction"));
int size = Element*sizeof(int);
int* M = (int*)malloc(size);
int* N = (int*)malloc(size);
int* P = (int*)malloc(size);
int* Temp_sum_array = (int*)malloc(size);
int* Md;
int* Nd;
int* Pd;
srand(time(0));
for(int i=0 ; i<Element ; i++)
{
M[i] = rand()%100;
N[i] = rand()%100;
P[i] = 0;
}
cudaEventRecord(start, 0);
//CPU Matrix Multiplication
int Temp_sum = 0;
for(int row=0 ; row<Width ; row++)
{
for(int col=0 ; col<Width ; col++)
{
Temp_sum = 0;
for(int n=0 ; n<Width ; n++)
{
Temp_sum += M[row*Width+n]*N[n*Width+col];
}
Temp_sum_array[row*Width+col] = Temp_sum;
}
}
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
float CPU_time;
cudaEventElapsedTime(&CPU_time, start, end);
cout << "Matrix Multiplication by CPU : " << CPU_time/1000 << 's' << endl;
//Finish
/////////////////////////////////////////////////
//////// CUDA //////////
/////////////////////////////////////////////////
cudaEventRecord(start, 0);
cudaMalloc((void**)&Md, size);
cudaMemcpy(Md, M, size, cudaMemcpyHostToDevice);
cudaMalloc((void**)&Nd, size);
cudaMemcpy(Nd, N, size, cudaMemcpyHostToDevice);
cudaMalloc((void**)&Pd, size);
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
float Memory_time;
cudaEventElapsedTime(&Memory_time, start, end);
cout << "Time of Processing Memory : " << Memory_time/1000 << 's' << endl;
cudaEventRecord(start, 0);
papi_ctrl.Start();
dim3 dimGrid(Width/TILE_W, Width/TILE_W);
dim3 dimBlock(TILE_W, TILE_W);
MatrixMulKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd);
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
float GPU_time;
cudaEventElapsedTime(&GPU_time, start, end);
cudaMemcpy(P, Pd, size, cudaMemcpyDeviceToHost);
papi_ctrl.Stop();
cout << "Matrix Multiplication by GPU : " << GPU_time/1000 << 's' << endl;
cout << "---------------------------------------------------" << endl;
//Print CPU Result
//cout << "CPU Result :" << endl;
//for(int i=0 ; i<Element ; i++)
// cout << Temp_sum_array[i] << ", ";
//cout << endl;
//Print GPU Result
//cout << "GPU Result :" << endl;
//for(int i=0 ; i<Element ; i++)
// cout << P[i] << ", ";
//cout << endl;
//Check Multiplication Result
int check_flag = 0;
for(int i=0 ; i<Element ; i++)
if(Temp_sum_array[i] != P[i])
{
cout << "Wrong Point at : " << i << endl;
cout << "CPU Results is : " << Temp_sum_array[i] << endl;
cout << "GPU Results is : " << P[i] << endl;
check_flag = 1;
break;
}
if(check_flag == 1)
cout << "Wrong Result" << endl;
else if(check_flag == 0)
cout << "Correct Result" << endl;
//Finish
//Compare CPU_time and GPU_time
if(CPU_time > GPU_time)
{
cout << "GPU is faster" << endl;
float SpeedUp = CPU_time/GPU_time;
cout << "Speedup : " << SpeedUp << "x" << endl;
}
else
cout << "CPU is faster" << endl;
//Finish
cout << "------------------------------------------------End" << endl;
free(M);
free(N);
free(P);
free(Temp_sum_array);
/*
cudaFree(Md);
cudaFree(Nd);
cudaFree(Pd);
cudaEventDestroy(start);
cudaEventDestroy(end);
*/
return EXIT_SUCCESS;
}
|
9470448c78804e25cb7cd5e95c8e5130e963dc54.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<cuda_runtime.h>
#include<pixelTransformGPU.h>
#include<format.h>
#define W1 2841
#define W2 2676
#define W3 2408
#define W5 1609
#define W6 1108
#define W7 565
#define FULL_MASK 0xFFFFFFFF
__device__ inline unsigned char clip(const int x) {
return (x < 0) ? 0 : ((x > 0xFF) ? 0xFF : (unsigned char) x);
}
__global__ void iDCT_GPU(int* in,
unsigned char *out,
int stride,
int samples_x, int samples_y,
int num_DCT_blocks){
// 8 DCT blocks per thread block, 8 threads per DCT block //
int DCT_block_in_thread_block = threadIdx.x >> 3;
int block_index = (blockIdx.x << 3) + DCT_block_in_thread_block;
if (block_index >= num_DCT_blocks) return;
int thread_in_block = threadIdx.x & 7;
int row_offset = thread_in_block << 3;
in += (block_index << 6) + row_offset;
__shared__ int shared_blocks[64*8];
int* my_row = &shared_blocks[64*DCT_block_in_thread_block + row_offset];
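// Shared-memory layout: one 64-int tile per DCT block, 8 tiles per thread block;
// this thread owns row 'thread_in_block' of its tile (my_row) during the row pass.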
// --------------- Do a single row in the DCT block --------------- //
int x0, x1, x2, x3, x4, x5, x6, x7, x8;
x1 = in[4] << 11;
x2 = in[6];
x3 = in[2];
x4 = in[1];
x5 = in[7];
x6 = in[5];
x7 = in[3];
x0 = (in[0] << 11) + 128;
x8 = W7 * (x4 + x5);
x4 = x8 + (W1 - W7) * x4;
x5 = x8 - (W1 + W7) * x5;
x8 = W3 * (x6 + x7);
x6 = x8 - (W3 - W5) * x6;
x7 = x8 - (W3 + W5) * x7;
x8 = x0 + x1;
x0 -= x1;
x1 = W6 * (x3 + x2);
x2 = x1 - (W2 + W6) * x2;
x3 = x1 + (W2 - W6) * x3;
x1 = x4 + x6;
x4 -= x6;
x6 = x5 + x7;
x5 -= x7;
x7 = x8 + x3;
x8 -= x3;
x3 = x0 + x2;
x0 -= x2;
x2 = (181 * (x4 + x5) + 128) >> 8;
x4 = (181 * (x4 - x5) + 128) >> 8;
my_row[0] = (x7 + x1) >> 8;
my_row[1] = (x3 + x2) >> 8;
my_row[2] = (x0 + x4) >> 8;
my_row[3] = (x8 + x6) >> 8;
my_row[4] = (x8 - x6) >> 8;
my_row[5] = (x0 - x4) >> 8;
my_row[6] = (x3 - x2) >> 8;
my_row[7] = (x7 - x1) >> 8;
// Make sure other rows within this DCT block are finished.
// Could move to cooperative group per DCT block rather than syncing a whole warp,
// buuut TBH I'm hoping the whole kernel will run in lockstep anyway :)
__syncwarp();
// -------------------- Do a single column --------------------//
int* my_col = my_row - thread_in_block * 7;
x1 = my_col[8*4] << 8;
x2 = my_col[8*6];
x3 = my_col[8*2];
x4 = my_col[8*1];
x5 = my_col[8*7];
x6 = my_col[8*5];
x7 = my_col[8*3];
x0 = (my_col[0] << 8) + 8192;
x8 = W7 * (x4 + x5) + 4;
x4 = (x8 + (W1 - W7) * x4) >> 3;
x5 = (x8 - (W1 + W7) * x5) >> 3;
x8 = W3 * (x6 + x7) + 4;
x6 = (x8 - (W3 - W5) * x6) >> 3;
x7 = (x8 - (W3 + W5) * x7) >> 3;
x8 = x0 + x1;
x0 -= x1;
x1 = W6 * (x3 + x2) + 4;
x2 = (x1 - (W2 + W6) * x2) >> 3;
x3 = (x1 + (W2 - W6) * x3) >> 3;
x1 = x4 + x6;
x4 -= x6;
x6 = x5 + x7;
x5 -= x7;
x7 = x8 + x3;
x8 -= x3;
x3 = x0 + x2;
x0 -= x2;
x2 = (181 * (x4 + x5) + 128) >> 8;
x4 = (181 * (x4 - x5) + 128) >> 8;
// Work out where in the global output to start writing //
int blocks_per_outer_block = samples_x * samples_y;
int blocks_per_row = samples_y * (stride >> 3);
int outer_block_y = block_index / blocks_per_row;
int remaining_blocks = block_index % blocks_per_row;
int outer_block_x = remaining_blocks / blocks_per_outer_block;
remaining_blocks = remaining_blocks % blocks_per_outer_block;
int inner_block_y = remaining_blocks / samples_x;
int inner_block_x = remaining_blocks % samples_x;
int block_x = outer_block_x * samples_x + inner_block_x;
int block_y = outer_block_y * samples_y + inner_block_y;
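// Illustrative example (assumed values): with samples_x = samples_y = 2 and stride = 32,
// blocks_per_outer_block = 4 and blocks_per_row = 8, so block_index 11 decomposes into
// outer block (x=0, y=1) and inner block (x=1, y=1), i.e. the 8x8 tile at (block_x, block_y) = (1, 3).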
out += (block_y * stride + block_x) << 3;
out += thread_in_block;
// Writes are coalesced within a DCT block, but not within the whole thread block. ToDo? //
*out = clip(((x7 + x1) >> 14) + 128); out += stride;
*out = clip(((x3 + x2) >> 14) + 128); out += stride;
*out = clip(((x0 + x4) >> 14) + 128); out += stride;
*out = clip(((x8 + x6) >> 14) + 128); out += stride;
*out = clip(((x8 - x6) >> 14) + 128); out += stride;
*out = clip(((x0 - x4) >> 14) + 128); out += stride;
*out = clip(((x3 - x2) >> 14) + 128); out += stride;
*out = clip(((x7 - x1) >> 14) + 128);
}
// Haven't tested whether this works: __shfl_sync across divergent branches relies on the
// independent thread scheduling introduced with compute capability 7.0 (Volta and newer),
// and I'm not buying a new GPU just for that. In particular I'm not sure my choice of 'var'
// in the shfl intrinsic is correct. As such, the normal iDCT_GPU just relies on shared mem
// to communicate between rows and cols.
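// For reference, the scheme used below: after the row pass, lane r of each 8-lane shuffle
// group holds row r of its 8x8 block as y0..y7. The eight "Diagonal + d" rounds perform a
// transpose: in round d, lane t reads y_t from lane (t - d) mod 8 and stores it as
// x_{(t - d) mod 8}, so each lane ends up holding column t without touching shared memory,
// and every round uses a distinct source lane per thread.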
__global__ void iDCT_GPU_warp_shuffle(int* in,
unsigned char *out,
int stride,
int samples_x, int samples_y,
int num_DCT_blocks){
// 8 DCT blocks per thread block, 8 threads per DCT block //
int DCT_block_in_thread_block = threadIdx.x >> 3;
int block_index = (blockIdx.x << 3) + DCT_block_in_thread_block;
if (block_index >= num_DCT_blocks) return;
int thread_in_block = threadIdx.x & 7;
int row_offset = thread_in_block << 3;
in += (block_index << 6) + row_offset;
//__shared__ int shared_blocks[64*8];
//int* my_row = &shared_blocks[64*DCT_block_in_thread_block + row_offset];
// --------------- Do a single row in the DCT block --------------- //
int x0, x1, x2, x3, x4, x5, x6, x7, x8;
x1 = in[4] << 11;
x2 = in[6];
x3 = in[2];
x4 = in[1];
x5 = in[7];
x6 = in[5];
x7 = in[3];
x0 = (in[0] << 11) + 128;
x8 = W7 * (x4 + x5);
x4 = x8 + (W1 - W7) * x4;
x5 = x8 - (W1 + W7) * x5;
x8 = W3 * (x6 + x7);
x6 = x8 - (W3 - W5) * x6;
x7 = x8 - (W3 + W5) * x7;
x8 = x0 + x1;
x0 -= x1;
x1 = W6 * (x3 + x2);
x2 = x1 - (W2 + W6) * x2;
x3 = x1 + (W2 - W6) * x3;
x1 = x4 + x6;
x4 -= x6;
x6 = x5 + x7;
x5 -= x7;
x7 = x8 + x3;
x8 -= x3;
x3 = x0 + x2;
x0 -= x2;
x2 = (181 * (x4 + x5) + 128) >> 8;
x4 = (181 * (x4 - x5) + 128) >> 8;
int y0 = (x7 + x1) >> 8;
int y1 = (x3 + x2) >> 8;
int y2 = (x0 + x4) >> 8;
int y3 = (x8 + x6) >> 8;
int y4 = (x8 - x6) >> 8;
int y5 = (x0 - x4) >> 8;
int y6 = (x3 - x2) >> 8;
int y7 = (x7 - x1) >> 8;
/*my_row[0] = y0;
my_row[1] = y1;
my_row[2] = y2;
my_row[3] = y3;
my_row[4] = y4;
my_row[5] = y5;
my_row[6] = y6;
my_row[7] = y7;*/
// Make sure other rows within this DCT block are finished.
// Could move to cooperative group per DCT block rather than syncing a whole warp,
// buuut TBH I'm hoping the whole kernel will run in lockstep anyway :)
__syncwarp();
// -------------------- Do a single column --------------------//
/*int* my_col = my_row - thread_in_block * 7;
x0 = my_col[0];
x1 = my_col[8*1];
x2 = my_col[8*2];
x3 = my_col[8*3];
x4 = my_col[8*4];
x5 = my_col[8*5];
x6 = my_col[8*6];
x7 = my_col[8*7];*/
// Diagonal //
switch (thread_in_block) {
case 0:
x0 = y0; break;
case 1:
x1 = y1; break;
case 2:
x2 = y2; break;
case 3:
x3 = y3; break;
case 4:
x4 = y4; break;
case 5:
x5 = y5; break;
case 6:
x6 = y6; break;
case 7:
x7 = y7; break;
}
// Diagonal + 1 //
switch (thread_in_block) {
case 0:
x7 = __shfl_sync(FULL_MASK, y0, 7, 8); break;
case 1:
x0 = __shfl_sync(FULL_MASK, y1, 0, 8); break;
case 2:
x1 = __shfl_sync(FULL_MASK, y2, 1, 8); break;
case 3:
x2 = __shfl_sync(FULL_MASK, y3, 2, 8); break;
case 4:
x3 = __shfl_sync(FULL_MASK, y4, 3, 8); break;
case 5:
x4 = __shfl_sync(FULL_MASK, y5, 4, 8); break;
case 6:
x5 = __shfl_sync(FULL_MASK, y6, 5, 8); break;
case 7:
x6 = __shfl_sync(FULL_MASK, y7, 6, 8); break;
}
// Diagonal + 2 //
switch (thread_in_block) {
case 0:
x6 = __shfl_sync(FULL_MASK, y0, 6, 8); break;
case 1:
x7 = __shfl_sync(FULL_MASK, y1, 7, 8); break;
case 2:
x0 = __shfl_sync(FULL_MASK, y2, 0, 8); break;
case 3:
x1 = __shfl_sync(FULL_MASK, y3, 1, 8); break;
case 4:
x2 = __shfl_sync(FULL_MASK, y4, 2, 8); break;
case 5:
x3 = __shfl_sync(FULL_MASK, y5, 3, 8); break;
case 6:
x4 = __shfl_sync(FULL_MASK, y6, 4, 8); break;
case 7:
x5 = __shfl_sync(FULL_MASK, y7, 5, 8); break;
}
// Diagonal + 3 //
switch (thread_in_block) {
case 0:
x5 = __shfl_sync(FULL_MASK, y0, 5, 8); break;
case 1:
x6 = __shfl_sync(FULL_MASK, y1, 6, 8); break;
case 2:
x7 = __shfl_sync(FULL_MASK, y2, 7, 8); break;
case 3:
x0 = __shfl_sync(FULL_MASK, y3, 0, 8); break;
case 4:
x1 = __shfl_sync(FULL_MASK, y4, 1, 8); break;
case 5:
x2 = __shfl_sync(FULL_MASK, y5, 2, 8); break;
case 6:
x3 = __shfl_sync(FULL_MASK, y6, 3, 8); break;
case 7:
x4 = __shfl_sync(FULL_MASK, y7, 4, 8); break;
}
// Diagonal + 4 //
switch (thread_in_block) {
case 0:
x4 = __shfl_sync(FULL_MASK, y0, 4, 8); break;
case 1:
x5 = __shfl_sync(FULL_MASK, y1, 5, 8); break;
case 2:
x6 = __shfl_sync(FULL_MASK, y2, 6, 8); break;
case 3:
x7 = __shfl_sync(FULL_MASK, y3, 7, 8); break;
case 4:
x0 = __shfl_sync(FULL_MASK, y4, 0, 8); break;
case 5:
x1 = __shfl_sync(FULL_MASK, y5, 1, 8); break;
case 6:
x2 = __shfl_sync(FULL_MASK, y6, 2, 8); break;
case 7:
x3 = __shfl_sync(FULL_MASK, y7, 3, 8); break;
}
// Diagonal + 5 //
switch (thread_in_block) {
case 0:
x3 = __shfl_sync(FULL_MASK, y0, 3, 8); break;
case 1:
x4 = __shfl_sync(FULL_MASK, y1, 4, 8); break;
case 2:
x5 = __shfl_sync(FULL_MASK, y2, 5, 8); break;
case 3:
x6 = __shfl_sync(FULL_MASK, y3, 6, 8); break;
case 4:
x7 = __shfl_sync(FULL_MASK, y4, 7, 8); break;
case 5:
x0 = __shfl_sync(FULL_MASK, y5, 0, 8); break;
case 6:
x1 = __shfl_sync(FULL_MASK, y6, 1, 8); break;
case 7:
x2 = __shfl_sync(FULL_MASK, y7, 2, 8); break;
}
// Diagonal + 6 //
switch (thread_in_block) {
case 0:
x2 = __shfl_sync(FULL_MASK, y0, 2, 8); break;
case 1:
x3 = __shfl_sync(FULL_MASK, y1, 3, 8); break;
case 2:
x4 = __shfl_sync(FULL_MASK, y2, 4, 8); break;
case 3:
x5 = __shfl_sync(FULL_MASK, y3, 5, 8); break;
case 4:
x6 = __shfl_sync(FULL_MASK, y4, 6, 8); break;
case 5:
x7 = __shfl_sync(FULL_MASK, y5, 7, 8); break;
case 6:
x0 = __shfl_sync(FULL_MASK, y6, 0, 8); break;
case 7:
x1 = __shfl_sync(FULL_MASK, y7, 1, 8); break;
}
// Diagonal + 7 //
switch (thread_in_block) {
case 0:
x1 = __shfl_sync(FULL_MASK, y0, 1, 8); break;
case 1:
x2 = __shfl_sync(FULL_MASK, y1, 2, 8); break;
case 2:
x3 = __shfl_sync(FULL_MASK, y2, 3, 8); break;
case 3:
x4 = __shfl_sync(FULL_MASK, y3, 4, 8); break;
case 4:
x5 = __shfl_sync(FULL_MASK, y4, 5, 8); break;
case 5:
x6 = __shfl_sync(FULL_MASK, y5, 6, 8); break;
case 6:
x7 = __shfl_sync(FULL_MASK, y6, 7, 8); break;
case 7:
x0 = __shfl_sync(FULL_MASK, y7, 0, 8); break;
}
x0 = (x0 << 8) + 8192;
x4 = x4 << 8;
x8 = W7 * (x1 + x7) + 4;
x1 = (x8 + (W1 - W7) * x1) >> 3;
x7 = (x8 - (W1 + W7) * x7) >> 3;
x8 = W3 * (x5 + x3) + 4;
x5 = (x8 - (W3 - W5) * x5) >> 3;
x3 = (x8 - (W3 + W5) * x3) >> 3;
x8 = x0 + x4;
x0 -= x4;
x4 = W6 * (x2 + x6) + 4;
x6 = (x4 - (W2 + W6) * x6) >> 3;
x2 = (x4 + (W2 - W6) * x2) >> 3;
x4 = x1 + x5;
x1 -= x5;
x5 = x7 + x3;
x7 -= x3;
x3 = x8 + x2;
x8 -= x2;
x2 = x0 + x6;
x0 -= x6;
x6 = (181 * (x1 + x7) + 128) >> 8;
x1 = (181 * (x1 - x7) + 128) >> 8;
// Work out where in the global output to start writing //
int blocks_per_outer_block = samples_x * samples_y;
int blocks_per_row = samples_y * (stride >> 3);
int outer_block_y = block_index / blocks_per_row;
int remaining_blocks = block_index % blocks_per_row;
int outer_block_x = remaining_blocks / blocks_per_outer_block;
remaining_blocks = remaining_blocks % blocks_per_outer_block;
int inner_block_y = remaining_blocks / samples_x;
int inner_block_x = remaining_blocks % samples_x;
int block_x = outer_block_x * samples_x + inner_block_x;
int block_y = outer_block_y * samples_y + inner_block_y;
out += (block_y * stride + block_x) << 3;
out += thread_in_block;
// Writes are coalesced within a DCT block, but not within the whole thread block. ToDo? //
*out = clip(((x3 + x4) >> 14) + 128); out += stride;
*out = clip(((x2 + x6) >> 14) + 128); out += stride;
*out = clip(((x0 + x1) >> 14) + 128); out += stride;
*out = clip(((x8 + x5) >> 14) + 128); out += stride;
*out = clip(((x8 - x5) >> 14) + 128); out += stride;
*out = clip(((x0 - x1) >> 14) + 128); out += stride;
*out = clip(((x2 - x6) >> 14) + 128); out += stride;
*out = clip(((x3 - x4) >> 14) + 128);
}
__host__ inline unsigned char clipHost(const int x) {
return (x < 0) ? 0 : ((x > 0xFF) ? 0xFF : (unsigned char) x);
}
__global__ void upsampleChannelGPU_cokernel(unsigned char* in, unsigned char*out,
unsigned int in_width, unsigned int in_stride,
unsigned int x_scale, unsigned int y_scale) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
// I assume since each input block is 64 chars, and an upsample must at least
// double one of the dimensions, that the size of 'out' is a multiple of 128.
// So for now I use thread blocks of size 128 and don't check bounds of 'out'
// if (i >= out_size) return;
int out_width = in_width << x_scale;
int y = (i / out_width) >> y_scale;
int x = (i % out_width) >> x_scale;
out[i] = in[y * in_stride + x];
}
__global__ void upsampleChannelGPU_kernel(unsigned char* in, unsigned char*out,
unsigned int in_stride, unsigned int out_width,
unsigned int x_scale, unsigned int y_scale) {
// Since each DCT block is 64 chars, can assume size of 'in' is a multiple of
// 64, hence blockDim.x is set to 64 and no bounds checking is done at the
// start of the kernel.
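// Example (assumed values): with in_stride = 16 and x_scale = y_scale = 1, input pixel
// i = 35 (row 2, col 3) is replicated into the 2x2 output patch whose top-left corner is
// output row 4, col 6.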
int i = blockIdx.x * blockDim.x + threadIdx.x;
int out_y = (i / in_stride) << y_scale;
int out_x = (i % in_stride) << x_scale;
out += out_y * out_width + out_x;
for(int y_step = 0; y_step < (1 << y_scale); y_step++) {
for(int x_step = 0; x_step < (1 << x_scale); x_step++)
out[x_step] = in[i];
out += out_width;
}
}
__global__ void upsampleAndYUVtoRGB_kernel(unsigned char* Y, unsigned char* Cb, unsigned char* Cr,
unsigned char* RGB,
unsigned int x_scale, unsigned int y_scale,
unsigned int Y_width, unsigned int Y_height,
unsigned int Y_stride,
unsigned int C_width, unsigned int C_stride) {
// Uses a thread per pixel from Cb/Cr, which could correspond to
// 1, 2 or 4 pixels from Y. Y and RGB have the same pixel dimensions, but,
// unlike RGB, Y will probably have a stride different to its width. Also
// RGB has 3 chars per pixel, whilst Y has 1.
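// The integer constants below are the BT.601 YCbCr->RGB factors in 8.8 fixed point:
// 359 ~ 1.402*256 (R from Cr), 88 ~ 0.344*256 and 183 ~ 0.714*256 (G from Cb/Cr),
// 454 ~ 1.772*256 (B from Cb); the +128 rounds before the final >> 8.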
int i = blockIdx.x * blockDim.x + threadIdx.x;
int C_x = i % C_width;
int C_y = i / C_width;
i = C_y * C_stride + C_x;
int cb = Cb[i] - 128, cr = Cr[i] - 128;
int Y_x = C_x << x_scale;
int Y_y = C_y << y_scale;
RGB += (Y_y * Y_width + Y_x) * 3;
Y += Y_y * Y_stride + Y_x;
int y_steps = 1 << y_scale;//min(1 << y_scale, Y_height - Y_y);
int x_steps = 3 << x_scale;//min(3 << x_scale, Y_width - Y_x);
for(int y_step = 0; y_step < y_steps; y_step++) {
for(int x_step = 0; x_step < x_steps;) {
int y = (*Y++) << 8;
RGB[x_step++] = clip((y + 359 * cr + 128) >> 8); // R
RGB[x_step++] = clip((y - 88 * cb - 183 * cr + 128) >> 8); // G
RGB[x_step++] = clip((y + 454 * cb + 128) >> 8); // B
}
RGB += Y_width * 3;
Y += Y_stride - (x_steps/3);
}
}
__host__ void upsampleChannelGPU(JPGReader* jpg, ColourChannel* channel) {
if ((channel->width < jpg->width) || (channel->height < jpg->height)) {
// Do an upscale //
unsigned int xshift = 0, yshift = 0;
unsigned int in_width = channel->width, in_height = channel->height;
while (channel->width < jpg->width) { channel->width <<= 1; ++xshift; }
while (channel->height < jpg->height) { channel->height <<= 1; ++yshift; }
/*int threads_per_block = 128;
int num_blocks = (channel->width * channel->height) / threads_per_block;
upsampleChannelGPU_cokernel<<<num_blocks, threads_per_block>>>(channel->device_raw_pixels.mem,
channel->device_pixels.mem,
in_width,
channel->stride,
xshift,
yshift);
if (hipGetLastError() != hipSuccess) THROW(CUDA_KERNEL_LAUNCH_ERROR);*/
int threads_per_block = 64;
int num_blocks = (in_width * in_height) / threads_per_block;
hipLaunchKernelGGL(( upsampleChannelGPU_kernel), dim3(num_blocks), dim3(threads_per_block), 0, 0, channel->device_raw_pixels.mem,
channel->device_pixels.mem,
channel->stride,
channel->width,
xshift,
yshift);
if (hipGetLastError() != hipSuccess) THROW(CUDA_KERNEL_LAUNCH_ERROR);
channel->stride = channel->width;
hipMemcpy(channel->pixels.mem, channel->device_pixels.mem,
channel->pixels.size, hipMemcpyDeviceToHost);
} else {
hipMemcpy(channel->pixels.mem, channel->device_raw_pixels.mem,
channel->pixels.size, hipMemcpyDeviceToHost);
}
}
__host__ void upsampleChannelCPU(JPGReader* jpg, ColourChannel* channel) {
if ((channel->width < jpg->width) || (channel->height < jpg->height)) {
// Do an upscale //
int x, y, xshift = 0, yshift = 0;
while (channel->width < jpg->width) { channel->width <<= 1; ++xshift; }
while (channel->height < jpg->height) { channel->height <<= 1; ++yshift; }
unsigned char *lout, *out = channel->pixels.mem;
for (y = 0, lout = out; y < channel->height; ++y, lout += channel->width) {
unsigned char *lin = &channel->raw_pixels.mem[(y >> yshift) * channel->stride];
for (x = 0; x < channel->width; ++x)
lout[x] = lin[x >> xshift];
}
channel->stride = channel->width;
} else {
// Do a pointer swap, assume the compiler will make it nicer //
unsigned char* tmp1 = channel->pixels.mem;
channel->pixels.mem = channel->raw_pixels.mem;
channel->raw_pixels.mem = tmp1;
unsigned int tmp2 = channel->pixels.size;
channel->pixels.size = channel->raw_pixels.size;
channel->raw_pixels.size = tmp2;
tmp2 = channel->pixels.max_size;
channel->pixels.max_size = channel->raw_pixels.max_size;
channel->raw_pixels.max_size = tmp2;
}
}
__host__ void upsampleAndColourTransformGPU(JPGReader* jpg) {
clock_t start_time = clock();
if (jpg->num_channels == 3) {
unsigned int xshift = 0, yshift = 0;
while ((jpg->channels[1].width << xshift) < jpg->width) ++xshift;
while ((jpg->channels[1].height << yshift) < jpg->height) ++yshift;
ColourChannel *channels = jpg->channels;
int tpb = 64; // threads per block
int num_blocks = ((channels[1].width * channels[1].height) + tpb-1) / tpb;
hipLaunchKernelGGL(( upsampleAndYUVtoRGB_kernel), dim3(num_blocks), dim3(tpb), 0, 0, channels[0].device_raw_pixels.mem,
channels[1].device_raw_pixels.mem,
channels[2].device_raw_pixels.mem,
jpg->device_pixels.mem,
xshift, yshift,
channels[0].width, channels[0].height,
channels[0].stride,
channels[1].width, channels[1].stride);
if (hipGetLastError() != hipSuccess) THROW(CUDA_KERNEL_LAUNCH_ERROR);
hipMemcpy(jpg->pixels, jpg->device_pixels.mem,
jpg->device_pixels.size, hipMemcpyDeviceToHost);
} else {
ColourChannel* c = &jpg->channels[0];
hipMemcpy2D(c->pixels.mem, jpg->width,
c->device_raw_pixels.mem, c->stride,
c->width, c->height,
hipMemcpyDeviceToHost);
}
//hipDeviceSynchronize();
clock_t end_time = clock();
//jpg->time += end_time - start_time;
}
__host__ void iDCT_resample_colourTransform(JPGReader* jpg) {
clock_t start_time = clock();
int i;
ColourChannel *channel;
// Do the iDCT for each channel //
for (i = 0, channel = jpg->channels; i < jpg->num_channels; i++, channel++) {
hipMemcpy(channel->device_working_space.mem, channel->working_space.mem,
channel->working_space.size * sizeof(int), hipMemcpyHostToDevice);
int num_blocks =
jpg->num_blocks_x * channel->samples_x * jpg->num_blocks_y * channel->samples_y;
int num_thread_blocks = (num_blocks + 7) >> 3;
int num_threads_per_block = 64;
hipLaunchKernelGGL(( iDCT_GPU), dim3(num_thread_blocks), dim3(num_threads_per_block), 0, 0, channel->device_working_space.mem,
channel->device_raw_pixels.mem,
channel->stride,
channel->samples_x,
channel->samples_y,
num_blocks);
if (hipGetLastError() != hipSuccess) THROW(CUDA_KERNEL_LAUNCH_ERROR);
}
// Do the colourspace transform //
if (jpg->num_channels == 3) {
unsigned int xshift = 0, yshift = 0;
while ((jpg->channels[1].width << xshift) < jpg->width) ++xshift;
while ((jpg->channels[1].height << yshift) < jpg->height) ++yshift;
ColourChannel *channels = jpg->channels;
int tpb = 64; // threads per block
int num_blocks = ((channels[1].width * channels[1].height) + tpb-1) / tpb;
hipLaunchKernelGGL(( upsampleAndYUVtoRGB_kernel), dim3(num_blocks), dim3(tpb), 0, 0, channels[0].device_raw_pixels.mem,
channels[1].device_raw_pixels.mem,
channels[2].device_raw_pixels.mem,
jpg->device_pixels.mem,
xshift, yshift,
channels[0].width, channels[0].height,
channels[0].stride,
channels[1].width, channels[1].stride);
if (hipGetLastError() != hipSuccess) THROW(CUDA_KERNEL_LAUNCH_ERROR);
hipMemcpy(jpg->pixels, jpg->device_pixels.mem,
jpg->device_pixels.size, hipMemcpyDeviceToHost);
} else {
ColourChannel* c = &jpg->channels[0];
hipMemcpy2D(c->pixels.mem, jpg->width,
c->device_raw_pixels.mem, c->stride,
c->width, c->height,
hipMemcpyDeviceToHost);
}
//hipDeviceSynchronize();
clock_t end_time = clock();
//jpg->time += end_time - start_time;
}
__host__ void upsampleAndColourTransformHybrid(JPGReader* jpg) {
int i;
ColourChannel* channel;
clock_t start_time = clock();
for (i = 0, channel = &jpg->channels[0]; i < jpg->num_channels; ++i, ++channel) {
//if ((channel->width < jpg->width) || (channel->height < jpg->height))
upsampleChannelGPU(jpg, channel);
if ((channel->width < jpg->width) || (channel->height < jpg->height)){
fprintf(stderr, "Logical error in upscale?\n");
THROW(SYNTAX_ERROR);
}
}
if (jpg->num_channels == 3) {
// convert to RGB //
unsigned char *prgb = jpg->pixels;
const unsigned char *py = jpg->channels[0].pixels.mem;
const unsigned char *pcb = jpg->channels[1].pixels.mem;
const unsigned char *pcr = jpg->channels[2].pixels.mem;
for (int yy = jpg->height; yy; --yy) {
for (int x = 0; x < jpg->width; ++x) {
register int y = py[x] << 8;
register int cb = pcb[x] - 128;
register int cr = pcr[x] - 128;
*prgb++ = clipHost((y + 359 * cr + 128) >> 8);
*prgb++ = clipHost((y - 88 * cb - 183 * cr + 128) >> 8);
*prgb++ = clipHost((y + 454 * cb + 128) >> 8);
}
py += jpg->channels[0].stride;
pcb += jpg->channels[1].stride;
pcr += jpg->channels[2].stride;
}
} else if (jpg->channels[0].width != jpg->channels[0].stride) {
// grayscale -> only remove stride
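// Note: for the first few rows the source and destination ranges can overlap
// (whenever stride - width < width), so memmove would be the strictly safe choice here.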
ColourChannel *channel = &jpg->channels[0];
unsigned char *pin = &channel->pixels.mem[channel->stride];
unsigned char *pout = &channel->pixels.mem[channel->width];
for (int y = channel->height - 1; y; --y) {
memcpy(pout, pin, channel->width);
pin += channel->stride;
pout += channel->width;
}
channel->stride = channel->width;
}
//hipDeviceSynchronize();
clock_t end_time = clock();
//jpg->time += end_time - start_time;
}
__host__ void upsampleAndColourTransformCPU(JPGReader* jpg) {
int i;
ColourChannel* channel;
for (i = 0, channel = &jpg->channels[0]; i < jpg->num_channels; ++i, ++channel) {
//if ((channel->width < jpg->width) || (channel->height < jpg->height))
upsampleChannelCPU(jpg, channel);
if ((channel->width < jpg->width) || (channel->height < jpg->height)){
fprintf(stderr, "Logical error in upscale?\n");
THROW(SYNTAX_ERROR);
}
}
if (jpg->num_channels == 3) {
// convert to RGB //
unsigned char *prgb = jpg->pixels;
const unsigned char *py = jpg->channels[0].pixels.mem;
const unsigned char *pcb = jpg->channels[1].pixels.mem;
const unsigned char *pcr = jpg->channels[2].pixels.mem;
for (int yy = jpg->height; yy; --yy) {
for (int x = 0; x < jpg->width; ++x) {
register int y = py[x] << 8;
register int cb = pcb[x] - 128;
register int cr = pcr[x] - 128;
*prgb++ = clipHost((y + 359 * cr + 128) >> 8);
*prgb++ = clipHost((y - 88 * cb - 183 * cr + 128) >> 8);
*prgb++ = clipHost((y + 454 * cb + 128) >> 8);
}
py += jpg->channels[0].stride;
pcb += jpg->channels[1].stride;
pcr += jpg->channels[2].stride;
}
} else if (jpg->channels[0].width != jpg->channels[0].stride) {
// grayscale -> only remove stride
ColourChannel *channel = &jpg->channels[0];
unsigned char *pin = &channel->pixels.mem[channel->stride];
unsigned char *pout = &channel->pixels.mem[channel->width];
for (int y = channel->height - 1; y; --y) {
memcpy(pout, pin, channel->width);
pin += channel->stride;
pout += channel->width;
}
channel->stride = channel->width;
}
}
| 9470448c78804e25cb7cd5e95c8e5130e963dc54.cu | #include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<cuda_runtime.h>
#include<pixelTransformGPU.h>
#include<format.h>
#define W1 2841
#define W2 2676
#define W3 2408
#define W5 1609
#define W6 1108
#define W7 565
#define FULL_MASK 0xFFFFFFFF
__device__ inline unsigned char clip(const int x) {
return (x < 0) ? 0 : ((x > 0xFF) ? 0xFF : (unsigned char) x);
}
__global__ void iDCT_GPU(int* in,
unsigned char *out,
int stride,
int samples_x, int samples_y,
int num_DCT_blocks){
// 8 DCT blocks per thread block, 8 threads per DCT block //
int DCT_block_in_thread_block = threadIdx.x >> 3;
int block_index = (blockIdx.x << 3) + DCT_block_in_thread_block;
if (block_index >= num_DCT_blocks) return;
int thread_in_block = threadIdx.x & 7;
int row_offset = thread_in_block << 3;
in += (block_index << 6) + row_offset;
__shared__ int shared_blocks[64*8];
int* my_row = &shared_blocks[64*DCT_block_in_thread_block + row_offset];
// --------------- Do a single row in the DCT block --------------- //
int x0, x1, x2, x3, x4, x5, x6, x7, x8;
x1 = in[4] << 11;
x2 = in[6];
x3 = in[2];
x4 = in[1];
x5 = in[7];
x6 = in[5];
x7 = in[3];
x0 = (in[0] << 11) + 128;
x8 = W7 * (x4 + x5);
x4 = x8 + (W1 - W7) * x4;
x5 = x8 - (W1 + W7) * x5;
x8 = W3 * (x6 + x7);
x6 = x8 - (W3 - W5) * x6;
x7 = x8 - (W3 + W5) * x7;
x8 = x0 + x1;
x0 -= x1;
x1 = W6 * (x3 + x2);
x2 = x1 - (W2 + W6) * x2;
x3 = x1 + (W2 - W6) * x3;
x1 = x4 + x6;
x4 -= x6;
x6 = x5 + x7;
x5 -= x7;
x7 = x8 + x3;
x8 -= x3;
x3 = x0 + x2;
x0 -= x2;
x2 = (181 * (x4 + x5) + 128) >> 8;
x4 = (181 * (x4 - x5) + 128) >> 8;
my_row[0] = (x7 + x1) >> 8;
my_row[1] = (x3 + x2) >> 8;
my_row[2] = (x0 + x4) >> 8;
my_row[3] = (x8 + x6) >> 8;
my_row[4] = (x8 - x6) >> 8;
my_row[5] = (x0 - x4) >> 8;
my_row[6] = (x3 - x2) >> 8;
my_row[7] = (x7 - x1) >> 8;
// Make sure other rows within this DCT block are finished.
// Could move to cooperative group per DCT block rather than syncing a whole warp,
// buuut TBH I'm hoping the whole kernel will run in lockstep anyway :)
__syncwarp();
// -------------------- Do a single column --------------------//
int* my_col = my_row - thread_in_block * 7;
x1 = my_col[8*4] << 8;
x2 = my_col[8*6];
x3 = my_col[8*2];
x4 = my_col[8*1];
x5 = my_col[8*7];
x6 = my_col[8*5];
x7 = my_col[8*3];
x0 = (my_col[0] << 8) + 8192;
x8 = W7 * (x4 + x5) + 4;
x4 = (x8 + (W1 - W7) * x4) >> 3;
x5 = (x8 - (W1 + W7) * x5) >> 3;
x8 = W3 * (x6 + x7) + 4;
x6 = (x8 - (W3 - W5) * x6) >> 3;
x7 = (x8 - (W3 + W5) * x7) >> 3;
x8 = x0 + x1;
x0 -= x1;
x1 = W6 * (x3 + x2) + 4;
x2 = (x1 - (W2 + W6) * x2) >> 3;
x3 = (x1 + (W2 - W6) * x3) >> 3;
x1 = x4 + x6;
x4 -= x6;
x6 = x5 + x7;
x5 -= x7;
x7 = x8 + x3;
x8 -= x3;
x3 = x0 + x2;
x0 -= x2;
x2 = (181 * (x4 + x5) + 128) >> 8;
x4 = (181 * (x4 - x5) + 128) >> 8;
// Work out where in the global output to start writing //
int blocks_per_outer_block = samples_x * samples_y;
int blocks_per_row = samples_y * (stride >> 3);
int outer_block_y = block_index / blocks_per_row;
int remaining_blocks = block_index % blocks_per_row;
int outer_block_x = remaining_blocks / blocks_per_outer_block;
remaining_blocks = remaining_blocks % blocks_per_outer_block;
int inner_block_y = remaining_blocks / samples_x;
int inner_block_x = remaining_blocks % samples_x;
int block_x = outer_block_x * samples_x + inner_block_x;
int block_y = outer_block_y * samples_y + inner_block_y;
out += (block_y * stride + block_x) << 3;
out += thread_in_block;
// Writes are coalesced within a DCT block, but not within the whole thread block. ToDo? //
*out = clip(((x7 + x1) >> 14) + 128); out += stride;
*out = clip(((x3 + x2) >> 14) + 128); out += stride;
*out = clip(((x0 + x4) >> 14) + 128); out += stride;
*out = clip(((x8 + x6) >> 14) + 128); out += stride;
*out = clip(((x8 - x6) >> 14) + 128); out += stride;
*out = clip(((x0 - x4) >> 14) + 128); out += stride;
*out = clip(((x3 - x2) >> 14) + 128); out += stride;
*out = clip(((x7 - x1) >> 14) + 128);
}
// Haven't tested whether this works: __shfl_sync across divergent branches relies on the
// independent thread scheduling introduced with compute capability 7.0 (Volta and newer),
// and I'm not buying a new GPU just for that. In particular I'm not sure my choice of 'var'
// in the shfl intrinsic is correct. As such, the normal iDCT_GPU just relies on shared mem
// to communicate between rows and cols.
__global__ void iDCT_GPU_warp_shuffle(int* in,
unsigned char *out,
int stride,
int samples_x, int samples_y,
int num_DCT_blocks){
// 8 DCT blocks per thread block, 8 threads per DCT block //
int DCT_block_in_thread_block = threadIdx.x >> 3;
int block_index = (blockIdx.x << 3) + DCT_block_in_thread_block;
if (block_index >= num_DCT_blocks) return;
int thread_in_block = threadIdx.x & 7;
int row_offset = thread_in_block << 3;
in += (block_index << 6) + row_offset;
//__shared__ int shared_blocks[64*8];
//int* my_row = &shared_blocks[64*DCT_block_in_thread_block + row_offset];
// --------------- Do a single row in the DCT block --------------- //
int x0, x1, x2, x3, x4, x5, x6, x7, x8;
x1 = in[4] << 11;
x2 = in[6];
x3 = in[2];
x4 = in[1];
x5 = in[7];
x6 = in[5];
x7 = in[3];
x0 = (in[0] << 11) + 128;
x8 = W7 * (x4 + x5);
x4 = x8 + (W1 - W7) * x4;
x5 = x8 - (W1 + W7) * x5;
x8 = W3 * (x6 + x7);
x6 = x8 - (W3 - W5) * x6;
x7 = x8 - (W3 + W5) * x7;
x8 = x0 + x1;
x0 -= x1;
x1 = W6 * (x3 + x2);
x2 = x1 - (W2 + W6) * x2;
x3 = x1 + (W2 - W6) * x3;
x1 = x4 + x6;
x4 -= x6;
x6 = x5 + x7;
x5 -= x7;
x7 = x8 + x3;
x8 -= x3;
x3 = x0 + x2;
x0 -= x2;
x2 = (181 * (x4 + x5) + 128) >> 8;
x4 = (181 * (x4 - x5) + 128) >> 8;
int y0 = (x7 + x1) >> 8;
int y1 = (x3 + x2) >> 8;
int y2 = (x0 + x4) >> 8;
int y3 = (x8 + x6) >> 8;
int y4 = (x8 - x6) >> 8;
int y5 = (x0 - x4) >> 8;
int y6 = (x3 - x2) >> 8;
int y7 = (x7 - x1) >> 8;
/*my_row[0] = y0;
my_row[1] = y1;
my_row[2] = y2;
my_row[3] = y3;
my_row[4] = y4;
my_row[5] = y5;
my_row[6] = y6;
my_row[7] = y7;*/
// Make sure other rows within this DCT block are finished.
// Could move to cooperative group per DCT block rather than syncing a whole warp,
// buuut TBH I'm hoping the whole kernel will run in lockstep anyway :)
__syncwarp();
// -------------------- Do a single column --------------------//
/*int* my_col = my_row - thread_in_block * 7;
x0 = my_col[0];
x1 = my_col[8*1];
x2 = my_col[8*2];
x3 = my_col[8*3];
x4 = my_col[8*4];
x5 = my_col[8*5];
x6 = my_col[8*6];
x7 = my_col[8*7];*/
// Diagonal //
switch (thread_in_block) {
case 0:
x0 = y0; break;
case 1:
x1 = y1; break;
case 2:
x2 = y2; break;
case 3:
x3 = y3; break;
case 4:
x4 = y4; break;
case 5:
x5 = y5; break;
case 6:
x6 = y6; break;
case 7:
x7 = y7; break;
}
// Diagonal + 1 //
switch (thread_in_block) {
case 0:
x7 = __shfl_sync(FULL_MASK, y0, 7, 8); break;
case 1:
x0 = __shfl_sync(FULL_MASK, y1, 0, 8); break;
case 2:
x1 = __shfl_sync(FULL_MASK, y2, 1, 8); break;
case 3:
x2 = __shfl_sync(FULL_MASK, y3, 2, 8); break;
case 4:
x3 = __shfl_sync(FULL_MASK, y4, 3, 8); break;
case 5:
x4 = __shfl_sync(FULL_MASK, y5, 4, 8); break;
case 6:
x5 = __shfl_sync(FULL_MASK, y6, 5, 8); break;
case 7:
x6 = __shfl_sync(FULL_MASK, y7, 6, 8); break;
}
// Diagonal + 2 //
switch (thread_in_block) {
case 0:
x6 = __shfl_sync(FULL_MASK, y0, 6, 8); break;
case 1:
x7 = __shfl_sync(FULL_MASK, y1, 7, 8); break;
case 2:
x0 = __shfl_sync(FULL_MASK, y2, 0, 8); break;
case 3:
x1 = __shfl_sync(FULL_MASK, y3, 1, 8); break;
case 4:
x2 = __shfl_sync(FULL_MASK, y4, 2, 8); break;
case 5:
x3 = __shfl_sync(FULL_MASK, y5, 3, 8); break;
case 6:
x4 = __shfl_sync(FULL_MASK, y6, 4, 8); break;
case 7:
x5 = __shfl_sync(FULL_MASK, y7, 5, 8); break;
}
// Diagonal + 3 //
switch (thread_in_block) {
case 0:
x5 = __shfl_sync(FULL_MASK, y0, 5, 8); break;
case 1:
x6 = __shfl_sync(FULL_MASK, y1, 6, 8); break;
case 2:
x7 = __shfl_sync(FULL_MASK, y2, 7, 8); break;
case 3:
x0 = __shfl_sync(FULL_MASK, y3, 0, 8); break;
case 4:
x1 = __shfl_sync(FULL_MASK, y4, 1, 8); break;
case 5:
x2 = __shfl_sync(FULL_MASK, y5, 2, 8); break;
case 6:
x3 = __shfl_sync(FULL_MASK, y6, 3, 8); break;
case 7:
x4 = __shfl_sync(FULL_MASK, y7, 4, 8); break;
}
// Diagonal + 4 //
switch (thread_in_block) {
case 0:
x4 = __shfl_sync(FULL_MASK, y0, 4, 8); break;
case 1:
x5 = __shfl_sync(FULL_MASK, y1, 5, 8); break;
case 2:
x6 = __shfl_sync(FULL_MASK, y2, 6, 8); break;
case 3:
x7 = __shfl_sync(FULL_MASK, y3, 7, 8); break;
case 4:
x0 = __shfl_sync(FULL_MASK, y4, 0, 8); break;
case 5:
x1 = __shfl_sync(FULL_MASK, y5, 1, 8); break;
case 6:
x2 = __shfl_sync(FULL_MASK, y6, 2, 8); break;
case 7:
x3 = __shfl_sync(FULL_MASK, y7, 3, 8); break;
}
// Diagonal + 5 //
switch (thread_in_block) {
case 0:
x3 = __shfl_sync(FULL_MASK, y0, 3, 8); break;
case 1:
x4 = __shfl_sync(FULL_MASK, y1, 4, 8); break;
case 2:
x5 = __shfl_sync(FULL_MASK, y2, 5, 8); break;
case 3:
x6 = __shfl_sync(FULL_MASK, y3, 6, 8); break;
case 4:
x7 = __shfl_sync(FULL_MASK, y4, 7, 8); break;
case 5:
x0 = __shfl_sync(FULL_MASK, y5, 0, 8); break;
case 6:
x1 = __shfl_sync(FULL_MASK, y6, 1, 8); break;
case 7:
x2 = __shfl_sync(FULL_MASK, y7, 2, 8); break;
}
// Diagonal + 6 //
switch (thread_in_block) {
case 0:
x2 = __shfl_sync(FULL_MASK, y0, 2, 8); break;
case 1:
x3 = __shfl_sync(FULL_MASK, y1, 3, 8); break;
case 2:
x4 = __shfl_sync(FULL_MASK, y2, 4, 8); break;
case 3:
x5 = __shfl_sync(FULL_MASK, y3, 5, 8); break;
case 4:
x6 = __shfl_sync(FULL_MASK, y4, 6, 8); break;
case 5:
x7 = __shfl_sync(FULL_MASK, y5, 7, 8); break;
case 6:
x0 = __shfl_sync(FULL_MASK, y6, 0, 8); break;
case 7:
x1 = __shfl_sync(FULL_MASK, y7, 1, 8); break;
}
// Diagonal + 7 //
switch (thread_in_block) {
case 0:
x1 = __shfl_sync(FULL_MASK, y0, 1, 8); break;
case 1:
x2 = __shfl_sync(FULL_MASK, y1, 2, 8); break;
case 2:
x3 = __shfl_sync(FULL_MASK, y2, 3, 8); break;
case 3:
x4 = __shfl_sync(FULL_MASK, y3, 4, 8); break;
case 4:
x5 = __shfl_sync(FULL_MASK, y4, 5, 8); break;
case 5:
x6 = __shfl_sync(FULL_MASK, y5, 6, 8); break;
case 6:
x7 = __shfl_sync(FULL_MASK, y6, 7, 8); break;
case 7:
x0 = __shfl_sync(FULL_MASK, y7, 0, 8); break;
}
x0 = (x0 << 8) + 8192;
x4 = x4 << 8;
x8 = W7 * (x1 + x7) + 4;
x1 = (x8 + (W1 - W7) * x1) >> 3;
x7 = (x8 - (W1 + W7) * x7) >> 3;
x8 = W3 * (x5 + x3) + 4;
x5 = (x8 - (W3 - W5) * x5) >> 3;
x3 = (x8 - (W3 + W5) * x3) >> 3;
x8 = x0 + x4;
x0 -= x4;
x4 = W6 * (x2 + x6) + 4;
x6 = (x4 - (W2 + W6) * x6) >> 3;
x2 = (x4 + (W2 - W6) * x2) >> 3;
x4 = x1 + x5;
x1 -= x5;
x5 = x7 + x3;
x7 -= x3;
x3 = x8 + x2;
x8 -= x2;
x2 = x0 + x6;
x0 -= x6;
x6 = (181 * (x1 + x7) + 128) >> 8;
x1 = (181 * (x1 - x7) + 128) >> 8;
// Work out where in the global output to start writing //
int blocks_per_outer_block = samples_x * samples_y;
int blocks_per_row = samples_y * (stride >> 3);
int outer_block_y = block_index / blocks_per_row;
int remaining_blocks = block_index % blocks_per_row;
int outer_block_x = remaining_blocks / blocks_per_outer_block;
remaining_blocks = remaining_blocks % blocks_per_outer_block;
int inner_block_y = remaining_blocks / samples_x;
int inner_block_x = remaining_blocks % samples_x;
int block_x = outer_block_x * samples_x + inner_block_x;
int block_y = outer_block_y * samples_y + inner_block_y;
out += (block_y * stride + block_x) << 3;
out += thread_in_block;
// Writes are coalesced within a DCT block, but not within the whole thread block. ToDo? //
*out = clip(((x3 + x4) >> 14) + 128); out += stride;
*out = clip(((x2 + x6) >> 14) + 128); out += stride;
*out = clip(((x0 + x1) >> 14) + 128); out += stride;
*out = clip(((x8 + x5) >> 14) + 128); out += stride;
*out = clip(((x8 - x5) >> 14) + 128); out += stride;
*out = clip(((x0 - x1) >> 14) + 128); out += stride;
*out = clip(((x2 - x6) >> 14) + 128); out += stride;
*out = clip(((x3 - x4) >> 14) + 128);
}
__host__ inline unsigned char clipHost(const int x) {
return (x < 0) ? 0 : ((x > 0xFF) ? 0xFF : (unsigned char) x);
}
__global__ void upsampleChannelGPU_cokernel(unsigned char* in, unsigned char*out,
unsigned int in_width, unsigned int in_stride,
unsigned int x_scale, unsigned int y_scale) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
// I assume since each input block is 64 chars, and an upsample must at least
// double one of the dimensions, that the size of 'out' is a multiple of 128.
// So for now I use thread blocks of size 128 and don't check bounds of 'out'
// if (i >= out_size) return;
int out_width = in_width << x_scale;
int y = (i / out_width) >> y_scale;
int x = (i % out_width) >> x_scale;
out[i] = in[y * in_stride + x];
}
__global__ void upsampleChannelGPU_kernel(unsigned char* in, unsigned char*out,
unsigned int in_stride, unsigned int out_width,
unsigned int x_scale, unsigned int y_scale) {
// Since each DCT block is 64 chars, can assume size of 'in' is a multiple of
// 64, hence blockDim.x is set to 64 and no bounds checking is done at the
// start of the kernel.
int i = blockIdx.x * blockDim.x + threadIdx.x;
int out_y = (i / in_stride) << y_scale;
int out_x = (i % in_stride) << x_scale;
out += out_y * out_width + out_x;
for(int y_step = 0; y_step < (1 << y_scale); y_step++) {
for(int x_step = 0; x_step < (1 << x_scale); x_step++)
out[x_step] = in[i];
out += out_width;
}
}
__global__ void upsampleAndYUVtoRGB_kernel(unsigned char* Y, unsigned char* Cb, unsigned char* Cr,
unsigned char* RGB,
unsigned int x_scale, unsigned int y_scale,
unsigned int Y_width, unsigned int Y_height,
unsigned int Y_stride,
unsigned int C_width, unsigned int C_stride) {
// Uses a thread per pixel from Cb/Cr, which could correspond to
// 1, 2 or 4 pixels from Y. Y and RGB have the same pixel dimensions, but,
// unlike RGB, Y will probably have a stride different to its width. Also
// RGB has 3 chars per pixel, whilst Y has 1.
int i = blockIdx.x * blockDim.x + threadIdx.x;
int C_x = i % C_width;
int C_y = i / C_width;
i = C_y * C_stride + C_x;
int cb = Cb[i] - 128, cr = Cr[i] - 128;
int Y_x = C_x << x_scale;
int Y_y = C_y << y_scale;
RGB += (Y_y * Y_width + Y_x) * 3;
Y += Y_y * Y_stride + Y_x;
int y_steps = 1 << y_scale;//min(1 << y_scale, Y_height - Y_y);
int x_steps = 3 << x_scale;//min(3 << x_scale, Y_width - Y_x);
for(int y_step = 0; y_step < y_steps; y_step++) {
for(int x_step = 0; x_step < x_steps;) {
int y = (*Y++) << 8;
RGB[x_step++] = clip((y + 359 * cr + 128) >> 8); // R
RGB[x_step++] = clip((y - 88 * cb - 183 * cr + 128) >> 8); // G
RGB[x_step++] = clip((y + 454 * cb + 128) >> 8); // B
}
RGB += Y_width * 3;
Y += Y_stride - (x_steps/3);
}
}
__host__ void upsampleChannelGPU(JPGReader* jpg, ColourChannel* channel) {
if ((channel->width < jpg->width) || (channel->height < jpg->height)) {
// Do an upscale //
unsigned int xshift = 0, yshift = 0;
unsigned int in_width = channel->width, in_height = channel->height;
while (channel->width < jpg->width) { channel->width <<= 1; ++xshift; }
while (channel->height < jpg->height) { channel->height <<= 1; ++yshift; }
/*int threads_per_block = 128;
int num_blocks = (channel->width * channel->height) / threads_per_block;
upsampleChannelGPU_cokernel<<<num_blocks, threads_per_block>>>(channel->device_raw_pixels.mem,
channel->device_pixels.mem,
in_width,
channel->stride,
xshift,
yshift);
if (cudaGetLastError() != cudaSuccess) THROW(CUDA_KERNEL_LAUNCH_ERROR);*/
int threads_per_block = 64;
int num_blocks = (in_width * in_height) / threads_per_block;
upsampleChannelGPU_kernel<<<num_blocks, threads_per_block>>>(channel->device_raw_pixels.mem,
channel->device_pixels.mem,
channel->stride,
channel->width,
xshift,
yshift);
if (cudaGetLastError() != cudaSuccess) THROW(CUDA_KERNEL_LAUNCH_ERROR);
channel->stride = channel->width;
cudaMemcpy(channel->pixels.mem, channel->device_pixels.mem,
channel->pixels.size, cudaMemcpyDeviceToHost);
} else {
cudaMemcpy(channel->pixels.mem, channel->device_raw_pixels.mem,
channel->pixels.size, cudaMemcpyDeviceToHost);
}
}
__host__ void upsampleChannelCPU(JPGReader* jpg, ColourChannel* channel) {
if ((channel->width < jpg->width) || (channel->height < jpg->height)) {
// Do an upscale //
int x, y, xshift = 0, yshift = 0;
while (channel->width < jpg->width) { channel->width <<= 1; ++xshift; }
while (channel->height < jpg->height) { channel->height <<= 1; ++yshift; }
unsigned char *lout, *out = channel->pixels.mem;
for (y = 0, lout = out; y < channel->height; ++y, lout += channel->width) {
unsigned char *lin = &channel->raw_pixels.mem[(y >> yshift) * channel->stride];
for (x = 0; x < channel->width; ++x)
lout[x] = lin[x >> xshift];
}
channel->stride = channel->width;
} else {
// Do a pointer swap, assume the compiler will make it nicer //
unsigned char* tmp1 = channel->pixels.mem;
channel->pixels.mem = channel->raw_pixels.mem;
channel->raw_pixels.mem = tmp1;
unsigned int tmp2 = channel->pixels.size;
channel->pixels.size = channel->raw_pixels.size;
channel->raw_pixels.size = tmp2;
tmp2 = channel->pixels.max_size;
channel->pixels.max_size = channel->raw_pixels.max_size;
channel->raw_pixels.max_size = tmp2;
}
}
__host__ void upsampleAndColourTransformGPU(JPGReader* jpg) {
clock_t start_time = clock();
if (jpg->num_channels == 3) {
unsigned int xshift = 0, yshift = 0;
while ((jpg->channels[1].width << xshift) < jpg->width) ++xshift;
while ((jpg->channels[1].height << yshift) < jpg->height) ++yshift;
ColourChannel *channels = jpg->channels;
int tpb = 64; // threads per block
int num_blocks = ((channels[1].width * channels[1].height) + tpb-1) / tpb;
upsampleAndYUVtoRGB_kernel<<<num_blocks, tpb>>>(channels[0].device_raw_pixels.mem,
channels[1].device_raw_pixels.mem,
channels[2].device_raw_pixels.mem,
jpg->device_pixels.mem,
xshift, yshift,
channels[0].width, channels[0].height,
channels[0].stride,
channels[1].width, channels[1].stride);
if (cudaGetLastError() != cudaSuccess) THROW(CUDA_KERNEL_LAUNCH_ERROR);
cudaMemcpy(jpg->pixels, jpg->device_pixels.mem,
jpg->device_pixels.size, cudaMemcpyDeviceToHost);
} else {
ColourChannel* c = &jpg->channels[0];
cudaMemcpy2D(c->pixels.mem, jpg->width,
c->device_raw_pixels.mem, c->stride,
c->width, c->height,
cudaMemcpyDeviceToHost);
}
//cudaDeviceSynchronize();
clock_t end_time = clock();
//jpg->time += end_time - start_time;
}
__host__ void iDCT_resample_colourTransform(JPGReader* jpg) {
clock_t start_time = clock();
int i;
ColourChannel *channel;
// Do the iDCT for each channel //
for (i = 0, channel = jpg->channels; i < jpg->num_channels; i++, channel++) {
cudaMemcpy(channel->device_working_space.mem, channel->working_space.mem,
channel->working_space.size * sizeof(int), cudaMemcpyHostToDevice);
int num_blocks =
jpg->num_blocks_x * channel->samples_x * jpg->num_blocks_y * channel->samples_y;
int num_thread_blocks = (num_blocks + 7) >> 3;
int num_threads_per_block = 64;
iDCT_GPU<<<num_thread_blocks, num_threads_per_block>>>(channel->device_working_space.mem,
channel->device_raw_pixels.mem,
channel->stride,
channel->samples_x,
channel->samples_y,
num_blocks);
if (cudaGetLastError() != cudaSuccess) THROW(CUDA_KERNEL_LAUNCH_ERROR);
}
// Do the colourspace transform //
if (jpg->num_channels == 3) {
unsigned int xshift = 0, yshift = 0;
while ((jpg->channels[1].width << xshift) < jpg->width) ++xshift;
while ((jpg->channels[1].height << yshift) < jpg->height) ++yshift;
ColourChannel *channels = jpg->channels;
int tpb = 64; // threads per block
int num_blocks = ((channels[1].width * channels[1].height) + tpb-1) / tpb;
upsampleAndYUVtoRGB_kernel<<<num_blocks, tpb>>>(channels[0].device_raw_pixels.mem,
channels[1].device_raw_pixels.mem,
channels[2].device_raw_pixels.mem,
jpg->device_pixels.mem,
xshift, yshift,
channels[0].width, channels[0].height,
channels[0].stride,
channels[1].width, channels[1].stride);
if (cudaGetLastError() != cudaSuccess) THROW(CUDA_KERNEL_LAUNCH_ERROR);
cudaMemcpy(jpg->pixels, jpg->device_pixels.mem,
jpg->device_pixels.size, cudaMemcpyDeviceToHost);
} else {
ColourChannel* c = &jpg->channels[0];
cudaMemcpy2D(c->pixels.mem, jpg->width,
c->device_raw_pixels.mem, c->stride,
c->width, c->height,
cudaMemcpyDeviceToHost);
}
//cudaDeviceSynchronize();
clock_t end_time = clock();
//jpg->time += end_time - start_time;
}
__host__ void upsampleAndColourTransformHybrid(JPGReader* jpg) {
int i;
ColourChannel* channel;
clock_t start_time = clock();
for (i = 0, channel = &jpg->channels[0]; i < jpg->num_channels; ++i, ++channel) {
//if ((channel->width < jpg->width) || (channel->height < jpg->height))
upsampleChannelGPU(jpg, channel);
if ((channel->width < jpg->width) || (channel->height < jpg->height)){
fprintf(stderr, "Logical error in upscale?\n");
THROW(SYNTAX_ERROR);
}
}
if (jpg->num_channels == 3) {
// convert to RGB //
unsigned char *prgb = jpg->pixels;
const unsigned char *py = jpg->channels[0].pixels.mem;
const unsigned char *pcb = jpg->channels[1].pixels.mem;
const unsigned char *pcr = jpg->channels[2].pixels.mem;
for (int yy = jpg->height; yy; --yy) {
for (int x = 0; x < jpg->width; ++x) {
register int y = py[x] << 8;
register int cb = pcb[x] - 128;
register int cr = pcr[x] - 128;
*prgb++ = clipHost((y + 359 * cr + 128) >> 8);
*prgb++ = clipHost((y - 88 * cb - 183 * cr + 128) >> 8);
*prgb++ = clipHost((y + 454 * cb + 128) >> 8);
}
py += jpg->channels[0].stride;
pcb += jpg->channels[1].stride;
pcr += jpg->channels[2].stride;
}
} else if (jpg->channels[0].width != jpg->channels[0].stride) {
// grayscale -> only remove stride
ColourChannel *channel = &jpg->channels[0];
unsigned char *pin = &channel->pixels.mem[channel->stride];
unsigned char *pout = &channel->pixels.mem[channel->width];
for (int y = channel->height - 1; y; --y) {
memcpy(pout, pin, channel->width);
pin += channel->stride;
pout += channel->width;
}
channel->stride = channel->width;
}
//cudaDeviceSynchronize();
clock_t end_time = clock();
//jpg->time += end_time - start_time;
}
__host__ void upsampleAndColourTransformCPU(JPGReader* jpg) {
int i;
ColourChannel* channel;
for (i = 0, channel = &jpg->channels[0]; i < jpg->num_channels; ++i, ++channel) {
//if ((channel->width < jpg->width) || (channel->height < jpg->height))
upsampleChannelCPU(jpg, channel);
if ((channel->width < jpg->width) || (channel->height < jpg->height)){
fprintf(stderr, "Logical error in upscale?\n");
THROW(SYNTAX_ERROR);
}
}
if (jpg->num_channels == 3) {
// convert to RGB //
unsigned char *prgb = jpg->pixels;
const unsigned char *py = jpg->channels[0].pixels.mem;
const unsigned char *pcb = jpg->channels[1].pixels.mem;
const unsigned char *pcr = jpg->channels[2].pixels.mem;
for (int yy = jpg->height; yy; --yy) {
for (int x = 0; x < jpg->width; ++x) {
register int y = py[x] << 8;
register int cb = pcb[x] - 128;
register int cr = pcr[x] - 128;
*prgb++ = clipHost((y + 359 * cr + 128) >> 8);
*prgb++ = clipHost((y - 88 * cb - 183 * cr + 128) >> 8);
*prgb++ = clipHost((y + 454 * cb + 128) >> 8);
}
py += jpg->channels[0].stride;
pcb += jpg->channels[1].stride;
pcr += jpg->channels[2].stride;
}
} else if (jpg->channels[0].width != jpg->channels[0].stride) {
// grayscale -> only remove stride
ColourChannel *channel = &jpg->channels[0];
unsigned char *pin = &channel->pixels.mem[channel->stride];
unsigned char *pout = &channel->pixels.mem[channel->width];
for (int y = channel->height - 1; y; --y) {
memcpy(pout, pin, channel->width);
pin += channel->stride;
pout += channel->width;
}
channel->stride = channel->width;
}
}
|
1954ed2c051f3eae01d7bd1a738d377fa05e34e9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2011, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "device.hpp"
#include <iostream>
//#include <boost/graph/buffer_concepts.hpp>
namespace pcl
{
namespace device
{
namespace kinfuLS
{
////////////////////////////////////////////////////////////////////////////////////////
///// Full Volume Scan6
enum
{
CTA_SIZE_X = 32,
CTA_SIZE_Y = 6,
CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y,
MAX_LOCAL_POINTS = 3
};
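// CTA_SIZE is 192 threads per block; MAX_LOCAL_POINTS is 3 because a single voxel can
// contribute at most one zero-crossing point along each of the +x, +y and +z directions.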
__device__ int global_count = 0;
__device__ int output_xyz_count = 0; // *************************************************
__device__ unsigned int blocks_done = 0;
__shared__ float storage_X[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Y[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Z[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_I[CTA_SIZE * MAX_LOCAL_POINTS];
struct FullScan6
{
PtrStep<short2> volume;
float3 cell_size;
mutable PtrSz<PointType> output;
mutable PtrSz<PointType> output_xyz;
mutable PtrSz<float> output_intensity;
__device__ __forceinline__ float
fetch (pcl::gpu::kinfuLS::tsdf_buffer buffer, int x, int y, int z, int& weight) const
{
float tsdf;
const short2* tmp_pos = &(volume.ptr (buffer.voxels_size.y * z + y)[x]);
short2* pos = const_cast<short2*> (tmp_pos);
shift_tsdf_pointer (&pos, buffer);
unpack_tsdf (*pos, tsdf, weight);
return tsdf;
}
__device__ __forceinline__ float
fetch (int x, int y, int z, int& weight) const
{
float tsdf;
unpack_tsdf (volume.ptr (VOLUME_Y * z + y)[x], tsdf, weight);
return tsdf;
}
__device__ __forceinline__ void
operator () () const
{
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
#if CUDART_VERSION >= 9000
if (__all_sync (__activemask (), x >= VOLUME_X)
|| __all_sync (__activemask (), y >= VOLUME_Y))
return;
#else
if (__all (x >= VOLUME_X) || __all (y >= VOLUME_Y))
return;
#endif
float3 V;
V.x = (x + 0.5f) * cell_size.x;
V.y = (y + 0.5f) * cell_size.y;
int ftid = Block::flattenedThreadId ();
for (int z = 0; z < VOLUME_Z - 1; ++z)
{
float3 points[MAX_LOCAL_POINTS];
int local_count = 0;
if (x < VOLUME_X && y < VOLUME_Y)
{
int W;
float F = fetch (x, y, z, W);
if (W != 0 && F != 1.f)
{
V.z = (z + 0.5f) * cell_size.z;
// process dx
if (x + 1 < VOLUME_X)
{
int Wn;
float Fn = fetch (x + 1, y, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ( (F > 0 && Fn < 0) || (F < 0 && Fn > 0) )
{
float3 p;
p.y = V.y;
p.z = V.z;
float Vnx = V.x + cell_size.x;
float d_inv = 1.f / (std::abs (F) + std::abs (Fn));
p.x = (V.x * std::abs (Fn) + Vnx * std::abs (F)) * d_inv;
points[local_count++] = p;
}
} /* if (x + 1 < VOLUME_X) */
// process dy
if (y + 1 < VOLUME_Y)
{
int Wn;
float Fn = fetch (x, y + 1, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ( (F > 0 && Fn < 0) || (F < 0 && Fn > 0) )
{
float3 p;
p.x = V.x;
p.z = V.z;
float Vny = V.y + cell_size.y;
float d_inv = 1.f / (std::abs (F) + std::abs (Fn));
p.y = (V.y * std::abs (Fn) + Vny * std::abs (F)) * d_inv;
points[local_count++] = p;
}
} /* if (y + 1 < VOLUME_Y) */
// process dz
// if (z + 1 < VOLUME_Z) // guaranteed by loop
{
int Wn;
float Fn = fetch (x, y, z + 1, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.y = V.y;
float Vnz = V.z + cell_size.z;
float d_inv = 1.f / (std::abs (F) + std::abs (Fn));
p.z = (V.z * std::abs (Fn) + Vnz * std::abs (F)) * d_inv;
points[local_count++] = p;
}
}/* if (z + 1 < VOLUME_Z) */
}/* if (W != 0 && F != 1.f) */
}/* if (x < VOLUME_X && y < VOLUME_Y) */
#if CUDART_VERSION >= 9000
int total_warp = __popc (__ballot_sync (__activemask (), local_count > 0))
+ __popc (__ballot_sync (__activemask (), local_count > 1))
+ __popc (__ballot_sync (__activemask (), local_count > 2));
#else
// note: we filled the points array during the current iteration
int total_warp = __popc (__ballot (local_count > 0)) + __popc (__ballot (local_count > 1)) + __popc (__ballot (local_count > 2));
#endif
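// total_warp is the number of surface points found by this warp for the current z slice.
// The block below compacts them: an exclusive warp scan of local_count gives each lane its
// offset into the warp's shared staging area, one atomicAdd per warp reserves a contiguous
// range of the global output buffer, and the staged points are then written out.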
if (total_warp > 0)
{
int lane = Warp::laneId ();
int storage_index = (ftid >> Warp::LOG_WARP_SIZE) * Warp::WARP_SIZE * MAX_LOCAL_POINTS;
volatile int* cta_buffer = (int*)(storage_X + storage_index);
cta_buffer[lane] = local_count;
int offset = scan_warp<exclusive>(cta_buffer, lane);
if (lane == 0)
{
int old_global_count = atomicAdd (&global_count, total_warp);
cta_buffer[0] = old_global_count;
}
int old_global_count = cta_buffer[0];
for (int l = 0; l < local_count; ++l)
{
storage_X[storage_index + offset + l] = points[l].x;
storage_Y[storage_index + offset + l] = points[l].y;
storage_Z[storage_index + offset + l] = points[l].z;
}
int offset_storage = old_global_count + lane;
for (int idx = lane; idx < total_warp; idx += Warp::STRIDE, offset_storage += Warp::STRIDE)
{
if (offset_storage >= output_xyz.size) break;
float x = storage_X[storage_index + idx];
float y = storage_Y[storage_index + idx];
float z = storage_Z[storage_index + idx];
store_point_type (x, y, z, output_xyz.data, offset_storage);
}
bool full = (old_global_count + total_warp) >= output_xyz.size;
if (full)
break;
}
}/* for(int z = 0; z < VOLUME_Z - 1; ++z) */
///////////////////////////
// Prepare for future scans
if (ftid == 0)
{
unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z;
unsigned int value = atomicInc (&blocks_done, total_blocks);
// Last block
if (value == total_blocks - 1)
{
output_xyz_count = min ((int)output_xyz.size, global_count);
blocks_done = 0;
global_count = 0;
}
}
} /* operator() */
// OPERATOR USED BY EXTRACT_SLICE_AS_CLOUD.
// This operator extracts the cloud as TSDF values and X,Y,Z indices.
// The previous operator generates a regular point cloud in meters.
// This one generates a TSDF Point Cloud in grid indices.
__device__ __forceinline__ void
operator () (pcl::gpu::kinfuLS::tsdf_buffer buffer, int3 minBounds, int3 maxBounds) const
{
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
int ftid = Block::flattenedThreadId ();
int minimum_Z = 0;
int maximum_Z = VOLUME_Z - 1;
for (int z = minimum_Z; z < maximum_Z; ++z)
{
// The black zone is the name given to the subvolume within the TSDF Volume grid that is shifted out.
// In other words, the set of points in the TSDF grid that we want to extract in order to add it to the world model being built in CPU.
bool in_black_zone = ( (x >= minBounds.x && x <= maxBounds.x) || (y >= minBounds.y && y <= maxBounds.y) || ( z >= minBounds.z && z <= maxBounds.z) ) ;
float4 points[MAX_LOCAL_POINTS];
int local_count = 0;
if (x < buffer.voxels_size.x && y < buffer.voxels_size.y && in_black_zone)
{
int W;
float F = fetch (buffer, x, y, z, W);
if (W != 0.0f && F != 1.f && F < 0.98 && F != 0.0f && F > -1.0f)
{
float4 p;
p.x = x;
p.y = y;
p.z = z;
p.w = F;
points[local_count++] = p;
}
}/* if (x < VOLUME_X && y < VOLUME_Y) */
// local_count counts the number of zero crossings for the current thread. Now we need to merge this knowledge with the other threads
// note: we filled the points array during the current iteration
#if CUDART_VERSION >= 9000
int total_warp = __popc (__ballot_sync (__activemask (), local_count > 0))
+ __popc (__ballot_sync (__activemask (), local_count > 1))
+ __popc (__ballot_sync (__activemask (), local_count > 2));
#else
int total_warp = __popc (__ballot (local_count > 0))
+ __popc (__ballot (local_count > 1))
+ __popc (__ballot (local_count > 2));
#endif
if (total_warp > 0) ///more than 0 zero-crossings
{
int lane = Warp::laneId (); ///index of thread within warp [0-31]
int storage_index = (ftid >> Warp::LOG_WARP_SIZE) * Warp::WARP_SIZE * MAX_LOCAL_POINTS;
// Pointer to the beginning of the current warp buffer
volatile int* cta_buffer = (int*)(storage_X + storage_index);
// Compute offset of current warp
// Call in place scanning (see http://http.developer.nvidia.com/GPUGems3/gpugems3_ch39.html)
cta_buffer[lane] = local_count;
int offset = scan_warp<exclusive>(cta_buffer, lane); //How many crossings did we have before index "lane" ?
// We want to do only 1 operation per warp (not thread) -> because it is faster
if (lane == 0)
{
int old_global_count = atomicAdd (&global_count, total_warp); ///We use atomicAdd, so that threads do not collide
cta_buffer[0] = old_global_count;
}
int old_global_count = cta_buffer[0];
// Perform compaction (dump all current crossings)
for (int l = 0; l < local_count; ++l)
{
storage_X[storage_index + offset + l] = points[l].x;// x coordinates of the points we found in STORAGE_X
storage_Y[storage_index + offset + l] = points[l].y;// y coordinates of the points we found in STORAGE_Y
storage_Z[storage_index + offset + l] = points[l].z;// z coordinates of the points we found in STORAGE_Z
storage_I[storage_index + offset + l] = points[l].w;// Intensity values of the points we found in STORAGE_I
}
// Retrieve Zero-crossings as 3D points
int offset_storage = old_global_count + lane;
for (int idx = lane; idx < total_warp; idx += Warp::STRIDE, offset_storage += Warp::STRIDE)
{
if (offset_storage >= output_xyz.size) break;
float x = storage_X[storage_index + idx];
float y = storage_Y[storage_index + idx];
float z = storage_Z[storage_index + idx];
float i = storage_I[storage_index + idx];
store_point_intensity (x, y, z, i, output_xyz.data, output_intensity.data, offset_storage);
}
// Sanity check to make sure our output_xyz buffer is not full already
bool full = (old_global_count + total_warp) >= output_xyz.size;
if (full)
break;
}
} /* for(int z = 0; z < VOLUME_Z - 1; ++z) */
///////////////////////////
// Prepare for future scans
if (ftid == 0)
{
unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z;
unsigned int value = atomicInc (&blocks_done, total_blocks);
// Last block
if (value == total_blocks - 1)
{
output_xyz_count = min ((int)output_xyz.size, global_count);
blocks_done = 0;
global_count = 0;
}
}
} /* operator() */
__device__ __forceinline__ void
store_point_type (float x, float y, float z, float4* ptr, int offset) const
{
*(ptr + offset) = make_float4 (x, y, z, 0);
}
//INLINE FUNCTION THAT STORES XYZ AND INTENSITY VALUES IN 2 SEPARATE DeviceArrays.
// ptr_xyz: pointer to the BEGINNING of the XYZ deviceArray
// ptr_intensity: pointer to the BEGINNING of the Intensity deviceArray
// offset: offset to apply to both XYZ and Intensity
__device__ __forceinline__ void
store_point_intensity (float x, float y, float z, float i, float4* ptr_xyz, float* ptr_intensity, int offset) const
{
*(ptr_xyz + offset) = make_float4 (x, y, z, 0);
*(ptr_intensity + offset) = i;
}
__device__ __forceinline__ void
store_point_type (float x, float y, float z, float3* ptr, int offset) const
{
*(ptr + offset) = make_float3 (x, y, z);
}
};
__global__ void
extractKernel (const FullScan6 fs)
{
fs ();
}
__global__ void
extractSliceKernel (const FullScan6 fs, pcl::gpu::kinfuLS::tsdf_buffer buffer, int3 minBounds, int3 maxBounds)
{
fs (buffer, minBounds, maxBounds);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
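// Illustrative host-side usage of the extractCloud function defined below (a sketch only:
// the buffer name and size are assumptions, and it relies on pcl::gpu::DeviceArray<PointType>
// being convertible to PtrSz<PointType>):
// DeviceArray<PointType> cloud_buffer (max_extracted_points);
// size_t valid = extractCloud (volume, volume_size, cloud_buffer);
// // only the first 'valid' entries of cloud_buffer contain extracted points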
size_t
extractCloud (const PtrStep<short2>& volume, const float3& volume_size, PtrSz<PointType> output_xyz)
{
FullScan6 fs;
fs.volume = volume;
fs.cell_size.x = volume_size.x / VOLUME_X;
fs.cell_size.y = volume_size.y / VOLUME_Y;
fs.cell_size.z = volume_size.z / VOLUME_Z;
fs.output_xyz = output_xyz;
dim3 block (CTA_SIZE_X, CTA_SIZE_Y);
dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y));
hipLaunchKernelGGL(( extractKernel), dim3(grid), dim3(block), 0, 0, fs);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall ( hipDeviceSynchronize () );
int size;
cudaSafeCall ( hipMemcpyFromSymbol (&size, HIP_SYMBOL (output_xyz_count), sizeof (size)) );
// cudaSafeCall ( hipMemcpyFromSymbol (&size, "output_xyz_count", sizeof (size)) );
return ((size_t)size);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
size_t
extractSliceAsCloud (const PtrStep<short2>& volume, const float3& volume_size, const pcl::gpu::kinfuLS::tsdf_buffer* buffer,
const int shiftX, const int shiftY, const int shiftZ,
PtrSz<PointType> output_xyz, PtrSz<float> output_intensities)
{
FullScan6 fs;
fs.volume = volume;
fs.cell_size.x = volume_size.x / buffer->voxels_size.x;
fs.cell_size.y = volume_size.y / buffer->voxels_size.y;
fs.cell_size.z = volume_size.z / buffer->voxels_size.z;
fs.output_xyz = output_xyz;
fs.output_intensity = output_intensities;
dim3 block (CTA_SIZE_X, CTA_SIZE_Y);
dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y));
//Compute slice bounds
int newX = buffer->origin_GRID.x + shiftX;
int newY = buffer->origin_GRID.y + shiftY;
int newZ = buffer->origin_GRID.z + shiftZ;
int3 minBounds, maxBounds;
//X
if (newX >= 0)
{
minBounds.x = buffer->origin_GRID.x;
maxBounds.x = newX;
}
else
{
minBounds.x = newX + buffer->voxels_size.x - 1;
maxBounds.x = buffer->origin_GRID.x + buffer->voxels_size.x - 1;
}
if (minBounds.x > maxBounds.x)
std::swap (minBounds.x, maxBounds.x);
//Y
if (newY >= 0)
{
minBounds.y = buffer->origin_GRID.y;
maxBounds.y = newY;
}
else
{
minBounds.y = newY + buffer->voxels_size.y - 1;
maxBounds.y = buffer->origin_GRID.y + buffer->voxels_size.y - 1;
}
if(minBounds.y > maxBounds.y)
std::swap (minBounds.y, maxBounds.y);
//Z
if (newZ >= 0)
{
minBounds.z = buffer->origin_GRID.z;
maxBounds.z = newZ;
}
else
{
minBounds.z = newZ + buffer->voxels_size.z - 1;
maxBounds.z = buffer->origin_GRID.z + buffer->voxels_size.z - 1;
}
if (minBounds.z > maxBounds.z)
std::swap(minBounds.z, maxBounds.z);
minBounds.x -= buffer->origin_GRID.x;
maxBounds.x -= buffer->origin_GRID.x;
minBounds.y -= buffer->origin_GRID.y;
maxBounds.y -= buffer->origin_GRID.y;
minBounds.z -= buffer->origin_GRID.z;
maxBounds.z -= buffer->origin_GRID.z;
if (minBounds.x < 0) // We are shifting Left
{
minBounds.x += buffer->voxels_size.x;
maxBounds.x += (buffer->voxels_size.x);
}
if (minBounds.y < 0) // We are shifting up
{
minBounds.y += buffer->voxels_size.y;
maxBounds.y += (buffer->voxels_size.y);
}
if (minBounds.z < 0) // We are shifting back
{
minBounds.z += buffer->voxels_size.z;
maxBounds.z += buffer->voxels_size.z;
}
// Extraction call
hipLaunchKernelGGL(( extractSliceKernel), dim3(grid), dim3(block), 0, 0, fs, *buffer, minBounds, maxBounds);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall ( hipDeviceSynchronize () );
int size;
cudaSafeCall ( hipMemcpyFromSymbol (&size, HIP_SYMBOL (output_xyz_count), sizeof(size)) );
return (size_t)size;
}
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
namespace pcl
{
namespace device
{
namespace kinfuLS
{
template<typename NormalType>
struct ExtractNormals
{
float3 cell_size;
PtrStep<short2> volume;
PtrSz<PointType> points;
mutable NormalType* output;
__device__ __forceinline__ float
readTsdf (int x, int y, int z) const
{
return unpack_tsdf (volume.ptr (VOLUME_Y * z + y)[x]);
}
__device__ __forceinline__ float3
fetchPoint (int idx) const
{
PointType p = points.data[idx];
return make_float3 (p.x, p.y, p.z);
}
__device__ __forceinline__ void
storeNormal (int idx, float3 normal) const
{
NormalType n;
n.x = normal.x; n.y = normal.y; n.z = normal.z;
output[idx] = n;
}
__device__ __forceinline__ int3
getVoxel (const float3& point) const
{
int vx = __float2int_rd (point.x / cell_size.x); // round to negative infinity
int vy = __float2int_rd (point.y / cell_size.y);
int vz = __float2int_rd (point.z / cell_size.z);
return make_int3 (vx, vy, vz);
}
__device__ __forceinline__ void
operator () () const
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= points.size)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
float3 n = make_float3 (qnan, qnan, qnan);
float3 point = fetchPoint (idx);
int3 g = getVoxel (point);
if (g.x > 1 && g.y > 1 && g.z > 1 && g.x < VOLUME_X - 2 && g.y < VOLUME_Y - 2 && g.z < VOLUME_Z - 2)
{
float3 t;
t = point;
t.x += cell_size.x;
float Fx1 = interpolateTrilineary (t);
t = point;
t.x -= cell_size.x;
float Fx2 = interpolateTrilineary (t);
n.x = (Fx1 - Fx2);
t = point;
t.y += cell_size.y;
float Fy1 = interpolateTrilineary (t);
t = point;
t.y -= cell_size.y;
float Fy2 = interpolateTrilineary (t);
n.y = (Fy1 - Fy2);
t = point;
t.z += cell_size.z;
float Fz1 = interpolateTrilineary (t);
t = point;
t.z -= cell_size.z;
float Fz2 = interpolateTrilineary (t);
n.z = (Fz1 - Fz2);
n = normalized (n);
}
storeNormal (idx, n);
}
__device__ __forceinline__ float
interpolateTrilineary (const float3& point) const
{
int3 g = getVoxel (point);
/*
//OLD CODE
float vx = (g.x + 0.5f) * cell_size.x;
float vy = (g.y + 0.5f) * cell_size.y;
float vz = (g.z + 0.5f) * cell_size.z;
if (point.x < vx) g.x--;
if (point.y < vy) g.y--;
if (point.z < vz) g.z--;
//float a = (point.x - (g.x + 0.5f) * cell_size.x) / cell_size.x;
//float b = (point.y - (g.y + 0.5f) * cell_size.y) / cell_size.y;
//float c = (point.z - (g.z + 0.5f) * cell_size.z) / cell_size.z;
float a = point.x/ cell_size.x - (g.x + 0.5f);
float b = point.y/ cell_size.y - (g.y + 0.5f);
float c = point.z/ cell_size.z - (g.z + 0.5f);
*/
//NEW CODE
float a = point.x/ cell_size.x - (g.x + 0.5f); if (a<0) { g.x--; a+=1.0f; };
float b = point.y/ cell_size.y - (g.y + 0.5f); if (b<0) { g.y--; b+=1.0f; };
float c = point.z/ cell_size.z - (g.z + 0.5f); if (c<0) { g.z--; c+=1.0f; };
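// a, b, c are the fractional offsets (in [0,1)) of 'point' inside the cube spanned by the
// 8 nearest voxel centres; the value below is the standard trilinear blend, weighting each
// corner TSDF sample by (1-a or a) * (1-b or b) * (1-c or c).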
float res = (1 - a) * (
(1 - b) * ( readTsdf (g.x + 0, g.y + 0, g.z + 0) * (1 - c) +
readTsdf (g.x + 0, g.y + 0, g.z + 1) * c )
+ b * ( readTsdf (g.x + 0, g.y + 1, g.z + 0) * (1 - c) +
readTsdf (g.x + 0, g.y + 1, g.z + 1) * c )
) + a * (
(1 - b) * ( readTsdf (g.x + 1, g.y + 0, g.z + 0) * (1 - c) +
readTsdf (g.x + 1, g.y + 0, g.z + 1) * c )
+ b * ( readTsdf (g.x + 1, g.y + 1, g.z + 0) * (1 - c) +
readTsdf (g.x + 1, g.y + 1, g.z + 1) * c )
);
return res;
}
};
template<typename NormalType>
__global__ void
extractNormalsKernel (const ExtractNormals<NormalType> en) {
en ();
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template<typename NormalType> void
extractNormals (const PtrStep<short2>& volume, const float3& volume_size,
const PtrSz<PointType>& points, NormalType* output)
{
ExtractNormals<NormalType> en;
en.volume = volume;
en.cell_size.x = volume_size.x / VOLUME_X;
en.cell_size.y = volume_size.y / VOLUME_Y;
en.cell_size.z = volume_size.z / VOLUME_Z;
en.points = points;
en.output = output;
dim3 block (256);
dim3 grid (divUp (points.size, block.x));
hipLaunchKernelGGL(( extractNormalsKernel), dim3(grid), dim3(block), 0, 0, en);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall (hipDeviceSynchronize ());
}
template void extractNormals<PointType>(const PtrStep<short2>&volume, const float3 &volume_size, const PtrSz<PointType>&input, PointType * output);
template void extractNormals<float8>(const PtrStep<short2>&volume, const float3 &volume_size, const PtrSz<PointType>&input, float8 * output);
}
}
}
| 1954ed2c051f3eae01d7bd1a738d377fa05e34e9.cu | /*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2011, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "device.hpp"
#include <iostream>
//#include <boost/graph/buffer_concepts.hpp>
namespace pcl
{
namespace device
{
namespace kinfuLS
{
////////////////////////////////////////////////////////////////////////////////////////
///// Full Volume Scan6
enum
{
CTA_SIZE_X = 32,
CTA_SIZE_Y = 6,
CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y,
MAX_LOCAL_POINTS = 3
};
__device__ int global_count = 0;
__device__ int output_xyz_count = 0; // *************************************************
__device__ unsigned int blocks_done = 0;
__shared__ float storage_X[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Y[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Z[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_I[CTA_SIZE * MAX_LOCAL_POINTS];
struct FullScan6
{
PtrStep<short2> volume;
float3 cell_size;
mutable PtrSz<PointType> output;
mutable PtrSz<PointType> output_xyz;
mutable PtrSz<float> output_intensity;
__device__ __forceinline__ float
fetch (pcl::gpu::kinfuLS::tsdf_buffer buffer, int x, int y, int z, int& weight) const
{
float tsdf;
const short2* tmp_pos = &(volume.ptr (buffer.voxels_size.y * z + y)[x]);
short2* pos = const_cast<short2*> (tmp_pos);
shift_tsdf_pointer (&pos, buffer);
unpack_tsdf (*pos, tsdf, weight);
return tsdf;
}
__device__ __forceinline__ float
fetch (int x, int y, int z, int& weight) const
{
float tsdf;
unpack_tsdf (volume.ptr (VOLUME_Y * z + y)[x], tsdf, weight);
return tsdf;
}
__device__ __forceinline__ void
operator () () const
{
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
#if CUDART_VERSION >= 9000
if (__all_sync (__activemask (), x >= VOLUME_X)
|| __all_sync (__activemask (), y >= VOLUME_Y))
return;
#else
if (__all (x >= VOLUME_X) || __all (y >= VOLUME_Y))
return;
#endif
float3 V;
V.x = (x + 0.5f) * cell_size.x;
V.y = (y + 0.5f) * cell_size.y;
int ftid = Block::flattenedThreadId ();
for (int z = 0; z < VOLUME_Z - 1; ++z)
{
float3 points[MAX_LOCAL_POINTS];
int local_count = 0;
if (x < VOLUME_X && y < VOLUME_Y)
{
int W;
float F = fetch (x, y, z, W);
if (W != 0 && F != 1.f)
{
V.z = (z + 0.5f) * cell_size.z;
// process dx
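// If the TSDF changes sign between this voxel and its +x neighbour, the surface crosses the
// segment joining their centres; the crossing is located by linear interpolation,
// p.x = (V.x*|Fn| + Vnx*|F|) / (|F| + |Fn|). The same scheme is used below for +y and +z.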
if (x + 1 < VOLUME_X)
{
int Wn;
float Fn = fetch (x + 1, y, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ( (F > 0 && Fn < 0) || (F < 0 && Fn > 0) )
{
float3 p;
p.y = V.y;
p.z = V.z;
float Vnx = V.x + cell_size.x;
float d_inv = 1.f / (std::abs (F) + std::abs (Fn));
p.x = (V.x * std::abs (Fn) + Vnx * std::abs (F)) * d_inv;
points[local_count++] = p;
}
} /* if (x + 1 < VOLUME_X) */
// process dy
if (y + 1 < VOLUME_Y)
{
int Wn;
float Fn = fetch (x, y + 1, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ( (F > 0 && Fn < 0) || (F < 0 && Fn > 0) )
{
float3 p;
p.x = V.x;
p.z = V.z;
float Vny = V.y + cell_size.y;
float d_inv = 1.f / (std::abs (F) + std::abs (Fn));
p.y = (V.y * std::abs (Fn) + Vny * std::abs (F)) * d_inv;
points[local_count++] = p;
}
} /* if (y + 1 < VOLUME_Y) */
// process dz
// if (z + 1 < VOLUME_Z) // guaranteed by loop
{
int Wn;
float Fn = fetch (x, y, z + 1, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.y = V.y;
float Vnz = V.z + cell_size.z;
float d_inv = 1.f / (std::abs (F) + std::abs (Fn));
p.z = (V.z * std::abs (Fn) + Vnz * std::abs (F)) * d_inv;
points[local_count++] = p;
}
}/* if (z + 1 < VOLUME_Z) */
}/* if (W != 0 && F != 1.f) */
}/* if (x < VOLUME_X && y < VOLUME_Y) */
#if CUDART_VERSION >= 9000
int total_warp = __popc (__ballot_sync (__activemask (), local_count > 0))
+ __popc (__ballot_sync (__activemask (), local_count > 1))
+ __popc (__ballot_sync (__activemask (), local_count > 2));
#else
// note: the points array has been filled for the current iteration
int total_warp = __popc (__ballot (local_count > 0)) + __popc (__ballot (local_count > 1)) + __popc (__ballot (local_count > 2));
#endif
if (total_warp > 0)
{
int lane = Warp::laneId ();
int storage_index = (ftid >> Warp::LOG_WARP_SIZE) * Warp::WARP_SIZE * MAX_LOCAL_POINTS;
volatile int* cta_buffer = (int*)(storage_X + storage_index);
cta_buffer[lane] = local_count;
int offset = scan_warp<exclusive>(cta_buffer, lane);
if (lane == 0)
{
int old_global_count = atomicAdd (&global_count, total_warp);
cta_buffer[0] = old_global_count;
}
int old_global_count = cta_buffer[0];
for (int l = 0; l < local_count; ++l)
{
storage_X[storage_index + offset + l] = points[l].x;
storage_Y[storage_index + offset + l] = points[l].y;
storage_Z[storage_index + offset + l] = points[l].z;
}
int offset_storage = old_global_count + lane;
for (int idx = lane; idx < total_warp; idx += Warp::STRIDE, offset_storage += Warp::STRIDE)
{
if (offset_storage >= output_xyz.size) break;
float x = storage_X[storage_index + idx];
float y = storage_Y[storage_index + idx];
float z = storage_Z[storage_index + idx];
store_point_type (x, y, z, output_xyz.data, offset_storage);
}
bool full = (old_global_count + total_warp) >= output_xyz.size;
if (full)
break;
}
}/* for(int z = 0; z < VOLUME_Z - 1; ++z) */
///////////////////////////
// Prepare for future scans
if (ftid == 0)
{
unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z;
unsigned int value = atomicInc (&blocks_done, total_blocks);
// Last block
if (value == total_blocks - 1)
{
output_xyz_count = min ((int)output_xyz.size, global_count);
blocks_done = 0;
global_count = 0;
}
}
} /* operator() */
// OPERATOR USED BY EXTRACT_SLICE_AS_CLOUD.
// This operator extracts the cloud as TSDF values and X,Y,Z indices.
// The previous operator generates a regular point cloud in meters.
// This one generates a TSDF Point Cloud in grid indices.
__device__ __forceinline__ void
operator () (pcl::gpu::kinfuLS::tsdf_buffer buffer, int3 minBounds, int3 maxBounds) const
{
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
int ftid = Block::flattenedThreadId ();
int minimum_Z = 0;
int maximum_Z = VOLUME_Z - 1;
for (int z = minimum_Z; z < maximum_Z; ++z)
{
// The black zone is the name given to the subvolume within the TSDF Volume grid that is shifted out.
// In other words, the set of points in the TSDF grid that we want to extract in order to add it to the world model being built in CPU.
bool in_black_zone = ( (x >= minBounds.x && x <= maxBounds.x) || (y >= minBounds.y && y <= maxBounds.y) || ( z >= minBounds.z && z <= maxBounds.z) ) ;
float4 points[MAX_LOCAL_POINTS];
int local_count = 0;
if (x < buffer.voxels_size.x && y < buffer.voxels_size.y && in_black_zone)
{
int W;
float F = fetch (buffer, x, y, z, W);
if (W != 0 && F != 1.f && F < 0.98 && F != 0.0f && F > -1.0f)
{
float4 p;
p.x = x;
p.y = y;
p.z = z;
p.w = F;
points[local_count++] = p;
}
}/* if (x < buffer.voxels_size.x && y < buffer.voxels_size.y && in_black_zone) */
// local_count counts the number of zero crossing for the current thread. Now we need to merge this knowledge with the other threads
// note: the points array has been filled for the current iteration
#if CUDART_VERSION >= 9000
int total_warp = __popc (__ballot_sync (__activemask (), local_count > 0))
+ __popc (__ballot_sync (__activemask (), local_count > 1))
+ __popc (__ballot_sync (__activemask (), local_count > 2));
#else
int total_warp = __popc (__ballot (local_count > 0))
+ __popc (__ballot (local_count > 1))
+ __popc (__ballot (local_count > 2));
#endif
if (total_warp > 0) ///more than 0 zero-crossings
{
int lane = Warp::laneId (); ///index of thread within warp [0-31]
int storage_index = (ftid >> Warp::LOG_WARP_SIZE) * Warp::WARP_SIZE * MAX_LOCAL_POINTS;
// Pointer to the beginning of the current warp buffer
volatile int* cta_buffer = (int*)(storage_X + storage_index);
// Compute offset of current warp
// Call in place scanning (see http://http.developer.nvidia.com/GPUGems3/gpugems3_ch39.html)
cta_buffer[lane] = local_count;
int offset = scan_warp<exclusive>(cta_buffer, lane); //How many crossings did we have before index "lane" ?
// We want to do only 1 operation per warp (not thread) -> because it is faster
if (lane == 0)
{
int old_global_count = atomicAdd (&global_count, total_warp); ///We use atomicAdd, so that threads do not collide
cta_buffer[0] = old_global_count;
}
int old_global_count = cta_buffer[0];
// Perform compaction (dump all current crossings)
for (int l = 0; l < local_count; ++l)
{
storage_X[storage_index + offset + l] = points[l].x;// x coordinates of the points we found in STORAGE_X
storage_Y[storage_index + offset + l] = points[l].y;// y coordinates of the points we found in STORAGE_Y
storage_Z[storage_index + offset + l] = points[l].z;// z coordinates of the points we found in STORAGE_Z
storage_I[storage_index + offset + l] = points[l].w;// Intensity values of the points we found in STORAGE_I
}
// Retrieve Zero-crossings as 3D points
int offset_storage = old_global_count + lane;
for (int idx = lane; idx < total_warp; idx += Warp::STRIDE, offset_storage += Warp::STRIDE)
{
if (offset_storage >= output_xyz.size) break;
float x = storage_X[storage_index + idx];
float y = storage_Y[storage_index + idx];
float z = storage_Z[storage_index + idx];
float i = storage_I[storage_index + idx];
store_point_intensity (x, y, z, i, output_xyz.data, output_intensity.data, offset_storage);
}
// Sanity check to make sure our output_xyz buffer is not full already
bool full = (old_global_count + total_warp) >= output_xyz.size;
if (full)
break;
}
} /* for(int z = 0; z < VOLUME_Z - 1; ++z) */
///////////////////////////
// Prepare for future scans
if (ftid == 0)
{
unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z;
unsigned int value = atomicInc (&blocks_done, total_blocks);
// Last block
if (value == total_blocks - 1)
{
output_xyz_count = min ((int)output_xyz.size, global_count);
blocks_done = 0;
global_count = 0;
}
}
} /* operator() */
__device__ __forceinline__ void
store_point_type (float x, float y, float z, float4* ptr, int offset) const
{
*(ptr + offset) = make_float4 (x, y, z, 0);
}
//INLINE FUNCTION THAT STORES XYZ AND INTENSITY VALUES IN 2 SEPARATE DeviceArrays.
// ptr_xyz: pointer to the BEGINNING of the XYZ deviceArray
// ptr_intensity: pointer to the BEGINNING of the Intensity deviceArray
// offset: offset to apply to both XYZ and Intensity
__device__ __forceinline__ void
store_point_intensity (float x, float y, float z, float i, float4* ptr_xyz, float* ptr_intensity, int offset) const
{
*(ptr_xyz + offset) = make_float4 (x, y, z, 0);
*(ptr_intensity + offset) = i;
}
__device__ __forceinline__ void
store_point_type (float x, float y, float z, float3* ptr, int offset) const
{
*(ptr + offset) = make_float3 (x, y, z);
}
};
__global__ void
extractKernel (const FullScan6 fs)
{
fs ();
}
__global__ void
extractSliceKernel (const FullScan6 fs, pcl::gpu::kinfuLS::tsdf_buffer buffer, int3 minBounds, int3 maxBounds)
{
fs (buffer, minBounds, maxBounds);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
size_t
extractCloud (const PtrStep<short2>& volume, const float3& volume_size, PtrSz<PointType> output_xyz)
{
FullScan6 fs;
fs.volume = volume;
fs.cell_size.x = volume_size.x / VOLUME_X;
fs.cell_size.y = volume_size.y / VOLUME_Y;
fs.cell_size.z = volume_size.z / VOLUME_Z;
fs.output_xyz = output_xyz;
dim3 block (CTA_SIZE_X, CTA_SIZE_Y);
dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y));
extractKernel<<<grid, block>>>(fs);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall ( cudaDeviceSynchronize () );
int size;
cudaSafeCall ( cudaMemcpyFromSymbol (&size, output_xyz_count, sizeof (size)) );
// cudaSafeCall ( cudaMemcpyFromSymbol (&size, "output_xyz_count", sizeof (size)) );
return ((size_t)size);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
size_t
extractSliceAsCloud (const PtrStep<short2>& volume, const float3& volume_size, const pcl::gpu::kinfuLS::tsdf_buffer* buffer,
const int shiftX, const int shiftY, const int shiftZ,
PtrSz<PointType> output_xyz, PtrSz<float> output_intensities)
{
FullScan6 fs;
fs.volume = volume;
fs.cell_size.x = volume_size.x / buffer->voxels_size.x;
fs.cell_size.y = volume_size.y / buffer->voxels_size.y;
fs.cell_size.z = volume_size.z / buffer->voxels_size.z;
fs.output_xyz = output_xyz;
fs.output_intensity = output_intensities;
dim3 block (CTA_SIZE_X, CTA_SIZE_Y);
dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y));
//Compute slice bounds
int newX = buffer->origin_GRID.x + shiftX;
int newY = buffer->origin_GRID.y + shiftY;
int newZ = buffer->origin_GRID.z + shiftZ;
int3 minBounds, maxBounds;
//X
if (newX >= 0)
{
minBounds.x = buffer->origin_GRID.x;
maxBounds.x = newX;
}
else
{
minBounds.x = newX + buffer->voxels_size.x - 1;
maxBounds.x = buffer->origin_GRID.x + buffer->voxels_size.x - 1;
}
if (minBounds.x > maxBounds.x)
std::swap (minBounds.x, maxBounds.x);
//Y
if (newY >= 0)
{
minBounds.y = buffer->origin_GRID.y;
maxBounds.y = newY;
}
else
{
minBounds.y = newY + buffer->voxels_size.y - 1;
maxBounds.y = buffer->origin_GRID.y + buffer->voxels_size.y - 1;
}
if(minBounds.y > maxBounds.y)
std::swap (minBounds.y, maxBounds.y);
//Z
if (newZ >= 0)
{
minBounds.z = buffer->origin_GRID.z;
maxBounds.z = newZ;
}
else
{
minBounds.z = newZ + buffer->voxels_size.z - 1;
maxBounds.z = buffer->origin_GRID.z + buffer->voxels_size.z - 1;
}
if (minBounds.z > maxBounds.z)
std::swap(minBounds.z, maxBounds.z);
minBounds.x -= buffer->origin_GRID.x;
maxBounds.x -= buffer->origin_GRID.x;
minBounds.y -= buffer->origin_GRID.y;
maxBounds.y -= buffer->origin_GRID.y;
minBounds.z -= buffer->origin_GRID.z;
maxBounds.z -= buffer->origin_GRID.z;
if (minBounds.x < 0) // We are shifting Left
{
minBounds.x += buffer->voxels_size.x;
maxBounds.x += (buffer->voxels_size.x);
}
if (minBounds.y < 0) // We are shifting up
{
minBounds.y += buffer->voxels_size.y;
maxBounds.y += (buffer->voxels_size.y);
}
if (minBounds.z < 0) // We are shifting back
{
minBounds.z += buffer->voxels_size.z;
maxBounds.z += buffer->voxels_size.z;
}
// Extraction call
extractSliceKernel<<<grid, block>>>(fs, *buffer, minBounds, maxBounds);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall ( cudaDeviceSynchronize () );
int size;
cudaSafeCall ( cudaMemcpyFromSymbol (&size, output_xyz_count, sizeof(size)) );
return (size_t)size;
}
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
namespace pcl
{
namespace device
{
namespace kinfuLS
{
template<typename NormalType>
struct ExtractNormals
{
float3 cell_size;
PtrStep<short2> volume;
PtrSz<PointType> points;
mutable NormalType* output;
__device__ __forceinline__ float
readTsdf (int x, int y, int z) const
{
return unpack_tsdf (volume.ptr (VOLUME_Y * z + y)[x]);
}
__device__ __forceinline__ float3
fetchPoint (int idx) const
{
PointType p = points.data[idx];
return make_float3 (p.x, p.y, p.z);
}
__device__ __forceinline__ void
storeNormal (int idx, float3 normal) const
{
NormalType n;
n.x = normal.x; n.y = normal.y; n.z = normal.z;
output[idx] = n;
}
__device__ __forceinline__ int3
getVoxel (const float3& point) const
{
int vx = __float2int_rd (point.x / cell_size.x); // round to negative infinity
int vy = __float2int_rd (point.y / cell_size.y);
int vz = __float2int_rd (point.z / cell_size.z);
return make_int3 (vx, vy, vz);
}
__device__ __forceinline__ void
operator () () const
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= points.size)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
float3 n = make_float3 (qnan, qnan, qnan);
float3 point = fetchPoint (idx);
int3 g = getVoxel (point);
if (g.x > 1 && g.y > 1 && g.z > 1 && g.x < VOLUME_X - 2 && g.y < VOLUME_Y - 2 && g.z < VOLUME_Z - 2)
{
float3 t;
t = point;
t.x += cell_size.x;
float Fx1 = interpolateTrilineary (t);
t = point;
t.x -= cell_size.x;
float Fx2 = interpolateTrilineary (t);
n.x = (Fx1 - Fx2);
t = point;
t.y += cell_size.y;
float Fy1 = interpolateTrilineary (t);
t = point;
t.y -= cell_size.y;
float Fy2 = interpolateTrilineary (t);
n.y = (Fy1 - Fy2);
t = point;
t.z += cell_size.z;
float Fz1 = interpolateTrilineary (t);
t = point;
t.z -= cell_size.z;
float Fz2 = interpolateTrilineary (t);
n.z = (Fz1 - Fz2);
n = normalized (n);
}
storeNormal (idx, n);
}
__device__ __forceinline__ float
interpolateTrilineary (const float3& point) const
{
int3 g = getVoxel (point);
/*
//OLD CODE
float vx = (g.x + 0.5f) * cell_size.x;
float vy = (g.y + 0.5f) * cell_size.y;
float vz = (g.z + 0.5f) * cell_size.z;
if (point.x < vx) g.x--;
if (point.y < vy) g.y--;
if (point.z < vz) g.z--;
//float a = (point.x - (g.x + 0.5f) * cell_size.x) / cell_size.x;
//float b = (point.y - (g.y + 0.5f) * cell_size.y) / cell_size.y;
//float c = (point.z - (g.z + 0.5f) * cell_size.z) / cell_size.z;
float a = point.x/ cell_size.x - (g.x + 0.5f);
float b = point.y/ cell_size.y - (g.y + 0.5f);
float c = point.z/ cell_size.z - (g.z + 0.5f);
*/
//NEW CODE
float a = point.x/ cell_size.x - (g.x + 0.5f); if (a<0) { g.x--; a+=1.0f; };
float b = point.y/ cell_size.y - (g.y + 0.5f); if (b<0) { g.y--; b+=1.0f; };
float c = point.z/ cell_size.z - (g.z + 0.5f); if (c<0) { g.z--; c+=1.0f; };
float res = (1 - a) * (
(1 - b) * ( readTsdf (g.x + 0, g.y + 0, g.z + 0) * (1 - c) +
readTsdf (g.x + 0, g.y + 0, g.z + 1) * c )
+ b * ( readTsdf (g.x + 0, g.y + 1, g.z + 0) * (1 - c) +
readTsdf (g.x + 0, g.y + 1, g.z + 1) * c )
) + a * (
(1 - b) * ( readTsdf (g.x + 1, g.y + 0, g.z + 0) * (1 - c) +
readTsdf (g.x + 1, g.y + 0, g.z + 1) * c )
+ b * ( readTsdf (g.x + 1, g.y + 1, g.z + 0) * (1 - c) +
readTsdf (g.x + 1, g.y + 1, g.z + 1) * c )
);
return res;
}
};
template<typename NormalType>
__global__ void
extractNormalsKernel (const ExtractNormals<NormalType> en) {
en ();
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template<typename NormalType> void
extractNormals (const PtrStep<short2>& volume, const float3& volume_size,
const PtrSz<PointType>& points, NormalType* output)
{
ExtractNormals<NormalType> en;
en.volume = volume;
en.cell_size.x = volume_size.x / VOLUME_X;
en.cell_size.y = volume_size.y / VOLUME_Y;
en.cell_size.z = volume_size.z / VOLUME_Z;
en.points = points;
en.output = output;
dim3 block (256);
dim3 grid (divUp (points.size, block.x));
extractNormalsKernel<<<grid, block>>>(en);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
template void extractNormals<PointType>(const PtrStep<short2>&volume, const float3 &volume_size, const PtrSz<PointType>&input, PointType * output);
template void extractNormals<float8>(const PtrStep<short2>&volume, const float3 &volume_size, const PtrSz<PointType>&input, float8 * output);
}
}
}
|
919a640b6c847cd29d0f5be634c1cdba4aef7aa0.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <utilities/high_res_clock.h>
#include <utilities/base_fixture.hpp>
#include <utilities/device_comm_wrapper.hpp>
#include <utilities/test_graphs.hpp>
#include <utilities/test_utilities.hpp>
#include <utilities/thrust_wrapper.hpp>
#include <cugraph/algorithms.hpp>
#include <cugraph/partition_manager.hpp>
#include <cuco/detail/hash_functions.cuh>
#include <cugraph/experimental/graph_view.hpp>
#include <cugraph/prims/count_if_v.cuh>
#include <thrust/count.h>
#include <raft/comms/comms.hpp>
#include <raft/comms/mpi_comms.hpp>
#include <raft/handle.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <gtest/gtest.h>
#include <random>
// do the perf measurements
// enabled by command line parameter '--perf'
//
static int PERF = 0;
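// test_predicate keeps the vertices whose hashed ID falls into bin 0 of 'mod' bins,
// i.e. a pseudo-random subset of roughly 1/mod of all vertices.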
template <typename vertex_t>
struct test_predicate {
int mod{};
test_predicate(int mod_count) : mod(mod_count) {}
__device__ bool operator()(const vertex_t& val)
{
cuco::detail::MurmurHash3_32<vertex_t> hash_func{};
return (0 == (hash_func(val) % mod));
}
};
struct Prims_Usecase {
bool check_correctness{true};
};
template <typename input_usecase_t>
class Tests_MG_CountIfV
: public ::testing::TestWithParam<std::tuple<Prims_Usecase, input_usecase_t>> {
public:
Tests_MG_CountIfV() {}
static void SetupTestCase() {}
static void TearDownTestCase() {}
virtual void SetUp() {}
virtual void TearDown() {}
// Compare the results of count_if_v primitive and thrust count_if on a single GPU
template <typename vertex_t, typename edge_t, typename weight_t, bool store_transposed>
void run_current_test(Prims_Usecase const& prims_usecase, input_usecase_t const& input_usecase)
{
// 1. initialize handle
raft::handle_t handle{};
HighResClock hr_clock{};
raft::comms::initialize_mpi_comms(&handle, MPI_COMM_WORLD);
auto& comm = handle.get_comms();
auto const comm_size = comm.get_size();
auto const comm_rank = comm.get_rank();
auto row_comm_size = static_cast<int>(sqrt(static_cast<double>(comm_size)));
while (comm_size % row_comm_size != 0) {
--row_comm_size;
}
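// row_comm_size is now the largest divisor of comm_size that does not exceed
// floor(sqrt(comm_size)); the communicator is split below into a
// row_comm_size x (comm_size / row_comm_size) 2D grid of subcommunicators.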
cugraph::partition_2d::subcomm_factory_t<cugraph::partition_2d::key_naming_t, vertex_t>
subcomm_factory(handle, row_comm_size);
// 2. create MG graph
if (PERF) {
CUDA_TRY(hipDeviceSynchronize()); // for consistent performance measurement
handle.get_comms().barrier();
hr_clock.start();
}
auto [mg_graph, d_mg_renumber_map_labels] =
input_usecase.template construct_graph<vertex_t, edge_t, weight_t, store_transposed, true>(
handle, true, true);
if (PERF) {
CUDA_TRY(hipDeviceSynchronize()); // for consistent performance measurement
handle.get_comms().barrier();
double elapsed_time{0.0};
hr_clock.stop(&elapsed_time);
std::cout << "MG construct_graph took " << elapsed_time * 1e-6 << " s.\n";
}
auto mg_graph_view = mg_graph.view();
const int hash_bin_count = 5;
// 3. run MG count if
if (PERF) {
CUDA_TRY(hipDeviceSynchronize()); // for consistent performance measurement
handle.get_comms().barrier();
hr_clock.start();
}
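// The predicate is evaluated on the original (pre-renumbering) vertex IDs supplied via the
// renumber map, so the MG count below can be compared with the SG count over the plain
// [0, V) vertex range in the correctness check.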
vertex_t const* data = (*d_mg_renumber_map_labels).data();
auto vertex_count =
count_if_v(handle, mg_graph_view, data, test_predicate<vertex_t>(hash_bin_count));
if (PERF) {
CUDA_TRY(hipDeviceSynchronize()); // for consistent performance measurement
handle.get_comms().barrier();
double elapsed_time{0.0};
hr_clock.stop(&elapsed_time);
std::cout << "MG count if took " << elapsed_time * 1e-6 << " s.\n";
}
// 4. compare SG & MG results
if (prims_usecase.check_correctness) {
cugraph::experimental::graph_t<vertex_t, edge_t, weight_t, store_transposed, false> sg_graph(
handle);
std::tie(sg_graph, std::ignore) =
input_usecase.template construct_graph<vertex_t, edge_t, weight_t, store_transposed, false>(
handle, true, false);
auto sg_graph_view = sg_graph.view();
auto expected_vertex_count =
thrust::count_if(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
thrust::make_counting_iterator(sg_graph_view.get_local_vertex_first()),
thrust::make_counting_iterator(sg_graph_view.get_local_vertex_last()),
test_predicate<vertex_t>(hash_bin_count));
ASSERT_TRUE(expected_vertex_count == vertex_count);
}
}
};
using Tests_MG_CountIfV_File = Tests_MG_CountIfV<cugraph::test::File_Usecase>;
using Tests_MG_CountIfV_Rmat = Tests_MG_CountIfV<cugraph::test::Rmat_Usecase>;
TEST_P(Tests_MG_CountIfV_File, CheckInt32Int32FloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, false>(std::get<0>(param), std::get<1>(param));
}
TEST_P(Tests_MG_CountIfV_Rmat, CheckInt32Int32FloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, false>(std::get<0>(param), std::get<1>(param));
}
TEST_P(Tests_MG_CountIfV_File, CheckInt32Int32FloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, true>(std::get<0>(param), std::get<1>(param));
}
TEST_P(Tests_MG_CountIfV_Rmat, CheckInt32Int32FloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, true>(std::get<0>(param), std::get<1>(param));
}
INSTANTIATE_TEST_SUITE_P(
file_test,
Tests_MG_CountIfV_File,
::testing::Combine(
::testing::Values(Prims_Usecase{true}),
::testing::Values(cugraph::test::File_Usecase("test/datasets/karate.mtx"),
cugraph::test::File_Usecase("test/datasets/web-Google.mtx"),
cugraph::test::File_Usecase("test/datasets/ljournal-2008.mtx"),
cugraph::test::File_Usecase("test/datasets/webbase-1M.mtx"))));
INSTANTIATE_TEST_SUITE_P(
rmat_small_test,
Tests_MG_CountIfV_Rmat,
::testing::Combine(::testing::Values(Prims_Usecase{true}),
::testing::Values(cugraph::test::Rmat_Usecase(
10, 16, 0.57, 0.19, 0.19, 0, false, false, 0, true))));
INSTANTIATE_TEST_SUITE_P(
rmat_large_test,
Tests_MG_CountIfV_Rmat,
::testing::Combine(::testing::Values(Prims_Usecase{false}),
::testing::Values(cugraph::test::Rmat_Usecase(
20, 32, 0.57, 0.19, 0.19, 0, false, false, 0, true))));
CUGRAPH_MG_TEST_PROGRAM_MAIN()
| 919a640b6c847cd29d0f5be634c1cdba4aef7aa0.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <utilities/high_res_clock.h>
#include <utilities/base_fixture.hpp>
#include <utilities/device_comm_wrapper.hpp>
#include <utilities/test_graphs.hpp>
#include <utilities/test_utilities.hpp>
#include <utilities/thrust_wrapper.hpp>
#include <cugraph/algorithms.hpp>
#include <cugraph/partition_manager.hpp>
#include <cuco/detail/hash_functions.cuh>
#include <cugraph/experimental/graph_view.hpp>
#include <cugraph/prims/count_if_v.cuh>
#include <thrust/count.h>
#include <raft/comms/comms.hpp>
#include <raft/comms/mpi_comms.hpp>
#include <raft/handle.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <gtest/gtest.h>
#include <random>
// do the perf measurements
// enabled by command line parameter '--perf'
//
static int PERF = 0;
template <typename vertex_t>
struct test_predicate {
int mod{};
test_predicate(int mod_count) : mod(mod_count) {}
__device__ bool operator()(const vertex_t& val)
{
cuco::detail::MurmurHash3_32<vertex_t> hash_func{};
return (0 == (hash_func(val) % mod));
}
};
struct Prims_Usecase {
bool check_correctness{true};
};
template <typename input_usecase_t>
class Tests_MG_CountIfV
: public ::testing::TestWithParam<std::tuple<Prims_Usecase, input_usecase_t>> {
public:
Tests_MG_CountIfV() {}
static void SetupTestCase() {}
static void TearDownTestCase() {}
virtual void SetUp() {}
virtual void TearDown() {}
// Compare the results of count_if_v primitive and thrust count_if on a single GPU
template <typename vertex_t, typename edge_t, typename weight_t, bool store_transposed>
void run_current_test(Prims_Usecase const& prims_usecase, input_usecase_t const& input_usecase)
{
// 1. initialize handle
raft::handle_t handle{};
HighResClock hr_clock{};
raft::comms::initialize_mpi_comms(&handle, MPI_COMM_WORLD);
auto& comm = handle.get_comms();
auto const comm_size = comm.get_size();
auto const comm_rank = comm.get_rank();
auto row_comm_size = static_cast<int>(sqrt(static_cast<double>(comm_size)));
while (comm_size % row_comm_size != 0) {
--row_comm_size;
}
cugraph::partition_2d::subcomm_factory_t<cugraph::partition_2d::key_naming_t, vertex_t>
subcomm_factory(handle, row_comm_size);
// 2. create MG graph
if (PERF) {
CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement
handle.get_comms().barrier();
hr_clock.start();
}
auto [mg_graph, d_mg_renumber_map_labels] =
input_usecase.template construct_graph<vertex_t, edge_t, weight_t, store_transposed, true>(
handle, true, true);
if (PERF) {
CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement
handle.get_comms().barrier();
double elapsed_time{0.0};
hr_clock.stop(&elapsed_time);
std::cout << "MG construct_graph took " << elapsed_time * 1e-6 << " s.\n";
}
auto mg_graph_view = mg_graph.view();
const int hash_bin_count = 5;
// 3. run MG count if
if (PERF) {
CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement
handle.get_comms().barrier();
hr_clock.start();
}
vertex_t const* data = (*d_mg_renumber_map_labels).data();
auto vertex_count =
count_if_v(handle, mg_graph_view, data, test_predicate<vertex_t>(hash_bin_count));
if (PERF) {
CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement
handle.get_comms().barrier();
double elapsed_time{0.0};
hr_clock.stop(&elapsed_time);
std::cout << "MG count if took " << elapsed_time * 1e-6 << " s.\n";
}
// 4. compare SG & MG results
if (prims_usecase.check_correctness) {
cugraph::experimental::graph_t<vertex_t, edge_t, weight_t, store_transposed, false> sg_graph(
handle);
std::tie(sg_graph, std::ignore) =
input_usecase.template construct_graph<vertex_t, edge_t, weight_t, store_transposed, false>(
handle, true, false);
auto sg_graph_view = sg_graph.view();
auto expected_vertex_count =
thrust::count_if(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
thrust::make_counting_iterator(sg_graph_view.get_local_vertex_first()),
thrust::make_counting_iterator(sg_graph_view.get_local_vertex_last()),
test_predicate<vertex_t>(hash_bin_count));
ASSERT_TRUE(expected_vertex_count == vertex_count);
}
}
};
using Tests_MG_CountIfV_File = Tests_MG_CountIfV<cugraph::test::File_Usecase>;
using Tests_MG_CountIfV_Rmat = Tests_MG_CountIfV<cugraph::test::Rmat_Usecase>;
TEST_P(Tests_MG_CountIfV_File, CheckInt32Int32FloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, false>(std::get<0>(param), std::get<1>(param));
}
TEST_P(Tests_MG_CountIfV_Rmat, CheckInt32Int32FloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, false>(std::get<0>(param), std::get<1>(param));
}
TEST_P(Tests_MG_CountIfV_File, CheckInt32Int32FloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, true>(std::get<0>(param), std::get<1>(param));
}
TEST_P(Tests_MG_CountIfV_Rmat, CheckInt32Int32FloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, true>(std::get<0>(param), std::get<1>(param));
}
INSTANTIATE_TEST_SUITE_P(
file_test,
Tests_MG_CountIfV_File,
::testing::Combine(
::testing::Values(Prims_Usecase{true}),
::testing::Values(cugraph::test::File_Usecase("test/datasets/karate.mtx"),
cugraph::test::File_Usecase("test/datasets/web-Google.mtx"),
cugraph::test::File_Usecase("test/datasets/ljournal-2008.mtx"),
cugraph::test::File_Usecase("test/datasets/webbase-1M.mtx"))));
INSTANTIATE_TEST_SUITE_P(
rmat_small_test,
Tests_MG_CountIfV_Rmat,
::testing::Combine(::testing::Values(Prims_Usecase{true}),
::testing::Values(cugraph::test::Rmat_Usecase(
10, 16, 0.57, 0.19, 0.19, 0, false, false, 0, true))));
INSTANTIATE_TEST_SUITE_P(
rmat_large_test,
Tests_MG_CountIfV_Rmat,
::testing::Combine(::testing::Values(Prims_Usecase{false}),
::testing::Values(cugraph::test::Rmat_Usecase(
20, 32, 0.57, 0.19, 0.19, 0, false, false, 0, true))));
CUGRAPH_MG_TEST_PROGRAM_MAIN()
|
2203a470fdcc51818e43bf6d62121d3798afb95b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* GridTools
*
* Copyright (c) 2014-2019, ETH Zurich
* All rights reserved.
*
* Please, refer to the LICENSE file in the root directory.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <gridtools/stencil_composition/backend_cuda/simple_device_memory_allocator.hpp>
#include "../cuda_test_helper.hpp"
#include <gridtools/common/integral_constant.hpp>
#include <gridtools/tools/backend_select.hpp>
#include <gtest/gtest.h>
namespace gridtools {
namespace {
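// check_allocation runs on the device via gridtools::on_device::exec (see the test below):
// it writes through the freshly allocated pointer and reports whether the written value
// reads back, i.e. whether the allocation is usable device memory.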
template <typename PtrHolder>
__device__ bool check_allocation(PtrHolder ptr_holder) {
auto &ref = *ptr_holder();
ref = 1.;
return ref == 1.;
}
template <typename PtrHolder>
__global__ void test_allocated(PtrHolder testee, bool *result) {}
TEST(simple_device_memory_allocator, test) {
simple_device_memory_allocator alloc;
auto ptr_holder = alloc.allocate<float_type>(1);
auto result = gridtools::on_device::exec(
GT_MAKE_INTEGRAL_CONSTANT_FROM_VALUE(&check_allocation<decltype(ptr_holder)>), ptr_holder);
ASSERT_TRUE(result);
}
} // namespace
} // namespace gridtools
| 2203a470fdcc51818e43bf6d62121d3798afb95b.cu | /*
* GridTools
*
* Copyright (c) 2014-2019, ETH Zurich
* All rights reserved.
*
* Please, refer to the LICENSE file in the root directory.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <gridtools/stencil_composition/backend_cuda/simple_device_memory_allocator.hpp>
#include "../cuda_test_helper.hpp"
#include <gridtools/common/integral_constant.hpp>
#include <gridtools/tools/backend_select.hpp>
#include <gtest/gtest.h>
namespace gridtools {
namespace {
template <typename PtrHolder>
__device__ bool check_allocation(PtrHolder ptr_holder) {
auto &ref = *ptr_holder();
ref = 1.;
return ref == 1.;
}
template <typename PtrHolder>
__global__ void test_allocated(PtrHolder testee, bool *result) {}
TEST(simple_device_memory_allocator, test) {
simple_device_memory_allocator alloc;
auto ptr_holder = alloc.allocate<float_type>(1);
auto result = gridtools::on_device::exec(
GT_MAKE_INTEGRAL_CONSTANT_FROM_VALUE(&check_allocation<decltype(ptr_holder)>), ptr_holder);
ASSERT_TRUE(result);
}
} // namespace
} // namespace gridtools
|
2d4bcaef13ddf92bbd1de2e05a8d0ed3be3ba45e.hip | // !!! This is a file automatically generated by hipify!!!
/*
CUDA BarnesHut v3.1: Simulation of the gravitational forces
in a galactic cluster using the Barnes-Hut n-body algorithm
Copyright (c) 2013, Texas State University-San Marcos. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted for academic, research, experimental, or personal use provided that
the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Texas State University-San Marcos nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
For all other uses, please contact the Office for Commercialization and Industry
Relations at Texas State University-San Marcos <http://www.txstate.edu/ocir/>.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
Author: Martin Burtscher <[email protected]>
*/
#define __KEPLER__
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#ifdef __KEPLER__
// thread count
#define THREADS1 1024 /* must be a power of 2 */
#define THREADS2 1024
#define THREADS3 768
#define THREADS4 128
#define THREADS5 1024
#define THREADS6 1024
// block count = factor * #SMs
#define FACTOR1 2
#define FACTOR2 2
#define FACTOR3 1 /* must all be resident at the same time */
#define FACTOR4 4 /* must all be resident at the same time */
#define FACTOR5 2
#define FACTOR6 2
#else
// thread count
#define THREADS1 512 /* must be a power of 2 */
#define THREADS2 512
#define THREADS3 128
#define THREADS4 64
#define THREADS5 256
#define THREADS6 1024
// block count = factor * #SMs
#define FACTOR1 3
#define FACTOR2 3
#define FACTOR3 6 /* must all be resident at the same time */
#define FACTOR4 6 /* must all be resident at the same time */
#define FACTOR5 5
#define FACTOR6 1
#endif
#define WARPSIZE 32
#define MAXDEPTH 32
__device__ volatile int stepd, bottomd, maxdepthd;
__device__ unsigned int blkcntd;
__device__ volatile float radiusd;
/******************************************************************************/
/*** initialize memory ********************************************************/
/******************************************************************************/
__global__ void InitializationKernel(int * __restrict errd)
{
*errd = 0;
stepd = -1;
maxdepthd = 1;
blkcntd = 0;
}
/******************************************************************************/
/*** compute center and radius ************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS1, FACTOR1)
void BoundingBoxKernel(int nnodesd, int nbodiesd, volatile int * __restrict startd, volatile int * __restrict childd, volatile float * __restrict massd, volatile float * __restrict posxd, volatile float * __restrict posyd, volatile float * __restrict poszd, volatile float * __restrict maxxd, volatile float * __restrict maxyd, volatile float * __restrict maxzd, volatile float * __restrict minxd, volatile float * __restrict minyd, volatile float * __restrict minzd)
{
register int i, j, k, inc;
register float val, minx, maxx, miny, maxy, minz, maxz;
__shared__ volatile float sminx[THREADS1], smaxx[THREADS1], sminy[THREADS1], smaxy[THREADS1], sminz[THREADS1], smaxz[THREADS1];
// initialize with valid data (in case #bodies < #threads)
minx = maxx = posxd[0];
miny = maxy = posyd[0];
minz = maxz = poszd[0];
// scan all bodies
i = threadIdx.x;
inc = THREADS1 * gridDim.x;
for (j = i + blockIdx.x * THREADS1; j < nbodiesd; j += inc) {
val = posxd[j];
minx = fminf(minx, val);
maxx = fmaxf(maxx, val);
val = posyd[j];
miny = fminf(miny, val);
maxy = fmaxf(maxy, val);
val = poszd[j];
minz = fminf(minz, val);
maxz = fmaxf(maxz, val);
}
// reduction in shared memory
sminx[i] = minx;
smaxx[i] = maxx;
sminy[i] = miny;
smaxy[i] = maxy;
sminz[i] = minz;
smaxz[i] = maxz;
for (j = THREADS1 / 2; j > 0; j /= 2) {
__syncthreads();
if (i < j) {
k = i + j;
sminx[i] = minx = fminf(minx, sminx[k]);
smaxx[i] = maxx = fmaxf(maxx, smaxx[k]);
sminy[i] = miny = fminf(miny, sminy[k]);
smaxy[i] = maxy = fmaxf(maxy, smaxy[k]);
sminz[i] = minz = fminf(minz, sminz[k]);
smaxz[i] = maxz = fmaxf(maxz, smaxz[k]);
}
}
// write block result to global memory
if (i == 0) {
k = blockIdx.x;
minxd[k] = minx;
maxxd[k] = maxx;
minyd[k] = miny;
maxyd[k] = maxy;
minzd[k] = minz;
maxzd[k] = maxz;
__threadfence();
inc = gridDim.x - 1;
if (inc == atomicInc(&blkcntd, inc)) {
// I'm the last block, so combine all block results
for (j = 0; j <= inc; j++) {
minx = fminf(minx, minxd[j]);
maxx = fmaxf(maxx, maxxd[j]);
miny = fminf(miny, minyd[j]);
maxy = fmaxf(maxy, maxyd[j]);
minz = fminf(minz, minzd[j]);
maxz = fmaxf(maxz, maxzd[j]);
}
// compute 'radius'
val = fmaxf(maxx - minx, maxy - miny);
radiusd = fmaxf(val, maxz - minz) * 0.5f;
// create root node
k = nnodesd;
bottomd = k;
massd[k] = -1.0f;
startd[k] = 0;
posxd[k] = (minx + maxx) * 0.5f;
posyd[k] = (miny + maxy) * 0.5f;
poszd[k] = (minz + maxz) * 0.5f;
k *= 8;
for (i = 0; i < 8; i++) childd[k + i] = -1;
stepd++;
}
}
}
/******************************************************************************/
/*** build tree ***************************************************************/
/******************************************************************************/
__global__
__launch_bounds__(1024, 1)
void ClearKernel1(int nnodesd, int nbodiesd, volatile int * __restrict childd)
{
register int k, inc, top, bottom;
top = 8 * nnodesd;
bottom = 8 * nbodiesd;
inc = blockDim.x * gridDim.x;
k = (bottom & (-WARPSIZE)) + threadIdx.x + blockIdx.x * blockDim.x; // align to warp size
if (k < bottom) k += inc;
// iterate over all cells assigned to thread
while (k < top) {
childd[k] = -1;
k += inc;
}
}
__global__
__launch_bounds__(THREADS2, FACTOR2)
void TreeBuildingKernel(int nnodesd, int nbodiesd, volatile int * __restrict errd, volatile int * __restrict childd, volatile float * __restrict posxd, volatile float * __restrict posyd, volatile float * __restrict poszd)
{
register int i, j, depth, localmaxdepth, skip, inc;
register float x, y, z, r;
register float px, py, pz;
register float dx, dy, dz;
register int ch, n, cell, locked, patch;
register float radius, rootx, rooty, rootz;
// cache root data
radius = radiusd;
rootx = posxd[nnodesd];
rooty = posyd[nnodesd];
rootz = poszd[nnodesd];
localmaxdepth = 1;
skip = 1;
inc = blockDim.x * gridDim.x;
i = threadIdx.x + blockIdx.x * blockDim.x;
// iterate over all bodies assigned to thread
while (i < nbodiesd) {
if (skip != 0) {
// new body, so start traversing at root
skip = 0;
px = posxd[i];
py = posyd[i];
pz = poszd[i];
n = nnodesd;
depth = 1;
r = radius * 0.5f;
dx = dy = dz = -r;
j = 0;
// determine which child to follow
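// j encodes the octant in three bits (bit 0: +x half, bit 1: +y half, bit 2: +z half);
// dx/dy/dz move the cell centre towards that octant for the next level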
if (rootx < px) {j = 1; dx = r;}
if (rooty < py) {j |= 2; dy = r;}
if (rootz < pz) {j |= 4; dz = r;}
x = rootx + dx;
y = rooty + dy;
z = rootz + dz;
}
// follow path to leaf cell
ch = childd[n*8+j];
while (ch >= nbodiesd) {
n = ch;
depth++;
r *= 0.5f;
dx = dy = dz = -r;
j = 0;
// determine which child to follow
if (x < px) {j = 1; dx = r;}
if (y < py) {j |= 2; dy = r;}
if (z < pz) {j |= 4; dz = r;}
x += dx;
y += dy;
z += dz;
ch = childd[n*8+j];
}
if (ch != -2) { // skip if child pointer is locked and try again later
locked = n*8+j;
if (ch == -1) {
if (-1 == atomicCAS((int *)&childd[locked], -1, i)) { // if null, just insert the new body
localmaxdepth = max(depth, localmaxdepth);
i += inc; // move on to next body
skip = 1;
}
} else { // there already is a body in this position
if (ch == atomicCAS((int *)&childd[locked], ch, -2)) { // try to lock
patch = -1;
// create new cell(s) and insert the old and new body
do {
depth++;
cell = atomicSub((int *)&bottomd, 1) - 1;
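// cells are allocated downwards from nnodesd towards the body range; reaching it
// (cell <= nbodiesd) means the node pool has overflowed and an error is flagged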
if (cell <= nbodiesd) {
*errd = 1;
bottomd = nnodesd;
}
if (patch != -1) {
childd[n*8+j] = cell;
}
patch = max(patch, cell);
j = 0;
if (x < posxd[ch]) j = 1;
if (y < posyd[ch]) j |= 2;
if (z < poszd[ch]) j |= 4;
childd[cell*8+j] = ch;
n = cell;
r *= 0.5f;
dx = dy = dz = -r;
j = 0;
if (x < px) {j = 1; dx = r;}
if (y < py) {j |= 2; dy = r;}
if (z < pz) {j |= 4; dz = r;}
x += dx;
y += dy;
z += dz;
ch = childd[n*8+j];
// repeat until the two bodies are different children
} while (ch >= 0);
childd[n*8+j] = i;
localmaxdepth = max(depth, localmaxdepth);
i += inc; // move on to next body
skip = 2;
}
}
}
__syncthreads(); // __threadfence();
if (skip == 2) {
childd[locked] = patch;
}
}
// record maximum tree depth
atomicMax((int *)&maxdepthd, localmaxdepth);
}
__global__
__launch_bounds__(1024, 1)
void ClearKernel2(int nnodesd, volatile int * __restrict startd, volatile float * __restrict massd)
{
register int k, inc, bottom;
bottom = bottomd;
inc = blockDim.x * gridDim.x;
k = (bottom & (-WARPSIZE)) + threadIdx.x + blockIdx.x * blockDim.x; // align to warp size
if (k < bottom) k += inc;
// iterate over all cells assigned to thread
while (k < nnodesd) {
massd[k] = -1.0f;
startd[k] = -1;
k += inc;
}
}
/******************************************************************************/
/*** compute center of mass ***************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS3, FACTOR3)
void SummarizationKernel(const int nnodesd, const int nbodiesd, volatile int * __restrict countd, const int * __restrict childd, volatile float * __restrict massd, volatile float * __restrict posxd, volatile float * __restrict posyd, volatile float * __restrict poszd)
{
register int i, j, k, ch, inc, cnt, bottom, flag;
register float m, cm, px, py, pz;
__shared__ int child[THREADS3 * 8];
__shared__ float mass[THREADS3 * 8];
bottom = bottomd;
inc = blockDim.x * gridDim.x;
k = (bottom & (-WARPSIZE)) + threadIdx.x + blockIdx.x * blockDim.x; // align to warp size
if (k < bottom) k += inc;
register int restart = k;
for (j = 0; j < 5; j++) { // wait-free pre-passes
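// during the pre-passes a cell is summarized only if all of its children already have a
// valid mass; unready cells are skipped and revisited on the next pass, and the polling
// loop that follows the pre-passes guarantees completion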
// iterate over all cells assigned to thread
while (k <= nnodesd) {
if (massd[k] < 0.0f) {
for (i = 0; i < 8; i++) {
ch = childd[k*8+i];
child[i*THREADS3+threadIdx.x] = ch; // cache children
if ((ch >= nbodiesd) && ((mass[i*THREADS3+threadIdx.x] = massd[ch]) < 0.0f)) {
break;
}
}
if (i == 8) {
// all children are ready
cm = 0.0f;
px = 0.0f;
py = 0.0f;
pz = 0.0f;
cnt = 0;
for (i = 0; i < 8; i++) {
ch = child[i*THREADS3+threadIdx.x];
if (ch >= 0) {
if (ch >= nbodiesd) { // count bodies (needed later)
m = mass[i*THREADS3+threadIdx.x];
cnt += countd[ch];
} else {
m = massd[ch];
cnt++;
}
// add child's contribution
cm += m;
px += posxd[ch] * m;
py += posyd[ch] * m;
pz += poszd[ch] * m;
}
}
countd[k] = cnt;
m = 1.0f / cm;
posxd[k] = px * m;
posyd[k] = py * m;
poszd[k] = pz * m;
__threadfence(); // make sure data are visible before setting mass
massd[k] = cm;
}
}
k += inc; // move on to next cell
}
k = restart;
}
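// final pass: poll until all children of each remaining cell are ready; j counts the
// children still pending, ready ones are cached in shared memory so they are not
// re-read, and the new mass is published only after the __syncthreads further down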
flag = 0;
j = 0;
// iterate over all cells assigned to thread
while (k <= nnodesd) {
if (massd[k] >= 0.0f) {
k += inc;
} else {
if (j == 0) {
j = 8;
for (i = 0; i < 8; i++) {
ch = childd[k*8+i];
child[i*THREADS3+threadIdx.x] = ch; // cache children
if ((ch < nbodiesd) || ((mass[i*THREADS3+threadIdx.x] = massd[ch]) >= 0.0f)) {
j--;
}
}
} else {
j = 8;
for (i = 0; i < 8; i++) {
ch = child[i*THREADS3+threadIdx.x];
if ((ch < nbodiesd) || (mass[i*THREADS3+threadIdx.x] >= 0.0f) || ((mass[i*THREADS3+threadIdx.x] = massd[ch]) >= 0.0f)) {
j--;
}
}
}
if (j == 0) {
// all children are ready
cm = 0.0f;
px = 0.0f;
py = 0.0f;
pz = 0.0f;
cnt = 0;
for (i = 0; i < 8; i++) {
ch = child[i*THREADS3+threadIdx.x];
if (ch >= 0) {
if (ch >= nbodiesd) { // count bodies (needed later)
m = mass[i*THREADS3+threadIdx.x];
cnt += countd[ch];
} else {
m = massd[ch];
cnt++;
}
// add child's contribution
cm += m;
px += posxd[ch] * m;
py += posyd[ch] * m;
pz += poszd[ch] * m;
}
}
countd[k] = cnt;
m = 1.0f / cm;
posxd[k] = px * m;
posyd[k] = py * m;
poszd[k] = pz * m;
flag = 1;
}
}
__syncthreads(); // __threadfence();
if (flag != 0) {
massd[k] = cm;
k += inc;
flag = 0;
}
}
}
/******************************************************************************/
/*** sort bodies **************************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS4, FACTOR4)
void SortKernel(int nnodesd, int nbodiesd, int * __restrict sortd, int * __restrict countd, volatile int * __restrict startd, int * __restrict childd)
{
register int i, j, k, ch, dec, start, bottom;
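// top-down pass from the root (nnodesd) toward bottomd: every cell hands each child a
// contiguous slice of output slots sized by the body counts from SummarizationKernel,
// so sortd ends up listing bodies in depth-first order, which keeps spatially nearby
// bodies in nearby threads during force calculation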
bottom = bottomd;
dec = blockDim.x * gridDim.x;
k = nnodesd + 1 - dec + threadIdx.x + blockIdx.x * blockDim.x;
// iterate over all cells assigned to thread
while (k >= bottom) {
start = startd[k];
if (start >= 0) {
j = 0;
for (i = 0; i < 8; i++) {
ch = childd[k*8+i];
if (ch >= 0) {
if (i != j) {
// move children to front (needed later for speed)
childd[k*8+i] = -1;
childd[k*8+j] = ch;
}
j++;
if (ch >= nbodiesd) {
// child is a cell
startd[ch] = start; // set start ID of child
start += countd[ch]; // add #bodies in subtree
} else {
// child is a body
sortd[start] = ch; // record body in 'sorted' array
start++;
}
}
}
k -= dec; // move on to next cell
}
}
}
/******************************************************************************/
/*** compute force ************************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS5, FACTOR5)
void ForceCalculationKernel(int nnodesd, int nbodiesd, volatile int * __restrict errd, float dthfd, float itolsqd, float epssqd, volatile int * __restrict sortd, volatile int * __restrict childd, volatile float * __restrict massd, volatile float * __restrict posxd, volatile float * __restrict posyd, volatile float * __restrict poszd, volatile float * __restrict velxd, volatile float * __restrict velyd, volatile float * __restrict velzd, volatile float * __restrict accxd, volatile float * __restrict accyd, volatile float * __restrict acczd)
{
register int i, j, k, n, depth, base, sbase, diff, pd, nd;
register float px, py, pz, ax, ay, az, dx, dy, dz, tmp;
__shared__ volatile int pos[MAXDEPTH * THREADS5/WARPSIZE], node[MAXDEPTH * THREADS5/WARPSIZE];
__shared__ float dq[MAXDEPTH * THREADS5/WARPSIZE];
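// dq[d] is the squared "cell opening" threshold for tree level d:
// ((2*radius) / 2^d)^2 * itolsqd (with itolsqd = 1/theta^2), plus the softening term
// so it can be compared directly against the softened squared distance below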
if (0 == threadIdx.x) {
tmp = radiusd * 2;
// precompute values that depend only on tree level
dq[0] = tmp * tmp * itolsqd;
for (i = 1; i < maxdepthd; i++) {
dq[i] = dq[i - 1] * 0.25f;
dq[i - 1] += epssqd;
}
dq[i - 1] += epssqd;
if (maxdepthd > MAXDEPTH) {
*errd = maxdepthd;
}
}
__syncthreads();
if (maxdepthd <= MAXDEPTH) {
// figure out first thread in each warp (lane 0)
base = threadIdx.x / WARPSIZE;
sbase = base * WARPSIZE;
j = base * MAXDEPTH;
diff = threadIdx.x - sbase;
// make multiple copies to avoid index calculations later
if (diff < MAXDEPTH) {
dq[diff+j] = dq[diff];
}
__syncthreads();
__threadfence_block();
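// pos[] and node[] form one traversal stack per warp (MAXDEPTH entries each, starting
// at j = warp_id * MAXDEPTH); only the warp's first lane writes it, and the __all()
// vote below keeps every lane of the warp on the same tree path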
// iterate over all bodies assigned to thread
for (k = threadIdx.x + blockIdx.x * blockDim.x; k < nbodiesd; k += blockDim.x * gridDim.x) {
i = sortd[k]; // get permuted/sorted index
// cache position info
px = posxd[i];
py = posyd[i];
pz = poszd[i];
ax = 0.0f;
ay = 0.0f;
az = 0.0f;
// initialize iteration stack, i.e., push root node onto stack
depth = j;
if (sbase == threadIdx.x) {
pos[j] = 0;
node[j] = nnodesd * 8;
}
do {
// stack is not empty
pd = pos[depth];
nd = node[depth];
while (pd < 8) {
// node on top of stack has more children to process
n = childd[nd + pd]; // load child pointer
pd++;
if (n >= 0) {
dx = posxd[n] - px;
dy = posyd[n] - py;
dz = poszd[n] - pz;
tmp = dx*dx + (dy*dy + (dz*dz + epssqd)); // compute distance squared (plus softening)
if ((n < nbodiesd) || __all(tmp >= dq[depth])) { // check if all threads agree that cell is far enough away (or is a body)
tmp = rsqrtf(tmp); // compute distance
tmp = massd[n] * tmp * tmp * tmp;
ax += dx * tmp;
ay += dy * tmp;
az += dz * tmp;
} else {
// push cell onto stack
if (sbase == threadIdx.x) { // maybe don't push and inc if last child
pos[depth] = pd;
node[depth] = nd;
}
depth++;
pd = 0;
nd = n * 8;
}
} else {
pd = 8; // early out because all remaining children are also zero
}
}
depth--; // done with this level
} while (depth >= j);
if (stepd > 0) {
// update velocity
velxd[i] += (ax - accxd[i]) * dthfd;
velyd[i] += (ay - accyd[i]) * dthfd;
velzd[i] += (az - acczd[i]) * dthfd;
}
// save computed acceleration
accxd[i] = ax;
accyd[i] = ay;
acczd[i] = az;
}
}
}
/******************************************************************************/
/*** advance bodies ***********************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS6, FACTOR6)
void IntegrationKernel(int nbodiesd, float dtimed, float dthfd, volatile float * __restrict posxd, volatile float * __restrict posyd, volatile float * __restrict poszd, volatile float * __restrict velxd, volatile float * __restrict velyd, volatile float * __restrict velzd, volatile float * __restrict accxd, volatile float * __restrict accyd, volatile float * __restrict acczd)
{
register int i, inc;
register float dvelx, dvely, dvelz;
register float velhx, velhy, velhz;
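// leapfrog-style update: dthfd = dt/2, so the velocity first gets a half step, the
// position update uses that half-step velocity, and the stored velocity receives the
// second half step here; ForceCalculationKernel applies the (a_new - a_old) * dthfd
// correction at the start of the next time step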
// iterate over all bodies assigned to thread
inc = blockDim.x * gridDim.x;
for (i = threadIdx.x + blockIdx.x * blockDim.x; i < nbodiesd; i += inc) {
// integrate
dvelx = accxd[i] * dthfd;
dvely = accyd[i] * dthfd;
dvelz = acczd[i] * dthfd;
velhx = velxd[i] + dvelx;
velhy = velyd[i] + dvely;
velhz = velzd[i] + dvelz;
posxd[i] += velhx * dtimed;
posyd[i] += velhy * dtimed;
poszd[i] += velhz * dtimed;
velxd[i] = velhx + dvelx;
velyd[i] = velhy + dvely;
velzd[i] = velhz + dvelz;
}
}
/******************************************************************************/
static void CudaTest(char *msg)
{
hipError_t e;
hipDeviceSynchronize();
if (hipSuccess != (e = hipGetLastError())) {
fprintf(stderr, "%s: %d\n", msg, e);
fprintf(stderr, "%s\n", hipGetErrorString(e));
exit(-1);
}
}
/******************************************************************************/
// random number generator
#define MULT 1103515245
#define ADD 12345
#define MASK 0x7FFFFFFF
#define TWOTO31 2147483648.0
static int A = 1;
static int B = 0;
static int randx = 1;
static int lastrand;
static void drndset(int seed)
{
A = 1;
B = 0;
randx = (A * seed + B) & MASK;
A = (MULT * A) & MASK;
B = (MULT * B + ADD) & MASK;
}
static double drnd()
{
lastrand = randx;
randx = (A * randx + B) & MASK;
return (double)lastrand / TWOTO31;
}
/******************************************************************************/
int main(int argc, char *argv[])
{
register int i, run, blocks;
int nnodes, nbodies, step, timesteps;
register double runtime;
int error;
register float dtime, dthf, epssq, itolsq;
float time, timing[7];
hipEvent_t start, stop;
float *mass, *posx, *posy, *posz, *velx, *vely, *velz;
int *errl, *sortl, *childl, *countl, *startl;
float *massl;
float *posxl, *posyl, *poszl;
float *velxl, *velyl, *velzl;
float *accxl, *accyl, *acczl;
float *maxxl, *maxyl, *maxzl;
float *minxl, *minyl, *minzl;
register double rsc, vsc, r, v, x, y, z, sq, scale;
// perform some checks
printf("CUDA BarnesHut v3.1 ");
#ifdef __KEPLER__
printf("[Kepler]\n");
#else
printf("[Fermi]\n");
#endif
printf("Copyright (c) 2013, Texas State University-San Marcos. All rights reserved.\n");
fflush(stdout);
if (argc != 4) {
fprintf(stderr, "\n");
fprintf(stderr, "arguments: number_of_bodies number_of_timesteps device\n");
exit(-1);
}
int deviceCount;
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "There is no device supporting CUDA\n");
exit(-1);
}
const int dev = atoi(argv[3]);
if ((dev < 0) || (deviceCount <= dev)) {
fprintf(stderr, "There is no device %d\n", dev);
exit(-1);
}
hipSetDevice(dev);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
if ((deviceProp.major == 9999) && (deviceProp.minor == 9999)) {
fprintf(stderr, "There is no CUDA capable device\n");
exit(-1);
}
if (deviceProp.major < 2) {
fprintf(stderr, "Need at least compute capability 2.0\n");
exit(-1);
}
if (deviceProp.warpSize != WARPSIZE) {
fprintf(stderr, "Warp size must be %d\n", deviceProp.warpSize);
exit(-1);
}
blocks = deviceProp.multiProcessorCount;
// fprintf(stderr, "blocks = %d\n", blocks);
if ((WARPSIZE <= 0) || ((WARPSIZE & (WARPSIZE-1)) != 0)) {
fprintf(stderr, "Warp size must be greater than zero and a power of two\n");
exit(-1);
}
if (MAXDEPTH > WARPSIZE) {
fprintf(stderr, "MAXDEPTH must be less than or equal to WARPSIZE\n");
exit(-1);
}
if ((THREADS1 <= 0) || ((THREADS1 & (THREADS1-1)) != 0)) {
fprintf(stderr, "THREADS1 must be greater than zero and a power of two\n");
exit(-1);
}
// set L1/shared memory configuration
hipFuncSetCacheConfig(BoundingBoxKernel, hipFuncCachePreferShared);
hipFuncSetCacheConfig(TreeBuildingKernel, hipFuncCachePreferL1);
hipFuncSetCacheConfig(ClearKernel1, hipFuncCachePreferL1);
hipFuncSetCacheConfig(ClearKernel2, hipFuncCachePreferL1);
hipFuncSetCacheConfig(SummarizationKernel, hipFuncCachePreferShared);
hipFuncSetCacheConfig(SortKernel, hipFuncCachePreferL1);
#ifdef __KEPLER__
hipFuncSetCacheConfig(ForceCalculationKernel, hipFuncCachePreferEqual);
#else
hipFuncSetCacheConfig(ForceCalculationKernel, hipFuncCachePreferL1);
#endif
hipFuncSetCacheConfig(IntegrationKernel, hipFuncCachePreferL1);
hipGetLastError(); // reset error value
for (run = 0; run < 3; run++) {
for (i = 0; i < 7; i++) timing[i] = 0.0f;
nbodies = atoi(argv[1]);
if (nbodies < 1) {
fprintf(stderr, "nbodies is too small: %d\n", nbodies);
exit(-1);
}
if (nbodies > (1 << 30)) {
fprintf(stderr, "nbodies is too large: %d\n", nbodies);
exit(-1);
}
nnodes = nbodies * 2;
if (nnodes < 1024*blocks) nnodes = 1024*blocks;
while ((nnodes & (WARPSIZE-1)) != 0) nnodes++;
nnodes--;
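// the tree needs at most 2*nbodies nodes; pad to at least 1024 nodes per SM and round
// up to a multiple of WARPSIZE, then keep nnodes as the index of the last node: bodies
// occupy [0, nbodies), the root sits at nnodes, and cells are allocated downward from
// it via bottomd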
timesteps = atoi(argv[2]);
dtime = 0.025; dthf = dtime * 0.5f;
epssq = 0.05 * 0.05;
itolsq = 1.0f / (0.5 * 0.5);
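// dthf is the half time step used by the leapfrog integrator, epssq the squared
// gravitational softening length, and itolsq = 1/theta^2 for an opening angle theta = 0.5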
// allocate memory
if (run == 0) {
printf("configuration: %d bodies, %d time steps\n", nbodies, timesteps);
mass = (float *)malloc(sizeof(float) * nbodies);
if (mass == NULL) {fprintf(stderr, "cannot allocate mass\n"); exit(-1);}
posx = (float *)malloc(sizeof(float) * nbodies);
if (posx == NULL) {fprintf(stderr, "cannot allocate posx\n"); exit(-1);}
posy = (float *)malloc(sizeof(float) * nbodies);
if (posy == NULL) {fprintf(stderr, "cannot allocate posy\n"); exit(-1);}
posz = (float *)malloc(sizeof(float) * nbodies);
if (posz == NULL) {fprintf(stderr, "cannot allocate posz\n"); exit(-1);}
velx = (float *)malloc(sizeof(float) * nbodies);
if (velx == NULL) {fprintf(stderr, "cannot allocate velx\n"); exit(-1);}
vely = (float *)malloc(sizeof(float) * nbodies);
if (vely == NULL) {fprintf(stderr, "cannot allocate vely\n"); exit(-1);}
velz = (float *)malloc(sizeof(float) * nbodies);
if (velz == NULL) {fprintf(stderr, "cannot allocate velz\n"); exit(-1);}
if (hipSuccess != hipMalloc((void **)&errl, sizeof(int))) fprintf(stderr, "could not allocate errd\n"); CudaTest("couldn't allocate errd");
if (hipSuccess != hipMalloc((void **)&childl, sizeof(int) * (nnodes+1) * 8)) fprintf(stderr, "could not allocate childd\n"); CudaTest("couldn't allocate childd");
if (hipSuccess != hipMalloc((void **)&massl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate massd\n"); CudaTest("couldn't allocate massd");
if (hipSuccess != hipMalloc((void **)&posxl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate posxd\n"); CudaTest("couldn't allocate posxd");
if (hipSuccess != hipMalloc((void **)&posyl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate posyd\n"); CudaTest("couldn't allocate posyd");
if (hipSuccess != hipMalloc((void **)&poszl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate poszd\n"); CudaTest("couldn't allocate poszd");
if (hipSuccess != hipMalloc((void **)&velxl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate velxd\n"); CudaTest("couldn't allocate velxd");
if (hipSuccess != hipMalloc((void **)&velyl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate velyd\n"); CudaTest("couldn't allocate velyd");
if (hipSuccess != hipMalloc((void **)&velzl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate velzd\n"); CudaTest("couldn't allocate velzd");
if (hipSuccess != hipMalloc((void **)&accxl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate accxd\n"); CudaTest("couldn't allocate accxd");
if (hipSuccess != hipMalloc((void **)&accyl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate accyd\n"); CudaTest("couldn't allocate accyd");
if (hipSuccess != hipMalloc((void **)&acczl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate acczd\n"); CudaTest("couldn't allocate acczd");
if (hipSuccess != hipMalloc((void **)&countl, sizeof(int) * (nnodes+1))) fprintf(stderr, "could not allocate countd\n"); CudaTest("couldn't allocate countd");
if (hipSuccess != hipMalloc((void **)&startl, sizeof(int) * (nnodes+1))) fprintf(stderr, "could not allocate startd\n"); CudaTest("couldn't allocate startd");
if (hipSuccess != hipMalloc((void **)&sortl, sizeof(int) * (nnodes+1))) fprintf(stderr, "could not allocate sortd\n"); CudaTest("couldn't allocate sortd");
if (hipSuccess != hipMalloc((void **)&maxxl, sizeof(float) * blocks * FACTOR1)) fprintf(stderr, "could not allocate maxxd\n"); CudaTest("couldn't allocate maxxd");
if (hipSuccess != hipMalloc((void **)&maxyl, sizeof(float) * blocks * FACTOR1)) fprintf(stderr, "could not allocate maxyd\n"); CudaTest("couldn't allocate maxyd");
if (hipSuccess != hipMalloc((void **)&maxzl, sizeof(float) * blocks * FACTOR1)) fprintf(stderr, "could not allocate maxzd\n"); CudaTest("couldn't allocate maxzd");
if (hipSuccess != hipMalloc((void **)&minxl, sizeof(float) * blocks * FACTOR1)) fprintf(stderr, "could not allocate minxd\n"); CudaTest("couldn't allocate minxd");
if (hipSuccess != hipMalloc((void **)&minyl, sizeof(float) * blocks * FACTOR1)) fprintf(stderr, "could not allocate minyd\n"); CudaTest("couldn't allocate minyd");
if (hipSuccess != hipMalloc((void **)&minzl, sizeof(float) * blocks * FACTOR1)) fprintf(stderr, "could not allocate minzd\n"); CudaTest("couldn't allocate minzd");
}
// generate input
drndset(7);
rsc = (3 * 3.1415926535897932384626433832795) / 16;
vsc = sqrt(1.0 / rsc);
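// body generation: this appears to be the classic Plummer-model setup (radii drawn from
// the inverted Plummer profile, speeds by rejection sampling); rsc and vsc rescale
// positions and velocities to standard N-body units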
for (i = 0; i < nbodies; i++) {
mass[i] = 1.0 / nbodies;
r = 1.0 / sqrt(pow(drnd()*0.999, -2.0/3.0) - 1);
do {
x = drnd()*2.0 - 1.0;
y = drnd()*2.0 - 1.0;
z = drnd()*2.0 - 1.0;
sq = x*x + y*y + z*z;
} while (sq > 1.0);
scale = rsc * r / sqrt(sq);
posx[i] = x * scale;
posy[i] = y * scale;
posz[i] = z * scale;
do {
x = drnd();
y = drnd() * 0.1;
} while (y > x*x * pow(1 - x*x, 3.5));
v = x * sqrt(2.0 / sqrt(1 + r*r));
do {
x = drnd()*2.0 - 1.0;
y = drnd()*2.0 - 1.0;
z = drnd()*2.0 - 1.0;
sq = x*x + y*y + z*z;
} while (sq > 1.0);
scale = vsc * v / sqrt(sq);
velx[i] = x * scale;
vely[i] = y * scale;
velz[i] = z * scale;
}
if (hipSuccess != hipMemcpy(massl, mass, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of mass to device failed\n"); CudaTest("mass copy to device failed");
if (hipSuccess != hipMemcpy(posxl, posx, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of posx to device failed\n"); CudaTest("posx copy to device failed");
if (hipSuccess != hipMemcpy(posyl, posy, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of posy to device failed\n"); CudaTest("posy copy to device failed");
if (hipSuccess != hipMemcpy(poszl, posz, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of posz to device failed\n"); CudaTest("posz copy to device failed");
if (hipSuccess != hipMemcpy(velxl, velx, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of velx to device failed\n"); CudaTest("velx copy to device failed");
if (hipSuccess != hipMemcpy(velyl, vely, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of vely to device failed\n"); CudaTest("vely copy to device failed");
if (hipSuccess != hipMemcpy(velzl, velz, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of velz to device failed\n"); CudaTest("velz copy to device failed");
// run timesteps (launch GPU kernels)
hipEventCreate(&start); hipEventCreate(&stop);
struct timeval starttime, endtime;
gettimeofday(&starttime, NULL);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( InitializationKernel), dim3(1), dim3(1), 0, 0, errl);
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop);
timing[0] += time;
CudaTest("kernel 0 launch failed");
for (step = 0; step < timesteps; step++) {
hipEventRecord(start, 0);
hipLaunchKernelGGL(( BoundingBoxKernel), dim3(blocks * FACTOR1), dim3(THREADS1), 0, 0, nnodes, nbodies, startl, childl, massl, posxl, posyl, poszl, maxxl, maxyl, maxzl, minxl, minyl, minzl);
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop);
timing[1] += time;
CudaTest("kernel 1 launch failed");
hipEventRecord(start, 0);
hipLaunchKernelGGL(( ClearKernel1), dim3(blocks * 1), dim3(1024), 0, 0, nnodes, nbodies, childl);
hipLaunchKernelGGL(( TreeBuildingKernel), dim3(blocks * FACTOR2), dim3(THREADS2), 0, 0, nnodes, nbodies, errl, childl, posxl, posyl, poszl);
hipLaunchKernelGGL(( ClearKernel2), dim3(blocks * 1), dim3(1024), 0, 0, nnodes, startl, massl);
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop);
timing[2] += time;
CudaTest("kernel 2 launch failed");
hipEventRecord(start, 0);
hipLaunchKernelGGL(( SummarizationKernel), dim3(blocks * FACTOR3), dim3(THREADS3), 0, 0, nnodes, nbodies, countl, childl, massl, posxl, posyl, poszl);
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop);
timing[3] += time;
CudaTest("kernel 3 launch failed");
hipEventRecord(start, 0);
hipLaunchKernelGGL(( SortKernel), dim3(blocks * FACTOR4), dim3(THREADS4), 0, 0, nnodes, nbodies, sortl, countl, startl, childl);
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop);
timing[4] += time;
CudaTest("kernel 4 launch failed");
hipEventRecord(start, 0);
hipLaunchKernelGGL(( ForceCalculationKernel), dim3(blocks * FACTOR5), dim3(THREADS5), 0, 0, nnodes, nbodies, errl, dthf, itolsq, epssq, sortl, childl, massl, posxl, posyl, poszl, velxl, velyl, velzl, accxl, accyl, acczl);
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop);
timing[5] += time;
CudaTest("kernel 5 launch failed");
hipEventRecord(start, 0);
hipLaunchKernelGGL(( IntegrationKernel), dim3(blocks * FACTOR6), dim3(THREADS6), 0, 0, nbodies, dtime, dthf, posxl, posyl, poszl, velxl, velyl, velzl, accxl, accyl, acczl);
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop);
timing[6] += time;
CudaTest("kernel 6 launch failed");
}
CudaTest("kernel launch failed");
hipEventDestroy(start); hipEventDestroy(stop);
// transfer result back to CPU
if (hipSuccess != hipMemcpy(&error, errl, sizeof(int), hipMemcpyDeviceToHost)) fprintf(stderr, "copying of err from device failed\n"); CudaTest("err copy from device failed");
if (hipSuccess != hipMemcpy(posx, posxl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of posx from device failed\n"); CudaTest("posx copy from device failed");
if (hipSuccess != hipMemcpy(posy, posyl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of posy from device failed\n"); CudaTest("posy copy from device failed");
if (hipSuccess != hipMemcpy(posz, poszl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of posz from device failed\n"); CudaTest("posz copy from device failed");
if (hipSuccess != hipMemcpy(velx, velxl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of velx from device failed\n"); CudaTest("velx copy from device failed");
if (hipSuccess != hipMemcpy(vely, velyl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of vely from device failed\n"); CudaTest("vely copy from device failed");
if (hipSuccess != hipMemcpy(velz, velzl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of velz from device failed\n"); CudaTest("velz copy from device failed");
gettimeofday(&endtime, NULL);
runtime = endtime.tv_sec + endtime.tv_usec/1000000.0 - starttime.tv_sec - starttime.tv_usec/1000000.0;
printf("runtime: %.4lf s (", runtime);
time = 0;
for (i = 1; i < 7; i++) {
printf(" %.1f ", timing[i]);
time += timing[i];
}
if (error == 0) {
printf(") = %.1f ms\n", time);
} else {
printf(") = %.1f ms FAILED %d\n", time, error);
}
}
// print output
i = 0;
// for (i = 0; i < nbodies; i++) {
printf("%.2e %.2e %.2e\n", posx[i], posy[i], posz[i]);
// }
free(mass);
free(posx);
free(posy);
free(posz);
free(velx);
free(vely);
free(velz);
hipFree(errl);
hipFree(childl);
hipFree(massl);
hipFree(posxl);
hipFree(posyl);
hipFree(poszl);
hipFree(countl);
hipFree(startl);
hipFree(maxxl);
hipFree(maxyl);
hipFree(maxzl);
hipFree(minxl);
hipFree(minyl);
hipFree(minzl);
return 0;
}
| 2d4bcaef13ddf92bbd1de2e05a8d0ed3be3ba45e.cu | /*
CUDA BarnesHut v3.1: Simulation of the gravitational forces
in a galactic cluster using the Barnes-Hut n-body algorithm
Copyright (c) 2013, Texas State University-San Marcos. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted for academic, research, experimental, or personal use provided that
the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Texas State University-San Marcos nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
For all other uses, please contact the Office for Commercialization and Industry
Relations at Texas State University-San Marcos <http://www.txstate.edu/ocir/>.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
Author: Martin Burtscher <[email protected]>
*/
#define __KEPLER__
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#include <cuda.h>
#ifdef __KEPLER__
// thread count
#define THREADS1 1024 /* must be a power of 2 */
#define THREADS2 1024
#define THREADS3 768
#define THREADS4 128
#define THREADS5 1024
#define THREADS6 1024
// block count = factor * #SMs
#define FACTOR1 2
#define FACTOR2 2
#define FACTOR3 1 /* must all be resident at the same time */
#define FACTOR4 4 /* must all be resident at the same time */
#define FACTOR5 2
#define FACTOR6 2
#else
// thread count
#define THREADS1 512 /* must be a power of 2 */
#define THREADS2 512
#define THREADS3 128
#define THREADS4 64
#define THREADS5 256
#define THREADS6 1024
// block count = factor * #SMs
#define FACTOR1 3
#define FACTOR2 3
#define FACTOR3 6 /* must all be resident at the same time */
#define FACTOR4 6 /* must all be resident at the same time */
#define FACTOR5 5
#define FACTOR6 1
#endif
#define WARPSIZE 32
#define MAXDEPTH 32
__device__ volatile int stepd, bottomd, maxdepthd;
__device__ unsigned int blkcntd;
__device__ volatile float radiusd;
/******************************************************************************/
/*** initialize memory ********************************************************/
/******************************************************************************/
__global__ void InitializationKernel(int * __restrict errd)
{
*errd = 0;
stepd = -1;
maxdepthd = 1;
blkcntd = 0;
}
/******************************************************************************/
/*** compute center and radius ************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS1, FACTOR1)
void BoundingBoxKernel(int nnodesd, int nbodiesd, volatile int * __restrict startd, volatile int * __restrict childd, volatile float * __restrict massd, volatile float * __restrict posxd, volatile float * __restrict posyd, volatile float * __restrict poszd, volatile float * __restrict maxxd, volatile float * __restrict maxyd, volatile float * __restrict maxzd, volatile float * __restrict minxd, volatile float * __restrict minyd, volatile float * __restrict minzd)
{
register int i, j, k, inc;
register float val, minx, maxx, miny, maxy, minz, maxz;
__shared__ volatile float sminx[THREADS1], smaxx[THREADS1], sminy[THREADS1], smaxy[THREADS1], sminz[THREADS1], smaxz[THREADS1];
// initialize with valid data (in case #bodies < #threads)
minx = maxx = posxd[0];
miny = maxy = posyd[0];
minz = maxz = poszd[0];
// scan all bodies
i = threadIdx.x;
inc = THREADS1 * gridDim.x;
for (j = i + blockIdx.x * THREADS1; j < nbodiesd; j += inc) {
val = posxd[j];
minx = fminf(minx, val);
maxx = fmaxf(maxx, val);
val = posyd[j];
miny = fminf(miny, val);
maxy = fmaxf(maxy, val);
val = poszd[j];
minz = fminf(minz, val);
maxz = fmaxf(maxz, val);
}
// reduction in shared memory
sminx[i] = minx;
smaxx[i] = maxx;
sminy[i] = miny;
smaxy[i] = maxy;
sminz[i] = minz;
smaxz[i] = maxz;
for (j = THREADS1 / 2; j > 0; j /= 2) {
__syncthreads();
if (i < j) {
k = i + j;
sminx[i] = minx = fminf(minx, sminx[k]);
smaxx[i] = maxx = fmaxf(maxx, smaxx[k]);
sminy[i] = miny = fminf(miny, sminy[k]);
smaxy[i] = maxy = fmaxf(maxy, smaxy[k]);
sminz[i] = minz = fminf(minz, sminz[k]);
smaxz[i] = maxz = fmaxf(maxz, smaxz[k]);
}
}
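// each block writes its partial min/max below; the atomicInc on blkcntd identifies the
// last block to finish, which then combines all partial results, computes the root
// cell's radius and center, and initializes the root's child pointers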
// write block result to global memory
if (i == 0) {
k = blockIdx.x;
minxd[k] = minx;
maxxd[k] = maxx;
minyd[k] = miny;
maxyd[k] = maxy;
minzd[k] = minz;
maxzd[k] = maxz;
__threadfence();
inc = gridDim.x - 1;
if (inc == atomicInc(&blkcntd, inc)) {
// I'm the last block, so combine all block results
for (j = 0; j <= inc; j++) {
minx = fminf(minx, minxd[j]);
maxx = fmaxf(maxx, maxxd[j]);
miny = fminf(miny, minyd[j]);
maxy = fmaxf(maxy, maxyd[j]);
minz = fminf(minz, minzd[j]);
maxz = fmaxf(maxz, maxzd[j]);
}
// compute 'radius'
val = fmaxf(maxx - minx, maxy - miny);
radiusd = fmaxf(val, maxz - minz) * 0.5f;
// create root node
k = nnodesd;
bottomd = k;
massd[k] = -1.0f;
startd[k] = 0;
posxd[k] = (minx + maxx) * 0.5f;
posyd[k] = (miny + maxy) * 0.5f;
poszd[k] = (minz + maxz) * 0.5f;
k *= 8;
for (i = 0; i < 8; i++) childd[k + i] = -1;
stepd++;
}
}
}
/******************************************************************************/
/*** build tree ***************************************************************/
/******************************************************************************/
__global__
__launch_bounds__(1024, 1)
void ClearKernel1(int nnodesd, int nbodiesd, volatile int * __restrict childd)
{
register int k, inc, top, bottom;
top = 8 * nnodesd;
bottom = 8 * nbodiesd;
inc = blockDim.x * gridDim.x;
k = (bottom & (-WARPSIZE)) + threadIdx.x + blockIdx.x * blockDim.x; // align to warp size
if (k < bottom) k += inc;
// iterate over all cells assigned to thread
while (k < top) {
childd[k] = -1;
k += inc;
}
}
__global__
__launch_bounds__(THREADS2, FACTOR2)
void TreeBuildingKernel(int nnodesd, int nbodiesd, volatile int * __restrict errd, volatile int * __restrict childd, volatile float * __restrict posxd, volatile float * __restrict posyd, volatile float * __restrict poszd)
{
register int i, j, depth, localmaxdepth, skip, inc;
register float x, y, z, r;
register float px, py, pz;
register float dx, dy, dz;
register int ch, n, cell, locked, patch;
register float radius, rootx, rooty, rootz;
// cache root data
radius = radiusd;
rootx = posxd[nnodesd];
rooty = posyd[nnodesd];
rootz = poszd[nnodesd];
localmaxdepth = 1;
skip = 1;
inc = blockDim.x * gridDim.x;
i = threadIdx.x + blockIdx.x * blockDim.x;
// iterate over all bodies assigned to thread
while (i < nbodiesd) {
if (skip != 0) {
// new body, so start traversing at root
skip = 0;
px = posxd[i];
py = posyd[i];
pz = poszd[i];
n = nnodesd;
depth = 1;
r = radius * 0.5f;
dx = dy = dz = -r;
j = 0;
// determine which child to follow
if (rootx < px) {j = 1; dx = r;}
if (rooty < py) {j |= 2; dy = r;}
if (rootz < pz) {j |= 4; dz = r;}
x = rootx + dx;
y = rooty + dy;
z = rootz + dz;
}
// follow path to leaf cell
ch = childd[n*8+j];
while (ch >= nbodiesd) {
n = ch;
depth++;
r *= 0.5f;
dx = dy = dz = -r;
j = 0;
// determine which child to follow
if (x < px) {j = 1; dx = r;}
if (y < py) {j |= 2; dy = r;}
if (z < pz) {j |= 4; dz = r;}
x += dx;
y += dy;
z += dz;
ch = childd[n*8+j];
}
if (ch != -2) { // skip if child pointer is locked and try again later
locked = n*8+j;
if (ch == -1) {
if (-1 == atomicCAS((int *)&childd[locked], -1, i)) { // if null, just insert the new body
localmaxdepth = max(depth, localmaxdepth);
i += inc; // move on to next body
skip = 1;
}
} else { // there already is a body in this position
if (ch == atomicCAS((int *)&childd[locked], ch, -2)) { // try to lock
patch = -1;
// create new cell(s) and insert the old and new body
do {
depth++;
cell = atomicSub((int *)&bottomd, 1) - 1;
if (cell <= nbodiesd) {
*errd = 1;
bottomd = nnodesd;
}
if (patch != -1) {
childd[n*8+j] = cell;
}
patch = max(patch, cell);
j = 0;
if (x < posxd[ch]) j = 1;
if (y < posyd[ch]) j |= 2;
if (z < poszd[ch]) j |= 4;
childd[cell*8+j] = ch;
n = cell;
r *= 0.5f;
dx = dy = dz = -r;
j = 0;
if (x < px) {j = 1; dx = r;}
if (y < py) {j |= 2; dy = r;}
if (z < pz) {j |= 4; dz = r;}
x += dx;
y += dy;
z += dz;
ch = childd[n*8+j];
// repeat until the two bodies are different children
} while (ch >= 0);
childd[n*8+j] = i;
localmaxdepth = max(depth, localmaxdepth);
i += inc; // move on to next body
skip = 2;
}
}
}
__syncthreads(); // __threadfence();
if (skip == 2) {
childd[locked] = patch;
}
}
// record maximum tree depth
atomicMax((int *)&maxdepthd, localmaxdepth);
}
__global__
__launch_bounds__(1024, 1)
void ClearKernel2(int nnodesd, volatile int * __restrict startd, volatile float * __restrict massd)
{
register int k, inc, bottom;
bottom = bottomd;
inc = blockDim.x * gridDim.x;
k = (bottom & (-WARPSIZE)) + threadIdx.x + blockIdx.x * blockDim.x; // align to warp size
if (k < bottom) k += inc;
// iterate over all cells assigned to thread
while (k < nnodesd) {
massd[k] = -1.0f;
startd[k] = -1;
k += inc;
}
}
/******************************************************************************/
/*** compute center of mass ***************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS3, FACTOR3)
void SummarizationKernel(const int nnodesd, const int nbodiesd, volatile int * __restrict countd, const int * __restrict childd, volatile float * __restrict massd, volatile float * __restrict posxd, volatile float * __restrict posyd, volatile float * __restrict poszd)
{
register int i, j, k, ch, inc, cnt, bottom, flag;
register float m, cm, px, py, pz;
__shared__ int child[THREADS3 * 8];
__shared__ float mass[THREADS3 * 8];
bottom = bottomd;
inc = blockDim.x * gridDim.x;
k = (bottom & (-WARPSIZE)) + threadIdx.x + blockIdx.x * blockDim.x; // align to warp size
if (k < bottom) k += inc;
register int restart = k;
for (j = 0; j < 5; j++) { // wait-free pre-passes
// iterate over all cells assigned to thread
while (k <= nnodesd) {
if (massd[k] < 0.0f) {
for (i = 0; i < 8; i++) {
ch = childd[k*8+i];
child[i*THREADS3+threadIdx.x] = ch; // cache children
if ((ch >= nbodiesd) && ((mass[i*THREADS3+threadIdx.x] = massd[ch]) < 0.0f)) {
break;
}
}
if (i == 8) {
// all children are ready
cm = 0.0f;
px = 0.0f;
py = 0.0f;
pz = 0.0f;
cnt = 0;
for (i = 0; i < 8; i++) {
ch = child[i*THREADS3+threadIdx.x];
if (ch >= 0) {
if (ch >= nbodiesd) { // count bodies (needed later)
m = mass[i*THREADS3+threadIdx.x];
cnt += countd[ch];
} else {
m = massd[ch];
cnt++;
}
// add child's contribution
cm += m;
px += posxd[ch] * m;
py += posyd[ch] * m;
pz += poszd[ch] * m;
}
}
countd[k] = cnt;
m = 1.0f / cm;
posxd[k] = px * m;
posyd[k] = py * m;
poszd[k] = pz * m;
__threadfence(); // make sure data are visible before setting mass
massd[k] = cm;
}
}
k += inc; // move on to next cell
}
k = restart;
}
flag = 0;
j = 0;
// iterate over all cells assigned to thread
while (k <= nnodesd) {
if (massd[k] >= 0.0f) {
k += inc;
} else {
if (j == 0) {
j = 8;
for (i = 0; i < 8; i++) {
ch = childd[k*8+i];
child[i*THREADS3+threadIdx.x] = ch; // cache children
if ((ch < nbodiesd) || ((mass[i*THREADS3+threadIdx.x] = massd[ch]) >= 0.0f)) {
j--;
}
}
} else {
j = 8;
for (i = 0; i < 8; i++) {
ch = child[i*THREADS3+threadIdx.x];
if ((ch < nbodiesd) || (mass[i*THREADS3+threadIdx.x] >= 0.0f) || ((mass[i*THREADS3+threadIdx.x] = massd[ch]) >= 0.0f)) {
j--;
}
}
}
if (j == 0) {
// all children are ready
cm = 0.0f;
px = 0.0f;
py = 0.0f;
pz = 0.0f;
cnt = 0;
for (i = 0; i < 8; i++) {
ch = child[i*THREADS3+threadIdx.x];
if (ch >= 0) {
if (ch >= nbodiesd) { // count bodies (needed later)
m = mass[i*THREADS3+threadIdx.x];
cnt += countd[ch];
} else {
m = massd[ch];
cnt++;
}
// add child's contribution
cm += m;
px += posxd[ch] * m;
py += posyd[ch] * m;
pz += poszd[ch] * m;
}
}
countd[k] = cnt;
m = 1.0f / cm;
posxd[k] = px * m;
posyd[k] = py * m;
poszd[k] = pz * m;
flag = 1;
}
}
__syncthreads(); // __threadfence();
if (flag != 0) {
massd[k] = cm;
k += inc;
flag = 0;
}
}
}
/******************************************************************************/
/*** sort bodies **************************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS4, FACTOR4)
void SortKernel(int nnodesd, int nbodiesd, int * __restrict sortd, int * __restrict countd, volatile int * __restrict startd, int * __restrict childd)
{
register int i, j, k, ch, dec, start, bottom;
bottom = bottomd;
dec = blockDim.x * gridDim.x;
k = nnodesd + 1 - dec + threadIdx.x + blockIdx.x * blockDim.x;
// iterate over all cells assigned to thread
while (k >= bottom) {
start = startd[k];
if (start >= 0) {
j = 0;
for (i = 0; i < 8; i++) {
ch = childd[k*8+i];
if (ch >= 0) {
if (i != j) {
// move children to front (needed later for speed)
childd[k*8+i] = -1;
childd[k*8+j] = ch;
}
j++;
if (ch >= nbodiesd) {
// child is a cell
startd[ch] = start; // set start ID of child
start += countd[ch]; // add #bodies in subtree
} else {
// child is a body
sortd[start] = ch; // record body in 'sorted' array
start++;
}
}
}
k -= dec; // move on to next cell
}
}
}
/******************************************************************************/
/*** compute force ************************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS5, FACTOR5)
void ForceCalculationKernel(int nnodesd, int nbodiesd, volatile int * __restrict errd, float dthfd, float itolsqd, float epssqd, volatile int * __restrict sortd, volatile int * __restrict childd, volatile float * __restrict massd, volatile float * __restrict posxd, volatile float * __restrict posyd, volatile float * __restrict poszd, volatile float * __restrict velxd, volatile float * __restrict velyd, volatile float * __restrict velzd, volatile float * __restrict accxd, volatile float * __restrict accyd, volatile float * __restrict acczd)
{
register int i, j, k, n, depth, base, sbase, diff, pd, nd;
register float px, py, pz, ax, ay, az, dx, dy, dz, tmp;
__shared__ volatile int pos[MAXDEPTH * THREADS5/WARPSIZE], node[MAXDEPTH * THREADS5/WARPSIZE];
__shared__ float dq[MAXDEPTH * THREADS5/WARPSIZE];
if (0 == threadIdx.x) {
tmp = radiusd * 2;
// precompute values that depend only on tree level
dq[0] = tmp * tmp * itolsqd;
for (i = 1; i < maxdepthd; i++) {
dq[i] = dq[i - 1] * 0.25f;
dq[i - 1] += epssqd;
}
dq[i - 1] += epssqd;
if (maxdepthd > MAXDEPTH) {
*errd = maxdepthd;
}
}
__syncthreads();
if (maxdepthd <= MAXDEPTH) {
// figure out first thread in each warp (lane 0)
base = threadIdx.x / WARPSIZE;
sbase = base * WARPSIZE;
j = base * MAXDEPTH;
diff = threadIdx.x - sbase;
// make multiple copies to avoid index calculations later
if (diff < MAXDEPTH) {
dq[diff+j] = dq[diff];
}
__syncthreads();
__threadfence_block();
// iterate over all bodies assigned to thread
for (k = threadIdx.x + blockIdx.x * blockDim.x; k < nbodiesd; k += blockDim.x * gridDim.x) {
i = sortd[k]; // get permuted/sorted index
// cache position info
px = posxd[i];
py = posyd[i];
pz = poszd[i];
ax = 0.0f;
ay = 0.0f;
az = 0.0f;
// initialize iteration stack, i.e., push root node onto stack
depth = j;
if (sbase == threadIdx.x) {
pos[j] = 0;
node[j] = nnodesd * 8;
}
do {
// stack is not empty
pd = pos[depth];
nd = node[depth];
while (pd < 8) {
// node on top of stack has more children to process
n = childd[nd + pd]; // load child pointer
pd++;
if (n >= 0) {
dx = posxd[n] - px;
dy = posyd[n] - py;
dz = poszd[n] - pz;
tmp = dx*dx + (dy*dy + (dz*dz + epssqd)); // compute distance squared (plus softening)
if ((n < nbodiesd) || __all(tmp >= dq[depth])) { // check if all threads agree that cell is far enough away (or is a body)
tmp = rsqrtf(tmp); // compute distance
tmp = massd[n] * tmp * tmp * tmp;
ax += dx * tmp;
ay += dy * tmp;
az += dz * tmp;
} else {
// push cell onto stack
if (sbase == threadIdx.x) { // maybe don't push and inc if last child
pos[depth] = pd;
node[depth] = nd;
}
depth++;
pd = 0;
nd = n * 8;
}
} else {
pd = 8; // early out because all remaining children are also zero
}
}
depth--; // done with this level
} while (depth >= j);
if (stepd > 0) {
// update velocity
velxd[i] += (ax - accxd[i]) * dthfd;
velyd[i] += (ay - accyd[i]) * dthfd;
velzd[i] += (az - acczd[i]) * dthfd;
}
// save computed acceleration
accxd[i] = ax;
accyd[i] = ay;
acczd[i] = az;
}
}
}
/******************************************************************************/
/*** advance bodies ***********************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS6, FACTOR6)
void IntegrationKernel(int nbodiesd, float dtimed, float dthfd, volatile float * __restrict posxd, volatile float * __restrict posyd, volatile float * __restrict poszd, volatile float * __restrict velxd, volatile float * __restrict velyd, volatile float * __restrict velzd, volatile float * __restrict accxd, volatile float * __restrict accyd, volatile float * __restrict acczd)
{
register int i, inc;
register float dvelx, dvely, dvelz;
register float velhx, velhy, velhz;
// iterate over all bodies assigned to thread
inc = blockDim.x * gridDim.x;
for (i = threadIdx.x + blockIdx.x * blockDim.x; i < nbodiesd; i += inc) {
// integrate
dvelx = accxd[i] * dthfd;
dvely = accyd[i] * dthfd;
dvelz = acczd[i] * dthfd;
velhx = velxd[i] + dvelx;
velhy = velyd[i] + dvely;
velhz = velzd[i] + dvelz;
posxd[i] += velhx * dtimed;
posyd[i] += velhy * dtimed;
poszd[i] += velhz * dtimed;
velxd[i] = velhx + dvelx;
velyd[i] = velhy + dvely;
velzd[i] = velhz + dvelz;
}
}
/******************************************************************************/
static void CudaTest(char *msg)
{
cudaError_t e;
cudaThreadSynchronize();
if (cudaSuccess != (e = cudaGetLastError())) {
fprintf(stderr, "%s: %d\n", msg, e);
fprintf(stderr, "%s\n", cudaGetErrorString(e));
exit(-1);
}
}
/******************************************************************************/
// random number generator
#define MULT 1103515245
#define ADD 12345
#define MASK 0x7FFFFFFF
#define TWOTO31 2147483648.0
static int A = 1;
static int B = 0;
static int randx = 1;
static int lastrand;
static void drndset(int seed)
{
A = 1;
B = 0;
randx = (A * seed + B) & MASK;
A = (MULT * A) & MASK;
B = (MULT * B + ADD) & MASK;
}
static double drnd()
{
lastrand = randx;
randx = (A * randx + B) & MASK;
return (double)lastrand / TWOTO31;
}
/******************************************************************************/
int main(int argc, char *argv[])
{
register int i, run, blocks;
int nnodes, nbodies, step, timesteps;
register double runtime;
int error;
register float dtime, dthf, epssq, itolsq;
float time, timing[7];
cudaEvent_t start, stop;
float *mass, *posx, *posy, *posz, *velx, *vely, *velz;
int *errl, *sortl, *childl, *countl, *startl;
float *massl;
float *posxl, *posyl, *poszl;
float *velxl, *velyl, *velzl;
float *accxl, *accyl, *acczl;
float *maxxl, *maxyl, *maxzl;
float *minxl, *minyl, *minzl;
register double rsc, vsc, r, v, x, y, z, sq, scale;
// perform some checks
printf("CUDA BarnesHut v3.1 ");
#ifdef __KEPLER__
printf("[Kepler]\n");
#else
printf("[Fermi]\n");
#endif
printf("Copyright (c) 2013, Texas State University-San Marcos. All rights reserved.\n");
fflush(stdout);
if (argc != 4) {
fprintf(stderr, "\n");
fprintf(stderr, "arguments: number_of_bodies number_of_timesteps device\n");
exit(-1);
}
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "There is no device supporting CUDA\n");
exit(-1);
}
const int dev = atoi(argv[3]);
if ((dev < 0) || (deviceCount <= dev)) {
fprintf(stderr, "There is no device %d\n", dev);
exit(-1);
}
cudaSetDevice(dev);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
if ((deviceProp.major == 9999) && (deviceProp.minor == 9999)) {
fprintf(stderr, "There is no CUDA capable device\n");
exit(-1);
}
if (deviceProp.major < 2) {
fprintf(stderr, "Need at least compute capability 2.0\n");
exit(-1);
}
if (deviceProp.warpSize != WARPSIZE) {
fprintf(stderr, "Warp size must be %d\n", deviceProp.warpSize);
exit(-1);
}
blocks = deviceProp.multiProcessorCount;
// fprintf(stderr, "blocks = %d\n", blocks);
if ((WARPSIZE <= 0) || ((WARPSIZE & (WARPSIZE-1)) != 0)) {
fprintf(stderr, "Warp size must be greater than zero and a power of two\n");
exit(-1);
}
if (MAXDEPTH > WARPSIZE) {
fprintf(stderr, "MAXDEPTH must be less than or equal to WARPSIZE\n");
exit(-1);
}
if ((THREADS1 <= 0) || ((THREADS1 & (THREADS1-1)) != 0)) {
fprintf(stderr, "THREADS1 must be greater than zero and a power of two\n");
exit(-1);
}
// set L1/shared memory configuration
cudaFuncSetCacheConfig(BoundingBoxKernel, cudaFuncCachePreferShared);
cudaFuncSetCacheConfig(TreeBuildingKernel, cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(ClearKernel1, cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(ClearKernel2, cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(SummarizationKernel, cudaFuncCachePreferShared);
cudaFuncSetCacheConfig(SortKernel, cudaFuncCachePreferL1);
#ifdef __KEPLER__
cudaFuncSetCacheConfig(ForceCalculationKernel, cudaFuncCachePreferEqual);
#else
cudaFuncSetCacheConfig(ForceCalculationKernel, cudaFuncCachePreferL1);
#endif
cudaFuncSetCacheConfig(IntegrationKernel, cudaFuncCachePreferL1);
cudaGetLastError(); // reset error value
for (run = 0; run < 3; run++) {
for (i = 0; i < 7; i++) timing[i] = 0.0f;
nbodies = atoi(argv[1]);
if (nbodies < 1) {
fprintf(stderr, "nbodies is too small: %d\n", nbodies);
exit(-1);
}
if (nbodies > (1 << 30)) {
fprintf(stderr, "nbodies is too large: %d\n", nbodies);
exit(-1);
}
nnodes = nbodies * 2;
if (nnodes < 1024*blocks) nnodes = 1024*blocks;
while ((nnodes & (WARPSIZE-1)) != 0) nnodes++;
nnodes--;
timesteps = atoi(argv[2]);
dtime = 0.025; dthf = dtime * 0.5f;
epssq = 0.05 * 0.05;
itolsq = 1.0f / (0.5 * 0.5);
// allocate memory
if (run == 0) {
printf("configuration: %d bodies, %d time steps\n", nbodies, timesteps);
mass = (float *)malloc(sizeof(float) * nbodies);
if (mass == NULL) {fprintf(stderr, "cannot allocate mass\n"); exit(-1);}
posx = (float *)malloc(sizeof(float) * nbodies);
if (posx == NULL) {fprintf(stderr, "cannot allocate posx\n"); exit(-1);}
posy = (float *)malloc(sizeof(float) * nbodies);
if (posy == NULL) {fprintf(stderr, "cannot allocate posy\n"); exit(-1);}
posz = (float *)malloc(sizeof(float) * nbodies);
if (posz == NULL) {fprintf(stderr, "cannot allocate posz\n"); exit(-1);}
velx = (float *)malloc(sizeof(float) * nbodies);
if (velx == NULL) {fprintf(stderr, "cannot allocate velx\n"); exit(-1);}
vely = (float *)malloc(sizeof(float) * nbodies);
if (vely == NULL) {fprintf(stderr, "cannot allocate vely\n"); exit(-1);}
velz = (float *)malloc(sizeof(float) * nbodies);
if (velz == NULL) {fprintf(stderr, "cannot allocate velz\n"); exit(-1);}
if (cudaSuccess != cudaMalloc((void **)&errl, sizeof(int))) fprintf(stderr, "could not allocate errd\n"); CudaTest("couldn't allocate errd");
if (cudaSuccess != cudaMalloc((void **)&childl, sizeof(int) * (nnodes+1) * 8)) fprintf(stderr, "could not allocate childd\n"); CudaTest("couldn't allocate childd");
if (cudaSuccess != cudaMalloc((void **)&massl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate massd\n"); CudaTest("couldn't allocate massd");
if (cudaSuccess != cudaMalloc((void **)&posxl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate posxd\n"); CudaTest("couldn't allocate posxd");
if (cudaSuccess != cudaMalloc((void **)&posyl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate posyd\n"); CudaTest("couldn't allocate posyd");
if (cudaSuccess != cudaMalloc((void **)&poszl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate poszd\n"); CudaTest("couldn't allocate poszd");
if (cudaSuccess != cudaMalloc((void **)&velxl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate velxd\n"); CudaTest("couldn't allocate velxd");
if (cudaSuccess != cudaMalloc((void **)&velyl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate velyd\n"); CudaTest("couldn't allocate velyd");
if (cudaSuccess != cudaMalloc((void **)&velzl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate velzd\n"); CudaTest("couldn't allocate velzd");
if (cudaSuccess != cudaMalloc((void **)&accxl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate accxd\n"); CudaTest("couldn't allocate accxd");
if (cudaSuccess != cudaMalloc((void **)&accyl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate accyd\n"); CudaTest("couldn't allocate accyd");
if (cudaSuccess != cudaMalloc((void **)&acczl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate acczd\n"); CudaTest("couldn't allocate acczd");
if (cudaSuccess != cudaMalloc((void **)&countl, sizeof(int) * (nnodes+1))) fprintf(stderr, "could not allocate countd\n"); CudaTest("couldn't allocate countd");
if (cudaSuccess != cudaMalloc((void **)&startl, sizeof(int) * (nnodes+1))) fprintf(stderr, "could not allocate startd\n"); CudaTest("couldn't allocate startd");
if (cudaSuccess != cudaMalloc((void **)&sortl, sizeof(int) * (nnodes+1))) fprintf(stderr, "could not allocate sortd\n"); CudaTest("couldn't allocate sortd");
if (cudaSuccess != cudaMalloc((void **)&maxxl, sizeof(float) * blocks * FACTOR1)) fprintf(stderr, "could not allocate maxxd\n"); CudaTest("couldn't allocate maxxd");
if (cudaSuccess != cudaMalloc((void **)&maxyl, sizeof(float) * blocks * FACTOR1)) fprintf(stderr, "could not allocate maxyd\n"); CudaTest("couldn't allocate maxyd");
if (cudaSuccess != cudaMalloc((void **)&maxzl, sizeof(float) * blocks * FACTOR1)) fprintf(stderr, "could not allocate maxzd\n"); CudaTest("couldn't allocate maxzd");
if (cudaSuccess != cudaMalloc((void **)&minxl, sizeof(float) * blocks * FACTOR1)) fprintf(stderr, "could not allocate minxd\n"); CudaTest("couldn't allocate minxd");
if (cudaSuccess != cudaMalloc((void **)&minyl, sizeof(float) * blocks * FACTOR1)) fprintf(stderr, "could not allocate minyd\n"); CudaTest("couldn't allocate minyd");
if (cudaSuccess != cudaMalloc((void **)&minzl, sizeof(float) * blocks * FACTOR1)) fprintf(stderr, "could not allocate minzd\n"); CudaTest("couldn't allocate minzd");
}
// generate input
drndset(7);
rsc = (3 * 3.1415926535897932384626433832795) / 16;
vsc = sqrt(1.0 / rsc);
for (i = 0; i < nbodies; i++) {
mass[i] = 1.0 / nbodies;
r = 1.0 / sqrt(pow(drnd()*0.999, -2.0/3.0) - 1);
do {
x = drnd()*2.0 - 1.0;
y = drnd()*2.0 - 1.0;
z = drnd()*2.0 - 1.0;
sq = x*x + y*y + z*z;
} while (sq > 1.0);
scale = rsc * r / sqrt(sq);
posx[i] = x * scale;
posy[i] = y * scale;
posz[i] = z * scale;
do {
x = drnd();
y = drnd() * 0.1;
} while (y > x*x * pow(1 - x*x, 3.5));
v = x * sqrt(2.0 / sqrt(1 + r*r));
do {
x = drnd()*2.0 - 1.0;
y = drnd()*2.0 - 1.0;
z = drnd()*2.0 - 1.0;
sq = x*x + y*y + z*z;
} while (sq > 1.0);
scale = vsc * v / sqrt(sq);
velx[i] = x * scale;
vely[i] = y * scale;
velz[i] = z * scale;
}
if (cudaSuccess != cudaMemcpy(massl, mass, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of mass to device failed\n"); CudaTest("mass copy to device failed");
if (cudaSuccess != cudaMemcpy(posxl, posx, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of posx to device failed\n"); CudaTest("posx copy to device failed");
if (cudaSuccess != cudaMemcpy(posyl, posy, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of posy to device failed\n"); CudaTest("posy copy to device failed");
if (cudaSuccess != cudaMemcpy(poszl, posz, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of posz to device failed\n"); CudaTest("posz copy to device failed");
if (cudaSuccess != cudaMemcpy(velxl, velx, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of velx to device failed\n"); CudaTest("velx copy to device failed");
if (cudaSuccess != cudaMemcpy(velyl, vely, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of vely to device failed\n"); CudaTest("vely copy to device failed");
if (cudaSuccess != cudaMemcpy(velzl, velz, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of velz to device failed\n"); CudaTest("velz copy to device failed");
// run timesteps (launch GPU kernels)
cudaEventCreate(&start); cudaEventCreate(&stop);
struct timeval starttime, endtime;
gettimeofday(&starttime, NULL);
cudaEventRecord(start, 0);
InitializationKernel<<<1, 1>>>(errl);
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop);
timing[0] += time;
CudaTest("kernel 0 launch failed");
for (step = 0; step < timesteps; step++) {
cudaEventRecord(start, 0);
BoundingBoxKernel<<<blocks * FACTOR1, THREADS1>>>(nnodes, nbodies, startl, childl, massl, posxl, posyl, poszl, maxxl, maxyl, maxzl, minxl, minyl, minzl);
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop);
timing[1] += time;
CudaTest("kernel 1 launch failed");
cudaEventRecord(start, 0);
ClearKernel1<<<blocks * 1, 1024>>>(nnodes, nbodies, childl);
TreeBuildingKernel<<<blocks * FACTOR2, THREADS2>>>(nnodes, nbodies, errl, childl, posxl, posyl, poszl);
ClearKernel2<<<blocks * 1, 1024>>>(nnodes, startl, massl);
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop);
timing[2] += time;
CudaTest("kernel 2 launch failed");
cudaEventRecord(start, 0);
SummarizationKernel<<<blocks * FACTOR3, THREADS3>>>(nnodes, nbodies, countl, childl, massl, posxl, posyl, poszl);
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop);
timing[3] += time;
CudaTest("kernel 3 launch failed");
cudaEventRecord(start, 0);
SortKernel<<<blocks * FACTOR4, THREADS4>>>(nnodes, nbodies, sortl, countl, startl, childl);
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop);
timing[4] += time;
CudaTest("kernel 4 launch failed");
cudaEventRecord(start, 0);
ForceCalculationKernel<<<blocks * FACTOR5, THREADS5>>>(nnodes, nbodies, errl, dthf, itolsq, epssq, sortl, childl, massl, posxl, posyl, poszl, velxl, velyl, velzl, accxl, accyl, acczl);
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop);
timing[5] += time;
CudaTest("kernel 5 launch failed");
cudaEventRecord(start, 0);
IntegrationKernel<<<blocks * FACTOR6, THREADS6>>>(nbodies, dtime, dthf, posxl, posyl, poszl, velxl, velyl, velzl, accxl, accyl, acczl);
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop);
timing[6] += time;
CudaTest("kernel 6 launch failed");
}
CudaTest("kernel launch failed");
cudaEventDestroy(start); cudaEventDestroy(stop);
// transfer result back to CPU
if (cudaSuccess != cudaMemcpy(&error, errl, sizeof(int), cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of err from device failed\n"); CudaTest("err copy from device failed");
if (cudaSuccess != cudaMemcpy(posx, posxl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of posx from device failed\n"); CudaTest("posx copy from device failed");
if (cudaSuccess != cudaMemcpy(posy, posyl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of posy from device failed\n"); CudaTest("posy copy from device failed");
if (cudaSuccess != cudaMemcpy(posz, poszl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of posz from device failed\n"); CudaTest("posz copy from device failed");
if (cudaSuccess != cudaMemcpy(velx, velxl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of velx from device failed\n"); CudaTest("velx copy from device failed");
if (cudaSuccess != cudaMemcpy(vely, velyl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of vely from device failed\n"); CudaTest("vely copy from device failed");
if (cudaSuccess != cudaMemcpy(velz, velzl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of velz from device failed\n"); CudaTest("velz copy from device failed");
gettimeofday(&endtime, NULL);
runtime = endtime.tv_sec + endtime.tv_usec/1000000.0 - starttime.tv_sec - starttime.tv_usec/1000000.0;
printf("runtime: %.4lf s (", runtime);
time = 0;
for (i = 1; i < 7; i++) {
printf(" %.1f ", timing[i]);
time += timing[i];
}
if (error == 0) {
printf(") = %.1f ms\n", time);
} else {
printf(") = %.1f ms FAILED %d\n", time, error);
}
}
// print output
i = 0;
// for (i = 0; i < nbodies; i++) {
printf("%.2e %.2e %.2e\n", posx[i], posy[i], posz[i]);
// }
free(mass);
free(posx);
free(posy);
free(posz);
free(velx);
free(vely);
free(velz);
cudaFree(errl);
cudaFree(childl);
cudaFree(massl);
cudaFree(posxl);
cudaFree(posyl);
cudaFree(poszl);
cudaFree(countl);
cudaFree(startl);
cudaFree(maxxl);
cudaFree(maxyl);
cudaFree(maxzl);
cudaFree(minxl);
cudaFree(minyl);
cudaFree(minzl);
return 0;
}
|
1f7142c2348bce61ee979fd3b26926af1f831113.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <torch/extension.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
__global__
void add_kernel(float *A, float *B, float *C) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
C[i] = A[i] + B[i];
}
void add_cuda(at::Tensor A, at::Tensor B, at::Tensor C) {
int d = A.size(0);
float *h_A = A.data_ptr<float>();
float *h_B = B.data_ptr<float>();
float *h_C = C.data_ptr<float>();
float *d_A, *d_B, *d_C;
hipMalloc(&d_A, d * sizeof(float));
hipMalloc(&d_B, d * sizeof(float));
hipMalloc(&d_C, d * sizeof(float));
hipMemcpy(d_A, h_A, d * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, d * sizeof(float), hipMemcpyHostToDevice);
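// launch a single block of d threads (one per element); assumes d fits within one block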
add_kernel << < 1, d >> > (d_A, d_B, d_C);
hipMemcpy(h_C, d_C, d * sizeof(float), hipMemcpyDeviceToHost);
// free the temporary device buffers so repeated calls do not leak GPU memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
} | 1f7142c2348bce61ee979fd3b26926af1f831113.cu | #include <ATen/ATen.h>
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
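// Element-wise add: thread i computes C[i] = A[i] + B[i]. There is no bounds check,
// so the launch must supply exactly one thread per element.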
__global__
void add_kernel(float *A, float *B, float *C) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
C[i] = A[i] + B[i];
}
void add_cuda(at::Tensor A, at::Tensor B, at::Tensor C) {
int d = A.size(0);
float *h_A = A.data_ptr<float>();
float *h_B = B.data_ptr<float>();
float *h_C = C.data_ptr<float>();
float *d_A, *d_B, *d_C;
cudaMalloc(&d_A, d * sizeof(float));
cudaMalloc(&d_B, d * sizeof(float));
cudaMalloc(&d_C, d * sizeof(float));
cudaMemcpy(d_A, h_A, d * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, d * sizeof(float), cudaMemcpyHostToDevice);
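// launch a single block of d threads (one per element); assumes d fits within one block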
add_kernel << < 1, d >> > (d_A, d_B, d_C);
cudaMemcpy(h_C, d_C, d * sizeof(float), cudaMemcpyDeviceToHost);
// free the temporary device buffers so repeated calls do not leak GPU memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
} |
1a71bf4cb14bf8257922810dab0d6a5b102758d9.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2017-2023 by XGBoost contributors
*/
#include <gtest/gtest.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <xgboost/base.h>
#include <random>
#include <string>
#include <vector>
#include "../../../src/common/common.h"
#include "../../../src/data/ellpack_page.cuh" // for EllpackPageImpl
#include "../../../src/data/ellpack_page.h" // for EllpackPage
#include "../../../src/tree/param.h" // for TrainParam
#include "../../../src/tree/updater_gpu_hist.cu"
#include "../filesystem.h" // dmlc::TemporaryDirectory
#include "../helpers.h"
#include "../histogram_helpers.h"
#include "xgboost/context.h"
#include "xgboost/json.h"
namespace xgboost::tree {
TEST(GpuHist, DeviceHistogram) {
// Ensures that node allocates correctly after reaching `kStopGrowingSize`.
dh::safe_cuda(hipSetDevice(0));
constexpr size_t kNBins = 128;
constexpr int kNNodes = 4;
constexpr size_t kStopGrowing = kNNodes * kNBins * 2u;
DeviceHistogramStorage<kStopGrowing> histogram;
histogram.Init(0, kNBins);
for (int i = 0; i < kNNodes; ++i) {
histogram.AllocateHistograms({i});
}
histogram.Reset();
ASSERT_EQ(histogram.Data().size(), kStopGrowing);
// Use allocated memory but do not erase nidx_map.
for (int i = 0; i < kNNodes; ++i) {
histogram.AllocateHistograms({i});
}
for (int i = 0; i < kNNodes; ++i) {
ASSERT_TRUE(histogram.HistogramExists(i));
}
// Add two new nodes
histogram.AllocateHistograms({kNNodes});
histogram.AllocateHistograms({kNNodes + 1});
// Old cached nodes should still exist
for (int i = 0; i < kNNodes; ++i) {
ASSERT_TRUE(histogram.HistogramExists(i));
}
// Should be deleted
ASSERT_FALSE(histogram.HistogramExists(kNNodes));
// Most recent node should exist
ASSERT_TRUE(histogram.HistogramExists(kNNodes + 1));
// Add same node again - should fail
EXPECT_ANY_THROW(histogram.AllocateHistograms({kNNodes + 1}););
}
std::vector<GradientPairPrecise> GetHostHistGpair() {
// 24 bins, 3 bins for each feature (column).
std::vector<GradientPairPrecise> hist_gpair = {
{0.8314f, 0.7147f}, {1.7989f, 3.7312f}, {3.3846f, 3.4598f},
{2.9277f, 3.5886f}, {1.8429f, 2.4152f}, {1.2443f, 1.9019f},
{1.6380f, 2.9174f}, {1.5657f, 2.5107f}, {2.8111f, 2.4776f},
{2.1322f, 3.0651f}, {3.2927f, 3.8540f}, {0.5899f, 0.9866f},
{1.5185f, 1.6263f}, {2.0686f, 3.1844f}, {2.4278f, 3.0950f},
{1.5105f, 2.1403f}, {2.6922f, 4.2217f}, {1.8122f, 1.5437f},
{0.0000f, 0.0000f}, {4.3245f, 5.7955f}, {1.6903f, 2.1103f},
{2.4012f, 4.4754f}, {3.6136f, 3.4303f}, {0.0000f, 0.0000f}
};
return hist_gpair;
}
template <typename GradientSumT>
void TestBuildHist(bool use_shared_memory_histograms) {
int const kNRows = 16, kNCols = 8;
TrainParam param;
Args args{
{"max_depth", "6"},
{"max_leaves", "0"},
};
param.Init(args);
auto page = BuildEllpackPage(kNRows, kNCols);
BatchParam batch_param{};
Context ctx{MakeCUDACtx(0)};
auto cs = std::make_shared<common::ColumnSampler>(0);
GPUHistMakerDevice maker(&ctx, /*is_external_memory=*/false, {}, kNRows, param, cs, kNCols,
batch_param, MetaInfo());
xgboost::SimpleLCG gen;
xgboost::SimpleRealUniformDistribution<bst_float> dist(0.0f, 1.0f);
HostDeviceVector<GradientPair> gpair(kNRows);
for (auto &gp : gpair.HostVector()) {
bst_float grad = dist(&gen);
bst_float hess = dist(&gen);
gp = GradientPair(grad, hess);
}
gpair.SetDevice(0);
thrust::host_vector<common::CompressedByteT> h_gidx_buffer (page->gidx_buffer.HostVector());
maker.row_partitioner = std::make_unique<RowPartitioner>(0, kNRows);
maker.hist.Init(0, page->Cuts().TotalBins());
maker.hist.AllocateHistograms({0});
maker.gpair = gpair.DeviceSpan();
maker.quantiser = std::make_unique<GradientQuantiser>(maker.gpair, MetaInfo());
maker.page = page.get();
maker.InitFeatureGroupsOnce();
BuildGradientHistogram(ctx.CUDACtx(), page->GetDeviceAccessor(0),
maker.feature_groups->DeviceAccessor(0), gpair.DeviceSpan(),
maker.row_partitioner->GetRows(0), maker.hist.GetNodeHistogram(0),
*maker.quantiser, !use_shared_memory_histograms);
DeviceHistogramStorage<>& d_hist = maker.hist;
auto node_histogram = d_hist.GetNodeHistogram(0);
// d_hist.data stored in float, not gradient pair
thrust::host_vector<GradientPairInt64> h_result (node_histogram.size());
dh::safe_cuda(hipMemcpy(h_result.data(), node_histogram.data(), node_histogram.size_bytes(),
hipMemcpyDeviceToHost));
std::vector<GradientPairPrecise> solution = GetHostHistGpair();
for (size_t i = 0; i < h_result.size(); ++i) {
auto result = maker.quantiser->ToFloatingPoint(h_result[i]);
ASSERT_NEAR(result.GetGrad(), solution[i].GetGrad(), 0.01f);
ASSERT_NEAR(result.GetHess(), solution[i].GetHess(), 0.01f);
}
}
TEST(GpuHist, BuildHistGlobalMem) {
TestBuildHist<GradientPairPrecise>(false);
}
TEST(GpuHist, BuildHistSharedMem) {
TestBuildHist<GradientPairPrecise>(true);
}
HistogramCutsWrapper GetHostCutMatrix () {
HistogramCutsWrapper cmat;
cmat.SetPtrs({0, 3, 6, 9, 12, 15, 18, 21, 24});
cmat.SetMins({0.1f, 0.2f, 0.3f, 0.1f, 0.2f, 0.3f, 0.2f, 0.2f});
// 24 cut fields, 3 cut fields for each feature (column).
// Each row of the cut represents the cuts for a data column.
cmat.SetValues({0.30f, 0.67f, 1.64f,
0.32f, 0.77f, 1.95f,
0.29f, 0.70f, 1.80f,
0.32f, 0.75f, 1.85f,
0.18f, 0.59f, 1.69f,
0.25f, 0.74f, 2.00f,
0.26f, 0.74f, 1.98f,
0.26f, 0.71f, 1.83f});
return cmat;
}
inline GradientQuantiser DummyRoundingFactor() {
thrust::device_vector<GradientPair> gpair(1);
gpair[0] = {1000.f, 1000.f}; // Tests should not exceed sum of 1000
return {dh::ToSpan(gpair), MetaInfo()};
}
void TestHistogramIndexImpl() {
// Test if the compressed histogram index matches when using a sparse
// dmatrix with and without using external memory
int constexpr kNRows = 1000, kNCols = 10;
// Build 2 matrices and build a histogram maker with that
Context ctx(MakeCUDACtx(0));
ObjInfo task{ObjInfo::kRegression};
tree::GPUHistMaker hist_maker{&ctx, &task}, hist_maker_ext{&ctx, &task};
std::unique_ptr<DMatrix> hist_maker_dmat(
CreateSparsePageDMatrixWithRC(kNRows, kNCols, 0, true));
dmlc::TemporaryDirectory tempdir;
std::unique_ptr<DMatrix> hist_maker_ext_dmat(
CreateSparsePageDMatrixWithRC(kNRows, kNCols, 128UL, true, tempdir));
Args training_params = {{"max_depth", "10"}, {"max_leaves", "0"}};
TrainParam param;
param.UpdateAllowUnknown(training_params);
hist_maker.Configure(training_params);
hist_maker.InitDataOnce(¶m, hist_maker_dmat.get());
hist_maker_ext.Configure(training_params);
hist_maker_ext.InitDataOnce(¶m, hist_maker_ext_dmat.get());
// Extract the device maker from the histogram makers and from that its compressed
// histogram index
const auto &maker = hist_maker.maker;
auto grad = GenerateRandomGradients(kNRows);
grad.SetDevice(0);
maker->Reset(&grad, hist_maker_dmat.get(), kNCols);
std::vector<common::CompressedByteT> h_gidx_buffer(maker->page->gidx_buffer.HostVector());
const auto &maker_ext = hist_maker_ext.maker;
maker_ext->Reset(&grad, hist_maker_ext_dmat.get(), kNCols);
std::vector<common::CompressedByteT> h_gidx_buffer_ext(maker_ext->page->gidx_buffer.HostVector());
ASSERT_EQ(maker->page->Cuts().TotalBins(), maker_ext->page->Cuts().TotalBins());
ASSERT_EQ(maker->page->gidx_buffer.Size(), maker_ext->page->gidx_buffer.Size());
}
TEST(GpuHist, TestHistogramIndex) {
TestHistogramIndexImpl();
}
void UpdateTree(Context const* ctx, linalg::Matrix<GradientPair>* gpair, DMatrix* dmat,
size_t gpu_page_size, RegTree* tree, HostDeviceVector<bst_float>* preds,
float subsample = 1.0f, const std::string& sampling_method = "uniform",
int max_bin = 2) {
if (gpu_page_size > 0) {
// Loop over the batches and count the records
int64_t batch_count = 0;
int64_t row_count = 0;
for (const auto& batch : dmat->GetBatches<EllpackPage>(
ctx, BatchParam{max_bin, TrainParam::DftSparseThreshold()})) {
EXPECT_LT(batch.Size(), dmat->Info().num_row_);
batch_count++;
row_count += batch.Size();
}
EXPECT_GE(batch_count, 2);
EXPECT_EQ(row_count, dmat->Info().num_row_);
}
Args args{
{"max_depth", "2"},
{"max_bin", std::to_string(max_bin)},
{"min_child_weight", "0.0"},
{"reg_alpha", "0"},
{"reg_lambda", "0"},
{"subsample", std::to_string(subsample)},
{"sampling_method", sampling_method},
};
TrainParam param;
param.UpdateAllowUnknown(args);
ObjInfo task{ObjInfo::kRegression};
tree::GPUHistMaker hist_maker{ctx, &task};
hist_maker.Configure(Args{});
std::vector<HostDeviceVector<bst_node_t>> position(1);
hist_maker.Update(¶m, gpair, dmat, common::Span<HostDeviceVector<bst_node_t>>{position},
{tree});
auto cache = linalg::MakeTensorView(ctx, preds->DeviceSpan(), preds->Size(), 1);
hist_maker.UpdatePredictionCache(dmat, cache);
}
TEST(GpuHist, UniformSampling) {
constexpr size_t kRows = 4096;
constexpr size_t kCols = 2;
constexpr float kSubsample = 0.9999;
common::GlobalRandom().seed(1994);
// Create an in-memory DMatrix.
std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrixWithRC(kRows, kCols, 0, true));
linalg::Matrix<GradientPair> gpair({kRows}, Context{}.MakeCUDA().Ordinal());
gpair.Data()->Copy(GenerateRandomGradients(kRows));
// Build a tree using the in-memory DMatrix.
RegTree tree;
HostDeviceVector<bst_float> preds(kRows, 0.0, 0);
Context ctx(MakeCUDACtx(0));
UpdateTree(&ctx, &gpair, dmat.get(), 0, &tree, &preds, 1.0, "uniform", kRows);
// Build another tree using sampling.
RegTree tree_sampling;
HostDeviceVector<bst_float> preds_sampling(kRows, 0.0, 0);
UpdateTree(&ctx, &gpair, dmat.get(), 0, &tree_sampling, &preds_sampling, kSubsample, "uniform",
kRows);
// Make sure the predictions are the same.
auto preds_h = preds.ConstHostVector();
auto preds_sampling_h = preds_sampling.ConstHostVector();
for (size_t i = 0; i < kRows; i++) {
EXPECT_NEAR(preds_h[i], preds_sampling_h[i], 1e-8);
}
}
TEST(GpuHist, GradientBasedSampling) {
constexpr size_t kRows = 4096;
constexpr size_t kCols = 2;
constexpr float kSubsample = 0.9999;
common::GlobalRandom().seed(1994);
// Create an in-memory DMatrix.
std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrixWithRC(kRows, kCols, 0, true));
linalg::Matrix<GradientPair> gpair({kRows}, MakeCUDACtx(0).Ordinal());
gpair.Data()->Copy(GenerateRandomGradients(kRows));
// Build a tree using the in-memory DMatrix.
RegTree tree;
HostDeviceVector<bst_float> preds(kRows, 0.0, 0);
Context ctx(MakeCUDACtx(0));
UpdateTree(&ctx, &gpair, dmat.get(), 0, &tree, &preds, 1.0, "uniform", kRows);
// Build another tree using sampling.
RegTree tree_sampling;
HostDeviceVector<bst_float> preds_sampling(kRows, 0.0, 0);
UpdateTree(&ctx, &gpair, dmat.get(), 0, &tree_sampling, &preds_sampling, kSubsample,
"gradient_based", kRows);
// Make sure the predictions are the same.
auto preds_h = preds.ConstHostVector();
auto preds_sampling_h = preds_sampling.ConstHostVector();
for (size_t i = 0; i < kRows; i++) {
EXPECT_NEAR(preds_h[i], preds_sampling_h[i], 1e-3);
}
}
TEST(GpuHist, ExternalMemory) {
constexpr size_t kRows = 4096;
constexpr size_t kCols = 2;
constexpr size_t kPageSize = 1024;
dmlc::TemporaryDirectory tmpdir;
// Create a DMatrix with multiple batches.
std::unique_ptr<DMatrix> dmat_ext(
CreateSparsePageDMatrix(kRows, kCols, kRows / kPageSize, tmpdir.path + "/cache"));
// Create a single batch DMatrix.
std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrix(kRows, kCols, 1, tmpdir.path + "/cache"));
Context ctx(MakeCUDACtx(0));
linalg::Matrix<GradientPair> gpair({kRows}, ctx.Ordinal());
gpair.Data()->Copy(GenerateRandomGradients(kRows));
// Build a tree using the in-memory DMatrix.
RegTree tree;
HostDeviceVector<bst_float> preds(kRows, 0.0, 0);
UpdateTree(&ctx, &gpair, dmat.get(), 0, &tree, &preds, 1.0, "uniform", kRows);
// Build another tree using multiple ELLPACK pages.
RegTree tree_ext;
HostDeviceVector<bst_float> preds_ext(kRows, 0.0, 0);
UpdateTree(&ctx, &gpair, dmat_ext.get(), kPageSize, &tree_ext, &preds_ext, 1.0, "uniform", kRows);
// Make sure the predictions are the same.
auto preds_h = preds.ConstHostVector();
auto preds_ext_h = preds_ext.ConstHostVector();
for (size_t i = 0; i < kRows; i++) {
EXPECT_NEAR(preds_h[i], preds_ext_h[i], 1e-6);
}
}
TEST(GpuHist, ExternalMemoryWithSampling) {
constexpr size_t kRows = 4096;
constexpr size_t kCols = 2;
constexpr size_t kPageSize = 1024;
constexpr float kSubsample = 0.5;
const std::string kSamplingMethod = "gradient_based";
common::GlobalRandom().seed(0);
dmlc::TemporaryDirectory tmpdir;
// Create a single batch DMatrix.
std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrix(kRows, kCols, 1, tmpdir.path + "/cache"));
// Create a DMatrix with multiple batches.
std::unique_ptr<DMatrix> dmat_ext(
CreateSparsePageDMatrix(kRows, kCols, kRows / kPageSize, tmpdir.path + "/cache"));
Context ctx(MakeCUDACtx(0));
linalg::Matrix<GradientPair> gpair({kRows}, ctx.Ordinal());
gpair.Data()->Copy(GenerateRandomGradients(kRows));
// Build a tree using the in-memory DMatrix.
auto rng = common::GlobalRandom();
RegTree tree;
HostDeviceVector<bst_float> preds(kRows, 0.0, 0);
UpdateTree(&ctx, &gpair, dmat.get(), 0, &tree, &preds, kSubsample, kSamplingMethod, kRows);
// Build another tree using multiple ELLPACK pages.
common::GlobalRandom() = rng;
RegTree tree_ext;
HostDeviceVector<bst_float> preds_ext(kRows, 0.0, 0);
UpdateTree(&ctx, &gpair, dmat_ext.get(), kPageSize, &tree_ext, &preds_ext, kSubsample,
kSamplingMethod, kRows);
// Make sure the predictions are the same.
auto preds_h = preds.ConstHostVector();
auto preds_ext_h = preds_ext.ConstHostVector();
for (size_t i = 0; i < kRows; i++) {
ASSERT_NEAR(preds_h[i], preds_ext_h[i], 1e-3);
}
}
TEST(GpuHist, ConfigIO) {
Context ctx(MakeCUDACtx(0));
ObjInfo task{ObjInfo::kRegression};
std::unique_ptr<TreeUpdater> updater{TreeUpdater::Create("grow_gpu_hist", &ctx, &task)};
updater->Configure(Args{});
Json j_updater{Object{}};
updater->SaveConfig(&j_updater);
ASSERT_TRUE(IsA<Object>(j_updater["hist_train_param"]));
updater->LoadConfig(j_updater);
Json j_updater_roundtrip{Object{}};
updater->SaveConfig(&j_updater_roundtrip);
ASSERT_TRUE(IsA<Object>(j_updater_roundtrip["hist_train_param"]));
ASSERT_EQ(j_updater, j_updater_roundtrip);
}
TEST(GpuHist, MaxDepth) {
Context ctx(MakeCUDACtx(0));
size_t constexpr kRows = 16;
size_t constexpr kCols = 4;
auto p_mat = RandomDataGenerator{kRows, kCols, 0}.GenerateDMatrix();
auto learner = std::unique_ptr<Learner>(Learner::Create({p_mat}));
learner->SetParam("max_depth", "32");
learner->Configure();
ASSERT_THROW({learner->UpdateOneIter(0, p_mat);}, dmlc::Error);
}
namespace {
RegTree GetUpdatedTree(Context const* ctx, DMatrix* dmat) {
ObjInfo task{ObjInfo::kRegression};
GPUHistMaker hist_maker{ctx, &task};
hist_maker.Configure(Args{});
TrainParam param;
param.UpdateAllowUnknown(Args{});
linalg::Matrix<GradientPair> gpair({dmat->Info().num_row_}, ctx->Ordinal());
gpair.Data()->Copy(GenerateRandomGradients(dmat->Info().num_row_));
std::vector<HostDeviceVector<bst_node_t>> position(1);
RegTree tree;
hist_maker.Update(¶m, &gpair, dmat, common::Span<HostDeviceVector<bst_node_t>>{position},
{&tree});
return tree;
}
void VerifyColumnSplit(bst_row_t rows, bst_feature_t cols, RegTree const& expected_tree) {
Context ctx(MakeCUDACtx(GPUIDX));
auto Xy = RandomDataGenerator{rows, cols, 0}.GenerateDMatrix(true);
auto const world_size = collective::GetWorldSize();
auto const rank = collective::GetRank();
std::unique_ptr<DMatrix> sliced{Xy->SliceCol(world_size, rank)};
RegTree tree = GetUpdatedTree(&ctx, sliced.get());
Json json{Object{}};
tree.SaveModel(&json);
Json expected_json{Object{}};
expected_tree.SaveModel(&expected_json);
ASSERT_EQ(json, expected_json);
}
} // anonymous namespace
class MGPUHistTest : public BaseMGPUTest {};
TEST_F(MGPUHistTest, GPUHistColumnSplit) {
auto constexpr kRows = 32;
auto constexpr kCols = 16;
Context ctx(MakeCUDACtx(0));
auto dmat = RandomDataGenerator{kRows, kCols, 0}.GenerateDMatrix(true);
RegTree expected_tree = GetUpdatedTree(&ctx, dmat.get());
DoTest(VerifyColumnSplit, kRows, kCols, expected_tree);
}
} // namespace xgboost::tree
| 1a71bf4cb14bf8257922810dab0d6a5b102758d9.cu | /**
* Copyright 2017-2023 by XGBoost contributors
*/
#include <gtest/gtest.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <xgboost/base.h>
#include <random>
#include <string>
#include <vector>
#include "../../../src/common/common.h"
#include "../../../src/data/ellpack_page.cuh" // for EllpackPageImpl
#include "../../../src/data/ellpack_page.h" // for EllpackPage
#include "../../../src/tree/param.h" // for TrainParam
#include "../../../src/tree/updater_gpu_hist.cu"
#include "../filesystem.h" // dmlc::TemporaryDirectory
#include "../helpers.h"
#include "../histogram_helpers.h"
#include "xgboost/context.h"
#include "xgboost/json.h"
namespace xgboost::tree {
TEST(GpuHist, DeviceHistogram) {
// Ensures that node allocates correctly after reaching `kStopGrowingSize`.
dh::safe_cuda(cudaSetDevice(0));
constexpr size_t kNBins = 128;
constexpr int kNNodes = 4;
constexpr size_t kStopGrowing = kNNodes * kNBins * 2u;
DeviceHistogramStorage<kStopGrowing> histogram;
histogram.Init(0, kNBins);
for (int i = 0; i < kNNodes; ++i) {
histogram.AllocateHistograms({i});
}
histogram.Reset();
ASSERT_EQ(histogram.Data().size(), kStopGrowing);
// Use allocated memory but do not erase nidx_map.
for (int i = 0; i < kNNodes; ++i) {
histogram.AllocateHistograms({i});
}
for (int i = 0; i < kNNodes; ++i) {
ASSERT_TRUE(histogram.HistogramExists(i));
}
// Add two new nodes
histogram.AllocateHistograms({kNNodes});
histogram.AllocateHistograms({kNNodes + 1});
// Old cached nodes should still exist
for (int i = 0; i < kNNodes; ++i) {
ASSERT_TRUE(histogram.HistogramExists(i));
}
// Should be deleted
ASSERT_FALSE(histogram.HistogramExists(kNNodes));
// Most recent node should exist
ASSERT_TRUE(histogram.HistogramExists(kNNodes + 1));
// Add same node again - should fail
EXPECT_ANY_THROW(histogram.AllocateHistograms({kNNodes + 1}););
}
std::vector<GradientPairPrecise> GetHostHistGpair() {
// 24 bins, 3 bins for each feature (column).
std::vector<GradientPairPrecise> hist_gpair = {
{0.8314f, 0.7147f}, {1.7989f, 3.7312f}, {3.3846f, 3.4598f},
{2.9277f, 3.5886f}, {1.8429f, 2.4152f}, {1.2443f, 1.9019f},
{1.6380f, 2.9174f}, {1.5657f, 2.5107f}, {2.8111f, 2.4776f},
{2.1322f, 3.0651f}, {3.2927f, 3.8540f}, {0.5899f, 0.9866f},
{1.5185f, 1.6263f}, {2.0686f, 3.1844f}, {2.4278f, 3.0950f},
{1.5105f, 2.1403f}, {2.6922f, 4.2217f}, {1.8122f, 1.5437f},
{0.0000f, 0.0000f}, {4.3245f, 5.7955f}, {1.6903f, 2.1103f},
{2.4012f, 4.4754f}, {3.6136f, 3.4303f}, {0.0000f, 0.0000f}
};
return hist_gpair;
}
template <typename GradientSumT>
void TestBuildHist(bool use_shared_memory_histograms) {
int const kNRows = 16, kNCols = 8;
TrainParam param;
Args args{
{"max_depth", "6"},
{"max_leaves", "0"},
};
param.Init(args);
auto page = BuildEllpackPage(kNRows, kNCols);
BatchParam batch_param{};
Context ctx{MakeCUDACtx(0)};
auto cs = std::make_shared<common::ColumnSampler>(0);
GPUHistMakerDevice maker(&ctx, /*is_external_memory=*/false, {}, kNRows, param, cs, kNCols,
batch_param, MetaInfo());
xgboost::SimpleLCG gen;
xgboost::SimpleRealUniformDistribution<bst_float> dist(0.0f, 1.0f);
HostDeviceVector<GradientPair> gpair(kNRows);
for (auto &gp : gpair.HostVector()) {
bst_float grad = dist(&gen);
bst_float hess = dist(&gen);
gp = GradientPair(grad, hess);
}
gpair.SetDevice(0);
thrust::host_vector<common::CompressedByteT> h_gidx_buffer (page->gidx_buffer.HostVector());
maker.row_partitioner = std::make_unique<RowPartitioner>(0, kNRows);
maker.hist.Init(0, page->Cuts().TotalBins());
maker.hist.AllocateHistograms({0});
maker.gpair = gpair.DeviceSpan();
maker.quantiser = std::make_unique<GradientQuantiser>(maker.gpair, MetaInfo());
maker.page = page.get();
maker.InitFeatureGroupsOnce();
BuildGradientHistogram(ctx.CUDACtx(), page->GetDeviceAccessor(0),
maker.feature_groups->DeviceAccessor(0), gpair.DeviceSpan(),
maker.row_partitioner->GetRows(0), maker.hist.GetNodeHistogram(0),
*maker.quantiser, !use_shared_memory_histograms);
DeviceHistogramStorage<>& d_hist = maker.hist;
auto node_histogram = d_hist.GetNodeHistogram(0);
// d_hist.data stored in float, not gradient pair
thrust::host_vector<GradientPairInt64> h_result (node_histogram.size());
dh::safe_cuda(cudaMemcpy(h_result.data(), node_histogram.data(), node_histogram.size_bytes(),
cudaMemcpyDeviceToHost));
std::vector<GradientPairPrecise> solution = GetHostHistGpair();
for (size_t i = 0; i < h_result.size(); ++i) {
auto result = maker.quantiser->ToFloatingPoint(h_result[i]);
ASSERT_NEAR(result.GetGrad(), solution[i].GetGrad(), 0.01f);
ASSERT_NEAR(result.GetHess(), solution[i].GetHess(), 0.01f);
}
}
TEST(GpuHist, BuildHistGlobalMem) {
TestBuildHist<GradientPairPrecise>(false);
}
TEST(GpuHist, BuildHistSharedMem) {
TestBuildHist<GradientPairPrecise>(true);
}
HistogramCutsWrapper GetHostCutMatrix () {
HistogramCutsWrapper cmat;
cmat.SetPtrs({0, 3, 6, 9, 12, 15, 18, 21, 24});
cmat.SetMins({0.1f, 0.2f, 0.3f, 0.1f, 0.2f, 0.3f, 0.2f, 0.2f});
// 24 cut fields, 3 cut fields for each feature (column).
// Each row of the cut represents the cuts for a data column.
cmat.SetValues({0.30f, 0.67f, 1.64f,
0.32f, 0.77f, 1.95f,
0.29f, 0.70f, 1.80f,
0.32f, 0.75f, 1.85f,
0.18f, 0.59f, 1.69f,
0.25f, 0.74f, 2.00f,
0.26f, 0.74f, 1.98f,
0.26f, 0.71f, 1.83f});
return cmat;
}
inline GradientQuantiser DummyRoundingFactor() {
thrust::device_vector<GradientPair> gpair(1);
gpair[0] = {1000.f, 1000.f}; // Tests should not exceed sum of 1000
return {dh::ToSpan(gpair), MetaInfo()};
}
void TestHistogramIndexImpl() {
// Test if the compressed histogram index matches when using a sparse
// dmatrix with and without using external memory
int constexpr kNRows = 1000, kNCols = 10;
// Build 2 matrices and build a histogram maker with that
Context ctx(MakeCUDACtx(0));
ObjInfo task{ObjInfo::kRegression};
tree::GPUHistMaker hist_maker{&ctx, &task}, hist_maker_ext{&ctx, &task};
std::unique_ptr<DMatrix> hist_maker_dmat(
CreateSparsePageDMatrixWithRC(kNRows, kNCols, 0, true));
dmlc::TemporaryDirectory tempdir;
std::unique_ptr<DMatrix> hist_maker_ext_dmat(
CreateSparsePageDMatrixWithRC(kNRows, kNCols, 128UL, true, tempdir));
Args training_params = {{"max_depth", "10"}, {"max_leaves", "0"}};
TrainParam param;
param.UpdateAllowUnknown(training_params);
hist_maker.Configure(training_params);
hist_maker.InitDataOnce(¶m, hist_maker_dmat.get());
hist_maker_ext.Configure(training_params);
hist_maker_ext.InitDataOnce(¶m, hist_maker_ext_dmat.get());
// Extract the device maker from the histogram makers and from that its compressed
// histogram index
const auto &maker = hist_maker.maker;
auto grad = GenerateRandomGradients(kNRows);
grad.SetDevice(0);
maker->Reset(&grad, hist_maker_dmat.get(), kNCols);
std::vector<common::CompressedByteT> h_gidx_buffer(maker->page->gidx_buffer.HostVector());
const auto &maker_ext = hist_maker_ext.maker;
maker_ext->Reset(&grad, hist_maker_ext_dmat.get(), kNCols);
std::vector<common::CompressedByteT> h_gidx_buffer_ext(maker_ext->page->gidx_buffer.HostVector());
ASSERT_EQ(maker->page->Cuts().TotalBins(), maker_ext->page->Cuts().TotalBins());
ASSERT_EQ(maker->page->gidx_buffer.Size(), maker_ext->page->gidx_buffer.Size());
}
TEST(GpuHist, TestHistogramIndex) {
TestHistogramIndexImpl();
}
void UpdateTree(Context const* ctx, linalg::Matrix<GradientPair>* gpair, DMatrix* dmat,
size_t gpu_page_size, RegTree* tree, HostDeviceVector<bst_float>* preds,
float subsample = 1.0f, const std::string& sampling_method = "uniform",
int max_bin = 2) {
if (gpu_page_size > 0) {
// Loop over the batches and count the records
int64_t batch_count = 0;
int64_t row_count = 0;
for (const auto& batch : dmat->GetBatches<EllpackPage>(
ctx, BatchParam{max_bin, TrainParam::DftSparseThreshold()})) {
EXPECT_LT(batch.Size(), dmat->Info().num_row_);
batch_count++;
row_count += batch.Size();
}
EXPECT_GE(batch_count, 2);
EXPECT_EQ(row_count, dmat->Info().num_row_);
}
Args args{
{"max_depth", "2"},
{"max_bin", std::to_string(max_bin)},
{"min_child_weight", "0.0"},
{"reg_alpha", "0"},
{"reg_lambda", "0"},
{"subsample", std::to_string(subsample)},
{"sampling_method", sampling_method},
};
TrainParam param;
param.UpdateAllowUnknown(args);
ObjInfo task{ObjInfo::kRegression};
tree::GPUHistMaker hist_maker{ctx, &task};
hist_maker.Configure(Args{});
std::vector<HostDeviceVector<bst_node_t>> position(1);
hist_maker.Update(¶m, gpair, dmat, common::Span<HostDeviceVector<bst_node_t>>{position},
{tree});
auto cache = linalg::MakeTensorView(ctx, preds->DeviceSpan(), preds->Size(), 1);
hist_maker.UpdatePredictionCache(dmat, cache);
}
TEST(GpuHist, UniformSampling) {
constexpr size_t kRows = 4096;
constexpr size_t kCols = 2;
constexpr float kSubsample = 0.9999;
common::GlobalRandom().seed(1994);
// Create an in-memory DMatrix.
std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrixWithRC(kRows, kCols, 0, true));
linalg::Matrix<GradientPair> gpair({kRows}, Context{}.MakeCUDA().Ordinal());
gpair.Data()->Copy(GenerateRandomGradients(kRows));
// Build a tree using the in-memory DMatrix.
RegTree tree;
HostDeviceVector<bst_float> preds(kRows, 0.0, 0);
Context ctx(MakeCUDACtx(0));
UpdateTree(&ctx, &gpair, dmat.get(), 0, &tree, &preds, 1.0, "uniform", kRows);
// Build another tree using sampling.
RegTree tree_sampling;
HostDeviceVector<bst_float> preds_sampling(kRows, 0.0, 0);
UpdateTree(&ctx, &gpair, dmat.get(), 0, &tree_sampling, &preds_sampling, kSubsample, "uniform",
kRows);
// Make sure the predictions are the same.
auto preds_h = preds.ConstHostVector();
auto preds_sampling_h = preds_sampling.ConstHostVector();
for (size_t i = 0; i < kRows; i++) {
EXPECT_NEAR(preds_h[i], preds_sampling_h[i], 1e-8);
}
}
TEST(GpuHist, GradientBasedSampling) {
constexpr size_t kRows = 4096;
constexpr size_t kCols = 2;
constexpr float kSubsample = 0.9999;
common::GlobalRandom().seed(1994);
// Create an in-memory DMatrix.
std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrixWithRC(kRows, kCols, 0, true));
linalg::Matrix<GradientPair> gpair({kRows}, MakeCUDACtx(0).Ordinal());
gpair.Data()->Copy(GenerateRandomGradients(kRows));
// Build a tree using the in-memory DMatrix.
RegTree tree;
HostDeviceVector<bst_float> preds(kRows, 0.0, 0);
Context ctx(MakeCUDACtx(0));
UpdateTree(&ctx, &gpair, dmat.get(), 0, &tree, &preds, 1.0, "uniform", kRows);
// Build another tree using sampling.
RegTree tree_sampling;
HostDeviceVector<bst_float> preds_sampling(kRows, 0.0, 0);
UpdateTree(&ctx, &gpair, dmat.get(), 0, &tree_sampling, &preds_sampling, kSubsample,
"gradient_based", kRows);
// Make sure the predictions are the same.
auto preds_h = preds.ConstHostVector();
auto preds_sampling_h = preds_sampling.ConstHostVector();
for (size_t i = 0; i < kRows; i++) {
EXPECT_NEAR(preds_h[i], preds_sampling_h[i], 1e-3);
}
}
TEST(GpuHist, ExternalMemory) {
constexpr size_t kRows = 4096;
constexpr size_t kCols = 2;
constexpr size_t kPageSize = 1024;
dmlc::TemporaryDirectory tmpdir;
// Create a DMatrix with multiple batches.
std::unique_ptr<DMatrix> dmat_ext(
CreateSparsePageDMatrix(kRows, kCols, kRows / kPageSize, tmpdir.path + "/cache"));
// Create a single batch DMatrix.
std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrix(kRows, kCols, 1, tmpdir.path + "/cache"));
Context ctx(MakeCUDACtx(0));
linalg::Matrix<GradientPair> gpair({kRows}, ctx.Ordinal());
gpair.Data()->Copy(GenerateRandomGradients(kRows));
// Build a tree using the in-memory DMatrix.
RegTree tree;
HostDeviceVector<bst_float> preds(kRows, 0.0, 0);
UpdateTree(&ctx, &gpair, dmat.get(), 0, &tree, &preds, 1.0, "uniform", kRows);
// Build another tree using multiple ELLPACK pages.
RegTree tree_ext;
HostDeviceVector<bst_float> preds_ext(kRows, 0.0, 0);
UpdateTree(&ctx, &gpair, dmat_ext.get(), kPageSize, &tree_ext, &preds_ext, 1.0, "uniform", kRows);
// Make sure the predictions are the same.
auto preds_h = preds.ConstHostVector();
auto preds_ext_h = preds_ext.ConstHostVector();
for (size_t i = 0; i < kRows; i++) {
EXPECT_NEAR(preds_h[i], preds_ext_h[i], 1e-6);
}
}
TEST(GpuHist, ExternalMemoryWithSampling) {
constexpr size_t kRows = 4096;
constexpr size_t kCols = 2;
constexpr size_t kPageSize = 1024;
constexpr float kSubsample = 0.5;
const std::string kSamplingMethod = "gradient_based";
common::GlobalRandom().seed(0);
dmlc::TemporaryDirectory tmpdir;
// Create a single batch DMatrix.
std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrix(kRows, kCols, 1, tmpdir.path + "/cache"));
// Create a DMatrix with multiple batches.
std::unique_ptr<DMatrix> dmat_ext(
CreateSparsePageDMatrix(kRows, kCols, kRows / kPageSize, tmpdir.path + "/cache"));
Context ctx(MakeCUDACtx(0));
linalg::Matrix<GradientPair> gpair({kRows}, ctx.Ordinal());
gpair.Data()->Copy(GenerateRandomGradients(kRows));
// Build a tree using the in-memory DMatrix.
auto rng = common::GlobalRandom();
RegTree tree;
HostDeviceVector<bst_float> preds(kRows, 0.0, 0);
UpdateTree(&ctx, &gpair, dmat.get(), 0, &tree, &preds, kSubsample, kSamplingMethod, kRows);
// Build another tree using multiple ELLPACK pages.
common::GlobalRandom() = rng;
RegTree tree_ext;
HostDeviceVector<bst_float> preds_ext(kRows, 0.0, 0);
UpdateTree(&ctx, &gpair, dmat_ext.get(), kPageSize, &tree_ext, &preds_ext, kSubsample,
kSamplingMethod, kRows);
// Make sure the predictions are the same.
auto preds_h = preds.ConstHostVector();
auto preds_ext_h = preds_ext.ConstHostVector();
for (size_t i = 0; i < kRows; i++) {
ASSERT_NEAR(preds_h[i], preds_ext_h[i], 1e-3);
}
}
TEST(GpuHist, ConfigIO) {
Context ctx(MakeCUDACtx(0));
ObjInfo task{ObjInfo::kRegression};
std::unique_ptr<TreeUpdater> updater{TreeUpdater::Create("grow_gpu_hist", &ctx, &task)};
updater->Configure(Args{});
Json j_updater{Object{}};
updater->SaveConfig(&j_updater);
ASSERT_TRUE(IsA<Object>(j_updater["hist_train_param"]));
updater->LoadConfig(j_updater);
Json j_updater_roundtrip{Object{}};
updater->SaveConfig(&j_updater_roundtrip);
ASSERT_TRUE(IsA<Object>(j_updater_roundtrip["hist_train_param"]));
ASSERT_EQ(j_updater, j_updater_roundtrip);
}
TEST(GpuHist, MaxDepth) {
Context ctx(MakeCUDACtx(0));
size_t constexpr kRows = 16;
size_t constexpr kCols = 4;
auto p_mat = RandomDataGenerator{kRows, kCols, 0}.GenerateDMatrix();
auto learner = std::unique_ptr<Learner>(Learner::Create({p_mat}));
learner->SetParam("max_depth", "32");
learner->Configure();
ASSERT_THROW({learner->UpdateOneIter(0, p_mat);}, dmlc::Error);
}
namespace {
RegTree GetUpdatedTree(Context const* ctx, DMatrix* dmat) {
ObjInfo task{ObjInfo::kRegression};
GPUHistMaker hist_maker{ctx, &task};
hist_maker.Configure(Args{});
TrainParam param;
param.UpdateAllowUnknown(Args{});
linalg::Matrix<GradientPair> gpair({dmat->Info().num_row_}, ctx->Ordinal());
gpair.Data()->Copy(GenerateRandomGradients(dmat->Info().num_row_));
std::vector<HostDeviceVector<bst_node_t>> position(1);
RegTree tree;
hist_maker.Update(¶m, &gpair, dmat, common::Span<HostDeviceVector<bst_node_t>>{position},
{&tree});
return tree;
}
void VerifyColumnSplit(bst_row_t rows, bst_feature_t cols, RegTree const& expected_tree) {
Context ctx(MakeCUDACtx(GPUIDX));
auto Xy = RandomDataGenerator{rows, cols, 0}.GenerateDMatrix(true);
auto const world_size = collective::GetWorldSize();
auto const rank = collective::GetRank();
std::unique_ptr<DMatrix> sliced{Xy->SliceCol(world_size, rank)};
RegTree tree = GetUpdatedTree(&ctx, sliced.get());
Json json{Object{}};
tree.SaveModel(&json);
Json expected_json{Object{}};
expected_tree.SaveModel(&expected_json);
ASSERT_EQ(json, expected_json);
}
} // anonymous namespace
class MGPUHistTest : public BaseMGPUTest {};
TEST_F(MGPUHistTest, GPUHistColumnSplit) {
auto constexpr kRows = 32;
auto constexpr kCols = 16;
Context ctx(MakeCUDACtx(0));
auto dmat = RandomDataGenerator{kRows, kCols, 0}.GenerateDMatrix(true);
RegTree expected_tree = GetUpdatedTree(&ctx, dmat.get());
DoTest(VerifyColumnSplit, kRows, kCols, expected_tree);
}
} // namespace xgboost::tree
|
bfbc79f50c511b745e00b6b1cb42ab73be8b4d52.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
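// One thread per array element; idx is the element's position within the (single) block.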
__global__ void cube(float * d_out, float * d_in){
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f * f * f;
}
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d_in;
float * d_out;
// allocate GPU memory
hipMalloc((void**) &d_in, ARRAY_BYTES);
hipMalloc((void**) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
// launch the kernel
hipLaunchKernelGGL(( cube), dim3(2), dim3(ARRAY_SIZE), 0, 0, d_out, d_in);
// copy back the result array to the CPU
hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
// print out the resulting array
for (int i =0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
hipFree(d_in);
hipFree(d_out);
return 0;
}
| bfbc79f50c511b745e00b6b1cb42ab73be8b4d52.cu | #include <stdio.h>
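// One thread per array element; idx is the element's position within the (single) block.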
__global__ void cube(float * d_out, float * d_in){
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f * f * f;
}
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d_in;
float * d_out;
// allocate GPU memory
cudaMalloc((void**) &d_in, ARRAY_BYTES);
cudaMalloc((void**) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
// launch the kernel
cube<<<2, ARRAY_SIZE>>>(d_out, d_in);
// copy back the result array to the CPU
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
// print out the resulting array
for (int i =0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
757285872a678f5862e085ca207c8c1da29d84b6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2017, September 11 - October 9
// ###
#include "helper.h"
#include <iostream>
#include <math.h>
using namespace std;
// uncomment to use the camera
//#define CAMERA
__device__ void eigenvalues (float m11,float m12,float m22, float* lambda){
float T = m11 + m22;
float D = m11*m22 - m12*m12;
lambda[1] = (float)T/2.f + sqrt(pow((float)T,2.f)/4.f - (float)D);
lambda[2] = (float)T/2.f - sqrt(pow((float)T,2.f)/4.f - (float)D);
}
__global__ void T_eigenvalues(float* imgIn_cuda, float* imgOut_cuda,float* T_out_cuda, int w, int h, int nc, float alpha, float beta){
int t_numx = threadIdx.x + blockIdx.x*blockDim.x;
int t_numy = threadIdx.y + blockIdx.y*blockDim.y;
int t_numz = 0;
float lambda[3]; // indices 1 and 2 are written by eigenvalues(), so the array needs three entries
float m11, m12, m22;
if (t_numx < w && t_numy < h && t_numz < nc){
m11 = T_out_cuda[t_numx + w*t_numy + w*h*0];
m12 = T_out_cuda[t_numx + w*t_numy + w*h*1];
m22 = T_out_cuda[t_numx + w*t_numy + w*h*2];
eigenvalues(m11, m12, m22, lambda);
if (lambda[2] >= lambda[1] && lambda[1] >= alpha){
imgOut_cuda[t_numx + w*t_numy + w*h*0] = 255;
imgOut_cuda[t_numx + w*t_numy + w*h*1] = 0;
imgOut_cuda[t_numx + w*t_numy + w*h*2] = 0;
}
else if (lambda[1] >= alpha && alpha >=beta && beta >= lambda[2]){
imgOut_cuda[t_numx + w*t_numy + w*h*0] = 255;
imgOut_cuda[t_numx + w*t_numy + w*h*1] = 255;
imgOut_cuda[t_numx + w*t_numy + w*h*2] = 0;
}
else{
imgOut_cuda[t_numx + w*t_numy + w*h*0] = 0.5*imgIn_cuda[t_numx + w*t_numy + w*h*0];
imgOut_cuda[t_numx + w*t_numy + w*h*1] = 0.5*imgIn_cuda[t_numx + w*t_numy + w*h*1];
imgOut_cuda[t_numx + w*t_numy + w*h*2] = 0.5*imgIn_cuda[t_numx + w*t_numy + w*h*2];
}
}
}
__global__ void gradient_fd(float* imgOut_cuda,float* cuda_v1,float* cuda_v2,int w,int h,int nc){
int t_numx = threadIdx.x + blockIdx.x*blockDim.x;
int t_numy = threadIdx.y + blockIdx.y*blockDim.y;
int t_numz = threadIdx.z + blockIdx.z*blockDim.z;
if (t_numx + 1 < w && t_numy < h && t_numz < nc){
cuda_v1[t_numx + w*t_numy + w*h*t_numz] = imgOut_cuda[t_numx + 1 + w*t_numy + w*h*t_numz] - imgOut_cuda[t_numx + w*t_numy + w*h*t_numz];
}
if (t_numx < w && t_numy + 1< h && t_numz < nc){
cuda_v2[t_numx + w*t_numy + w*h*t_numz] = imgOut_cuda[t_numx + w*(t_numy+1) + w*h*t_numz] - imgOut_cuda[t_numx + w*t_numy + w*h*t_numz];
}
}
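// Rotationally robust derivative filters (weights 3/10/3, normalized by 1/32) with replicate
// padding at the image border; cuda_v1 holds the x-derivative, cuda_v2 the y-derivative.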
__global__ void gradient_rand(float* imgOut_cuda,float* cuda_v1,float* cuda_v2,int w,int h,int nc){
int t_numx = threadIdx.x + blockIdx.x*blockDim.x;
int t_numy = threadIdx.y + blockIdx.y*blockDim.y;
int t_numz = threadIdx.z + blockIdx.z*blockDim.z;
if (t_numx < w && t_numy < h && t_numz < nc){
int x_neg = t_numx-1;
int x_pos = t_numx+1;
int y_neg = t_numy-1;
int y_pos = t_numy+1;
if (x_neg<0) x_neg = 0;
if (x_pos>=w) x_pos = w-1;
if (y_neg<0) y_neg = 0;
if (y_pos>=h) y_pos = h-1;
cuda_v1[t_numx + w*t_numy + w*h*t_numz] = 1.f/32.f*(3*imgOut_cuda[x_pos + w*y_pos + w*h*t_numz]
+ 10*imgOut_cuda[x_pos + w*t_numy + w*h*t_numz]
+ 3*imgOut_cuda[x_pos + w*y_neg + w*h*t_numz]
- 3*imgOut_cuda[x_neg + w*y_pos + w*h*t_numz]
- 10*imgOut_cuda[x_neg + w*t_numy + w*h*t_numz]
- 3*imgOut_cuda[x_neg + w*y_neg + w*h*t_numz]) ;
cuda_v2[t_numx + w*t_numy + w*h*t_numz] = 1.f/32.f*( 3*imgOut_cuda[x_pos + w*y_pos + w*h*t_numz]
+ 10*imgOut_cuda[t_numx + w*y_pos + w*h*t_numz]
+ 3*imgOut_cuda[x_neg + w*y_pos + w*h*t_numz]
- 3*imgOut_cuda[x_pos + w*y_neg + w*h*t_numz]
- 10*imgOut_cuda[t_numx + w*y_neg + w*h*t_numz]
- 3*imgOut_cuda[x_neg + w*y_neg + w*h*t_numz]) ;
}
}
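// Build a (2*radius+1) x (2*radius+1) Gaussian kernel with standard deviation sigma,
// normalized so its entries sum to 1.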
__host__ void gaussian_kernel(float *kernel, float sigma, int radius){
float sum = 0 ;
for(int j = -radius; j<=radius ; j++){
for (int i=-radius; i<=radius; i++){
int index = i+radius + (2*radius+1)*(j+radius);
kernel[index] = 0.5/3.14159/pow(sigma,2.0)*pow(2.71828,-(pow(i,2) + pow(j,2))/2/(pow(sigma,2)));
sum = sum + kernel[index];
}
}
for (int i=0; i<(2*radius + 1)*(2*radius + 1); i++){
kernel[i] = kernel[i] / sum;
}
}
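// Convolution with replicate (clamped) border handling; results are accumulated into
// imgOut_cuda with +=, so the output buffer must be zero-initialized before launch.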
__global__ void cuda_convolution(float *imgIn_cuda, float* imgOut_cuda, float* kernel_cuda, int w, int h, int nc, int radius){
int t_numx = threadIdx.x + blockIdx.x*blockDim.x;
int t_numy = threadIdx.y + blockIdx.y*blockDim.y;
int t_numz = threadIdx.z + blockIdx.z*blockDim.z;
if (t_numx < w && t_numy < h && t_numz < nc){
int index = t_numx + w*t_numy + w*h*t_numz;
//imgOut_cuda[index] = 0;
for (int p = -radius; p <= radius; p++) {
for (int q =-radius; q <= radius; q++) {
int temp_j = t_numy + p;
int temp_i = t_numx + q;
if (temp_i<0) temp_i = 0;
if (temp_i>=w) temp_i = w-1;
if (temp_j<0) temp_j = 0;
if (temp_j>=h) temp_j = h-1;
int image_index = temp_i + temp_j*w + t_numz*h*w;
int kernel_index = q+radius + (2*radius+1)*(p+radius);
imgOut_cuda[index] += imgIn_cuda[image_index] * kernel_cuda[kernel_index];
}
}
}
}
__global__ void M_calculation(float *M_out_cuda, float* cuda_v1, float* cuda_v2, int w, int h, int nc){
int t_numx = threadIdx.x + blockIdx.x*blockDim.x;
int t_numy = threadIdx.y + blockIdx.y*blockDim.y;
int t_numz = threadIdx.z + blockIdx.z*blockDim.z;
int index = t_numx + w*t_numy + w*h*t_numz;
if (t_numx < w && t_numy < h && t_numz < nc){
M_out_cuda[t_numx + w*t_numy + w*h*t_numz] += cuda_v1[index]*cuda_v1[index];
M_out_cuda[t_numx + w*t_numy + w*h*t_numz] += cuda_v1[index]*cuda_v2[index];
M_out_cuda[t_numx + w*t_numy + w*h*t_numz] += cuda_v2[index]*cuda_v2[index];
}
}
int main(int argc, char **argv)
{
hipDeviceSynchronize(); CUDA_CHECK;
#ifdef CAMERA
#else
// input image
string image = "";
float sigma = 1;
bool ret = getParam("i", image, argc, argv);
if (!ret) cerr << "ERROR: no image specified" << endl;
bool ret2 = getParam("sigma", sigma, argc, argv);
if (!ret2) cerr << "ERROR: no sigma specified" << endl;
if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> -sigma <sigma> [-repeats <repeats>] [-gray]" << endl; return 1; }
#endif
// number of computation repetitions to get a better run time measurement
int repeats = 1;
getParam("repeats", repeats, argc, argv);
cout << "repeats: " << repeats << endl;
// load the input image as grayscale if "-gray" is specifed
bool gray = false;
getParam("gray", gray, argc, argv);
cout << "gray: " << gray << endl;
// ### Define your own parameters here as needed
// Init camera / Load input image
#ifdef CAMERA
// Init camera
cv::VideoCapture camera(0);
if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; }
int camW = 640;
int camH = 480;
camera.set(CV_CAP_PROP_FRAME_WIDTH,camW);
camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH);
// read in first frame to get the dimensions
cv::Mat mIn;
camera >> mIn;
#else
// Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale))
cv::Mat mIn = cv::imread(image.c_str(), (gray? CV_LOAD_IMAGE_GRAYSCALE : -1));
// check
if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; }
#endif
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
// get image dimensions
int w = mIn.cols; // width
int h = mIn.rows; // height
int nc = mIn.channels(); // number of channels
cout << "image: " << w << " x " << h << endl;
cv::Mat mOut(h,w,mIn.type()); // mOut will have the same number of channels as the input image, nc layers
//cv::Mat mOut(h,w,CV_32FC3); // mOut will be a color image, 3 layers
//cv::Mat mOut(h,w,CV_32FC1); // mOut will be a grayscale image, 1 layer
// ### Define your own output images here as needed
// allocate raw input image array
float *imgIn = new float[(size_t)w*h*nc];
// allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying)
float *imgOut = new float[(size_t)w*h*mOut.channels()];
// For camera mode: Make a loop to read in camera frames
#ifdef CAMERA
// Read a camera image frame every 30 milliseconds:
// cv::waitKey(30) waits 30 milliseconds for a keyboard input,
// returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed
while (cv::waitKey(30) < 0)
{
// Get camera image
camera >> mIn;
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
#endif
convert_mat_to_layered (imgIn, mIn);
// CONVOLUTION ON GPU......START
int radius = ceil(3*sigma);
int total_ele_filter = (int)pow(2*radius + 1, 2);
float* kernel = new float[total_ele_filter];
gaussian_kernel(kernel, sigma, radius);
float *kernel_cuda, *imgIn_cuda, *imgOut_cuda;
hipMalloc((void**)&kernel_cuda, total_ele_filter*sizeof(float));
hipMemcpy(kernel_cuda, kernel, total_ele_filter*sizeof(float), hipMemcpyHostToDevice);
hipMalloc((void**)&imgIn_cuda , h*w*nc*sizeof(float));
hipMalloc((void**)&imgOut_cuda , h*w*nc*sizeof(float));
hipMemcpy(imgIn_cuda, imgIn , h*w*nc*sizeof(float) , hipMemcpyHostToDevice);
// the convolution kernel accumulates into imgOut_cuda with +=, so zero it on the device first
hipMemset(imgOut_cuda, 0, h*w*nc*sizeof(float));
dim3 block = dim3(32,32,1);
int grid_x = ((w + block.x - 1)/block.x);
int grid_y = ((h + block.y - 1)/block.y);
int grid_z = ((nc + block.z - 1)/block.z);
dim3 grid = dim3(grid_x, grid_y, grid_z );
hipLaunchKernelGGL(( cuda_convolution) , dim3(grid), dim3(block), 0, 0, imgIn_cuda, imgOut_cuda, kernel_cuda, w, h, nc, radius);
//hipMemcpy(imgOut, imgOut_cuda , w*h*nc*sizeof(float) , hipMemcpyDeviceToHost);
// CONVOLUTION ON GPU......END
// GRADIENT CALCULATION START
int array_size = w*h*nc;
float* cuda_v1;
float* cuda_v2;
hipMalloc((void**) &cuda_v1, array_size*sizeof(float));
hipMalloc((void**) &cuda_v2, array_size*sizeof(float));
hipMemcpy(cuda_v1, imgOut, array_size*sizeof(float) , hipMemcpyHostToDevice);
hipMemcpy(cuda_v2, imgOut , array_size*sizeof(float) , hipMemcpyHostToDevice);
hipLaunchKernelGGL(( gradient_rand) , dim3(grid), dim3(block), 0, 0, imgOut_cuda, cuda_v1, cuda_v2, w, h, nc );
//hipMemcpy(imgOut, cuda_v1 , w*h*nc*sizeof(float) , hipMemcpyDeviceToHost);
// GRADIENT CALCULATION END
float *M_out_cuda;
hipMalloc((void**)&M_out_cuda , h*w*nc*sizeof(float));
hipMemset(M_out_cuda, 0, h*w*nc*sizeof(float)); // M_calculation accumulates with +=, so M must start at zero
hipLaunchKernelGGL(( M_calculation) , dim3(grid), dim3(block) , 0, 0, M_out_cuda, cuda_v1, cuda_v2, w, h, nc );
//hipMemcpy(imgOut, M_out_cuda , w*h*nc*sizeof(float) , hipMemcpyDeviceToHost);
float *T_out_cuda;
hipMalloc((void**)&T_out_cuda , h*w*nc*sizeof(float));
hipMemset(T_out_cuda, 0, h*w*nc*sizeof(float)); // the smoothing convolution accumulates with +=, so T must start at zero
hipLaunchKernelGGL(( cuda_convolution) , dim3(grid), dim3(block) , 0, 0, M_out_cuda, T_out_cuda, kernel_cuda, w, h, nc, radius );
hipMemcpy(imgOut, T_out_cuda , w*h*nc*sizeof(float) , hipMemcpyDeviceToHost);
if (nc == 1){
cv::Mat m11(h,w,CV_32FC1);
convert_layered_to_mat(m11 , imgOut);
showImage("m11", 10*m11, 100+w, 100);
}
else if (nc ==3){
float *m11_flat = new float[(size_t)w*h*1];
float *m12_flat = new float[(size_t)w*h*1];
float *m22_flat = new float[(size_t)w*h*1];
for (int j=0 ; j<h; j++){
for (int i=0 ; i<w; i++){
m11_flat[i + w*j] = imgOut[i + w*j + 0*w*h];
m12_flat[i + w*j] = imgOut[i + w*j + 1*w*h];
m22_flat[i + w*j] = imgOut[i + w*j + 2*w*h];
}
}
cv::Mat m11(h,w,CV_32FC1);
cv::Mat m12(h,w,CV_32FC1);
cv::Mat m22(h,w,CV_32FC1);
convert_layered_to_mat(m11 , m11_flat);
convert_layered_to_mat(m12 , m12_flat);
convert_layered_to_mat(m22 , m22_flat);
showImage("m11", 10*m11, 100+w, 100);
showImage("m12", 10*m12, 100, 100+h);
showImage("m22", 10*m22, 100+w, 100+h);
delete[] m11_flat;
delete[] m12_flat;
delete[] m22_flat;
}
showImage("Input", mIn, 100, 100);
#ifdef CAMERA
// end of camera loop
}
#else
// wait for key inputs
cv::waitKey(0);
#endif
// save input and result
cv::imwrite("image_input.png",mIn*255.f); // "imwrite" assumes channel range [0,255]
cv::imwrite("image_result.png",mOut*255.f);
// free allocated arrays
delete[] imgIn;
delete[] imgOut;
delete[] kernel;
hipFree(imgOut_cuda);
hipFree(imgIn_cuda);
hipFree(kernel_cuda);
hipFree(cuda_v1);
hipFree(cuda_v2);
hipFree(T_out_cuda);
hipFree(M_out_cuda);
// close all opencv windows
cvDestroyAllWindows();
return 0;
}
| 757285872a678f5862e085ca207c8c1da29d84b6.cu | // ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2017, September 11 - October 9
// ###
#include "helper.h"
#include <iostream>
#include <math.h>
using namespace std;
// uncomment to use the camera
//#define CAMERA
__device__ void eigenvalues (float m11,float m12,float m22, float* lambda){
float T = m11 + m22;
float D = m11*m22 - m12*m12;
lambda[1] = (float)T/2.f + sqrt(pow((float)T,2.f)/4.f - (float)D);
lambda[2] = (float)T/2.f - sqrt(pow((float)T,2.f)/4.f - (float)D);
}
__global__ void T_eigenvalues(float* imgIn_cuda, float* imgOut_cuda,float* T_out_cuda, int w, int h, int nc, float alpha, float beta){
int t_numx = threadIdx.x + blockIdx.x*blockDim.x;
int t_numy = threadIdx.y + blockIdx.y*blockDim.y;
int t_numz = 0;
float lambda[3]; // indices 1 and 2 are written by eigenvalues(), so the array needs three entries
float m11, m12, m22;
if (t_numx < w && t_numy < h && t_numz < nc){
m11 = T_out_cuda[t_numx + w*t_numy + w*h*0];
m12 = T_out_cuda[t_numx + w*t_numy + w*h*1];
m22 = T_out_cuda[t_numx + w*t_numy + w*h*2];
eigenvalues(m11, m12, m22, lambda);
if (lambda[2] >= lambda[1] && lambda[1] >= alpha){
imgOut_cuda[t_numx + w*t_numy + w*h*0] = 255;
imgOut_cuda[t_numx + w*t_numy + w*h*1] = 0;
imgOut_cuda[t_numx + w*t_numy + w*h*2] = 0;
}
else if (lambda[1] >= alpha && alpha >=beta && beta >= lambda[2]){
imgOut_cuda[t_numx + w*t_numy + w*h*0] = 255;
imgOut_cuda[t_numx + w*t_numy + w*h*1] = 255;
imgOut_cuda[t_numx + w*t_numy + w*h*2] = 0;
}
else{
imgOut_cuda[t_numx + w*t_numy + w*h*0] = 0.5*imgIn_cuda[t_numx + w*t_numy + w*h*0];
imgOut_cuda[t_numx + w*t_numy + w*h*1] = 0.5*imgIn_cuda[t_numx + w*t_numy + w*h*1];
imgOut_cuda[t_numx + w*t_numy + w*h*2] = 0.5*imgIn_cuda[t_numx + w*t_numy + w*h*2];
}
}
}
__global__ void gradient_fd(float* imgOut_cuda,float* cuda_v1,float* cuda_v2,int w,int h,int nc){
int t_numx = threadIdx.x + blockIdx.x*blockDim.x;
int t_numy = threadIdx.y + blockIdx.y*blockDim.y;
int t_numz = threadIdx.z + blockIdx.z*blockDim.z;
if (t_numx + 1 < w && t_numy < h && t_numz < nc){
cuda_v1[t_numx + w*t_numy + w*h*t_numz] = imgOut_cuda[t_numx + 1 + w*t_numy + w*h*t_numz] - imgOut_cuda[t_numx + w*t_numy + w*h*t_numz];
}
if (t_numx < w && t_numy + 1< h && t_numz < nc){
cuda_v2[t_numx + w*t_numy + w*h*t_numz] = imgOut_cuda[t_numx + w*(t_numy+1) + w*h*t_numz] - imgOut_cuda[t_numx + w*t_numy + w*h*t_numz];
}
}
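// Rotationally robust derivative filters (weights 3/10/3, normalized by 1/32) with replicate
// padding at the image border; cuda_v1 holds the x-derivative, cuda_v2 the y-derivative.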
__global__ void gradient_rand(float* imgOut_cuda,float* cuda_v1,float* cuda_v2,int w,int h,int nc){
int t_numx = threadIdx.x + blockIdx.x*blockDim.x;
int t_numy = threadIdx.y + blockIdx.y*blockDim.y;
int t_numz = threadIdx.z + blockIdx.z*blockDim.z;
if (t_numx < w && t_numy < h && t_numz < nc){
int x_neg = t_numx-1;
int x_pos = t_numx+1;
int y_neg = t_numy-1;
int y_pos = t_numy+1;
if (x_neg<0) x_neg = 0;
if (x_pos>=w) x_pos = w-1;
if (y_neg<0) y_neg = 0;
if (y_pos>=h) y_pos = h-1;
cuda_v1[t_numx + w*t_numy + w*h*t_numz] = 1.f/32.f*(3*imgOut_cuda[x_pos + w*y_pos + w*h*t_numz]
+ 10*imgOut_cuda[x_pos + w*t_numy + w*h*t_numz]
+ 3*imgOut_cuda[x_pos + w*y_neg + w*h*t_numz]
- 3*imgOut_cuda[x_neg + w*y_pos + w*h*t_numz]
- 10*imgOut_cuda[x_neg + w*t_numy + w*h*t_numz]
- 3*imgOut_cuda[x_neg + w*y_neg + w*h*t_numz]) ;
cuda_v2[t_numx + w*t_numy + w*h*t_numz] = 1.f/32.f*( 3*imgOut_cuda[x_pos + w*y_pos + w*h*t_numz]
+ 10*imgOut_cuda[t_numx + w*y_pos + w*h*t_numz]
+ 3*imgOut_cuda[x_neg + w*y_pos + w*h*t_numz]
- 3*imgOut_cuda[x_pos + w*y_neg + w*h*t_numz]
- 10*imgOut_cuda[t_numx + w*y_neg + w*h*t_numz]
- 3*imgOut_cuda[x_neg + w*y_neg + w*h*t_numz]) ;
}
}
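// Build a (2*radius+1) x (2*radius+1) Gaussian kernel with standard deviation sigma,
// normalized so its entries sum to 1.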
__host__ void gaussian_kernel(float *kernel, float sigma, int radius){
float sum = 0 ;
for(int j = -radius; j<=radius ; j++){
for (int i=-radius; i<=radius; i++){
int index = i+radius + (2*radius+1)*(j+radius);
kernel[index] = 0.5/3.14159/pow(sigma,2.0)*pow(2.71828,-(pow(i,2) + pow(j,2))/2/(pow(sigma,2)));
sum = sum + kernel[index];
}
}
for (int i=0; i<(2*radius + 1)*(2*radius + 1); i++){
kernel[i] = kernel[i] / sum;
}
}
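// Convolution with replicate (clamped) border handling; results are accumulated into
// imgOut_cuda with +=, so the output buffer must be zero-initialized before launch.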
__global__ void cuda_convolution(float *imgIn_cuda, float* imgOut_cuda, float* kernel_cuda, int w, int h, int nc, int radius){
int t_numx = threadIdx.x + blockIdx.x*blockDim.x;
int t_numy = threadIdx.y + blockIdx.y*blockDim.y;
int t_numz = threadIdx.z + blockIdx.z*blockDim.z;
if (t_numx < w && t_numy < h && t_numz < nc){
int index = t_numx + w*t_numy + w*h*t_numz;
//imgOut_cuda[index] = 0;
for (int p = -radius; p <= radius; p++) {
for (int q =-radius; q <= radius; q++) {
int temp_j = t_numy + p;
int temp_i = t_numx + q;
if (temp_i<0) temp_i = 0;
if (temp_i>=w) temp_i = w-1;
if (temp_j<0) temp_j = 0;
if (temp_j>=h) temp_j = h-1;
int image_index = temp_i + temp_j*w + t_numz*h*w;
int kernel_index = q+radius + (2*radius+1)*(p+radius);
imgOut_cuda[index] += imgIn_cuda[image_index] * kernel_cuda[kernel_index];
}
}
}
}
__global__ void M_calculation(float *M_out_cuda, float* cuda_v1, float* cuda_v2, int w, int h, int nc){
int t_numx = threadIdx.x + blockIdx.x*blockDim.x;
int t_numy = threadIdx.y + blockIdx.y*blockDim.y;
int t_numz = threadIdx.z + blockIdx.z*blockDim.z;
int index = t_numx + w*t_numy + w*h*t_numz;
if (t_numx < w && t_numy < h && t_numz < nc){
M_out_cuda[t_numx + w*t_numy + w*h*t_numz] += cuda_v1[index]*cuda_v1[index];
M_out_cuda[t_numx + w*t_numy + w*h*t_numz] += cuda_v1[index]*cuda_v2[index];
M_out_cuda[t_numx + w*t_numy + w*h*t_numz] += cuda_v2[index]*cuda_v2[index];
}
}
int main(int argc, char **argv)
{
cudaDeviceSynchronize(); CUDA_CHECK;
#ifdef CAMERA
#else
// input image
string image = "";
float sigma = 1;
bool ret = getParam("i", image, argc, argv);
if (!ret) cerr << "ERROR: no image specified" << endl;
bool ret2 = getParam("sigma", sigma, argc, argv);
if (!ret2) cerr << "ERROR: no sigma specified" << endl;
if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> -sigma <sigma> [-repeats <repeats>] [-gray]" << endl; return 1; }
#endif
// number of computation repetitions to get a better run time measurement
int repeats = 1;
getParam("repeats", repeats, argc, argv);
cout << "repeats: " << repeats << endl;
// load the input image as grayscale if "-gray" is specifed
bool gray = false;
getParam("gray", gray, argc, argv);
cout << "gray: " << gray << endl;
// ### Define your own parameters here as needed
// Init camera / Load input image
#ifdef CAMERA
// Init camera
cv::VideoCapture camera(0);
if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; }
int camW = 640;
int camH = 480;
camera.set(CV_CAP_PROP_FRAME_WIDTH,camW);
camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH);
// read in first frame to get the dimensions
cv::Mat mIn;
camera >> mIn;
#else
// Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale))
cv::Mat mIn = cv::imread(image.c_str(), (gray? CV_LOAD_IMAGE_GRAYSCALE : -1));
// check
if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; }
#endif
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
// get image dimensions
int w = mIn.cols; // width
int h = mIn.rows; // height
int nc = mIn.channels(); // number of channels
cout << "image: " << w << " x " << h << endl;
cv::Mat mOut(h,w,mIn.type()); // mOut will have the same number of channels as the input image, nc layers
//cv::Mat mOut(h,w,CV_32FC3); // mOut will be a color image, 3 layers
//cv::Mat mOut(h,w,CV_32FC1); // mOut will be a grayscale image, 1 layer
// ### Define your own output images here as needed
// allocate raw input image array
float *imgIn = new float[(size_t)w*h*nc];
// allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying)
float *imgOut = new float[(size_t)w*h*mOut.channels()];
// For camera mode: Make a loop to read in camera frames
#ifdef CAMERA
// Read a camera image frame every 30 milliseconds:
// cv::waitKey(30) waits 30 milliseconds for a keyboard input,
// returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed
while (cv::waitKey(30) < 0)
{
// Get camera image
camera >> mIn;
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
#endif
convert_mat_to_layered (imgIn, mIn);
// CONVOLUTION ON GPU......START
int radius = ceil(3*sigma);
int total_ele_filter = (int)pow(2*radius + 1, 2);
float* kernel = new float[total_ele_filter];
gaussian_kernel(kernel, sigma, radius);
float *kernel_cuda, *imgIn_cuda, *imgOut_cuda;
cudaMalloc((void**)&kernel_cuda, total_ele_filter*sizeof(float));
cudaMemcpy(kernel_cuda, kernel, total_ele_filter*sizeof(float), cudaMemcpyHostToDevice);
cudaMalloc((void**)&imgIn_cuda , h*w*nc*sizeof(float));
cudaMalloc((void**)&imgOut_cuda , h*w*nc*sizeof(float));
cudaMemset(imgOut_cuda, 0, h*w*nc*sizeof(float)); // zero the device output buffer before the convolution
cudaMemcpy(imgIn_cuda, imgIn , h*w*nc*sizeof(float) , cudaMemcpyHostToDevice);
dim3 block = dim3(32,32,1);
int grid_x = ((w + block.x - 1)/block.x);
int grid_y = ((h + block.y - 1)/block.y);
int grid_z = ((nc + block.z - 1)/block.z);
dim3 grid = dim3(grid_x, grid_y, grid_z );
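// Illustration of the ceil-division launch math above: a 640x480 RGB input with a
// 32x32x1 block gives grid = (20, 15, 3), i.e. one thread per pixel and channel.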
cuda_convolution <<<grid, block>>> (imgIn_cuda, imgOut_cuda, kernel_cuda, w, h, nc, radius);
//cudaMemcpy(imgOut, imgOut_cuda , w*h*nc*sizeof(float) , cudaMemcpyDeviceToHost);
// CONVOLUTION ON GPU......END
// GRADIENT CALCULATION START
int array_size = w*h*nc;
float* cuda_v1;
float* cuda_v2;
cudaMalloc((void**) &cuda_v1, array_size*sizeof(float));
cudaMalloc((void**) &cuda_v2, array_size*sizeof(float));
cudaMemcpy(cuda_v1, imgOut, array_size*sizeof(float) , cudaMemcpyHostToDevice);
cudaMemcpy(cuda_v2, imgOut , array_size*sizeof(float) , cudaMemcpyHostToDevice);
gradient_rand <<<grid, block>>>(imgOut_cuda, cuda_v1, cuda_v2, w, h, nc );
//cudaMemcpy(imgOut, cuda_v1 , w*h*nc*sizeof(float) , cudaMemcpyDeviceToHost);
// GRADIENT CALCULATION END
float *M_out_cuda;
cudaMalloc((void**)&M_out_cuda , h*w*nc*sizeof(float));
cudaMemcpy(M_out_cuda, imgOut , h*w*nc*sizeof(float) , cudaMemcpyHostToDevice);
M_calculation <<< grid, block >>> (M_out_cuda, cuda_v1, cuda_v2, w, h, nc );
//cudaMemcpy(imgOut, M_out_cuda , w*h*nc*sizeof(float) , cudaMemcpyDeviceToHost);
float *T_out_cuda;
cudaMalloc((void**)&T_out_cuda , h*w*nc*sizeof(float));
cudaMemcpy(T_out_cuda, imgOut , h*w*nc*sizeof(float) , cudaMemcpyHostToDevice);
cuda_convolution <<< grid, block >>> (M_out_cuda, T_out_cuda, kernel_cuda, w, h, nc, radius );
cudaMemcpy(imgOut, T_out_cuda , w*h*nc*sizeof(float) , cudaMemcpyDeviceToHost);
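// From here on imgOut is read as the smoothed structure tensor T = G_sigma * (grad(u) grad(u)^T):
// judging by the variable names below, channel 0 holds m11, channel 1 holds m12 and channel 2
// holds m22 (gradient_rand() and M_calculation() are defined elsewhere in this file, so this
// reading is an assumption).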
if (nc == 1){
cv::Mat m11(h,w,CV_32FC1);
convert_layered_to_mat(m11 , imgOut);
showImage("m11", 10*m11, 100+w, 100);
}
else if (nc ==3){
float *m11_flat = new float[(size_t)w*h*1];
float *m12_flat = new float[(size_t)w*h*1];
float *m22_flat = new float[(size_t)w*h*1];
for (int j=0 ; j<h; j++){
for (int i=0 ; i<w; i++){
m11_flat[i + w*j] = imgOut[i + w*j + 0*w*h];
m12_flat[i + w*j] = imgOut[i + w*j + 1*w*h];
m22_flat[i + w*j] = imgOut[i + w*j + 2*w*h];
}
}
cv::Mat m11(h,w,CV_32FC1);
cv::Mat m12(h,w,CV_32FC1);
cv::Mat m22(h,w,CV_32FC1);
convert_layered_to_mat(m11 , m11_flat);
convert_layered_to_mat(m12 , m12_flat);
convert_layered_to_mat(m22 , m22_flat);
showImage("m11", 10*m11, 100+w, 100);
showImage("m12", 10*m12, 100, 100+h);
showImage("m22", 10*m22, 100+w, 100+h);
delete[] m11_flat;
delete[] m12_flat;
delete[] m22_flat;
}
showImage("Input", mIn, 100, 100);
#ifdef CAMERA
// end of camera loop
}
#else
// wait for key inputs
cv::waitKey(0);
#endif
// save input and result
cv::imwrite("image_input.png",mIn*255.f); // "imwrite" assumes channel range [0,255]
cv::imwrite("image_result.png",mOut*255.f);
// free allocated arrays
delete[] imgIn;
delete[] imgOut;
delete[] kernel;
cudaFree(imgOut_cuda);
cudaFree(imgIn_cuda);
cudaFree(kernel_cuda);
cudaFree(cuda_v1);
cudaFree(cuda_v2);
cudaFree(T_out_cuda);
cudaFree(M_out_cuda);
// close all opencv windows
cvDestroyAllWindows();
return 0;
}
|
ea4d07ce1cda9f7d15dda23f5e8de9e9242b9662.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//===================================
// Class that manages input/output data
// GPU control
//===================================
#include "stdafx.h"
#include "IODataLayerGPU_base.cuh"
#include<vector>
#include<list>
#include<algorithm>
// For UUID-related utilities
#include<boost/uuid/uuid_generators.hpp>
#define BLOCK_SIZE (16)
using namespace Gravisbell;
namespace
{
/** Accumulate per-element error statistics (max, absolute, squared, cross-entropy). */
__global__ void cuda_func_calculateError(const F32* i_lpOutputBuffer, const F32* i_lpTeachBuffer, F32* o_lpErrorMax, F32* o_lpErrorAve, F32* o_lpErrorAve2, F32* o_lpErrorCrossEntropy, U32 i_bachNum, U32 i_bufferSize)
{
const U32 inputNum = blockIdx.x * BLOCK_SIZE + threadIdx.x;
if(inputNum >= i_bufferSize) // this branch diverges, but only in the trailing warp, so it should not hurt performance...
return;
const U32 bufferPos = i_bachNum * i_bufferSize + inputNum;
F32 teach = i_lpTeachBuffer[bufferPos];
F32 output = i_lpOutputBuffer[bufferPos];
F32 error = (teach - output);
F32 error_abs = abs(error);
F32 crossEntropy = -(F32)(
teach * log(max(0.0001, output)) +
(1 - teach) * log(max(0.0001,1-output))
);
// store the errors
o_lpErrorMax[inputNum] = max(o_lpErrorMax[inputNum], error_abs);
o_lpErrorAve[inputNum] += error_abs;
o_lpErrorAve2[inputNum] += error_abs * error_abs;
o_lpErrorCrossEntropy[inputNum] += crossEntropy;
}
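// How the accumulators above are consumed (see GetCalculateErrorValue() further down), roughly:
//   ave  = sum(|err|) / (calcErrorCount * GetBufferCount())
//   ave2 = sqrt(sum(err^2) / (calcErrorCount * GetBufferCount())), i.e. an RMS-style error,
// and the accumulated cross-entropy is averaged the same way as ave.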
}
namespace Gravisbell {
namespace Layer {
namespace IOData {
/** Constructor */
IODataLayerGPU_base::IODataLayerGPU_base(Gravisbell::GUID guid, Gravisbell::IODataStruct ioDataStruct)
: guid (guid)
, ioDataStruct (ioDataStruct)
, lpBatchDataNoList (NULL)
, calcErrorCount (0)
{
hipblasCreate(&cublasHandle);
}
/** Destructor */
IODataLayerGPU_base::~IODataLayerGPU_base()
{
hipblasDestroy(cublasHandle);
}
//===========================
// Initialization
//===========================
/** Initialize. Initializes each neuron value randomly.
@return 0 on success */
ErrorCode IODataLayerGPU_base::Initialize(void)
{
return ErrorCode::ERROR_CODE_NONE;
}
//==============================
// Common layer interface
//==============================
/** Get the layer kind */
U32 IODataLayerGPU_base::GetLayerKind()const
{
return ELayerKind::LAYER_KIND_GPU | ELayerKind::LAYER_KIND_SINGLE_INPUT | ELayerKind::LAYER_KIND_SINGLE_OUTPUT | ELayerKind::LAYER_KIND_DATA;
}
/** Get the GUID unique to this layer */
Gravisbell::GUID IODataLayerGPU_base::GetGUID(void)const
{
return this->guid;
}
/** Get the layer type identification code.
@param o_layerCode destination buffer
@return 0 on success */
Gravisbell::GUID IODataLayerGPU_base::GetLayerCode(void)const
{
Gravisbell::GUID layerCode;
Gravisbell::Layer::IOData::GetLayerCode(layerCode);
return layerCode;
}
/** Get the layer configuration */
const SettingData::Standard::IData* IODataLayerGPU_base::GetLayerStructure()const
{
return NULL;
}
//==============================
// Data management
//==============================
/** Get the data structure information */
IODataStruct IODataLayerGPU_base::GetDataStruct()const
{
return this->ioDataStruct;
}
/** Get the data buffer size.
@return data buffer size: the number of F32 array elements used. */
U32 IODataLayerGPU_base::GetBufferCount()const
{
return this->ioDataStruct.GetDataCount();
}
//==============================
// Common layer interface
//==============================
/** Run pre-processing for computation (learning).
@param batchSize number of samples processed at the same time.
Must be called exactly once after building the NN and before any computation; it does not need to be called per data item.
If it fails, PreProcessLearnLoop and later calls cannot be executed. */
Gravisbell::ErrorCode IODataLayerGPU_base::PreProcessLearn(U32 batchSize)
{
// run the pre-processing used for normal computation
ErrorCode err = PreProcessCalculate(batchSize);
if(err != ErrorCode::ERROR_CODE_NONE)
return err;
// initialize the error-delta data array
this->lpDInputBuffer.resize(batchSize * this->GetBufferCount());
return Gravisbell::ErrorCode::ERROR_CODE_NONE;
}
/** Run pre-processing for computation (inference).
@param batchSize number of samples processed at the same time.
Must be called exactly once after building the NN and before any computation; it does not need to be called per data item.
If it fails, Calculate and later calls cannot be executed. */
Gravisbell::ErrorCode IODataLayerGPU_base::PreProcessCalculate(U32 batchSize)
{
// store the batch size
this->batchSize = batchSize;
// allocate the buffer and initialize the batch data array
this->lpOutputBuffer.resize(batchSize * this->GetBufferCount());
// initialize the buffers used for error calculation
this->lpErrorValue_max.resize(this->GetBufferCount());
this->lpErrorValue_ave.resize(this->GetBufferCount());
this->lpErrorValue_ave2.resize(this->GetBufferCount());
this->lpErrorValue_crossEntropy.resize(this->GetBufferCount());
return Gravisbell::ErrorCode::ERROR_CODE_NONE;
}
/** Loop initialization. Run before starting a pass over the data set.
If it fails, Calculate and later calls cannot be executed. */
ErrorCode IODataLayerGPU_base::PreProcessLoop()
{
this->calcErrorCount = 0;
hipMemset(thrust::raw_pointer_cast(&this->lpErrorValue_max[0]), 0, sizeof(F32)*this->lpErrorValue_max.size());
hipMemset(thrust::raw_pointer_cast(&this->lpErrorValue_ave[0]), 0, sizeof(F32)*this->lpErrorValue_max.size());
hipMemset(thrust::raw_pointer_cast(&this->lpErrorValue_ave2[0]), 0, sizeof(F32)*this->lpErrorValue_max.size());
hipMemset(thrust::raw_pointer_cast(&this->lpErrorValue_crossEntropy[0]), 0, sizeof(F32)*this->lpErrorValue_max.size());
return Gravisbell::ErrorCode::ERROR_CODE_NONE;
}
/** Get the batch size.
@return number of samples processed at the same time */
U32 IODataLayerGPU_base::GetBatchSize()const
{
return this->batchSize;
}
//==============================
// Input interface
//==============================
/** Calculate the learning error.
@param i_lppInputBuffer input data buffer. Must hold [GetBatchSize()][GetInputBufferCount()] elements */
Gravisbell::ErrorCode IODataLayerGPU_base::CalculateLearnError(Gravisbell::CONST_BATCH_BUFFER_POINTER i_lppInputBuffer)
{
U32 inputBufferCount = this->GetInputBufferCount();
if(this->lpDInputBuffer.size())
{
// copy the data
this->lpDInputBuffer = this->lpOutputBuffer;
// calculate the data error
{
float alpha = -1.0f;
// y = alpha * x + y;
hipblasSaxpy(
this->cublasHandle,
inputBufferCount * this->batchSize,
&alpha,
i_lppInputBuffer,
1,
thrust::raw_pointer_cast(&this->lpDInputBuffer[0]),
1);
}
}
for(U32 batchNum=0; batchNum<this->batchSize; batchNum++)
{
U32 bufferCount = this->GetBufferCount();
dim3 grid((bufferCount +(BLOCK_SIZE - 1))/BLOCK_SIZE , 1, 1);
dim3 block(BLOCK_SIZE, 1, 1);
hipLaunchKernelGGL(( cuda_func_calculateError), dim3(grid), dim3(block), 0, 0,
i_lppInputBuffer,
thrust::raw_pointer_cast(&this->lpOutputBuffer[0]),
thrust::raw_pointer_cast(&this->lpErrorValue_max[0]),
thrust::raw_pointer_cast(&this->lpErrorValue_ave[0]),
thrust::raw_pointer_cast(&this->lpErrorValue_ave2[0]),
thrust::raw_pointer_cast(&this->lpErrorValue_crossEntropy[0]),
batchNum,
this->GetBufferCount());
this->calcErrorCount++;
}
return Gravisbell::ErrorCode::ERROR_CODE_NONE;
}
/** Get the error values.
Does not work correctly unless CalculateLearnError() has been called at least once.
@param o_min minimum error.
@param o_max maximum error.
@param o_ave mean error.
@param o_ave2 mean squared error. */
ErrorCode IODataLayerGPU_base::GetCalculateErrorValue(F32& o_max, F32& o_ave, F32& o_ave2, F32& o_crossEntropy)
{
o_max = 0.0f;
o_ave = 0.0f;
o_ave2 = 0.0f;
o_crossEntropy = 0.0f;
for(U32 inputNum=0; inputNum<this->GetBufferCount(); inputNum++)
{
F32 errorValue_max = this->lpErrorValue_max[inputNum];
F32 errorValue_ave = this->lpErrorValue_ave[inputNum];
F32 errorValue_ave2 = this->lpErrorValue_ave2[inputNum];
F32 errorValue_crossEntropy = this->lpErrorValue_crossEntropy[inputNum];
o_max = max(o_max, errorValue_max);
o_ave += errorValue_ave;
o_ave2 += errorValue_ave2;
o_crossEntropy += errorValue_crossEntropy;
}
o_ave = o_ave / this->calcErrorCount / this->GetBufferCount();
o_ave2 = (F32)sqrt(o_ave2 / this->calcErrorCount / this->GetBufferCount());
o_crossEntropy = o_crossEntropy / this->calcErrorCount / this->GetBufferCount();
return ErrorCode::ERROR_CODE_NONE;
}
/** Get detailed error values.
Errors are collected per input/output element.
Does not work correctly unless CalculateLearnError() has been called at least once.
Each array must have at least [GetBufferCount()] elements.
@param o_lpMin minimum errors.
@param o_lpMax maximum errors.
@param o_lpAve mean errors.
@param o_lpAve2 mean squared errors. */
ErrorCode IODataLayerGPU_base::GetCalculateErrorValueDetail(F32 o_lpMax[], F32 o_lpAve[], F32 o_lpAve2[])
{
for(U32 inputNum=0; inputNum<this->GetBufferCount(); inputNum++)
{
o_lpMax[inputNum] = this->lpErrorValue_max[inputNum];
o_lpAve[inputNum] += this->lpErrorValue_ave[inputNum] / this->GetDataCount();
o_lpAve2[inputNum] += (F32)sqrt(this->lpErrorValue_ave2[inputNum] / this->GetDataCount());
}
return ErrorCode::ERROR_CODE_NONE;
}
/** Get the input data structure.
@return input data structure */
IODataStruct IODataLayerGPU_base::GetInputDataStruct()const
{
return this->GetDataStruct();
}
/** Get the number of input buffer elements. Note: a count of data elements, not bytes */
U32 IODataLayerGPU_base::GetInputBufferCount()const
{
return this->GetBufferCount();
}
/** Get the learning delta.
The number of array elements equals the return value of GetInputBufferCount.
@return pointer to the head of the error-delta array */
CONST_BATCH_BUFFER_POINTER IODataLayerGPU_base::GetDInputBuffer()const
{
return thrust::raw_pointer_cast(&this->lpDInputBuffer[0]);
}
/** Get the learning delta.
@param lpDOutputBuffer array that receives the learning delta. Must hold [GetBatchSize()][GetInputBufferCount()] elements */
Gravisbell::ErrorCode IODataLayerGPU_base::GetDInputBuffer(BATCH_BUFFER_POINTER o_lpDInputBuffer)const
{
if(o_lpDInputBuffer == NULL)
return ErrorCode::ERROR_CODE_COMMON_NULL_REFERENCE;
const U32 batchSize = this->GetBatchSize();
const U32 inputBufferCount = this->GetOutputBufferCount();
hipMemcpy(o_lpDInputBuffer, this->GetDInputBuffer(), sizeof(F32)*batchSize*inputBufferCount, hipMemcpyDeviceToHost);
return Gravisbell::ErrorCode::ERROR_CODE_NONE;
}
//==============================
// Output interface
//==============================
/** Get the output data structure */
IODataStruct IODataLayerGPU_base::GetOutputDataStruct()const
{
return this->GetDataStruct();
}
/** Get the number of output buffer elements. Note: a count of data elements, not bytes */
U32 IODataLayerGPU_base::GetOutputBufferCount()const
{
return this->GetBufferCount();
}
/** Get the output data buffer.
The number of array elements equals the return value of GetOutputBufferCount.
@return pointer to the head of the output data array */
CONST_BATCH_BUFFER_POINTER IODataLayerGPU_base::GetOutputBuffer()const
{
return thrust::raw_pointer_cast(&this->lpOutputBuffer[0]);
}
/** Get the output data buffer.
@param o_lpOutputBuffer destination array for the output data. Must hold [GetBatchSize()][GetOutputBufferCount()] elements
@return 0 on success */
Gravisbell::ErrorCode IODataLayerGPU_base::GetOutputBuffer(BATCH_BUFFER_POINTER o_lpOutputBuffer)const
{
if(o_lpOutputBuffer == NULL)
return ErrorCode::ERROR_CODE_COMMON_NULL_REFERENCE;
const U32 batchSize = this->GetBatchSize();
const U32 outputBufferCount = this->GetOutputBufferCount();
hipMemcpy(o_lpOutputBuffer, this->GetOutputBuffer(), sizeof(F32)*batchSize*outputBufferCount, hipMemcpyDeviceToHost);
return Gravisbell::ErrorCode::ERROR_CODE_NONE;
}
} // IOData
} // Layer
} // Gravisbell
| ea4d07ce1cda9f7d15dda23f5e8de9e9242b9662.cu | //===================================
// Class that manages input/output data
// GPU control
//===================================
#include "stdafx.h"
#include "IODataLayerGPU_base.cuh"
#include<vector>
#include<list>
#include<algorithm>
// For UUID-related utilities
#include<boost/uuid/uuid_generators.hpp>
#define BLOCK_SIZE (16)
using namespace Gravisbell;
namespace
{
/** Accumulate per-element error statistics (max, absolute, squared, cross-entropy). */
__global__ void cuda_func_calculateError(const F32* i_lpOutputBuffer, const F32* i_lpTeachBuffer, F32* o_lpErrorMax, F32* o_lpErrorAve, F32* o_lpErrorAve2, F32* o_lpErrorCrossEntropy, U32 i_bachNum, U32 i_bufferSize)
{
const U32 inputNum = blockIdx.x * BLOCK_SIZE + threadIdx.x;
if(inputNum >= i_bufferSize) // this branch diverges, but only in the trailing warp, so it should not hurt performance...
return;
const U32 bufferPos = i_bachNum * i_bufferSize + inputNum;
F32 teach = i_lpTeachBuffer[bufferPos];
F32 output = i_lpOutputBuffer[bufferPos];
F32 error = (teach - output);
F32 error_abs = abs(error);
F32 crossEntropy = -(F32)(
teach * log(max(0.0001, output)) +
(1 - teach) * log(max(0.0001,1-output))
);
// store the errors
o_lpErrorMax[inputNum] = max(o_lpErrorMax[inputNum], error_abs);
o_lpErrorAve[inputNum] += error_abs;
o_lpErrorAve2[inputNum] += error_abs * error_abs;
o_lpErrorCrossEntropy[inputNum] += crossEntropy;
}
}
namespace Gravisbell {
namespace Layer {
namespace IOData {
/** Constructor */
IODataLayerGPU_base::IODataLayerGPU_base(Gravisbell::GUID guid, Gravisbell::IODataStruct ioDataStruct)
: guid (guid)
, ioDataStruct (ioDataStruct)
, lpBatchDataNoList (NULL)
, calcErrorCount (0)
{
cublasCreate(&cublasHandle);
}
/** Destructor */
IODataLayerGPU_base::~IODataLayerGPU_base()
{
cublasDestroy(cublasHandle);
}
//===========================
// Initialization
//===========================
/** Initialize. Initializes each neuron value randomly.
@return 0 on success */
ErrorCode IODataLayerGPU_base::Initialize(void)
{
return ErrorCode::ERROR_CODE_NONE;
}
//==============================
// Common layer interface
//==============================
/** Get the layer kind */
U32 IODataLayerGPU_base::GetLayerKind()const
{
return ELayerKind::LAYER_KIND_GPU | ELayerKind::LAYER_KIND_SINGLE_INPUT | ELayerKind::LAYER_KIND_SINGLE_OUTPUT | ELayerKind::LAYER_KIND_DATA;
}
/** Get the GUID unique to this layer */
Gravisbell::GUID IODataLayerGPU_base::GetGUID(void)const
{
return this->guid;
}
/** Get the layer type identification code.
@param o_layerCode destination buffer
@return 0 on success */
Gravisbell::GUID IODataLayerGPU_base::GetLayerCode(void)const
{
Gravisbell::GUID layerCode;
Gravisbell::Layer::IOData::GetLayerCode(layerCode);
return layerCode;
}
/** Get the layer configuration */
const SettingData::Standard::IData* IODataLayerGPU_base::GetLayerStructure()const
{
return NULL;
}
//==============================
// Data management
//==============================
/** Get the data structure information */
IODataStruct IODataLayerGPU_base::GetDataStruct()const
{
return this->ioDataStruct;
}
/** Get the data buffer size.
@return data buffer size: the number of F32 array elements used. */
U32 IODataLayerGPU_base::GetBufferCount()const
{
return this->ioDataStruct.GetDataCount();
}
//==============================
// Common layer interface
//==============================
/** Run pre-processing for computation (learning).
@param batchSize number of samples processed at the same time.
Must be called exactly once after building the NN and before any computation; it does not need to be called per data item.
If it fails, PreProcessLearnLoop and later calls cannot be executed. */
Gravisbell::ErrorCode IODataLayerGPU_base::PreProcessLearn(U32 batchSize)
{
// run the pre-processing used for normal computation
ErrorCode err = PreProcessCalculate(batchSize);
if(err != ErrorCode::ERROR_CODE_NONE)
return err;
// initialize the error-delta data array
this->lpDInputBuffer.resize(batchSize * this->GetBufferCount());
return Gravisbell::ErrorCode::ERROR_CODE_NONE;
}
/** Run pre-processing for computation (inference).
@param batchSize number of samples processed at the same time.
Must be called exactly once after building the NN and before any computation; it does not need to be called per data item.
If it fails, Calculate and later calls cannot be executed. */
Gravisbell::ErrorCode IODataLayerGPU_base::PreProcessCalculate(U32 batchSize)
{
// store the batch size
this->batchSize = batchSize;
// allocate the buffer and initialize the batch data array
this->lpOutputBuffer.resize(batchSize * this->GetBufferCount());
// initialize the buffers used for error calculation
this->lpErrorValue_max.resize(this->GetBufferCount());
this->lpErrorValue_ave.resize(this->GetBufferCount());
this->lpErrorValue_ave2.resize(this->GetBufferCount());
this->lpErrorValue_crossEntropy.resize(this->GetBufferCount());
return Gravisbell::ErrorCode::ERROR_CODE_NONE;
}
/** Loop initialization. Run before starting a pass over the data set.
If it fails, Calculate and later calls cannot be executed. */
ErrorCode IODataLayerGPU_base::PreProcessLoop()
{
this->calcErrorCount = 0;
cudaMemset(thrust::raw_pointer_cast(&this->lpErrorValue_max[0]), 0, sizeof(F32)*this->lpErrorValue_max.size());
cudaMemset(thrust::raw_pointer_cast(&this->lpErrorValue_ave[0]), 0, sizeof(F32)*this->lpErrorValue_max.size());
cudaMemset(thrust::raw_pointer_cast(&this->lpErrorValue_ave2[0]), 0, sizeof(F32)*this->lpErrorValue_max.size());
cudaMemset(thrust::raw_pointer_cast(&this->lpErrorValue_crossEntropy[0]), 0, sizeof(F32)*this->lpErrorValue_max.size());
return Gravisbell::ErrorCode::ERROR_CODE_NONE;
}
/** Get the batch size.
@return number of samples processed at the same time */
U32 IODataLayerGPU_base::GetBatchSize()const
{
return this->batchSize;
}
//==============================
// Input interface
//==============================
/** Calculate the learning error.
@param i_lppInputBuffer input data buffer. Must hold [GetBatchSize()][GetInputBufferCount()] elements */
Gravisbell::ErrorCode IODataLayerGPU_base::CalculateLearnError(Gravisbell::CONST_BATCH_BUFFER_POINTER i_lppInputBuffer)
{
U32 inputBufferCount = this->GetInputBufferCount();
if(this->lpDInputBuffer.size())
{
// copy the data
this->lpDInputBuffer = this->lpOutputBuffer;
// calculate the data error
{
float alpha = -1.0f;
// y = alpha * x + y;
cublasSaxpy(
this->cublasHandle,
inputBufferCount * this->batchSize,
&alpha,
i_lppInputBuffer,
1,
thrust::raw_pointer_cast(&this->lpDInputBuffer[0]),
1);
}
}
for(U32 batchNum=0; batchNum<this->batchSize; batchNum++)
{
U32 bufferCount = this->GetBufferCount();
dim3 grid((bufferCount +(BLOCK_SIZE - 1))/BLOCK_SIZE , 1, 1);
dim3 block(BLOCK_SIZE, 1, 1);
cuda_func_calculateError<<<grid, block>>>(
i_lppInputBuffer,
thrust::raw_pointer_cast(&this->lpOutputBuffer[0]),
thrust::raw_pointer_cast(&this->lpErrorValue_max[0]),
thrust::raw_pointer_cast(&this->lpErrorValue_ave[0]),
thrust::raw_pointer_cast(&this->lpErrorValue_ave2[0]),
thrust::raw_pointer_cast(&this->lpErrorValue_crossEntropy[0]),
batchNum,
this->GetBufferCount());
this->calcErrorCount++;
}
return Gravisbell::ErrorCode::ERROR_CODE_NONE;
}
/** Get the error values.
Does not work correctly unless CalculateLearnError() has been called at least once.
@param o_min minimum error.
@param o_max maximum error.
@param o_ave mean error.
@param o_ave2 mean squared error. */
ErrorCode IODataLayerGPU_base::GetCalculateErrorValue(F32& o_max, F32& o_ave, F32& o_ave2, F32& o_crossEntropy)
{
o_max = 0.0f;
o_ave = 0.0f;
o_ave2 = 0.0f;
o_crossEntropy = 0.0f;
for(U32 inputNum=0; inputNum<this->GetBufferCount(); inputNum++)
{
F32 errorValue_max = this->lpErrorValue_max[inputNum];
F32 errorValue_ave = this->lpErrorValue_ave[inputNum];
F32 errorValue_ave2 = this->lpErrorValue_ave2[inputNum];
F32 errorValue_crossEntropy = this->lpErrorValue_crossEntropy[inputNum];
o_max = max(o_max, errorValue_max);
o_ave += errorValue_ave;
o_ave2 += errorValue_ave2;
o_crossEntropy += errorValue_crossEntropy;
}
o_ave = o_ave / this->calcErrorCount / this->GetBufferCount();
o_ave2 = (F32)sqrt(o_ave2 / this->calcErrorCount / this->GetBufferCount());
o_crossEntropy = o_crossEntropy / this->calcErrorCount / this->GetBufferCount();
return ErrorCode::ERROR_CODE_NONE;
}
/** Get detailed error values.
Errors are collected per input/output element.
Does not work correctly unless CalculateLearnError() has been called at least once.
Each array must have at least [GetBufferCount()] elements.
@param o_lpMin minimum errors.
@param o_lpMax maximum errors.
@param o_lpAve mean errors.
@param o_lpAve2 mean squared errors. */
ErrorCode IODataLayerGPU_base::GetCalculateErrorValueDetail(F32 o_lpMax[], F32 o_lpAve[], F32 o_lpAve2[])
{
for(U32 inputNum=0; inputNum<this->GetBufferCount(); inputNum++)
{
o_lpMax[inputNum] = this->lpErrorValue_max[inputNum];
o_lpAve[inputNum] += this->lpErrorValue_ave[inputNum] / this->GetDataCount();
o_lpAve2[inputNum] += (F32)sqrt(this->lpErrorValue_ave2[inputNum] / this->GetDataCount());
}
return ErrorCode::ERROR_CODE_NONE;
}
/** Get the input data structure.
@return input data structure */
IODataStruct IODataLayerGPU_base::GetInputDataStruct()const
{
return this->GetDataStruct();
}
/** Get the number of input buffer elements. Note: a count of data elements, not bytes */
U32 IODataLayerGPU_base::GetInputBufferCount()const
{
return this->GetBufferCount();
}
/** Get the learning delta.
The number of array elements equals the return value of GetInputBufferCount.
@return pointer to the head of the error-delta array */
CONST_BATCH_BUFFER_POINTER IODataLayerGPU_base::GetDInputBuffer()const
{
return thrust::raw_pointer_cast(&this->lpDInputBuffer[0]);
}
/** Get the learning delta.
@param lpDOutputBuffer array that receives the learning delta. Must hold [GetBatchSize()][GetInputBufferCount()] elements */
Gravisbell::ErrorCode IODataLayerGPU_base::GetDInputBuffer(BATCH_BUFFER_POINTER o_lpDInputBuffer)const
{
if(o_lpDInputBuffer == NULL)
return ErrorCode::ERROR_CODE_COMMON_NULL_REFERENCE;
const U32 batchSize = this->GetBatchSize();
const U32 inputBufferCount = this->GetOutputBufferCount();
cudaMemcpy(o_lpDInputBuffer, this->GetDInputBuffer(), sizeof(F32)*batchSize*inputBufferCount, cudaMemcpyDeviceToHost);
return Gravisbell::ErrorCode::ERROR_CODE_NONE;
}
//==============================
// Output interface
//==============================
/** Get the output data structure */
IODataStruct IODataLayerGPU_base::GetOutputDataStruct()const
{
return this->GetDataStruct();
}
/** Get the number of output buffer elements. Note: a count of data elements, not bytes */
U32 IODataLayerGPU_base::GetOutputBufferCount()const
{
return this->GetBufferCount();
}
/** Get the output data buffer.
The number of array elements equals the return value of GetOutputBufferCount.
@return pointer to the head of the output data array */
CONST_BATCH_BUFFER_POINTER IODataLayerGPU_base::GetOutputBuffer()const
{
return thrust::raw_pointer_cast(&this->lpOutputBuffer[0]);
}
/** Get the output data buffer.
@param o_lpOutputBuffer destination array for the output data. Must hold [GetBatchSize()][GetOutputBufferCount()] elements
@return 0 on success */
Gravisbell::ErrorCode IODataLayerGPU_base::GetOutputBuffer(BATCH_BUFFER_POINTER o_lpOutputBuffer)const
{
if(o_lpOutputBuffer == NULL)
return ErrorCode::ERROR_CODE_COMMON_NULL_REFERENCE;
const U32 batchSize = this->GetBatchSize();
const U32 outputBufferCount = this->GetOutputBufferCount();
cudaMemcpy(o_lpOutputBuffer, this->GetOutputBuffer(), sizeof(F32)*batchSize*outputBufferCount, cudaMemcpyDeviceToHost);
return Gravisbell::ErrorCode::ERROR_CODE_NONE;
}
} // IOData
} // Layer
} // Gravisbell
|
8355ab2802681c58f9efdbc67a1b043aafbc1fa6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/native/UnfoldBackward.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/hip/HIPContext.h>
#include <vector>
// Note on naming: it is unconventional.
// grad_in does not mean that it is a gradient w.r.t. the input;
// grad_in/grad_out are just the input/output of the unfold_backward kernel.
//
// unfold_backward, the algorithm is described in
// /native/cpu/UnfoldBackwardKernel.cpp
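// A concrete, purely illustrative shape example: for in = tensor of shape (6,), the view
// in.unfold(0, /*size=*/3, /*step=*/2) has shape (2, 3) with windows [0,1,2] and [2,3,4].
// unfold_backward then takes grad_in of shape (2, 3) and produces grad_out of shape (6,),
// where element 2 receives two contributions (it lies in both windows) and element 5
// receives none.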
namespace at { namespace native {
namespace {
template <int n_threads, int n_elems_per_thread, typename func_t>
C10_LAUNCH_BOUNDS_2(n_threads, n_elems_per_thread)
__global__ void _unfold_backward_elementwise_kernel(int total_n_elems, func_t f) {
constexpr int total_work_block = n_threads * n_elems_per_thread;
int idx = total_work_block * blockIdx.x + threadIdx.x;
#pragma unroll
for (int i = 0; i < n_elems_per_thread; ++i) {
if (idx < total_n_elems) {
f(idx);
idx += n_threads;
}
}
}
template <int n_threads, int n_elems_per_thread, typename func_t>
static void _launch_unfold_backward_kernel(int total_n_elems, func_t f) {
TORCH_INTERNAL_ASSERT(
total_n_elems >= 0 && total_n_elems <= std::numeric_limits<int32_t>::max()
);
dim3 block(n_threads);
constexpr int total_work_block = n_threads * n_elems_per_thread;
dim3 grid((total_n_elems + total_work_block - 1) / total_work_block);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( _unfold_backward_elementwise_kernel<n_threads, n_elems_per_thread, func_t>)
, dim3(grid), dim3(block), 0, stream, total_n_elems, f);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
template <typename scalar_t>
void _unfold_backward_internal_kernel(
TensorIterator& iter,
int64_t size,
int64_t step,
int64_t grad_in_dim_stride,
int64_t grad_in_last_dim_stride,
int64_t grad_in_dim_size,
int64_t grad_out_dim_stride,
bool is_step_ge_size
) {
if (iter.numel() == 0) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
_unfold_backward_internal_kernel<scalar_t>(
sub_iter,
size,
step,
grad_in_dim_stride,
grad_in_last_dim_stride,
grad_in_dim_size,
grad_out_dim_stride,
is_step_ge_size
);
}
return;
}
char* __restrict__ grad_out_ptr = reinterpret_cast<char*>(iter.data_ptr(0));
char* __restrict__ grad_in_ptr = reinterpret_cast<char*>(iter.data_ptr(1));
char* __restrict__ idx_dim_ptr = reinterpret_cast<char*>(iter.data_ptr(2));
if (is_step_ge_size) {
char* __restrict__ idx_last_dim_ptr = reinterpret_cast<char*>(iter.data_ptr(3));
auto offset_calc = make_offset_calculator<4>(iter);
// this loop simply copies the data
// from proper places in grad_out to grad_in
auto loop = [=]C10_DEVICE(int i) {
auto offsets = offset_calc.get(i);
auto* __restrict__ grad_out_data = reinterpret_cast<scalar_t*>(grad_out_ptr + offsets[0]);
auto* __restrict__ grad_in_data = reinterpret_cast<scalar_t*>(grad_in_ptr + offsets[1]);
auto idx_dim = *reinterpret_cast<int64_t*>(idx_dim_ptr + offsets[2]);
auto idx_last_dim = *reinterpret_cast<int64_t*>(idx_last_dim_ptr + offsets[3]);
auto grad_out_idx_dim = idx_dim * step + idx_last_dim;
grad_out_data[grad_out_idx_dim * grad_out_dim_stride] = *grad_in_data;
};
_launch_unfold_backward_kernel<num_threads, thread_work_size>(iter.numel(), loop);
}
else {
auto offset_calc = make_offset_calculator<3>(iter);
// The algorithm is: for each index in grad_out find
// the elements contributing to it and sum them up.
// Note: the algorithm does not require any synchronization.
auto loop = [=]C10_DEVICE(int i) {
auto offsets = offset_calc.get(i);
auto* __restrict__ grad_out_data = reinterpret_cast<scalar_t*>(grad_out_ptr + offsets[0]);
auto* __restrict__ grad_in_data = reinterpret_cast<scalar_t*>(grad_in_ptr + offsets[1]);
auto idx_dim = *reinterpret_cast<int64_t*>(idx_dim_ptr + offsets[2]);
// left_fold potentially intersecting with idx_dim
// is either (idx_dim - size) / step or the next integer.
int64_t left_fold_idx = (idx_dim > size) ? (idx_dim - size) / step : 0;
if (!(left_fold_idx * step <= idx_dim && idx_dim < left_fold_idx * step + size)) {
++left_fold_idx;
}
auto right_fold_idx = idx_dim / step;
right_fold_idx = (right_fold_idx >= grad_in_dim_size) ?
(grad_in_dim_size - 1) : right_fold_idx;
for (auto fold_idx = left_fold_idx; fold_idx <= right_fold_idx; ++fold_idx) {
auto idx_last_dim = idx_dim - fold_idx * step;
*grad_out_data += grad_in_data[fold_idx * grad_in_dim_stride
+ idx_last_dim * grad_in_last_dim_stride];
}
};
_launch_unfold_backward_kernel<num_threads, thread_work_size>(iter.numel(), loop);
}
}
void unfold_backward_cuda_kernel(
Tensor& grad_out,
const Tensor& grad_in,
int64_t dim,
int64_t size,
int64_t step
) {
dim = maybe_wrap_dim(dim, grad_out.dim());
// last dim stores the folds
auto last_dim = maybe_wrap_dim(-1, grad_in.dim());
auto grad_in_dim_stride = ensure_nonempty_stride(grad_in, dim);
auto grad_in_last_dim_stride = ensure_nonempty_stride(grad_in, last_dim);
auto grad_in_dim_size = ensure_nonempty_size(grad_in, dim);
auto grad_out_dim_stride = ensure_nonempty_stride(grad_out, dim);
auto is_step_ge_size = (step >= size);
TensorIterator iter =
is_step_ge_size ?
_make_unfold_backward_iter_over_grad_in(
grad_out, grad_in, dim, size, step
) :
_make_unfold_backward_iter_over_grad_out(
grad_out, grad_in, dim, size, step
);
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(),
"unfold_backward_cuda", [&] {
_unfold_backward_internal_kernel<scalar_t>(
iter,
size,
step,
grad_in_dim_stride,
grad_in_last_dim_stride,
grad_in_dim_size,
grad_out_dim_stride,
is_step_ge_size
);
}
);
}
}
REGISTER_DISPATCH(unfold_backward_stub, &unfold_backward_cuda_kernel);
}} // namespace at::native
| 8355ab2802681c58f9efdbc67a1b043aafbc1fa6.cu | #include <ATen/native/UnfoldBackward.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <vector>
// Note on naming: it is unconventional.
// grad_in does not mean that it is a gradient w.r.t. the input;
// grad_in/grad_out are just the input/output of the unfold_backward kernel.
//
// unfold_backward, the algorithm is described in
// /native/cpu/UnfoldBackwardKernel.cpp
namespace at { namespace native {
namespace {
template <int n_threads, int n_elems_per_thread, typename func_t>
C10_LAUNCH_BOUNDS_2(n_threads, n_elems_per_thread)
__global__ void _unfold_backward_elementwise_kernel(int total_n_elems, func_t f) {
constexpr int total_work_block = n_threads * n_elems_per_thread;
int idx = total_work_block * blockIdx.x + threadIdx.x;
#pragma unroll
for (int i = 0; i < n_elems_per_thread; ++i) {
if (idx < total_n_elems) {
f(idx);
idx += n_threads;
}
}
}
template <int n_threads, int n_elems_per_thread, typename func_t>
static void _launch_unfold_backward_kernel(int total_n_elems, func_t f) {
TORCH_INTERNAL_ASSERT(
total_n_elems >= 0 && total_n_elems <= std::numeric_limits<int32_t>::max()
);
dim3 block(n_threads);
constexpr int total_work_block = n_threads * n_elems_per_thread;
dim3 grid((total_n_elems + total_work_block - 1) / total_work_block);
auto stream = at::cuda::getCurrentCUDAStream();
_unfold_backward_elementwise_kernel<n_threads, n_elems_per_thread, func_t>
<<<grid, block, 0, stream>>>(total_n_elems, f);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
template <typename scalar_t>
void _unfold_backward_internal_kernel(
TensorIterator& iter,
int64_t size,
int64_t step,
int64_t grad_in_dim_stride,
int64_t grad_in_last_dim_stride,
int64_t grad_in_dim_size,
int64_t grad_out_dim_stride,
bool is_step_ge_size
) {
if (iter.numel() == 0) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
_unfold_backward_internal_kernel<scalar_t>(
sub_iter,
size,
step,
grad_in_dim_stride,
grad_in_last_dim_stride,
grad_in_dim_size,
grad_out_dim_stride,
is_step_ge_size
);
}
return;
}
char* __restrict__ grad_out_ptr = reinterpret_cast<char*>(iter.data_ptr(0));
char* __restrict__ grad_in_ptr = reinterpret_cast<char*>(iter.data_ptr(1));
char* __restrict__ idx_dim_ptr = reinterpret_cast<char*>(iter.data_ptr(2));
if (is_step_ge_size) {
char* __restrict__ idx_last_dim_ptr = reinterpret_cast<char*>(iter.data_ptr(3));
auto offset_calc = make_offset_calculator<4>(iter);
// this loop simply copies the data
// from proper places in grad_out to grad_in
auto loop = [=]C10_DEVICE(int i) {
auto offsets = offset_calc.get(i);
auto* __restrict__ grad_out_data = reinterpret_cast<scalar_t*>(grad_out_ptr + offsets[0]);
auto* __restrict__ grad_in_data = reinterpret_cast<scalar_t*>(grad_in_ptr + offsets[1]);
auto idx_dim = *reinterpret_cast<int64_t*>(idx_dim_ptr + offsets[2]);
auto idx_last_dim = *reinterpret_cast<int64_t*>(idx_last_dim_ptr + offsets[3]);
auto grad_out_idx_dim = idx_dim * step + idx_last_dim;
grad_out_data[grad_out_idx_dim * grad_out_dim_stride] = *grad_in_data;
};
_launch_unfold_backward_kernel<num_threads, thread_work_size>(iter.numel(), loop);
}
else {
auto offset_calc = make_offset_calculator<3>(iter);
// The algorithm is: for each index in grad_out find
// the elements contributing to it and sum them up.
// Note: the algorithm does not require any synchronization.
auto loop = [=]C10_DEVICE(int i) {
auto offsets = offset_calc.get(i);
auto* __restrict__ grad_out_data = reinterpret_cast<scalar_t*>(grad_out_ptr + offsets[0]);
auto* __restrict__ grad_in_data = reinterpret_cast<scalar_t*>(grad_in_ptr + offsets[1]);
auto idx_dim = *reinterpret_cast<int64_t*>(idx_dim_ptr + offsets[2]);
// left_fold potentially intersecting with idx_dim
// is either (idx_dim - size) / step or the next integer.
int64_t left_fold_idx = (idx_dim > size) ? (idx_dim - size) / step : 0;
if (!(left_fold_idx * step <= idx_dim && idx_dim < left_fold_idx * step + size)) {
++left_fold_idx;
}
auto right_fold_idx = idx_dim / step;
right_fold_idx = (right_fold_idx >= grad_in_dim_size) ?
(grad_in_dim_size - 1) : right_fold_idx;
for (auto fold_idx = left_fold_idx; fold_idx <= right_fold_idx; ++fold_idx) {
auto idx_last_dim = idx_dim - fold_idx * step;
*grad_out_data += grad_in_data[fold_idx * grad_in_dim_stride
+ idx_last_dim * grad_in_last_dim_stride];
}
};
_launch_unfold_backward_kernel<num_threads, thread_work_size>(iter.numel(), loop);
}
}
void unfold_backward_cuda_kernel(
Tensor& grad_out,
const Tensor& grad_in,
int64_t dim,
int64_t size,
int64_t step
) {
dim = maybe_wrap_dim(dim, grad_out.dim());
// last dim stores the folds
auto last_dim = maybe_wrap_dim(-1, grad_in.dim());
auto grad_in_dim_stride = ensure_nonempty_stride(grad_in, dim);
auto grad_in_last_dim_stride = ensure_nonempty_stride(grad_in, last_dim);
auto grad_in_dim_size = ensure_nonempty_size(grad_in, dim);
auto grad_out_dim_stride = ensure_nonempty_stride(grad_out, dim);
auto is_step_ge_size = (step >= size);
TensorIterator iter =
is_step_ge_size ?
_make_unfold_backward_iter_over_grad_in(
grad_out, grad_in, dim, size, step
) :
_make_unfold_backward_iter_over_grad_out(
grad_out, grad_in, dim, size, step
);
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(),
"unfold_backward_cuda", [&] {
_unfold_backward_internal_kernel<scalar_t>(
iter,
size,
step,
grad_in_dim_stride,
grad_in_last_dim_stride,
grad_in_dim_size,
grad_out_dim_stride,
is_step_ge_size
);
}
);
}
}
REGISTER_DISPATCH(unfold_backward_stub, &unfold_backward_cuda_kernel);
}} // namespace at::native
|
91480a9ddc2bf5032b30d1733907d8a7ed26c952.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* gapped_extender_gpu.cpp
*
* Created on: Aug 11, 2014
* Author: shu
*/
#include "gapped_extender_gpu.h"
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/system/hip/experimental/pinned_allocator.h>
#include <thrust/copy.h>
#include <vector>
#include <assert.h>
#include <iostream>
#include "packed_alphabet_code.h"
#include "group_loader.h"
#include "score_matrix.h"
#include "cuda_common.h"
using namespace std;
namespace gapped_extender_gpu_kernel {
#if 0
const int debug_q_p = 119;
const int debug_db_p = 19959
+ cuda_common::kMaxLoadLength
* packed_alphabet_code::kNumberOfCodesInBlock;
const int debug_thread_id = 0;
#endif
static const size_t kNumberBlocks = 128;
static const size_t kNumberThreads = 128;
static const size_t kLoadLength = 4;
static const size_t kDpRowLength = packed_alphabet_code::kNumberOfCodesInBlock
/ 3 + 1;
template<cuda_common::Direction TDirection>
__device__ int InitSequence(
const packed_alphabet_code::PackedAlphabetCode* sequence_cache_mem,
const packed_alphabet_code::PackedAlphabetCode* sequence_code_block,
const int sequence_position, const uint32_t sequence_delimiter,
AlphabetCoder::Code* cached_sequence) {
const packed_alphabet_code::PackedAlphabetCode * sequence_code_block_cache =
TDirection == cuda_common::kReverse ?
sequence_cache_mem + kLoadLength - 1 : sequence_cache_mem;
int next_bolock_position = packed_alphabet_code::GetBlockPosition(
sequence_position);
int offset = sequence_position
- next_bolock_position
* packed_alphabet_code::kNumberOfCodesInBlock;
int next_bolock_chache_position = 0;
uint32_t block_sift;
uint32_t next_block_sift;
packed_alphabet_code::PackedAlphabetCode temp_code_block;
packed_alphabet_code::InitPackedAlphabetCode<TDirection>(
sequence_code_block_cache, offset, &next_bolock_chache_position,
&block_sift, &next_block_sift, &temp_code_block);
#if 0
if ((debug_q_p - 1) == sequence_position || debug_q_p == sequence_position
|| blockIdx.x * blockDim.x + threadIdx.x == 0) {
printf("temp code block %ld\n", temp_code_block);
}
#endif
int cache_end = ((int) kLoadLength)
* (TDirection == cuda_common::kReverse ? -1 : 1);
uint32_t stop_flag = 0;
int p = 0;
int increment = TDirection == cuda_common::kReverse ? -1 : 1;
while (!stop_flag && (next_bolock_chache_position != cache_end)) {
packed_alphabet_code::PackedAlphabetCode code_block =
packed_alphabet_code::GetPackedAlphabetCode<TDirection>(
sequence_code_block_cache, block_sift, next_block_sift,
&next_bolock_chache_position, &temp_code_block);
#if 0
if ((debug_q_p - 1) == sequence_position
|| debug_q_p == sequence_position
|| blockIdx.x * blockDim.x + threadIdx.x == 0) {
printf("code block %ld\n", code_block);
}
#endif
#pragma unroll
for (uint32_t i = 0; i < packed_alphabet_code::kNumberOfCodesInBlock;
++i) {
const uint32_t c =
packed_alphabet_code::GetAlphabetCode<TDirection>(
code_block, i);
#if 0
if ((blockDim.x * blockIdx.x + threadIdx.x) == 0) {
printf("%d : %d\n", i, c);
}
#endif
stop_flag = stop_flag | c == sequence_delimiter;
cached_sequence[p] = c;
p += increment;
}
}
next_bolock_position += next_bolock_chache_position;
while (!stop_flag) {
packed_alphabet_code::PackedAlphabetCode code_block =
packed_alphabet_code::GetPackedAlphabetCode<TDirection>(
sequence_code_block, block_sift, next_block_sift,
&next_bolock_position, &temp_code_block);
#pragma unroll
for (uint32_t i = 0; i < packed_alphabet_code::kNumberOfCodesInBlock;
++i) {
const uint32_t c =
packed_alphabet_code::GetAlphabetCode<TDirection>(
code_block, i);
stop_flag = stop_flag | c == sequence_delimiter;
cached_sequence[p] = c;
p += increment;
}
}
return 0;
}
__device__ int InitDpColumn(const AlphabetCoder::Code* sequence,
const AlphabetCoder::Code sequence_delimiter, int gap_init,
int gap_extention, int default_cutoff, int increment,
GappedExtenderGpu::DpColumnCell* dp_column) {
int score = -gap_init;
GappedExtenderGpu::DpColumnCell new_cell;
new_cell.h = 0;
new_cell.e = -gap_init;
reinterpret_cast<uint64_t *>(dp_column)[0] =
reinterpret_cast<uint64_t *>(&new_cell)[0];
int sequence_position = 0;
int array_index = 0;
int cutoff = default_cutoff;
if (cutoff < gap_init) {
cutoff = gap_init;
}
for (array_index = 1; sequence[sequence_position] != sequence_delimiter;
++array_index, sequence_position += increment) {
if (score < -cutoff) {
break;
}
new_cell.h = score;
new_cell.e = score - gap_init;
reinterpret_cast<uint64_t *>(dp_column)[array_index] =
reinterpret_cast<uint64_t *>(&new_cell)[0];
score -= gap_extention;
}
return array_index;
}
template<cuda_common::Direction TDirection>
__device__ uint32_t InitDpRow(
const packed_alphabet_code::PackedAlphabetCode seq1_code_block,
const uint32_t seq1_offset,
const AlphabetCoder::Code sequence_delimiter, const int gap_init,
const int gap_extention, const int default_start_dp_h,
const int default_start_dp_e, const int threshold,
AlphabetCoder::Code *seq_1_subsequence, int *dp_h, int *dp_f,
int *last_dp_e) {
int start_dp_h = default_start_dp_h;
int start_dp_e = default_start_dp_e;
if (threshold > default_start_dp_h) {
start_dp_h = GappedExtenderGpu::kInitScore;
start_dp_e = GappedExtenderGpu::kInitScore;
}
dp_h[0] = start_dp_h;
dp_f[0] = start_dp_h - gap_init;
int dp_e = start_dp_e;
const int s_dp_row_skip = blockDim.x;
int s_dp_row_i = s_dp_row_skip;
#pragma unroll
for (uint32_t i = 1; i < kDpRowLength; ++i, s_dp_row_i += s_dp_row_skip) {
if (threshold > dp_e) {
dp_e = GappedExtenderGpu::kInitScore;
}
dp_h[s_dp_row_i] = dp_e;
dp_f[s_dp_row_i] = dp_e - gap_init;
dp_e -= gap_extention;
}
*last_dp_e = dp_e;
const uint32_t seq1_subsequence_i_end = seq1_offset + kDpRowLength - 1;
uint32_t seq1_subsequence_i = seq1_offset;
uint32_t seq_1_subsequence_length = 0;
for (; seq1_subsequence_i < seq1_subsequence_i_end; ++seq1_subsequence_i) {
const AlphabetCoder::Code c = packed_alphabet_code::GetAlphabetCode<
TDirection>(seq1_code_block, seq1_subsequence_i);
if (c == sequence_delimiter) {
break;
}
seq_1_subsequence[seq_1_subsequence_length] = c;
++seq_1_subsequence_length;
}
#if 0
if (blockIdx.x * blockDim.x + threadIdx.x == debug_thread_id) {
printf("start dp_h %d, start dp_e %d\n", default_start_dp_h,
default_start_dp_e);
printf("sizeof(int) = %d\n", sizeof(int));
printf(" ");
printf(" ");
for (int i = 0; i < seq_1_subsequence_length; ++i) {
printf("%5d", seq_1_subsequence[i]);
}
printf("\n");
printf(" ");
for (int i = 0; i < kDpRowLength; ++i) {
printf("%5d", dp_h[i * blockDim.x]);
}
printf("\n");
}
#endif
return seq_1_subsequence_length;
}
__device__ int UpdateDpRow(const AlphabetCoder::Code *seq_1_subsequence,
const int sequence0_position, const int sequence1_offset,
const uint32_t seq_1_subsequence_length, int increment, int cutoff,
const int *score_matrix_row, const int gap_init,
const int gap_extention, int* dp_row_h, int* dp_row_f, int start_dp_e,
int* max_score_sequence0_position, int* max_score_sequence1_position,
int* max_score_ptr, int* last_dp_e) {
int max_score_sequence1_position_in_row = -1;
int max_score = *max_score_ptr;
int dp_prev_h = dp_row_h[0];
int dp_e = start_dp_e;
const int dp_row_length = seq_1_subsequence_length + 1;
int start_update_cell = dp_row_length;
const int s_dp_row_skip = blockDim.x;
int s_dp_row_i = s_dp_row_skip;
int dp_f = GappedExtenderGpu::kInitScore;
for (int i = 1; i < dp_row_length; ++i, s_dp_row_i += s_dp_row_skip) {
int score = dp_prev_h
+ __ldg(&score_matrix_row[seq_1_subsequence[i - 1]]);
dp_prev_h = dp_row_h[s_dp_row_i];
dp_f = dp_row_f[s_dp_row_i];
score = max(score, dp_f);
score = max(score, dp_e);
if (score > max_score) {
max_score = score;
max_score_sequence1_position_in_row = i;
}
if (max_score - score > cutoff) {
score = GappedExtenderGpu::kInitScore;
dp_f = GappedExtenderGpu::kInitScore;
dp_e = GappedExtenderGpu::kInitScore;
} else {
start_update_cell = min(start_update_cell, i);
}
dp_row_h[s_dp_row_i] = score;
dp_row_f[s_dp_row_i] = max(score - gap_init, dp_f - gap_extention);
dp_e = max(score - gap_init, dp_e - gap_extention);
}
*last_dp_e = dp_e;
if (max_score_sequence1_position_in_row >= 0) {
*max_score_ptr = max_score;
*max_score_sequence0_position = sequence0_position;
*max_score_sequence1_position = sequence1_offset
+ (max_score_sequence1_position_in_row - 1) * increment;
}
return start_update_cell == dp_row_length;
}
template<cuda_common::Direction TDirection>
__device__ int ExtendOneSideScoreOnlyDevice(
const AlphabetCoder::Code* sequence0,
const packed_alphabet_code::PackedAlphabetCode* sequence1_code_block,
const uint32_t sequence1_offset,
const packed_alphabet_code::PackedAlphabetCode* sequence1_cache_mem,
const bool reverse, const AlphabetCoder::Code sequence_delimiter,
const int* score_matrix, const uint32_t number_letters,
const int gap_open, const int gap_extention, const int cutoff,
char shared_mem[], uint32_t* best_sequence0_position,
uint32_t* best_sequence1_position, int* best_score) {
const packed_alphabet_code::PackedAlphabetCode * sequence1_code_block_cache =
reverse ?
sequence1_cache_mem + kLoadLength - 1 : sequence1_cache_mem;
int next_bolock_position = packed_alphabet_code::GetBlockPosition(
sequence1_offset);
int sequence1_code_block_offset = sequence1_offset
- next_bolock_position
* packed_alphabet_code::kNumberOfCodesInBlock;
int next_bolock_chache_position = 0;
uint32_t block_sift;
uint32_t next_block_sift;
packed_alphabet_code::PackedAlphabetCode temp_code_block;
packed_alphabet_code::InitPackedAlphabetCode<TDirection>(
sequence1_code_block_cache, sequence1_code_block_offset,
&next_bolock_chache_position, &block_sift, &next_block_sift,
&temp_code_block);
int increment = reverse ? -1 : 1;
GappedExtenderGpu::DpColumnCell dp_column[GappedExtenderGpu::kMaxSequence0Length];
int sequence0_position_start = 0;
int dp_column_end = 0;
int gap_init = gap_open + gap_extention;
dp_column_end = InitDpColumn(sequence0, sequence_delimiter, gap_init,
gap_extention, cutoff, increment, dp_column);
int cache_end = ((int) kLoadLength)
* (TDirection == cuda_common::kReverse ? -1 : 1);
uint32_t stop_flag = 0;
int max_score = 0;
int max_score_sequence0_position = -increment;
int max_score_sequence1_position = -increment;
int sequence1_position = 0;
AlphabetCoder::Code seq_1_subsequence[kDpRowLength];
int *dp_row_mem = (int*) shared_mem;
int *dp_row_h = &dp_row_mem[threadIdx.x];
int *dp_row_f = &dp_row_mem[blockDim.x * kDpRowLength + threadIdx.x];
uint32_t seq_1_subsequence_offset =
packed_alphabet_code::kNumberOfCodesInBlock;
packed_alphabet_code::PackedAlphabetCode code_block = 0;
while (!stop_flag && (next_bolock_chache_position != cache_end)) {
if (seq_1_subsequence_offset
>= packed_alphabet_code::kNumberOfCodesInBlock) {
code_block =
packed_alphabet_code::GetPackedAlphabetCode<TDirection>(
sequence1_code_block_cache, block_sift,
next_block_sift, &next_bolock_chache_position,
&temp_code_block);
seq_1_subsequence_offset = 0;
}
#if 0
if (blockIdx.x * blockDim.x + threadIdx.x == debug_thread_id) {
printf("code block %ld \n", code_block);
}
if (blockIdx.x * blockDim.x + threadIdx.x == debug_thread_id) {
printf("column %d:%2d\n", 0, dp_column_end);
}
#endif
int last_dp_e = 0;
GappedExtenderGpu::DpColumnCell dp_column_cell;
reinterpret_cast<uint64_t *>(&dp_column_cell)[0] =
reinterpret_cast<uint64_t *>(dp_column)[0];
uint32_t seq_1_subsequence_length = InitDpRow<TDirection>(code_block,
seq_1_subsequence_offset, sequence_delimiter, gap_init,
gap_extention, dp_column_cell.h, dp_column_cell.e,
max_score - cutoff, seq_1_subsequence, dp_row_h, dp_row_f,
&last_dp_e);
seq_1_subsequence_offset += seq_1_subsequence_length;
int last_s_dp_row_i = seq_1_subsequence_length * blockDim.x;
dp_column_cell.h = dp_row_h[last_s_dp_row_i];
dp_column_cell.e = last_dp_e;
reinterpret_cast<uint64_t *>(dp_column)[0] =
reinterpret_cast<uint64_t *>(&dp_column_cell)[0];
int start_drop_flag = 1;
int dp_column_stored_i = 1;
int last_updated_dp_column_stored_i = -1;
int ret = 0;
int sequence0_position = sequence0_position_start;
AlphabetCoder::Code s0_c;
for (int column_i = 1; column_i < dp_column_end | !ret;
++column_i, sequence0_position += increment) {
s0_c = sequence0[sequence0_position];
if (s0_c == sequence_delimiter) {
break;
}
reinterpret_cast<uint64_t *>(&dp_column_cell)[0] =
reinterpret_cast<uint64_t *>(dp_column)[column_i];
int start_dp_e =
column_i < dp_column_end ?
dp_column_cell.e : GappedExtenderGpu::kInitScore;
const int *score_matrix_row = &score_matrix[s0_c * number_letters];
#if 0
if (blockIdx.x * blockDim.x + threadIdx.x == debug_thread_id) {
printf("%2d:%2d", column_i, s0_c);
}
#endif
last_dp_e = 0;
ret = UpdateDpRow(seq_1_subsequence, sequence0_position,
sequence1_position, seq_1_subsequence_length, increment,
cutoff, score_matrix_row, gap_init, gap_extention, dp_row_h,
dp_row_f, start_dp_e, &max_score_sequence0_position,
&max_score_sequence1_position, &max_score, &last_dp_e);
if (start_drop_flag
& dp_row_h[last_s_dp_row_i]
== GappedExtenderGpu::kInitScore) {
dp_column_stored_i = 0;
sequence0_position_start += increment;
} else {
start_drop_flag = 0;
}
int temp_score =
column_i < dp_column_end ?
dp_column_cell.h : GappedExtenderGpu::kInitScore;
dp_column_cell.h =
ret ? GappedExtenderGpu::kInitScore : dp_row_h[last_s_dp_row_i];
dp_column_cell.e = ret ? GappedExtenderGpu::kInitScore : last_dp_e;
reinterpret_cast<uint64_t *>(dp_column)[dp_column_stored_i] =
reinterpret_cast<uint64_t *>(&dp_column_cell)[0];
++dp_column_stored_i;
last_updated_dp_column_stored_i =
ret ? last_updated_dp_column_stored_i : dp_column_stored_i;
dp_row_h[0] = temp_score;
#if 0
if (blockIdx.x * blockDim.x + threadIdx.x == debug_thread_id) {
for (int i = 0; i <= seq_1_subsequence_length; ++i) {
printf("%5d", dp_row_h[i * blockDim.x]);
}
printf(" (ret = %d, dp_column_h[column_i] = %d)", ret,
dp_column_h[column_i]);
printf("\n");
}
#endif
}
if (seq_1_subsequence_length < (kDpRowLength - 1)
|| 1 >= dp_column_stored_i) {
stop_flag = true;
break;
}
dp_column_end = last_updated_dp_column_stored_i + 1;
sequence1_position += seq_1_subsequence_length * increment;
}
next_bolock_position += next_bolock_chache_position;
while (!stop_flag) {
if (seq_1_subsequence_offset
>= packed_alphabet_code::kNumberOfCodesInBlock) {
code_block =
packed_alphabet_code::GetPackedAlphabetCode<TDirection>(
sequence1_code_block, block_sift, next_block_sift,
&next_bolock_position, &temp_code_block);
seq_1_subsequence_offset = 0;
}
#if 0
if (blockIdx.x * blockDim.x + threadIdx.x == debug_thread_id) {
printf("code block %ld \n", code_block);
}
if (blockIdx.x * blockDim.x + threadIdx.x == debug_thread_id) {
printf("column %d:%2d\n", 0, dp_column_end);
}
#endif
int last_dp_e = 0;
GappedExtenderGpu::DpColumnCell dp_column_cell;
reinterpret_cast<uint64_t *>(&dp_column_cell)[0] =
reinterpret_cast<uint64_t *>(dp_column)[0];
uint32_t seq_1_subsequence_length = InitDpRow<TDirection>(code_block,
seq_1_subsequence_offset, sequence_delimiter, gap_init,
gap_extention, dp_column_cell.h, dp_column_cell.e,
max_score - cutoff, seq_1_subsequence, dp_row_h, dp_row_f,
&last_dp_e);
seq_1_subsequence_offset += seq_1_subsequence_length;
int last_s_dp_row_i = seq_1_subsequence_length * blockDim.x;
dp_column_cell.h = dp_row_h[last_s_dp_row_i];
dp_column_cell.e = last_dp_e;
reinterpret_cast<uint64_t *>(dp_column)[0] =
reinterpret_cast<uint64_t *>(&dp_column_cell)[0];
int start_drop_flag = 1;
int dp_column_stored_i = 1;
int last_updated_dp_column_stored_i = -1;
int ret = 0;
int sequence0_position = sequence0_position_start;
AlphabetCoder::Code s0_c;
for (int column_i = 1; column_i < dp_column_end | !ret;
++column_i, sequence0_position += increment) {
s0_c = sequence0[sequence0_position];
if (s0_c == sequence_delimiter) {
break;
}
reinterpret_cast<uint64_t *>(&dp_column_cell)[0] =
reinterpret_cast<uint64_t *>(dp_column)[column_i];
int start_dp_e =
column_i < dp_column_end ?
dp_column_cell.e : GappedExtenderGpu::kInitScore;
const int *score_matrix_row = &score_matrix[s0_c * number_letters];
#if 0
if (blockIdx.x * blockDim.x + threadIdx.x == debug_thread_id) {
printf("%2d:%2d", column_i, s0_c);
}
#endif
last_dp_e = 0;
ret = UpdateDpRow(seq_1_subsequence, sequence0_position,
sequence1_position, seq_1_subsequence_length, increment,
cutoff, score_matrix_row, gap_init, gap_extention, dp_row_h,
dp_row_f, start_dp_e, &max_score_sequence0_position,
&max_score_sequence1_position, &max_score, &last_dp_e);
if (start_drop_flag
& dp_row_h[last_s_dp_row_i]
== GappedExtenderGpu::kInitScore) {
dp_column_stored_i = 0;
sequence0_position_start += increment;
} else {
start_drop_flag = 0;
}
int temp_score =
column_i < dp_column_end ?
dp_column_cell.h : GappedExtenderGpu::kInitScore;
dp_column_cell.h =
ret ? GappedExtenderGpu::kInitScore : dp_row_h[last_s_dp_row_i];
dp_column_cell.e = ret ? GappedExtenderGpu::kInitScore : last_dp_e;
reinterpret_cast<uint64_t *>(dp_column)[dp_column_stored_i] =
reinterpret_cast<uint64_t *>(&dp_column_cell)[0];
++dp_column_stored_i;
last_updated_dp_column_stored_i =
ret ? last_updated_dp_column_stored_i : dp_column_stored_i;
dp_row_h[0] = temp_score;
#if 0
if (blockIdx.x * blockDim.x + threadIdx.x == debug_thread_id) {
for (int i = 0; i <= seq_1_subsequence_length; ++i) {
printf("%5d", dp_row_h[i * blockDim.x]);
}
printf(" (ret = %d, dp_column_h[column_i] = %d)", ret,
dp_column_h[column_i]);
printf("\n");
}
#endif
}
if (seq_1_subsequence_length < (kDpRowLength - 1)
|| 1 >= dp_column_stored_i) {
stop_flag = true;
break;
}
dp_column_end = last_updated_dp_column_stored_i + 1;
sequence1_position += seq_1_subsequence_length * increment;
}
*best_score = max_score;
*best_sequence0_position = max_score_sequence0_position;
*best_sequence1_position = max_score_sequence1_position;
return 0;
}
__global__ void
__launch_bounds__(gapped_extender_gpu_kernel::kNumberThreads, 10) ExtendOneSideScoreOnlyKernel(
const packed_alphabet_code::PackedAlphabetCode* sequence0_code_block,
const packed_alphabet_code::PackedAlphabetCode* sequence1_code_block,
const uint32_t number_extensions, const bool reverse,
const AlphabetCoder::Code sequence_delimiter, const int* score_matrix,
const uint32_t number_letters, const int gap_open,
const int gap_extention, const int cutoff,
uint32_t* sequence0_positions, uint32_t* sequence1_positions,
int* best_scores) {
extern __shared__ char shared_mem[];
uint32_t* s_sequence_positions = (uint32_t *) shared_mem;
packed_alphabet_code::PackedAlphabetCode* s_group_loader =
(packed_alphabet_code::PackedAlphabetCode *) &s_sequence_positions[blockDim.x];
GroupLoader<const packed_alphabet_code::PackedAlphabetCode *, kLoadLength> group_loader;
int group_idx = group_loader.GetGroupId();
int idx_in_group = group_loader.GetIdInGroup();
int number_members = group_loader.GetNumberMembers();
int group_work_skip = group_loader.GetNumberGroups();
int number_group_in_block = blockDim.x / number_members;
int number_group_works = (number_extensions + number_members - 1)
/ number_members;
int remain_group_in_block = number_group_works % number_group_in_block;
int group_work_end = number_group_works
+ (remain_group_in_block != 0 ?
(number_group_in_block - remain_group_in_block) : 0);
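// group_work_end rounds the number of group-sized work items up to a multiple of the groups
// per block, presumably so every group in a block runs the same number of loop iterations and
// the __syncthreads() calls inside the loop stay aligned. Illustrative numbers: 100 extensions,
// 8 members per group and 4 groups per block give 13 group works, padded up to 16.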
uint32_t* s_group_sequence_positions = &s_sequence_positions[(threadIdx.x
/ number_members) * number_members];
for (int group_work_i = group_idx; group_work_i < group_work_end;
group_work_i += group_work_skip) {
const int group_extension_begin = group_work_i * number_members;
const int thread_work_id = group_extension_begin + idx_in_group;
const int extension_id =
thread_work_id < number_extensions ? thread_work_id : 0;
packed_alphabet_code::PackedAlphabetCode sequence_cache[kLoadLength];
const uint32_t sequence0_position = sequence0_positions[extension_id]
+ cuda_common::kMaxLoadLength
* packed_alphabet_code::kNumberOfCodesInBlock;
const uint32_t sequence0_brock_position =
packed_alphabet_code::GetBlockPosition(sequence0_position)
+ (reverse ? -kLoadLength + 1 : 0);
s_group_sequence_positions[idx_in_group] = sequence0_brock_position;
//__syncthreads();
group_loader.Load(sequence0_code_block, s_group_sequence_positions,
min((int) number_extensions - group_extension_begin,
number_members), s_group_loader, sequence_cache);
#if 0
// debug /////////////////////////////////////
if (debug_db_p == sequence0_position
|| blockIdx.x * blockDim.x + threadIdx.x == debug_thread_id) {
printf("block position : %d\n",
s_group_sequence_positions[idx_in_group]);
for (int i = 0; i < kLoadLength; ++i) {
printf("%d : %ld\n", i, sequence_cache[i]);
}
}
/////////////////////////////////////////////
#endif
AlphabetCoder::Code cached_sequence0_mem[GappedExtenderGpu::kMaxSequence0Length];
AlphabetCoder::Code *cached_sequence0 =
reverse ?
&cached_sequence0_mem[GappedExtenderGpu::kMaxSequence0Length
- 1] :
&cached_sequence0_mem[0];
if (thread_work_id < number_extensions) {
if (reverse) {
InitSequence<cuda_common::kReverse>(sequence_cache,
sequence0_code_block, sequence0_position,
sequence_delimiter, cached_sequence0);
} else {
InitSequence<cuda_common::kFoward>(sequence_cache,
sequence0_code_block, sequence0_position,
sequence_delimiter, cached_sequence0);
}
}
const uint32_t sequence1_position = sequence1_positions[extension_id]
+ cuda_common::kMaxLoadLength
* packed_alphabet_code::kNumberOfCodesInBlock;
uint32_t sequence1_brock_position =
packed_alphabet_code::GetBlockPosition(sequence1_position)
+ (reverse ? -kLoadLength + 1 : 0);
s_group_sequence_positions[idx_in_group] = sequence1_brock_position;
//__syncthreads();
group_loader.Load(sequence1_code_block, s_group_sequence_positions,
min((int) number_extensions - group_extension_begin,
number_members), s_group_loader, sequence_cache);
#if 0
// debug /////////////////////////////////////
if (debug_db_p == sequence1_position
|| blockIdx.x * blockDim.x + threadIdx.x == debug_thread_id) {
printf("thread id : %d\n", blockIdx.x * blockDim.x + threadIdx.x);
printf("sequence1_position : %d\n", sequence1_position);
printf("block position : %d\n",
s_group_sequence_positions[idx_in_group]);
for (int i = 0; i < kLoadLength; ++i) {
printf("%d : %ld\n", i, sequence_cache[i]);
}
}
/////////////////////////////////////////////
#endif
__syncthreads();
if (thread_work_id < number_extensions) {
uint32_t best_sequence_0_p = 0;
uint32_t best_sequence_1_p = 0;
int best_score = 0;
if (reverse) {
ExtendOneSideScoreOnlyDevice<cuda_common::kReverse>(
cached_sequence0, sequence1_code_block,
sequence1_position, sequence_cache, reverse,
(uint32_t) sequence_delimiter, score_matrix,
number_letters, gap_open, gap_extention, cutoff,
shared_mem, &best_sequence_0_p, &best_sequence_1_p,
&best_score);
sequence0_positions[extension_id] = sequence0_position
+ best_sequence_0_p
- cuda_common::kMaxLoadLength
* packed_alphabet_code::kNumberOfCodesInBlock;
sequence1_positions[extension_id] = sequence1_position
+ best_sequence_1_p
- cuda_common::kMaxLoadLength
* packed_alphabet_code::kNumberOfCodesInBlock;
best_scores[extension_id] = best_score;
#if 0
// debug /////////////////////////////////////
if (debug_db_p == sequence1_position
|| blockIdx.x * blockDim.x + threadIdx.x
== debug_thread_id) {
printf("reverse gapped extend score : %d\n", best_score);
}
/////////////////////////////////////////////
#endif
} else {
ExtendOneSideScoreOnlyDevice<cuda_common::kFoward>(
cached_sequence0, sequence1_code_block,
sequence1_position, sequence_cache, reverse,
(uint32_t) sequence_delimiter, score_matrix,
number_letters, gap_open, gap_extention, cutoff,
shared_mem, &best_sequence_0_p, &best_sequence_1_p,
&best_score);
sequence0_positions[extension_id] = sequence0_position
+ best_sequence_0_p
- cuda_common::kMaxLoadLength
* packed_alphabet_code::kNumberOfCodesInBlock;
sequence1_positions[extension_id] = sequence1_position
+ best_sequence_1_p
- cuda_common::kMaxLoadLength
* packed_alphabet_code::kNumberOfCodesInBlock;
best_scores[extension_id] = best_score;
#if 0
// debug /////////////////////////////////////
if (debug_db_p == sequence1_position
|| blockIdx.x * blockDim.x + threadIdx.x
== debug_thread_id) {
printf("forward gapped extend score : %d\n", best_score);
}
/////////////////////////////////////////////
#endif
}
}
__syncthreads();
}
return;
}
__global__ void ConvertToGappedExtensionSeeds(const uint32_t size, bool reverse,
const uint32_t *seed_positions,
uint32_t* ungapped_extension_sequence_positions,
uint32_t* gapped_extension_sequence_positions) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int skip = blockDim.x * gridDim.x;
for (int i = idx; i < size; i += skip) {
uint32_t seed_position = seed_positions[i];
uint32_t sequence_position =
ungapped_extension_sequence_positions[seed_position];
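		// Note: both branches of the ternary below are identical, so 'reverse'
		// currently has no effect on the stored position.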
gapped_extension_sequence_positions[i] =
reverse ?
sequence_position:
sequence_position;
}
}
}
GappedExtenderGpu::GappedExtenderGpu() :
number_letters_(0), gap_open_(0), gap_extention_(0), cutoff_(0), sequence_delimiter_(
0), d_database_sequence_(NULL), d_concatenated_query_sequence_(
0), d_score_matrix_(0) {
}
GappedExtenderGpu::~GappedExtenderGpu() {
}
int GappedExtenderGpu::SetQueries(AlphabetCoder::Code sequence_delimiter,
const packed_alphabet_code::PackedAlphabetCode *d_concatenated_query_sequence) {
sequence_delimiter_ = sequence_delimiter;
d_concatenated_query_sequence_ = d_concatenated_query_sequence;
return 0;
}
int GappedExtenderGpu::SetDatabase(
const packed_alphabet_code::PackedAlphabetCode *d_database_sequence) {
d_database_sequence_ = d_database_sequence;
return 0;
}
int GappedExtenderGpu::SetScoreParameters(const int *d_score_matrix,
uint32_t number_letters, int gap_open, int gap_extention, int cutoff) {
number_letters_ = number_letters;
d_score_matrix_ = d_score_matrix;
gap_open_ = gap_open;
gap_extention_ = gap_extention;
cutoff_ = cutoff;
return 0;
}
int GappedExtenderGpu::ConvertToGappedExtensionSeedsAsync(size_t size,
bool reverse, uint32_t* seed_positions,
uint32_t* query_concatenated_positions, uint32_t* database_positions,
uint32_t* d_seed_positions, uint32_t* d_temp_array,
uint32_t* d_query_concatenated_positions,
uint32_t* d_database_positions, hipStream_t &stream) const {
CUDA_CHECK_RETURN(
hipMemcpyAsync(d_seed_positions, seed_positions,
sizeof(seed_positions[0]) * size, hipMemcpyDefault,
stream));
CUDA_CHECK_RETURN(
hipMemcpyAsync(d_temp_array, query_concatenated_positions,
sizeof(query_concatenated_positions[0]) * size,
hipMemcpyDefault, stream));
hipLaunchKernelGGL(( gapped_extender_gpu_kernel::ConvertToGappedExtensionSeeds), dim3(128), dim3(256), 0, stream,
size, reverse, d_seed_positions, d_temp_array, d_query_concatenated_positions
);
CUDA_CHECK_RETURN(
hipMemcpyAsync(d_temp_array, database_positions,
sizeof(database_positions[0]) * size, hipMemcpyDefault,
stream));
hipLaunchKernelGGL(( gapped_extender_gpu_kernel::ConvertToGappedExtensionSeeds), dim3(128), dim3(256), 0, stream,
size, reverse, d_seed_positions, d_temp_array, d_database_positions
);
return 0;
}
int GappedExtenderGpu::ExtendOneSideScoreOnlyAsync(size_t size, bool reverse,
uint32_t* query_concatenated_positions, uint32_t* database_positions,
int* scores, uint32_t* d_query_concatenated_positions,
uint32_t* d_database_positions, int* d_scores,
hipStream_t &stream) const {
/*
CUDA_CHECK_RETURN(
hipMemcpyAsync(d_query_concatenated_positions,
query_concatenated_positions,
sizeof(query_concatenated_positions[0]) * size,
hipMemcpyDefault, stream));
CUDA_CHECK_RETURN(
hipMemcpyAsync(d_database_positions, database_positions,
sizeof(database_positions[0]) * size, hipMemcpyDefault,
stream));
*/
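	// The kernel uses one dynamic shared memory buffer for two purposes in turn:
	// first for the group loader (per-thread block positions plus staging area),
	// then as per-thread DP rows, so the launch must reserve the larger of the two layouts.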
size_t loader_shared_memory_size =
sizeof(uint32_t) * gapped_extender_gpu_kernel::kNumberThreads
+ GroupLoader<
const packed_alphabet_code::PackedAlphabetCode *,
gapped_extender_gpu_kernel::kLoadLength>::GetTotalSharedMemorySize(
gapped_extender_gpu_kernel::kNumberThreads);
size_t dp_row_shared_memory_size =
gapped_extender_gpu_kernel::kNumberThreads
* (gapped_extender_gpu_kernel::kDpRowLength * sizeof(int)
* 2);
size_t shared_memory_size = max(loader_shared_memory_size,
dp_row_shared_memory_size);
hipLaunchKernelGGL(( gapped_extender_gpu_kernel::ExtendOneSideScoreOnlyKernel),
dim3( gapped_extender_gpu_kernel::kNumberBlocks),
dim3( gapped_extender_gpu_kernel::kNumberThreads), shared_memory_size,
stream, d_concatenated_query_sequence_, d_database_sequence_,
size, reverse, sequence_delimiter_, d_score_matrix_,
number_letters_ + 1, gap_open_, gap_extention_, cutoff_,
d_query_concatenated_positions, d_database_positions, d_scores);
CUDA_CHECK_RETURN(
hipMemcpyAsync(query_concatenated_positions,
d_query_concatenated_positions,
sizeof(query_concatenated_positions[0]) * size,
hipMemcpyDefault, stream));
CUDA_CHECK_RETURN(
hipMemcpyAsync(database_positions, d_database_positions,
sizeof(database_positions[0]) * size, hipMemcpyDefault,
stream));
CUDA_CHECK_RETURN(
hipMemcpyAsync(scores, d_scores, sizeof(scores[0]) * size,
hipMemcpyDefault, stream));
return 0;
}
| 91480a9ddc2bf5032b30d1733907d8a7ed26c952.cu | /*
* gapped_extender_gpu.cpp
*
* Created on: Aug 11, 2014
* Author: shu
*/
#include "gapped_extender_gpu.h"
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/system/cuda/experimental/pinned_allocator.h>
#include <thrust/copy.h>
#include <vector>
#include <assert.h>
#include <iostream>
#include "packed_alphabet_code.h"
#include "group_loader.h"
#include "score_matrix.h"
#include "cuda_common.h"
using namespace std;
namespace gapped_extender_gpu_kernel {
#if 0
const int debug_q_p = 119;
const int debug_db_p = 19959
+ cuda_common::kMaxLoadLength
* packed_alphabet_code::kNumberOfCodesInBlock;
const int debug_thread_id = 0;
#endif
static const size_t kNumberBlocks = 128;
static const size_t kNumberThreads = 128;
static const size_t kLoadLength = 4;
static const size_t kDpRowLength = packed_alphabet_code::kNumberOfCodesInBlock
/ 3 + 1;
template<cuda_common::Direction TDirection>
__device__ int InitSequence(
const packed_alphabet_code::PackedAlphabetCode* sequence_cache_mem,
const packed_alphabet_code::PackedAlphabetCode* sequence_code_block,
const int sequence_position, const uint32_t sequence_delimiter,
AlphabetCoder::Code* cached_sequence) {
const packed_alphabet_code::PackedAlphabetCode * sequence_code_block_cache =
TDirection == cuda_common::kReverse ?
sequence_cache_mem + kLoadLength - 1 : sequence_cache_mem;
int next_bolock_position = packed_alphabet_code::GetBlockPosition(
sequence_position);
int offset = sequence_position
- next_bolock_position
* packed_alphabet_code::kNumberOfCodesInBlock;
int next_bolock_chache_position = 0;
uint32_t block_sift;
uint32_t next_block_sift;
packed_alphabet_code::PackedAlphabetCode temp_code_block;
packed_alphabet_code::InitPackedAlphabetCode<TDirection>(
sequence_code_block_cache, offset, &next_bolock_chache_position,
&block_sift, &next_block_sift, &temp_code_block);
#if 0
if ((debug_q_p - 1) == sequence_position || debug_q_p == sequence_position
|| blockIdx.x * blockDim.x + threadIdx.x == 0) {
printf("temp code block %ld\n", temp_code_block);
}
#endif
int cache_end = ((int) kLoadLength)
* (TDirection == cuda_common::kReverse ? -1 : 1);
uint32_t stop_flag = 0;
int p = 0;
int increment = TDirection == cuda_common::kReverse ? -1 : 1;
while (!stop_flag && (next_bolock_chache_position != cache_end)) {
packed_alphabet_code::PackedAlphabetCode code_block =
packed_alphabet_code::GetPackedAlphabetCode<TDirection>(
sequence_code_block_cache, block_sift, next_block_sift,
&next_bolock_chache_position, &temp_code_block);
#if 0
if ((debug_q_p - 1) == sequence_position
|| debug_q_p == sequence_position
|| blockIdx.x * blockDim.x + threadIdx.x == 0) {
printf("code block %ld\n", code_block);
}
#endif
#pragma unroll
for (uint32_t i = 0; i < packed_alphabet_code::kNumberOfCodesInBlock;
++i) {
const uint32_t c =
packed_alphabet_code::GetAlphabetCode<TDirection>(
code_block, i);
#if 0
if ((blockDim.x * blockIdx.x + threadIdx.x) == 0) {
printf("%d : %d\n", i, c);
}
#endif
stop_flag = stop_flag | c == sequence_delimiter;
cached_sequence[p] = c;
p += increment;
}
}
next_bolock_position += next_bolock_chache_position;
while (!stop_flag) {
packed_alphabet_code::PackedAlphabetCode code_block =
packed_alphabet_code::GetPackedAlphabetCode<TDirection>(
sequence_code_block, block_sift, next_block_sift,
&next_bolock_position, &temp_code_block);
#pragma unroll
for (uint32_t i = 0; i < packed_alphabet_code::kNumberOfCodesInBlock;
++i) {
const uint32_t c =
packed_alphabet_code::GetAlphabetCode<TDirection>(
code_block, i);
stop_flag = stop_flag | c == sequence_delimiter;
cached_sequence[p] = c;
p += increment;
}
}
return 0;
}
__device__ int InitDpColumn(const AlphabetCoder::Code* sequence,
const AlphabetCoder::Code sequence_delimiter, int gap_init,
int gap_extention, int default_cutoff, int increment,
GappedExtenderGpu::DpColumnCell* dp_column) {
int score = -gap_init;
GappedExtenderGpu::DpColumnCell new_cell;
new_cell.h = 0;
new_cell.e = -gap_init;
reinterpret_cast<uint64_t *>(dp_column)[0] =
reinterpret_cast<uint64_t *>(&new_cell)[0];
int sequence_position = 0;
int array_index = 0;
int cutoff = default_cutoff;
if (cutoff < gap_init) {
cutoff = gap_init;
}
for (array_index = 1; sequence[sequence_position] != sequence_delimiter;
++array_index, sequence_position += increment) {
if (score < -cutoff) {
break;
}
new_cell.h = score;
new_cell.e = score - gap_init;
reinterpret_cast<uint64_t *>(dp_column)[array_index] =
reinterpret_cast<uint64_t *>(&new_cell)[0];
score -= gap_extention;
}
return array_index;
}
template<cuda_common::Direction TDirection>
__device__ uint32_t InitDpRow(
const packed_alphabet_code::PackedAlphabetCode seq1_code_block,
const uint32_t seq1_offset,
const AlphabetCoder::Code sequence_delimiter, const int gap_init,
const int gap_extention, const int default_start_dp_h,
const int default_start_dp_e, const int threshold,
AlphabetCoder::Code *seq_1_subsequence, int *dp_h, int *dp_f,
int *last_dp_e) {
int start_dp_h = default_start_dp_h;
int start_dp_e = default_start_dp_e;
if (threshold > default_start_dp_h) {
start_dp_h = GappedExtenderGpu::kInitScore;
start_dp_e = GappedExtenderGpu::kInitScore;
}
dp_h[0] = start_dp_h;
dp_f[0] = start_dp_h - gap_init;
int dp_e = start_dp_e;
const int s_dp_row_skip = blockDim.x;
int s_dp_row_i = s_dp_row_skip;
#pragma unroll
for (uint32_t i = 1; i < kDpRowLength; ++i, s_dp_row_i += s_dp_row_skip) {
if (threshold > dp_e) {
dp_e = GappedExtenderGpu::kInitScore;
}
dp_h[s_dp_row_i] = dp_e;
dp_f[s_dp_row_i] = dp_e - gap_init;
dp_e -= gap_extention;
}
*last_dp_e = dp_e;
const uint32_t seq1_subsequence_i_end = seq1_offset + kDpRowLength - 1;
uint32_t seq1_subsequence_i = seq1_offset;
uint32_t seq_1_subsequence_length = 0;
for (; seq1_subsequence_i < seq1_subsequence_i_end; ++seq1_subsequence_i) {
const AlphabetCoder::Code c = packed_alphabet_code::GetAlphabetCode<
TDirection>(seq1_code_block, seq1_subsequence_i);
if (c == sequence_delimiter) {
break;
}
seq_1_subsequence[seq_1_subsequence_length] = c;
++seq_1_subsequence_length;
}
#if 0
if (blockIdx.x * blockDim.x + threadIdx.x == debug_thread_id) {
printf("start dp_h %d, start dp_e %d\n", default_start_dp_h,
default_start_dp_e);
printf("sizeof(int) = %d\n", sizeof(int));
printf(" ");
printf(" ");
for (int i = 0; i < seq_1_subsequence_length; ++i) {
printf("%5d", seq_1_subsequence[i]);
}
printf("\n");
printf(" ");
for (int i = 0; i < kDpRowLength; ++i) {
printf("%5d", dp_h[i * blockDim.x]);
}
printf("\n");
}
#endif
return seq_1_subsequence_length;
}
__device__ int UpdateDpRow(const AlphabetCoder::Code *seq_1_subsequence,
const int sequence0_position, const int sequence1_offset,
const uint32_t seq_1_subsequence_length, int increment, int cutoff,
const int *score_matrix_row, const int gap_init,
const int gap_extention, int* dp_row_h, int* dp_row_f, int start_dp_e,
int* max_score_sequence0_position, int* max_score_sequence1_position,
int* max_score_ptr, int* last_dp_e) {
int max_score_sequence1_position_in_row = -1;
int max_score = *max_score_ptr;
int dp_prev_h = dp_row_h[0];
int dp_e = start_dp_e;
const int dp_row_length = seq_1_subsequence_length + 1;
int start_update_cell = dp_row_length;
const int s_dp_row_skip = blockDim.x;
int s_dp_row_i = s_dp_row_skip;
int dp_f = GappedExtenderGpu::kInitScore;
for (int i = 1; i < dp_row_length; ++i, s_dp_row_i += s_dp_row_skip) {
int score = dp_prev_h
+ __ldg(&score_matrix_row[seq_1_subsequence[i - 1]]);
dp_prev_h = dp_row_h[s_dp_row_i];
dp_f = dp_row_f[s_dp_row_i];
score = max(score, dp_f);
score = max(score, dp_e);
if (score > max_score) {
max_score = score;
max_score_sequence1_position_in_row = i;
}
if (max_score - score > cutoff) {
score = GappedExtenderGpu::kInitScore;
dp_f = GappedExtenderGpu::kInitScore;
dp_e = GappedExtenderGpu::kInitScore;
} else {
start_update_cell = min(start_update_cell, i);
}
dp_row_h[s_dp_row_i] = score;
dp_row_f[s_dp_row_i] = max(score - gap_init, dp_f - gap_extention);
dp_e = max(score - gap_init, dp_e - gap_extention);
}
*last_dp_e = dp_e;
if (max_score_sequence1_position_in_row >= 0) {
*max_score_ptr = max_score;
*max_score_sequence0_position = sequence0_position;
*max_score_sequence1_position = sequence1_offset
+ (max_score_sequence1_position_in_row - 1) * increment;
}
return start_update_cell == dp_row_length;
}
template<cuda_common::Direction TDirection>
__device__ int ExtendOneSideScoreOnlyDevice(
const AlphabetCoder::Code* sequence0,
const packed_alphabet_code::PackedAlphabetCode* sequence1_code_block,
const uint32_t sequence1_offset,
const packed_alphabet_code::PackedAlphabetCode* sequence1_cache_mem,
const bool reverse, const AlphabetCoder::Code sequence_delimiter,
const int* score_matrix, const uint32_t number_letters,
const int gap_open, const int gap_extention, const int cutoff,
char shared_mem[], uint32_t* best_sequence0_position,
uint32_t* best_sequence1_position, int* best_score) {
const packed_alphabet_code::PackedAlphabetCode * sequence1_code_block_cache =
reverse ?
sequence1_cache_mem + kLoadLength - 1 : sequence1_cache_mem;
int next_bolock_position = packed_alphabet_code::GetBlockPosition(
sequence1_offset);
int sequence1_code_block_offset = sequence1_offset
- next_bolock_position
* packed_alphabet_code::kNumberOfCodesInBlock;
int next_bolock_chache_position = 0;
uint32_t block_sift;
uint32_t next_block_sift;
packed_alphabet_code::PackedAlphabetCode temp_code_block;
packed_alphabet_code::InitPackedAlphabetCode<TDirection>(
sequence1_code_block_cache, sequence1_code_block_offset,
&next_bolock_chache_position, &block_sift, &next_block_sift,
&temp_code_block);
int increment = reverse ? -1 : 1;
GappedExtenderGpu::DpColumnCell dp_column[GappedExtenderGpu::kMaxSequence0Length];
int sequence0_position_start = 0;
int dp_column_end = 0;
int gap_init = gap_open + gap_extention;
dp_column_end = InitDpColumn(sequence0, sequence_delimiter, gap_init,
gap_extention, cutoff, increment, dp_column);
int cache_end = ((int) kLoadLength)
* (TDirection == cuda_common::kReverse ? -1 : 1);
uint32_t stop_flag = 0;
int max_score = 0;
int max_score_sequence0_position = -increment;
int max_score_sequence1_position = -increment;
int sequence1_position = 0;
AlphabetCoder::Code seq_1_subsequence[kDpRowLength];
int *dp_row_mem = (int*) shared_mem;
int *dp_row_h = &dp_row_mem[threadIdx.x];
int *dp_row_f = &dp_row_mem[blockDim.x * kDpRowLength + threadIdx.x];
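	// The DP rows live in dynamic shared memory, interleaved across the block:
	// element i of this thread's row sits at dp_row_*[i * blockDim.x].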
uint32_t seq_1_subsequence_offset =
packed_alphabet_code::kNumberOfCodesInBlock;
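	// Start one block past the last in-block offset so the first loop iteration
	// fetches a fresh code block.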
packed_alphabet_code::PackedAlphabetCode code_block = 0;
while (!stop_flag && (next_bolock_chache_position != cache_end)) {
if (seq_1_subsequence_offset
>= packed_alphabet_code::kNumberOfCodesInBlock) {
code_block =
packed_alphabet_code::GetPackedAlphabetCode<TDirection>(
sequence1_code_block_cache, block_sift,
next_block_sift, &next_bolock_chache_position,
&temp_code_block);
seq_1_subsequence_offset = 0;
}
#if 0
if (blockIdx.x * blockDim.x + threadIdx.x == debug_thread_id) {
printf("code block %ld \n", code_block);
}
if (blockIdx.x * blockDim.x + threadIdx.x == debug_thread_id) {
printf("column %d:%2d\n", 0, dp_column_end);
}
#endif
int last_dp_e = 0;
GappedExtenderGpu::DpColumnCell dp_column_cell;
reinterpret_cast<uint64_t *>(&dp_column_cell)[0] =
reinterpret_cast<uint64_t *>(dp_column)[0];
uint32_t seq_1_subsequence_length = InitDpRow<TDirection>(code_block,
seq_1_subsequence_offset, sequence_delimiter, gap_init,
gap_extention, dp_column_cell.h, dp_column_cell.e,
max_score - cutoff, seq_1_subsequence, dp_row_h, dp_row_f,
&last_dp_e);
seq_1_subsequence_offset += seq_1_subsequence_length;
int last_s_dp_row_i = seq_1_subsequence_length * blockDim.x;
dp_column_cell.h = dp_row_h[last_s_dp_row_i];
dp_column_cell.e = last_dp_e;
reinterpret_cast<uint64_t *>(dp_column)[0] =
reinterpret_cast<uint64_t *>(&dp_column_cell)[0];
int start_drop_flag = 1;
int dp_column_stored_i = 1;
int last_updated_dp_column_stored_i = -1;
int ret = 0;
int sequence0_position = sequence0_position_start;
AlphabetCoder::Code s0_c;
for (int column_i = 1; column_i < dp_column_end | !ret;
++column_i, sequence0_position += increment) {
s0_c = sequence0[sequence0_position];
if (s0_c == sequence_delimiter) {
break;
}
reinterpret_cast<uint64_t *>(&dp_column_cell)[0] =
reinterpret_cast<uint64_t *>(dp_column)[column_i];
int start_dp_e =
column_i < dp_column_end ?
dp_column_cell.e : GappedExtenderGpu::kInitScore;
const int *score_matrix_row = &score_matrix[s0_c * number_letters];
#if 0
if (blockIdx.x * blockDim.x + threadIdx.x == debug_thread_id) {
printf("%2d:%2d", column_i, s0_c);
}
#endif
last_dp_e = 0;
ret = UpdateDpRow(seq_1_subsequence, sequence0_position,
sequence1_position, seq_1_subsequence_length, increment,
cutoff, score_matrix_row, gap_init, gap_extention, dp_row_h,
dp_row_f, start_dp_e, &max_score_sequence0_position,
&max_score_sequence1_position, &max_score, &last_dp_e);
if (start_drop_flag
& dp_row_h[last_s_dp_row_i]
== GappedExtenderGpu::kInitScore) {
dp_column_stored_i = 0;
sequence0_position_start += increment;
} else {
start_drop_flag = 0;
}
int temp_score =
column_i < dp_column_end ?
dp_column_cell.h : GappedExtenderGpu::kInitScore;
dp_column_cell.h =
ret ? GappedExtenderGpu::kInitScore : dp_row_h[last_s_dp_row_i];
dp_column_cell.e = ret ? GappedExtenderGpu::kInitScore : last_dp_e;
reinterpret_cast<uint64_t *>(dp_column)[dp_column_stored_i] =
reinterpret_cast<uint64_t *>(&dp_column_cell)[0];
++dp_column_stored_i;
last_updated_dp_column_stored_i =
ret ? last_updated_dp_column_stored_i : dp_column_stored_i;
dp_row_h[0] = temp_score;
#if 0
if (blockIdx.x * blockDim.x + threadIdx.x == debug_thread_id) {
for (int i = 0; i <= seq_1_subsequence_length; ++i) {
printf("%5d", dp_row_h[i * blockDim.x]);
}
printf(" (ret = %d, dp_column_h[column_i] = %d)", ret,
dp_column_h[column_i]);
printf("\n");
}
#endif
}
if (seq_1_subsequence_length < (kDpRowLength - 1)
|| 1 >= dp_column_stored_i) {
stop_flag = true;
break;
}
dp_column_end = last_updated_dp_column_stored_i + 1;
sequence1_position += seq_1_subsequence_length * increment;
}
next_bolock_position += next_bolock_chache_position;
while (!stop_flag) {
if (seq_1_subsequence_offset
>= packed_alphabet_code::kNumberOfCodesInBlock) {
code_block =
packed_alphabet_code::GetPackedAlphabetCode<TDirection>(
sequence1_code_block, block_sift, next_block_sift,
&next_bolock_position, &temp_code_block);
seq_1_subsequence_offset = 0;
}
#if 0
if (blockIdx.x * blockDim.x + threadIdx.x == debug_thread_id) {
printf("code block %ld \n", code_block);
}
if (blockIdx.x * blockDim.x + threadIdx.x == debug_thread_id) {
printf("column %d:%2d\n", 0, dp_column_end);
}
#endif
int last_dp_e = 0;
GappedExtenderGpu::DpColumnCell dp_column_cell;
reinterpret_cast<uint64_t *>(&dp_column_cell)[0] =
reinterpret_cast<uint64_t *>(dp_column)[0];
uint32_t seq_1_subsequence_length = InitDpRow<TDirection>(code_block,
seq_1_subsequence_offset, sequence_delimiter, gap_init,
gap_extention, dp_column_cell.h, dp_column_cell.e,
max_score - cutoff, seq_1_subsequence, dp_row_h, dp_row_f,
&last_dp_e);
seq_1_subsequence_offset += seq_1_subsequence_length;
int last_s_dp_row_i = seq_1_subsequence_length * blockDim.x;
dp_column_cell.h = dp_row_h[last_s_dp_row_i];
dp_column_cell.e = last_dp_e;
reinterpret_cast<uint64_t *>(dp_column)[0] =
reinterpret_cast<uint64_t *>(&dp_column_cell)[0];
int start_drop_flag = 1;
int dp_column_stored_i = 1;
int last_updated_dp_column_stored_i = -1;
int ret = 0;
int sequence0_position = sequence0_position_start;
AlphabetCoder::Code s0_c;
for (int column_i = 1; column_i < dp_column_end | !ret;
++column_i, sequence0_position += increment) {
s0_c = sequence0[sequence0_position];
if (s0_c == sequence_delimiter) {
break;
}
reinterpret_cast<uint64_t *>(&dp_column_cell)[0] =
reinterpret_cast<uint64_t *>(dp_column)[column_i];
int start_dp_e =
column_i < dp_column_end ?
dp_column_cell.e : GappedExtenderGpu::kInitScore;
const int *score_matrix_row = &score_matrix[s0_c * number_letters];
#if 0
if (blockIdx.x * blockDim.x + threadIdx.x == debug_thread_id) {
printf("%2d:%2d", column_i, s0_c);
}
#endif
last_dp_e = 0;
ret = UpdateDpRow(seq_1_subsequence, sequence0_position,
sequence1_position, seq_1_subsequence_length, increment,
cutoff, score_matrix_row, gap_init, gap_extention, dp_row_h,
dp_row_f, start_dp_e, &max_score_sequence0_position,
&max_score_sequence1_position, &max_score, &last_dp_e);
if (start_drop_flag
& dp_row_h[last_s_dp_row_i]
== GappedExtenderGpu::kInitScore) {
dp_column_stored_i = 0;
sequence0_position_start += increment;
} else {
start_drop_flag = 0;
}
int temp_score =
column_i < dp_column_end ?
dp_column_cell.h : GappedExtenderGpu::kInitScore;
dp_column_cell.h =
ret ? GappedExtenderGpu::kInitScore : dp_row_h[last_s_dp_row_i];
dp_column_cell.e = ret ? GappedExtenderGpu::kInitScore : last_dp_e;
reinterpret_cast<uint64_t *>(dp_column)[dp_column_stored_i] =
reinterpret_cast<uint64_t *>(&dp_column_cell)[0];
++dp_column_stored_i;
last_updated_dp_column_stored_i =
ret ? last_updated_dp_column_stored_i : dp_column_stored_i;
dp_row_h[0] = temp_score;
#if 0
if (blockIdx.x * blockDim.x + threadIdx.x == debug_thread_id) {
for (int i = 0; i <= seq_1_subsequence_length; ++i) {
printf("%5d", dp_row_h[i * blockDim.x]);
}
printf(" (ret = %d, dp_column_h[column_i] = %d)", ret,
dp_column_h[column_i]);
printf("\n");
}
#endif
}
if (seq_1_subsequence_length < (kDpRowLength - 1)
|| 1 >= dp_column_stored_i) {
stop_flag = true;
break;
}
dp_column_end = last_updated_dp_column_stored_i + 1;
sequence1_position += seq_1_subsequence_length * increment;
}
*best_score = max_score;
*best_sequence0_position = max_score_sequence0_position;
*best_sequence1_position = max_score_sequence1_position;
return 0;
}
__global__ void
__launch_bounds__(gapped_extender_gpu_kernel::kNumberThreads, 10) ExtendOneSideScoreOnlyKernel(
const packed_alphabet_code::PackedAlphabetCode* sequence0_code_block,
const packed_alphabet_code::PackedAlphabetCode* sequence1_code_block,
const uint32_t number_extensions, const bool reverse,
const AlphabetCoder::Code sequence_delimiter, const int* score_matrix,
const uint32_t number_letters, const int gap_open,
const int gap_extention, const int cutoff,
uint32_t* sequence0_positions, uint32_t* sequence1_positions,
int* best_scores) {
extern __shared__ char shared_mem[];
uint32_t* s_sequence_positions = (uint32_t *) shared_mem;
packed_alphabet_code::PackedAlphabetCode* s_group_loader =
(packed_alphabet_code::PackedAlphabetCode *) &s_sequence_positions[blockDim.x];
GroupLoader<const packed_alphabet_code::PackedAlphabetCode *, kLoadLength> group_loader;
int group_idx = group_loader.GetGroupId();
int idx_in_group = group_loader.GetIdInGroup();
int number_members = group_loader.GetNumberMembers();
int group_work_skip = group_loader.GetNumberGroups();
int number_group_in_block = blockDim.x / number_members;
int number_group_works = (number_extensions + number_members - 1)
/ number_members;
int remain_group_in_block = number_group_works % number_group_in_block;
int group_work_end = number_group_works
+ (remain_group_in_block != 0 ?
(number_group_in_block - remain_group_in_block) : 0);
uint32_t* s_group_sequence_positions = &s_sequence_positions[(threadIdx.x
/ number_members) * number_members];
for (int group_work_i = group_idx; group_work_i < group_work_end;
group_work_i += group_work_skip) {
const int group_extension_begin = group_work_i * number_members;
const int thread_work_id = group_extension_begin + idx_in_group;
const int extension_id =
thread_work_id < number_extensions ? thread_work_id : 0;
packed_alphabet_code::PackedAlphabetCode sequence_cache[kLoadLength];
const uint32_t sequence0_position = sequence0_positions[extension_id]
+ cuda_common::kMaxLoadLength
* packed_alphabet_code::kNumberOfCodesInBlock;
const uint32_t sequence0_brock_position =
packed_alphabet_code::GetBlockPosition(sequence0_position)
+ (reverse ? -kLoadLength + 1 : 0);
s_group_sequence_positions[idx_in_group] = sequence0_brock_position;
//__syncthreads();
group_loader.Load(sequence0_code_block, s_group_sequence_positions,
min((int) number_extensions - group_extension_begin,
number_members), s_group_loader, sequence_cache);
#if 0
// debug /////////////////////////////////////
if (debug_db_p == sequence0_position
|| blockIdx.x * blockDim.x + threadIdx.x == debug_thread_id) {
printf("block position : %d\n",
s_group_sequence_positions[idx_in_group]);
for (int i = 0; i < kLoadLength; ++i) {
printf("%d : %ld\n", i, sequence_cache[i]);
}
}
/////////////////////////////////////////////
#endif
AlphabetCoder::Code cached_sequence0_mem[GappedExtenderGpu::kMaxSequence0Length];
AlphabetCoder::Code *cached_sequence0 =
reverse ?
&cached_sequence0_mem[GappedExtenderGpu::kMaxSequence0Length
- 1] :
&cached_sequence0_mem[0];
if (thread_work_id < number_extensions) {
if (reverse) {
InitSequence<cuda_common::kReverse>(sequence_cache,
sequence0_code_block, sequence0_position,
sequence_delimiter, cached_sequence0);
} else {
InitSequence<cuda_common::kFoward>(sequence_cache,
sequence0_code_block, sequence0_position,
sequence_delimiter, cached_sequence0);
}
}
const uint32_t sequence1_position = sequence1_positions[extension_id]
+ cuda_common::kMaxLoadLength
* packed_alphabet_code::kNumberOfCodesInBlock;
uint32_t sequence1_brock_position =
packed_alphabet_code::GetBlockPosition(sequence1_position)
+ (reverse ? -kLoadLength + 1 : 0);
s_group_sequence_positions[idx_in_group] = sequence1_brock_position;
//__syncthreads();
group_loader.Load(sequence1_code_block, s_group_sequence_positions,
min((int) number_extensions - group_extension_begin,
number_members), s_group_loader, sequence_cache);
#if 0
// debug /////////////////////////////////////
if (debug_db_p == sequence1_position
|| blockIdx.x * blockDim.x + threadIdx.x == debug_thread_id) {
printf("thread id : %d\n", blockIdx.x * blockDim.x + threadIdx.x);
printf("sequence1_position : %d\n", sequence1_position);
printf("block position : %d\n",
s_group_sequence_positions[idx_in_group]);
for (int i = 0; i < kLoadLength; ++i) {
printf("%d : %ld\n", i, sequence_cache[i]);
}
}
/////////////////////////////////////////////
#endif
__syncthreads();
if (thread_work_id < number_extensions) {
uint32_t best_sequence_0_p = 0;
uint32_t best_sequence_1_p = 0;
int best_score = 0;
if (reverse) {
ExtendOneSideScoreOnlyDevice<cuda_common::kReverse>(
cached_sequence0, sequence1_code_block,
sequence1_position, sequence_cache, reverse,
(uint32_t) sequence_delimiter, score_matrix,
number_letters, gap_open, gap_extention, cutoff,
shared_mem, &best_sequence_0_p, &best_sequence_1_p,
&best_score);
sequence0_positions[extension_id] = sequence0_position
+ best_sequence_0_p
- cuda_common::kMaxLoadLength
* packed_alphabet_code::kNumberOfCodesInBlock;
sequence1_positions[extension_id] = sequence1_position
+ best_sequence_1_p
- cuda_common::kMaxLoadLength
* packed_alphabet_code::kNumberOfCodesInBlock;
best_scores[extension_id] = best_score;
#if 0
// debug /////////////////////////////////////
if (debug_db_p == sequence1_position
|| blockIdx.x * blockDim.x + threadIdx.x
== debug_thread_id) {
printf("reverse gapped extend score : %d\n", best_score);
}
/////////////////////////////////////////////
#endif
} else {
ExtendOneSideScoreOnlyDevice<cuda_common::kFoward>(
cached_sequence0, sequence1_code_block,
sequence1_position, sequence_cache, reverse,
(uint32_t) sequence_delimiter, score_matrix,
number_letters, gap_open, gap_extention, cutoff,
shared_mem, &best_sequence_0_p, &best_sequence_1_p,
&best_score);
sequence0_positions[extension_id] = sequence0_position
+ best_sequence_0_p
- cuda_common::kMaxLoadLength
* packed_alphabet_code::kNumberOfCodesInBlock;
sequence1_positions[extension_id] = sequence1_position
+ best_sequence_1_p
- cuda_common::kMaxLoadLength
* packed_alphabet_code::kNumberOfCodesInBlock;
best_scores[extension_id] = best_score;
#if 0
// debug /////////////////////////////////////
if (debug_db_p == sequence1_position
|| blockIdx.x * blockDim.x + threadIdx.x
== debug_thread_id) {
printf("forward gapped extend score : %d\n", best_score);
}
/////////////////////////////////////////////
#endif
}
}
__syncthreads();
}
return;
}
__global__ void ConvertToGappedExtensionSeeds(const uint32_t size, bool reverse,
const uint32_t *seed_positions,
uint32_t* ungapped_extension_sequence_positions,
uint32_t* gapped_extension_sequence_positions) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int skip = blockDim.x * gridDim.x;
for (int i = idx; i < size; i += skip) {
uint32_t seed_position = seed_positions[i];
uint32_t sequence_position =
ungapped_extension_sequence_positions[seed_position];
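		// Note: both branches of the ternary below are identical, so 'reverse'
		// currently has no effect on the stored position.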
gapped_extension_sequence_positions[i] =
reverse ?
sequence_position:
sequence_position;
}
}
}
GappedExtenderGpu::GappedExtenderGpu() :
number_letters_(0), gap_open_(0), gap_extention_(0), cutoff_(0), sequence_delimiter_(
0), d_database_sequence_(NULL), d_concatenated_query_sequence_(
0), d_score_matrix_(0) {
}
GappedExtenderGpu::~GappedExtenderGpu() {
}
int GappedExtenderGpu::SetQueries(AlphabetCoder::Code sequence_delimiter,
const packed_alphabet_code::PackedAlphabetCode *d_concatenated_query_sequence) {
sequence_delimiter_ = sequence_delimiter;
d_concatenated_query_sequence_ = d_concatenated_query_sequence;
return 0;
}
int GappedExtenderGpu::SetDatabase(
const packed_alphabet_code::PackedAlphabetCode *d_database_sequence) {
d_database_sequence_ = d_database_sequence;
return 0;
}
int GappedExtenderGpu::SetScoreParameters(const int *d_score_matrix,
uint32_t number_letters, int gap_open, int gap_extention, int cutoff) {
number_letters_ = number_letters;
d_score_matrix_ = d_score_matrix;
gap_open_ = gap_open;
gap_extention_ = gap_extention;
cutoff_ = cutoff;
return 0;
}
int GappedExtenderGpu::ConvertToGappedExtensionSeedsAsync(size_t size,
bool reverse, uint32_t* seed_positions,
uint32_t* query_concatenated_positions, uint32_t* database_positions,
uint32_t* d_seed_positions, uint32_t* d_temp_array,
uint32_t* d_query_concatenated_positions,
uint32_t* d_database_positions, cudaStream_t &stream) const {
CUDA_CHECK_RETURN(
cudaMemcpyAsync(d_seed_positions, seed_positions,
sizeof(seed_positions[0]) * size, cudaMemcpyDefault,
stream));
CUDA_CHECK_RETURN(
cudaMemcpyAsync(d_temp_array, query_concatenated_positions,
sizeof(query_concatenated_positions[0]) * size,
cudaMemcpyDefault, stream));
gapped_extender_gpu_kernel::ConvertToGappedExtensionSeeds<<<128, 256, 0, stream>>>(
size, reverse, d_seed_positions, d_temp_array, d_query_concatenated_positions
);
CUDA_CHECK_RETURN(
cudaMemcpyAsync(d_temp_array, database_positions,
sizeof(database_positions[0]) * size, cudaMemcpyDefault,
stream));
gapped_extender_gpu_kernel::ConvertToGappedExtensionSeeds<<<128, 256, 0, stream>>>(
size, reverse, d_seed_positions, d_temp_array, d_database_positions
);
return 0;
}
int GappedExtenderGpu::ExtendOneSideScoreOnlyAsync(size_t size, bool reverse,
uint32_t* query_concatenated_positions, uint32_t* database_positions,
int* scores, uint32_t* d_query_concatenated_positions,
uint32_t* d_database_positions, int* d_scores,
cudaStream_t &stream) const {
/*
CUDA_CHECK_RETURN(
cudaMemcpyAsync(d_query_concatenated_positions,
query_concatenated_positions,
sizeof(query_concatenated_positions[0]) * size,
cudaMemcpyDefault, stream));
CUDA_CHECK_RETURN(
cudaMemcpyAsync(d_database_positions, database_positions,
sizeof(database_positions[0]) * size, cudaMemcpyDefault,
stream));
*/
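	// The kernel uses one dynamic shared memory buffer for two purposes in turn:
	// first for the group loader (per-thread block positions plus staging area),
	// then as per-thread DP rows, so the launch must reserve the larger of the two layouts.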
size_t loader_shared_memory_size =
sizeof(uint32_t) * gapped_extender_gpu_kernel::kNumberThreads
+ GroupLoader<
const packed_alphabet_code::PackedAlphabetCode *,
gapped_extender_gpu_kernel::kLoadLength>::GetTotalSharedMemorySize(
gapped_extender_gpu_kernel::kNumberThreads);
size_t dp_row_shared_memory_size =
gapped_extender_gpu_kernel::kNumberThreads
* (gapped_extender_gpu_kernel::kDpRowLength * sizeof(int)
* 2);
size_t shared_memory_size = max(loader_shared_memory_size,
dp_row_shared_memory_size);
gapped_extender_gpu_kernel::ExtendOneSideScoreOnlyKernel<<<
gapped_extender_gpu_kernel::kNumberBlocks,
gapped_extender_gpu_kernel::kNumberThreads, shared_memory_size,
stream>>>(d_concatenated_query_sequence_, d_database_sequence_,
size, reverse, sequence_delimiter_, d_score_matrix_,
number_letters_ + 1, gap_open_, gap_extention_, cutoff_,
d_query_concatenated_positions, d_database_positions, d_scores);
CUDA_CHECK_RETURN(
cudaMemcpyAsync(query_concatenated_positions,
d_query_concatenated_positions,
sizeof(query_concatenated_positions[0]) * size,
cudaMemcpyDefault, stream));
CUDA_CHECK_RETURN(
cudaMemcpyAsync(database_positions, d_database_positions,
sizeof(database_positions[0]) * size, cudaMemcpyDefault,
stream));
CUDA_CHECK_RETURN(
cudaMemcpyAsync(scores, d_scores, sizeof(scores[0]) * size,
cudaMemcpyDefault, stream));
return 0;
}
|
82d13e73815c5b151aef656589bb1d807cef4146.hip | // !!! This is a file automatically generated by hipify!!!
/*
 * See if we can get a GPU to throw hipErrorMemoryAllocation
*/
#include <iostream>
#include <cstdio>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool
abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n",
hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
int main(void) {
float* ptr = NULL;
size_t size = pow(10, 9) * sizeof(float);
gpuErrchk( hipMalloc((void**)&ptr, size) );
printf("Successfully allocated %zu bytes.\n", size);
hipFree(ptr);
/*Matrix mat = AllocateMatrix(n, k, 1);*/
/*printMatrix(mat);*/
/*FreeMatrix(&mat);*/
/*cv::Mat image = cv::imread( "outputImages/result.jpg", 1 );*/
/*printf("size = (%i, %i)\n", image.rows, image.cols);*/
return 0;
}
| 82d13e73815c5b151aef656589bb1d807cef4146.cu | /*
 * See if we can get a GPU to throw cudaErrorMemoryAllocation
*/
#include <iostream>
#include <cstdio>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool
abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n",
cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
int main(void) {
float* ptr = NULL;
size_t size = pow(10, 9) * sizeof(float);
gpuErrchk( cudaMalloc((void**)&ptr, size) );
printf("Successfully allocated %zu bytes.\n", size);
cudaFree(ptr);
/*Matrix mat = AllocateMatrix(n, k, 1);*/
/*printMatrix(mat);*/
/*FreeMatrix(&mat);*/
/*cv::Mat image = cv::imread( "outputImages/result.jpg", 1 );*/
/*printf("size = (%i, %i)\n", image.rows, image.cols);*/
return 0;
}
|
1d6ade0428b5edeb44bee91f80eafb68a58650aa.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "bboxUtils.h"
#include "hip/hip_runtime_api.h"
#include "gatherNMSOutputs.h"
#include "kernel.h"
#include "nmsUtils.h"
pluginStatus_t nmsInference(hipStream_t stream, const int N, const int perBatchBoxesSize, const int perBatchScoresSize,
const bool shareLocation, const int backgroundLabelId, const int numPredsPerClass, const int numClasses,
const int topK, const int keepTopK, const float scoreThreshold, const float iouThreshold, const DataType DT_BBOX,
const void* locData, const DataType DT_SCORE, const void* confData, void* keepCount, void* nmsedBoxes,
void* nmsedScores, void* nmsedClasses, void* workspace, bool isNormalized, bool confSigmoid, bool clipBoxes)
{
// locCount = batch_size * number_boxes_per_sample * 4
const int locCount = N * perBatchBoxesSize;
/*
* shareLocation
 * Bounding boxes are shared among all classes, i.e., a bounding box could be classified as any candidate class.
 * Otherwise
 * Bounding boxes are designed for specific classes, i.e., a bounding box could be classified as one certain class or

* not (binary classification).
*/
const int numLocClasses = shareLocation ? 1 : numClasses;
size_t bboxDataSize = detectionForwardBBoxDataSize(N, perBatchBoxesSize, DT_BBOX);
void* bboxDataRaw = workspace;
hipMemcpyAsync(bboxDataRaw, locData, bboxDataSize, hipMemcpyDeviceToDevice, stream);
pluginStatus_t status;
/*
* bboxDataRaw format:
* [batch size, numPriors (per sample), numLocClasses, 4]
*/
// float for now
void* bboxData;
size_t bboxPermuteSize = detectionForwardBBoxPermuteSize(shareLocation, N, perBatchBoxesSize, DT_BBOX);
void* bboxPermute = nextWorkspacePtr((int8_t*) bboxDataRaw, bboxDataSize);
/*
* After permutation, bboxData format:
* [batch_size, numLocClasses, numPriors (per sample) (numPredsPerClass), 4]
* This is equivalent to swapping axis
*/
if (!shareLocation)
{
status = permuteData(
stream, locCount, numLocClasses, numPredsPerClass, 4, DT_BBOX, false, bboxDataRaw, bboxPermute);
ASSERT_FAILURE(status == STATUS_SUCCESS);
bboxData = bboxPermute;
}
/*
* If shareLocation, numLocClasses = 1
* No need to permute data on linear memory
*/
else
{
bboxData = bboxDataRaw;
}
/*
* Conf data format
* [batch size, numPriors * param.numClasses, 1, 1]
*/
const int numScores = N * perBatchScoresSize;
size_t totalScoresSize = detectionForwardPreNMSSize(N, perBatchScoresSize);
if(DT_SCORE == DataType::kHALF) totalScoresSize /= 2; // detectionForwardPreNMSSize is implemented in terms of kFLOAT
void* scores = nextWorkspacePtr((int8_t*) bboxPermute, bboxPermuteSize);
// need a conf_scores
/*
* After permutation, bboxData format:
* [batch_size, numClasses, numPredsPerClass, 1]
*/
status = permuteData(
stream, numScores, numClasses, numPredsPerClass, 1, DT_BBOX, confSigmoid, confData, scores);
ASSERT_FAILURE(status == STATUS_SUCCESS);
size_t indicesSize = detectionForwardPreNMSSize(N, perBatchScoresSize);
void* indices = nextWorkspacePtr((int8_t*) scores, totalScoresSize);
size_t postNMSScoresSize = detectionForwardPostNMSSize(N, numClasses, topK);
if(DT_SCORE == DataType::kHALF) postNMSScoresSize /= 2; // detectionForwardPostNMSSize is implemented in terms of kFLOAT
size_t postNMSIndicesSize = detectionForwardPostNMSSize(N, numClasses, topK); // indices are full int32
void* postNMSScores = nextWorkspacePtr((int8_t*) indices, indicesSize);
void* postNMSIndices = nextWorkspacePtr((int8_t*) postNMSScores, postNMSScoresSize);
void* sortingWorkspace = nextWorkspacePtr((int8_t*) postNMSIndices, postNMSIndicesSize);
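    // Workspace layout (carved out sequentially with nextWorkspacePtr):
    // [bbox copy][bbox permute][scores][indices][post-NMS scores][post-NMS indices][sort scratch].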
// Sort the scores so that the following NMS could be applied.
status = sortScoresPerClass(stream, N, numClasses, numPredsPerClass, backgroundLabelId, scoreThreshold,
DT_SCORE, scores, indices, sortingWorkspace);
ASSERT_FAILURE(status == STATUS_SUCCESS);
// This is set to true as the input bounding boxes are of the format [ymin,
// xmin, ymax, xmax]. The default implementation assumes [xmin, ymin, xmax, ymax]
bool flipXY = true;
// NMS
status = allClassNMS(stream, N, numClasses, numPredsPerClass, topK, iouThreshold, shareLocation, isNormalized,
DT_SCORE, DT_BBOX, bboxData, scores, indices, postNMSScores, postNMSIndices, flipXY);
ASSERT_FAILURE(status == STATUS_SUCCESS);
// Sort the bounding boxes after NMS using scores
status = sortScoresPerImage(stream, N, numClasses * topK, DT_SCORE, postNMSScores, postNMSIndices, scores,
indices, sortingWorkspace);
ASSERT_FAILURE(status == STATUS_SUCCESS);
// Gather data from the sorted bounding boxes after NMS
status = gatherNMSOutputs(stream, shareLocation, N, numPredsPerClass, numClasses, topK, keepTopK, DT_BBOX,
DT_SCORE, indices, scores, bboxData, keepCount, nmsedBoxes, nmsedScores, nmsedClasses, clipBoxes);
ASSERT_FAILURE(status == STATUS_SUCCESS);
return STATUS_SUCCESS;
}
| 1d6ade0428b5edeb44bee91f80eafb68a58650aa.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "bboxUtils.h"
#include "cuda_runtime_api.h"
#include "gatherNMSOutputs.h"
#include "kernel.h"
#include "nmsUtils.h"
pluginStatus_t nmsInference(cudaStream_t stream, const int N, const int perBatchBoxesSize, const int perBatchScoresSize,
const bool shareLocation, const int backgroundLabelId, const int numPredsPerClass, const int numClasses,
const int topK, const int keepTopK, const float scoreThreshold, const float iouThreshold, const DataType DT_BBOX,
const void* locData, const DataType DT_SCORE, const void* confData, void* keepCount, void* nmsedBoxes,
void* nmsedScores, void* nmsedClasses, void* workspace, bool isNormalized, bool confSigmoid, bool clipBoxes)
{
// locCount = batch_size * number_boxes_per_sample * 4
const int locCount = N * perBatchBoxesSize;
/*
* shareLocation
 * Bounding boxes are shared among all classes, i.e., a bounding box could be classified as any candidate class.
 * Otherwise
 * Bounding boxes are designed for specific classes, i.e., a bounding box could be classified as one certain class or
* not (binary classification).
*/
const int numLocClasses = shareLocation ? 1 : numClasses;
size_t bboxDataSize = detectionForwardBBoxDataSize(N, perBatchBoxesSize, DT_BBOX);
void* bboxDataRaw = workspace;
cudaMemcpyAsync(bboxDataRaw, locData, bboxDataSize, cudaMemcpyDeviceToDevice, stream);
pluginStatus_t status;
/*
* bboxDataRaw format:
* [batch size, numPriors (per sample), numLocClasses, 4]
*/
// float for now
void* bboxData;
size_t bboxPermuteSize = detectionForwardBBoxPermuteSize(shareLocation, N, perBatchBoxesSize, DT_BBOX);
void* bboxPermute = nextWorkspacePtr((int8_t*) bboxDataRaw, bboxDataSize);
/*
* After permutation, bboxData format:
* [batch_size, numLocClasses, numPriors (per sample) (numPredsPerClass), 4]
* This is equivalent to swapping axis
*/
if (!shareLocation)
{
status = permuteData(
stream, locCount, numLocClasses, numPredsPerClass, 4, DT_BBOX, false, bboxDataRaw, bboxPermute);
ASSERT_FAILURE(status == STATUS_SUCCESS);
bboxData = bboxPermute;
}
/*
* If shareLocation, numLocClasses = 1
* No need to permute data on linear memory
*/
else
{
bboxData = bboxDataRaw;
}
/*
* Conf data format
* [batch size, numPriors * param.numClasses, 1, 1]
*/
const int numScores = N * perBatchScoresSize;
size_t totalScoresSize = detectionForwardPreNMSSize(N, perBatchScoresSize);
if(DT_SCORE == DataType::kHALF) totalScoresSize /= 2; // detectionForwardPreNMSSize is implemented in terms of kFLOAT
void* scores = nextWorkspacePtr((int8_t*) bboxPermute, bboxPermuteSize);
// need a conf_scores
/*
* After permutation, bboxData format:
* [batch_size, numClasses, numPredsPerClass, 1]
*/
status = permuteData(
stream, numScores, numClasses, numPredsPerClass, 1, DT_BBOX, confSigmoid, confData, scores);
ASSERT_FAILURE(status == STATUS_SUCCESS);
size_t indicesSize = detectionForwardPreNMSSize(N, perBatchScoresSize);
void* indices = nextWorkspacePtr((int8_t*) scores, totalScoresSize);
size_t postNMSScoresSize = detectionForwardPostNMSSize(N, numClasses, topK);
if(DT_SCORE == DataType::kHALF) postNMSScoresSize /= 2; // detectionForwardPostNMSSize is implemented in terms of kFLOAT
size_t postNMSIndicesSize = detectionForwardPostNMSSize(N, numClasses, topK); // indices are full int32
void* postNMSScores = nextWorkspacePtr((int8_t*) indices, indicesSize);
void* postNMSIndices = nextWorkspacePtr((int8_t*) postNMSScores, postNMSScoresSize);
void* sortingWorkspace = nextWorkspacePtr((int8_t*) postNMSIndices, postNMSIndicesSize);
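    // Workspace layout (carved out sequentially with nextWorkspacePtr):
    // [bbox copy][bbox permute][scores][indices][post-NMS scores][post-NMS indices][sort scratch].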
// Sort the scores so that the following NMS could be applied.
status = sortScoresPerClass(stream, N, numClasses, numPredsPerClass, backgroundLabelId, scoreThreshold,
DT_SCORE, scores, indices, sortingWorkspace);
ASSERT_FAILURE(status == STATUS_SUCCESS);
// This is set to true as the input bounding boxes are of the format [ymin,
// xmin, ymax, xmax]. The default implementation assumes [xmin, ymin, xmax, ymax]
bool flipXY = true;
// NMS
status = allClassNMS(stream, N, numClasses, numPredsPerClass, topK, iouThreshold, shareLocation, isNormalized,
DT_SCORE, DT_BBOX, bboxData, scores, indices, postNMSScores, postNMSIndices, flipXY);
ASSERT_FAILURE(status == STATUS_SUCCESS);
// Sort the bounding boxes after NMS using scores
status = sortScoresPerImage(stream, N, numClasses * topK, DT_SCORE, postNMSScores, postNMSIndices, scores,
indices, sortingWorkspace);
ASSERT_FAILURE(status == STATUS_SUCCESS);
// Gather data from the sorted bounding boxes after NMS
status = gatherNMSOutputs(stream, shareLocation, N, numPredsPerClass, numClasses, topK, keepTopK, DT_BBOX,
DT_SCORE, indices, scores, bboxData, keepCount, nmsedBoxes, nmsedScores, nmsedClasses, clipBoxes);
ASSERT_FAILURE(status == STATUS_SUCCESS);
return STATUS_SUCCESS;
}
|
a4f6c1ff0dbb0fc16286f6fc53bfc80b6c1d82b8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "bits/mexutils.h"
#include "bits/datamex.hpp"
#if ENABLE_GPU
#include "bits/datacu.hpp"
#endif
#include <math_constants.h>
#undef printf
#include <stdio.h>
/* option codes */
enum {
opt_stride = 0,
opt_pad,
opt_pool_switches,
opt_verbose,
} ;
/* options */
vlmxOption options [] = {
{"Stride", 1, opt_stride },
{"Pad", 1, opt_pad },
{"PoolSwitches", 1, opt_pool_switches },
{"Verbose", 0, opt_verbose },
{0, 0, 0 }
} ;
/* ---------------------------------------------------------------- */
/* Context */
/* ---------------------------------------------------------------- */
vl::MexContext context ;
/*
Resetting the context here resolves a crash when MATLAB quits and
the ~Context function is implicitly called on unloading the MEX file.
*/
void atExit()
{
context.clear() ;
}
/* ---------------------------------------------------------------- */
/* MEX driver */
/* ---------------------------------------------------------------- */
enum {
IN_DATA = 0, IN_SIZE, IN_DEROUTPUT, IN_END
} ;
enum {
OUT_RESULT = 0, OUT_POOL_SWITCHES, OUT_END
} ;
/* ---------------------------------------------------------------- */
/* pooling_max_switches_dm_kernel */
/* ---------------------------------------------------------------- */
template<typename T> __global__ void
pooling_max_switches_dm_kernel
(T* pooled,
uint8_t* poolSwitches,
const T* data,
const int pooledWidth,
const int pooledHeight,
const int pooledVolume,
const int width,
const int height,
const int poolWidth,
const int poolHeight,
const int strideX,
const int strideY,
const int padLeft,
const int padTop)
{
int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (pooledIndex < pooledVolume) {
int px = pooledIndex ;
int py = px / pooledWidth ;
int pz = py / pooledHeight ;
px %= pooledWidth ;
py %= pooledHeight ;
data += pz * (width*height) ;
int x1 = px * strideX - padLeft ;
int y1 = py * strideY - padTop ;
int x2 = x1 + poolWidth ;
int y2 = y1 + poolHeight ;
//int x2 = min(x1 + poolWidth, width) ;
//int y2 = min(y1 + poolHeight, height) ;
//x1 = max(x1, 0) ;
//y1 = max(y1, 0) ;
T bestValue = (T)(-CUDART_INF_F) ;
uint8_t switchLocation = 1 ;
int loc = 1 ;
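    // 'loc' walks the pooling window in row-major order starting at 1; the value
    // stored in poolSwitches is this 1-based offset, decoded in the backward kernel
    // as lx = (loc - 1) % poolWidth, ly = (loc - 1) / poolWidth.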
int bx=-1;
int by=-1;
for (int y = y1 ; y < y2 ; ++y) {
for (int x = x1 ; x < x2 ; ++x) {
if(x >= 0 && y >= 0 && x < width && y < height
&& bestValue < data[y * width + x]) {
bestValue = data[y * width + x] ;
switchLocation = loc ;
bx = x; by = y;
}
loc += 1 ;
}
}
//if (by*width+bx +pz *(width*height) == 1234) {
// printf("index %d data[1234] best %f loc %d\n", pooledIndex, bestValue, (int) switchLocation);
//}
pooled[pooledIndex] = bestValue ;
poolSwitches[pooledIndex] = switchLocation;
}
}
/* ---------------------------------------------------------------- */
/* pooling_max_backward_switches_dm_kernel */
/* ---------------------------------------------------------------- */
template <typename T> __global__ void
pooling_max_backward_switches_dm_kernel
(T* derData,
const uint8_t* poolSwitches,
const T* derPooled,
const int nthreads,
const int pooledWidth,
const int pooledHeight,
const int width,
const int height,
const int depth,
const int poolWidth,
const int poolHeight,
const int strideX,
const int strideY,
const int padLeft,
const int padTop)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nthreads) {
int x_data = index ;
int y_data = x_data / width ;
int z = y_data / height ;
x_data %= width ;
y_data %= height ;
int dx = x_data + padLeft - poolWidth ;
int dy = y_data + padTop - poolHeight ;
int px1 = (dx >= 0) ? dx/strideX + 1 : 0 ;
int py1 = (dy >= 0) ? dy/strideY + 1 : 0 ;
int px2 = min((x_data + padLeft) / strideX, pooledWidth - 1) ;
int py2 = min((y_data + padTop) / strideY, pooledHeight - 1) ;
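    // (px1, py1)-(px2, py2) is the range of pooled cells whose pooling window can
    // contain (x_data, y_data); only those cells need to be checked for a matching switch.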
T gradient = 0 ;
derPooled += z * pooledHeight * pooledWidth;
poolSwitches += z * pooledHeight * pooledWidth;
for (int py = py1; py <= py2; ++py) {
for (int px = px1; px <= px2; ++px) {
int x1 = px * strideX - padLeft ;
int y1 = py * strideY - padTop ;
int loc = poolSwitches[py * pooledWidth + px] - 1 ;
int lx = loc % poolWidth ;
int ly = loc / poolWidth ;
//if (index == 1234) {
// printf("index %d loc %d lx %d ly %d x1 %d y1 %d x_data %d y_data %d isloc %d\n", index, loc+1, lx, ly, x1, y1, x_data, y_data, x_data == (x1 + lx) && y_data == (y1 +ly));
// printf("py1 %d py2 %d px1 %d px2 %d\n", py1,py2,px1,px2);
//}
if(x_data == (x1 + lx) && y_data == (y1 +ly)) {
gradient += derPooled[py * pooledWidth + px] ;
}
}
}
derData[index] = gradient;
}
}
void mexFunction(int nout, mxArray *out[],
int nin, mxArray const *in[])
{
int poolWidth ;
int poolHeight ;
int strideX = 1 ;
int strideY = 1 ;
int padLeft = 0 ;
int padRight = 0 ;
int padTop = 0 ;
int padBottom = 0 ;
bool backMode = false ;
mxArray const *poolSwitchesIn = NULL ;
int verbosity = 0 ;
int opt ;
int next = IN_END ;
mxArray const *optarg ;
/* -------------------------------------------------------------- */
/* Check the arguments */
/* -------------------------------------------------------------- */
mexAtExit(atExit) ;
if (nin < 2) {
mexErrMsgTxt("The arguments are less than two.") ;
}
if (nin > 2 && vlmxIsString(in[2],-1)) {
next = 2 ;
backMode = 0 ;
} else {
backMode = (nin >= 3) ;
}
while ((opt = vlmxNextOption (in, nin, options, &next, &optarg)) >= 0) {
switch (opt) {
case opt_verbose :
++ verbosity ;
break ;
case opt_stride :
if (!vlmxIsPlainMatrix(optarg,-1,-1)) {
mexErrMsgTxt("STRIDE is not a plain matrix.") ;
}
switch (mxGetNumberOfElements(optarg)) {
case 1:
strideY = (int)mxGetPr(optarg)[0] ;
strideX = strideY ;
break ;
case 2:
strideY = (int)mxGetPr(optarg)[0] ;
strideX = (int)mxGetPr(optarg)[1] ;
break ;
default:
mexErrMsgTxt("STRIDE has neither one nor two elements.") ;
}
break ;
case opt_pad :
if (!vlmxIsPlainMatrix(optarg,-1,-1)) {
mexErrMsgTxt("PAD is not a plain matrix.") ;
}
switch (mxGetNumberOfElements(optarg)) {
case 1:
padLeft = (int)mxGetPr(optarg)[0] ;
padRight = padLeft ;
padTop = padLeft ;
padBottom = padLeft ;
break ;
case 4:
padTop = (int)mxGetPr(optarg)[0] ;
padBottom = (int)mxGetPr(optarg)[1] ;
padLeft = (int)mxGetPr(optarg)[2] ;
padRight = (int)mxGetPr(optarg)[3] ;
break ;
default:
mexErrMsgTxt("PAD has neither one nor four elements.") ;
}
break;
case opt_pool_switches :
poolSwitchesIn = optarg ;
break ;
default:
break ;
}
}
vl::MexTensor data(context) ;
vl::MexTensor derOutput(context) ;
data.init(in[IN_DATA]) ;
data.reshape(4) ; // -> 4 dimensions
if (backMode) {
derOutput.init(in[IN_DEROUTPUT]) ;
derOutput.reshape(4) ; // -> 4 dimensions
}
//if (backMode && ! vl::areCompatible(data, derOutput)) {
//mexErrMsgTxt("DATA and DEROUTPUT do not have compatible formats.") ;
//}
if (backMode && poolSwitchesIn == NULL) {
mexErrMsgTxt("Backward requires PoolSwitches") ;
}
if (!vlmxIsPlainMatrix(in[IN_SIZE],-1,-1)) {
mexErrMsgTxt("SIZE is not a plain matrix.") ;
}
switch (mxGetNumberOfElements(in[IN_SIZE])) {
case 1:
poolHeight = mxGetPr(in[IN_SIZE])[0] ;
poolWidth = poolHeight ;
break ;
case 2:
poolHeight = mxGetPr(in[IN_SIZE])[0] ;
poolWidth = mxGetPr(in[IN_SIZE])[1] ;
break ;
default:
mexErrMsgTxt("SIZE has neither one nor two elements.") ;
}
/* Basic compatibility of Shape */
if (strideX < 1 || strideY < 1) {
mexErrMsgTxt("At least one element of STRIDE is smaller than one.") ;
}
if (poolHeight == 0 || poolWidth == 0) {
mexErrMsgTxt("A dimension of the pooling SIZE is void.") ;
}
if (data.getHeight() + (padTop+padBottom) < poolHeight ||
data.getWidth() + (padLeft+padRight) < poolWidth) {
mexErrMsgTxt("The pooling window is larger than the DATA (including padding).") ;
}
if (padLeft < 0 ||
padRight < 0 ||
padTop < 0 ||
padBottom < 0) {
mexErrMsgTxt("An element of PAD is negative.") ;
}
if (padLeft >= poolWidth ||
padRight >= poolWidth ||
padTop >= poolHeight ||
padBottom >= poolHeight) {
mexErrMsgTxt("A padding value is larger or equal to the size of the pooling window.") ;
}
/* Get the output Shape */
vl::TensorShape outputShape((data.getHeight() + (padTop+padBottom) - poolHeight)/strideY + 1,
(data.getWidth() + (padLeft+padRight) - poolWidth)/strideX + 1,
data.getDepth(),
data.getSize()) ;
if (backMode && (derOutput != outputShape)) {
mexErrMsgTxt("DEROUTPUT dimensions are incompatible with X and POOL.") ;
}
/* Create output buffers */
vl::Device deviceType ;
if (backMode) {
// data can be CPU since its memory is not used
deviceType = derOutput.getDeviceType() ;
} else {
deviceType = data.getDeviceType() ;
}
vl::Type dataType = data.getDataType() ;
vl::MexTensor output(context) ;
vl::MexTensor poolSwitches(context) ;
vl::MexTensor derData(context) ;
if (deviceType != vl::GPU) {
mexErrMsgTxt("Only GPU supported") ;
}
if (poolSwitchesIn != NULL) {
poolSwitches.init(poolSwitchesIn) ;
if (poolSwitches.getDeviceType() != deviceType) {
mexErrMsgTxt("PoolSwitches and data have different device type") ;
}
}
if (!backMode) {
output.initWithZeros(deviceType, dataType, outputShape) ;
poolSwitches.initWithZeros(deviceType, vl::vlTypeUInt8, outputShape) ;
} else {
derData.initWithZeros(deviceType, dataType, data.getShape()) ;
}
// Dispatch
int height = data.getHeight() ;
int width = data.getWidth() ;
int depth = data.getDepth() * data.getSize() ;
int pooledWidth = (width + (padLeft+padRight) - poolWidth)/strideX + 1 ;
int pooledHeight = (height + (padTop+padBottom) - poolHeight)/strideY + 1 ;
int pooledVolume = pooledWidth * pooledHeight * depth ;
int dataVolume = width * height * depth ;
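  // Note: the kernel launches below bind (pooledHeight, pooledWidth), (height, width),
  // (strideY, strideX) and (padTop, padLeft) to parameters named in the opposite order.
  // The swap is applied consistently in both the forward and backward kernels, which
  // appears intentional given MATLAB's column-major layout; the backward kernel's
  // 'depth' parameter receives dataVolume but is unused in its body.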
if (!backMode) {
if (dataType == vl::vlTypeFloat) {
hipLaunchKernelGGL(( pooling_max_switches_dm_kernel<float>)
, dim3(vl::divideUpwards(pooledVolume, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0,
(float*) output.getMemory(), (uint8_t*) poolSwitches.getMemory(),
(float const*) data.getMemory(),
pooledHeight, pooledWidth, pooledVolume,
height, width,
poolHeight, poolWidth,
strideY, strideX,
padTop, padLeft);
} else if (dataType == vl::vlTypeDouble) {
#ifdef ENABLE_DOUBLE
hipLaunchKernelGGL(( pooling_max_switches_dm_kernel<double>)
, dim3(vl::divideUpwards(pooledVolume, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0,
(double*) output.getMemory(), (uint8_t*) poolSwitches.getMemory(),
(double const*) data.getMemory(),
pooledHeight, pooledWidth, pooledVolume,
height, width,
poolHeight, poolWidth,
strideY, strideX,
padTop, padLeft);
#endif
}
} else {
// Backward
if (dataType == vl::vlTypeFloat) {
hipLaunchKernelGGL(( pooling_max_backward_switches_dm_kernel<float>)
, dim3(vl::divideUpwards(dataVolume, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0,
(float*) derData.getMemory(), (uint8_t const*) poolSwitches.getMemory(),
(float*) derOutput.getMemory(),
dataVolume,
pooledHeight, pooledWidth,
height, width, dataVolume,
poolHeight, poolWidth,
strideY, strideX,
padTop, padLeft);
} else if (dataType == vl::vlTypeDouble) {
#ifdef ENABLE_DOUBLE
hipLaunchKernelGGL(( pooling_max_backward_switches_dm_kernel<double>)
, dim3(vl::divideUpwards(dataVolume, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0,
(double*) derData.getMemory(), (uint8_t const*) poolSwitches.getMemory(),
(double*) derOutput.getMemory(),
dataVolume,
pooledHeight, pooledWidth,
height, width, dataVolume,
poolHeight, poolWidth,
strideY, strideX,
padTop, padLeft);
#endif
}
}
hipError_t status = hipPeekAtLastError() ;
if (status != hipSuccess) {
mexErrMsgTxt(context.getLastErrorMessage().c_str()) ;
}
if (backMode) {
out[OUT_RESULT] = derData.relinquish() ;
} else {
out[OUT_RESULT] = output.relinquish() ;
if (nout > 1) {
out[OUT_POOL_SWITCHES] = poolSwitches.relinquish() ;
}
}
}
| a4f6c1ff0dbb0fc16286f6fc53bfc80b6c1d82b8.cu | #include "bits/mexutils.h"
#include "bits/datamex.hpp"
#if ENABLE_GPU
#include "bits/datacu.hpp"
#endif
#include <math_constants.h>
#undef printf
#include <stdio.h>
/* option codes */
enum {
opt_stride = 0,
opt_pad,
opt_pool_switches,
opt_verbose,
} ;
/* options */
vlmxOption options [] = {
{"Stride", 1, opt_stride },
{"Pad", 1, opt_pad },
{"PoolSwitches", 1, opt_pool_switches },
{"Verbose", 0, opt_verbose },
{0, 0, 0 }
} ;
/* ---------------------------------------------------------------- */
/* Context */
/* ---------------------------------------------------------------- */
vl::MexContext context ;
/*
Resetting the context here resolves a crash when MATLAB quits and
the ~Context function is implicitly called on unloading the MEX file.
*/
void atExit()
{
context.clear() ;
}
/* ---------------------------------------------------------------- */
/* MEX driver */
/* ---------------------------------------------------------------- */
enum {
IN_DATA = 0, IN_SIZE, IN_DEROUTPUT, IN_END
} ;
enum {
OUT_RESULT = 0, OUT_POOL_SWITCHES, OUT_END
} ;
/* ---------------------------------------------------------------- */
/* pooling_max_switches_dm_kernel */
/* ---------------------------------------------------------------- */
template<typename T> __global__ void
pooling_max_switches_dm_kernel
(T* pooled,
uint8_t* poolSwitches,
const T* data,
const int pooledWidth,
const int pooledHeight,
const int pooledVolume,
const int width,
const int height,
const int poolWidth,
const int poolHeight,
const int strideX,
const int strideY,
const int padLeft,
const int padTop)
{
int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (pooledIndex < pooledVolume) {
int px = pooledIndex ;
int py = px / pooledWidth ;
int pz = py / pooledHeight ;
px %= pooledWidth ;
py %= pooledHeight ;
data += pz * (width*height) ;
int x1 = px * strideX - padLeft ;
int y1 = py * strideY - padTop ;
int x2 = x1 + poolWidth ;
int y2 = y1 + poolHeight ;
//int x2 = min(x1 + poolWidth, width) ;
//int y2 = min(y1 + poolHeight, height) ;
//x1 = max(x1, 0) ;
//y1 = max(y1, 0) ;
T bestValue = (T)(-CUDART_INF_F) ;
uint8_t switchLocation = 1 ;
int loc = 1 ;
int bx=-1;
int by=-1;
for (int y = y1 ; y < y2 ; ++y) {
for (int x = x1 ; x < x2 ; ++x) {
if(x >= 0 && y >= 0 && x < width && y < height
&& bestValue < data[y * width + x]) {
bestValue = data[y * width + x] ;
switchLocation = loc ;
bx = x; by = y;
}
loc += 1 ;
}
}
//if (by*width+bx +pz *(width*height) == 1234) {
// printf("index %d data[1234] best %f loc %d\n", pooledIndex, bestValue, (int) switchLocation);
//}
pooled[pooledIndex] = bestValue ;
poolSwitches[pooledIndex] = switchLocation;
}
}
/* ---------------------------------------------------------------- */
/* pooling_max_backward_switches_dm_kernel */
/* ---------------------------------------------------------------- */
template <typename T> __global__ void
pooling_max_backward_switches_dm_kernel
(T* derData,
const uint8_t* poolSwitches,
const T* derPooled,
const int nthreads,
const int pooledWidth,
const int pooledHeight,
const int width,
const int height,
const int depth,
const int poolWidth,
const int poolHeight,
const int strideX,
const int strideY,
const int padLeft,
const int padTop)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nthreads) {
int x_data = index ;
int y_data = x_data / width ;
int z = y_data / height ;
x_data %= width ;
y_data %= height ;
int dx = x_data + padLeft - poolWidth ;
int dy = y_data + padTop - poolHeight ;
int px1 = (dx >= 0) ? dx/strideX + 1 : 0 ;
int py1 = (dy >= 0) ? dy/strideY + 1 : 0 ;
int px2 = min((x_data + padLeft) / strideX, pooledWidth - 1) ;
int py2 = min((y_data + padTop) / strideY, pooledHeight - 1) ;
T gradient = 0 ;
derPooled += z * pooledHeight * pooledWidth;
poolSwitches += z * pooledHeight * pooledWidth;
for (int py = py1; py <= py2; ++py) {
for (int px = px1; px <= px2; ++px) {
int x1 = px * strideX - padLeft ;
int y1 = py * strideY - padTop ;
int loc = poolSwitches[py * pooledWidth + px] - 1 ;
int lx = loc % poolWidth ;
int ly = loc / poolWidth ;
//if (index == 1234) {
// printf("index %d loc %d lx %d ly %d x1 %d y1 %d x_data %d y_data %d isloc %d\n", index, loc+1, lx, ly, x1, y1, x_data, y_data, x_data == (x1 + lx) && y_data == (y1 +ly));
// printf("py1 %d py2 %d px1 %d px2 %d\n", py1,py2,px1,px2);
//}
if(x_data == (x1 + lx) && y_data == (y1 +ly)) {
gradient += derPooled[py * pooledWidth + px] ;
}
}
}
derData[index] = gradient;
}
}
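/* ---------------------------------------------------------------- */
/*            pool-switch encoding (editor's example)                */
/* ---------------------------------------------------------------- */
/*
 Editor's note: this block is an illustrative addition, not part of the
 original source. It spells out the encoding shared by the two kernels
 above: the forward kernel stores, per pooled output, the 1-based
 row-major offset of the argmax inside its pooling window (so the value
 only fits in a uint8_t while poolWidth*poolHeight <= 255), and the
 backward kernel inverts it. For a 3x3 window, the centre element
 (lx = 1, ly = 1) is stored as loc = ly*poolWidth + lx + 1 = 1*3 + 1 + 1 = 5.
 The helper names below are invented here for illustration only.
*/
static inline int encodePoolSwitch(int lx, int ly, int poolWidth) {
  return ly * poolWidth + lx + 1 ;
}
static inline void decodePoolSwitch(int loc, int poolWidth, int &lx, int &ly) {
  lx = (loc - 1) % poolWidth ;
  ly = (loc - 1) / poolWidth ;
}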
void mexFunction(int nout, mxArray *out[],
int nin, mxArray const *in[])
{
int poolWidth ;
int poolHeight ;
int strideX = 1 ;
int strideY = 1 ;
int padLeft = 0 ;
int padRight = 0 ;
int padTop = 0 ;
int padBottom = 0 ;
bool backMode = false ;
mxArray const *poolSwitchesIn = NULL ;
int verbosity = 0 ;
int opt ;
int next = IN_END ;
mxArray const *optarg ;
/* -------------------------------------------------------------- */
/* Check the arguments */
/* -------------------------------------------------------------- */
mexAtExit(atExit) ;
if (nin < 2) {
mexErrMsgTxt("The arguments are less than two.") ;
}
if (nin > 2 && vlmxIsString(in[2],-1)) {
next = 2 ;
backMode = 0 ;
} else {
backMode = (nin >= 3) ;
}
while ((opt = vlmxNextOption (in, nin, options, &next, &optarg)) >= 0) {
switch (opt) {
case opt_verbose :
++ verbosity ;
break ;
case opt_stride :
if (!vlmxIsPlainMatrix(optarg,-1,-1)) {
mexErrMsgTxt("STRIDE is not a plain matrix.") ;
}
switch (mxGetNumberOfElements(optarg)) {
case 1:
strideY = (int)mxGetPr(optarg)[0] ;
strideX = strideY ;
break ;
case 2:
strideY = (int)mxGetPr(optarg)[0] ;
strideX = (int)mxGetPr(optarg)[1] ;
break ;
default:
mexErrMsgTxt("STRIDE has neither one nor two elements.") ;
}
break ;
case opt_pad :
if (!vlmxIsPlainMatrix(optarg,-1,-1)) {
mexErrMsgTxt("PAD is not a plain matrix.") ;
}
switch (mxGetNumberOfElements(optarg)) {
case 1:
padLeft = (int)mxGetPr(optarg)[0] ;
padRight = padLeft ;
padTop = padLeft ;
padBottom = padLeft ;
break ;
case 4:
padTop = (int)mxGetPr(optarg)[0] ;
padBottom = (int)mxGetPr(optarg)[1] ;
padLeft = (int)mxGetPr(optarg)[2] ;
padRight = (int)mxGetPr(optarg)[3] ;
break ;
default:
mexErrMsgTxt("PAD has neither one nor four elements.") ;
}
break;
case opt_pool_switches :
poolSwitchesIn = optarg ;
break ;
default:
break ;
}
}
vl::MexTensor data(context) ;
vl::MexTensor derOutput(context) ;
data.init(in[IN_DATA]) ;
data.reshape(4) ; // -> 4 dimensions
if (backMode) {
derOutput.init(in[IN_DEROUTPUT]) ;
derOutput.reshape(4) ; // -> 4 dimensions
}
//if (backMode && ! vl::areCompatible(data, derOutput)) {
//mexErrMsgTxt("DATA and DEROUTPUT do not have compatible formats.") ;
//}
if (backMode && poolSwitchesIn == NULL) {
mexErrMsgTxt("Backward requires PoolSwitches") ;
}
if (!vlmxIsPlainMatrix(in[IN_SIZE],-1,-1)) {
mexErrMsgTxt("SIZE is not a plain matrix.") ;
}
switch (mxGetNumberOfElements(in[IN_SIZE])) {
case 1:
poolHeight = mxGetPr(in[IN_SIZE])[0] ;
poolWidth = poolHeight ;
break ;
case 2:
poolHeight = mxGetPr(in[IN_SIZE])[0] ;
poolWidth = mxGetPr(in[IN_SIZE])[1] ;
break ;
default:
mexErrMsgTxt("SIZE has neither one nor two elements.") ;
}
/* Basic compatibility of Shape */
if (strideX < 1 || strideY < 1) {
mexErrMsgTxt("At least one element of STRIDE is smaller than one.") ;
}
if (poolHeight == 0 || poolWidth == 0) {
mexErrMsgTxt("A dimension of the pooling SIZE is void.") ;
}
if (data.getHeight() + (padTop+padBottom) < poolHeight ||
data.getWidth() + (padLeft+padRight) < poolWidth) {
mexErrMsgTxt("The pooling window is larger than the DATA (including padding).") ;
}
if (padLeft < 0 ||
padRight < 0 ||
padTop < 0 ||
padBottom < 0) {
mexErrMsgTxt("An element of PAD is negative.") ;
}
if (padLeft >= poolWidth ||
padRight >= poolWidth ||
padTop >= poolHeight ||
padBottom >= poolHeight) {
mexErrMsgTxt("A padding value is larger or equal to the size of the pooling window.") ;
}
/* Get the output Shape */
vl::TensorShape outputShape((data.getHeight() + (padTop+padBottom) - poolHeight)/strideY + 1,
(data.getWidth() + (padLeft+padRight) - poolWidth)/strideX + 1,
data.getDepth(),
data.getSize()) ;
if (backMode && (derOutput != outputShape)) {
mexErrMsgTxt("DEROUTPUT dimensions are incompatible with X and POOL.") ;
}
/* Create output buffers */
vl::Device deviceType ;
if (backMode) {
// data can be CPU since its memory is not used
deviceType = derOutput.getDeviceType() ;
} else {
deviceType = data.getDeviceType() ;
}
vl::Type dataType = data.getDataType() ;
vl::MexTensor output(context) ;
vl::MexTensor poolSwitches(context) ;
vl::MexTensor derData(context) ;
if (deviceType != vl::GPU) {
mexErrMsgTxt("Only GPU supported") ;
}
if (poolSwitchesIn != NULL) {
poolSwitches.init(poolSwitchesIn) ;
if (poolSwitches.getDeviceType() != deviceType) {
mexErrMsgTxt("PoolSwitches and data have different device type") ;
}
}
if (!backMode) {
output.initWithZeros(deviceType, dataType, outputShape) ;
poolSwitches.initWithZeros(deviceType, vl::vlTypeUInt8, outputShape) ;
} else {
derData.initWithZeros(deviceType, dataType, data.getShape()) ;
}
// Dispatch
int height = data.getHeight() ;
int width = data.getWidth() ;
int depth = data.getDepth() * data.getSize() ;
int pooledWidth = (width + (padLeft+padRight) - poolWidth)/strideX + 1 ;
int pooledHeight = (height + (padTop+padBottom) - poolHeight)/strideY + 1 ;
int pooledVolume = pooledWidth * pooledHeight * depth ;
int dataVolume = width * height * depth ;
if (!backMode) {
if (dataType == vl::vlTypeFloat) {
pooling_max_switches_dm_kernel<float>
<<< vl::divideUpwards(pooledVolume, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
((float*) output.getMemory(), (uint8_t*) poolSwitches.getMemory(),
(float const*) data.getMemory(),
pooledHeight, pooledWidth, pooledVolume,
height, width,
poolHeight, poolWidth,
strideY, strideX,
padTop, padLeft);
} else if (dataType == vl::vlTypeDouble) {
#ifdef ENABLE_DOUBLE
pooling_max_switches_dm_kernel<double>
<<< vl::divideUpwards(pooledVolume, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
((double*) output.getMemory(), (uint8_t*) poolSwitches.getMemory(),
(double const*) data.getMemory(),
pooledHeight, pooledWidth, pooledVolume,
height, width,
poolHeight, poolWidth,
strideY, strideX,
padTop, padLeft);
#endif
}
} else {
// Backward
if (dataType == vl::vlTypeFloat) {
pooling_max_backward_switches_dm_kernel<float>
<<< vl::divideUpwards(dataVolume, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
((float*) derData.getMemory(), (uint8_t const*) poolSwitches.getMemory(),
(float*) derOutput.getMemory(),
dataVolume,
pooledHeight, pooledWidth,
height, width, dataVolume,
poolHeight, poolWidth,
strideY, strideX,
padTop, padLeft);
} else if (dataType == vl::vlTypeDouble) {
#ifdef ENABLE_DOUBLE
pooling_max_backward_switches_dm_kernel<double>
<<< vl::divideUpwards(dataVolume, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
((double*) derData.getMemory(), (uint8_t const*) poolSwitches.getMemory(),
(double*) derOutput.getMemory(),
dataVolume,
pooledHeight, pooledWidth,
height, width, dataVolume,
poolHeight, poolWidth,
strideY, strideX,
padTop, padLeft);
#endif
}
}
cudaError_t status = cudaPeekAtLastError() ;
if (status != cudaSuccess) {
mexErrMsgTxt(context.getLastErrorMessage().c_str()) ;
}
if (backMode) {
out[OUT_RESULT] = derData.relinquish() ;
} else {
out[OUT_RESULT] = output.relinquish() ;
if (nout > 1) {
out[OUT_POOL_SWITCHES] = poolSwitches.relinquish() ;
}
}
}
|
9c48d06a508189d49dd3e6518ed319a23b76b0e8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <opencv2/highgui/highgui.hpp>
#include <cstdio>
#include <cassert>
#include "highgui.h"
#include "gpu_util.h"
gcube load_gcube(const std::string &image_name) {
gcube G;
G.load(image_name);
return G;
}
void save_gcube(const std::string &image_name, gcube &image) {
image.save(image_name);
}
void print_gcube(gcube &image) {
float *mem = new float[image.n_elem];
checkCudaErrors(hipMemcpy(mem, image.d_pixels, sizeof(float) * image.n_elem, hipMemcpyDeviceToHost));
for (size_t k = 0; k < image.n_slices; k++) {
printf("slice %zu\n", k);
for (size_t i = 0; i < image.n_rows; i++) {
for (size_t j = 0; j < image.n_cols; j++) {
printf("%f, ", mem[IJK2C(i, j, k, image.n_rows, image.n_cols)]);
}
printf("\n");
}
printf("\n");
}
  delete[] mem;
}
void disp_gcube(const std::string &window_name, gcube &image) {
cv::namedWindow(window_name);
cv::Mat I = image.cv_img();
cv::imshow(window_name, I);
}
void disp_wait(void) {
cv::waitKey(0);
}
int disp_keyPressed(void) {
return cv::waitKey(30);
}
__global__ void GPU_rgb2gray(float *G, float *F, int n_rows, int n_cols) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= n_rows || j >= n_cols) {
return;
}
float red = F[IJK2C(i, j, 0, n_rows, n_cols)];
float green = F[IJK2C(i, j, 1, n_rows, n_cols)];
float blue = F[IJK2C(i, j, 2, n_rows, n_cols)];
G[IJ2C(i, j, n_rows)] = red * 0.3f + green * 0.6f + blue * 0.1f;
}
gcube gpu_rgb2gray(const gcube &image) {
assert(image.n_slices == 3);
gcube G(image.n_rows, image.n_cols, 1);
dim3 gridSize((image.n_rows-1)/16+1, (image.n_cols-1)/16+1, 1);
dim3 blockSize(16, 16, 1);
hipLaunchKernelGGL(( GPU_rgb2gray), dim3(gridSize), dim3(blockSize), 0, 0,
G.d_pixels, image.d_pixels,
image.n_rows, image.n_cols);
checkCudaErrors(hipGetLastError());
return G;
}
gcube gpu_gray2rgb(const gcube &image) {
assert(image.n_slices == 1);
gcube G(image.n_rows, image.n_cols, 3);
checkCudaErrors(hipMemcpy(&G.d_pixels[IJK2C(0, 0, 0, G.n_rows, G.n_cols)],
image.d_pixels, sizeof(float) * image.n_elem, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(&G.d_pixels[IJK2C(0, 0, 1, G.n_rows, G.n_cols)],
image.d_pixels, sizeof(float) * image.n_elem, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(&G.d_pixels[IJK2C(0, 0, 2, G.n_rows, G.n_cols)],
image.d_pixels, sizeof(float) * image.n_elem, hipMemcpyDeviceToDevice));
return G;
}
| 9c48d06a508189d49dd3e6518ed319a23b76b0e8.cu | #include <opencv2/highgui/highgui.hpp>
#include <cstdio>
#include <cassert>
#include "highgui.h"
#include "gpu_util.h"
gcube load_gcube(const std::string &image_name) {
gcube G;
G.load(image_name);
return G;
}
void save_gcube(const std::string &image_name, gcube &image) {
image.save(image_name);
}
void print_gcube(gcube &image) {
float *mem = new float[image.n_elem];
checkCudaErrors(cudaMemcpy(mem, image.d_pixels, sizeof(float) * image.n_elem, cudaMemcpyDeviceToHost));
for (size_t k = 0; k < image.n_slices; k++) {
printf("slice %zu\n", k);
for (size_t i = 0; i < image.n_rows; i++) {
for (size_t j = 0; j < image.n_cols; j++) {
printf("%f, ", mem[IJK2C(i, j, k, image.n_rows, image.n_cols)]);
}
printf("\n");
}
printf("\n");
}
  delete[] mem;
}
void disp_gcube(const std::string &window_name, gcube &image) {
cv::namedWindow(window_name);
cv::Mat I = image.cv_img();
cv::imshow(window_name, I);
}
void disp_wait(void) {
cv::waitKey(0);
}
int disp_keyPressed(void) {
return cv::waitKey(30);
}
__global__ void GPU_rgb2gray(float *G, float *F, int n_rows, int n_cols) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= n_rows || j >= n_cols) {
return;
}
float red = F[IJK2C(i, j, 0, n_rows, n_cols)];
float green = F[IJK2C(i, j, 1, n_rows, n_cols)];
float blue = F[IJK2C(i, j, 2, n_rows, n_cols)];
G[IJ2C(i, j, n_rows)] = red * 0.3f + green * 0.6f + blue * 0.1f;
}
gcube gpu_rgb2gray(const gcube &image) {
assert(image.n_slices == 3);
gcube G(image.n_rows, image.n_cols, 1);
dim3 gridSize((image.n_rows-1)/16+1, (image.n_cols-1)/16+1, 1);
dim3 blockSize(16, 16, 1);
GPU_rgb2gray<<<gridSize, blockSize>>>(
G.d_pixels, image.d_pixels,
image.n_rows, image.n_cols);
checkCudaErrors(cudaGetLastError());
return G;
}
gcube gpu_gray2rgb(const gcube &image) {
assert(image.n_slices == 1);
gcube G(image.n_rows, image.n_cols, 3);
checkCudaErrors(cudaMemcpy(&G.d_pixels[IJK2C(0, 0, 0, G.n_rows, G.n_cols)],
image.d_pixels, sizeof(float) * image.n_elem, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(&G.d_pixels[IJK2C(0, 0, 1, G.n_rows, G.n_cols)],
image.d_pixels, sizeof(float) * image.n_elem, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(&G.d_pixels[IJK2C(0, 0, 2, G.n_rows, G.n_cols)],
image.d_pixels, sizeof(float) * image.n_elem, cudaMemcpyDeviceToDevice));
return G;
}
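/*
 Editor's note -- usage sketch, not part of the original file. It shows how
 the helpers above are meant to compose; the image path and the surrounding
 build setup (the project's gcube class and headers) are assumptions.

   #include "highgui.h"

   int main() {
     gcube img  = load_gcube("input.png");    // decode image and upload to the GPU
     gcube gray = gpu_rgb2gray(img);          // 3-channel RGB -> 1-channel luminance
     gcube back = gpu_gray2rgb(gray);         // replicate the gray plane into 3 channels
     save_gcube("gray.png", back);            // download and write to disk
     disp_gcube("grayscale", back);           // show in an OpenCV window
     disp_wait();                             // block until a key is pressed
     return 0;
   }
*/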
|
71c20a00d22c2ac68e9e279e526b59dfea902951.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Plantilla para la multiplicación de matrices
 * con memoria compartida
 * Jose Incera. Adaptado del código
* de Robert Hochberg
* Abril 2016
*
* Based nearly entirely on the code from the CUDA C Programming Guide
*/
#include <stdio.h>
#include <sys/time.h>
#include <sys/resource.h>
// Estructura Matriz.
typedef struct{
int nRen;
int nCol;
int *elementos;
int salto; // stride para recorrer columnas
} Matriz;
// dimensión de un bloque
// El tamaño es TAM_BLOQUE * TAM_BLOQUE
#define TAM_BLOQUE 16
// Prototipo de función
__global__ void MatMultKernel(const Matriz, const Matriz, Matriz);
// Por facilidad, las dimensiones de la matriz son múltiplos de TAM_BLOQUE
void MatMult(const Matriz A, const Matriz B, Matriz C) {
// Carga A y B en memoria GPU
Matriz d_A;
d_A.nRen = d_A.salto = A.nRen;
d_A.nCol = A.nCol;
size_t tam= A.nRen * A.nCol * sizeof(int);
hipError_t err = hipMalloc((void **)&(d_A.elementos),tam); // AGREGA LOS ARGUMENTOS QUE CORRESPONDAN
hipMemcpy(d_A.elementos,A.elementos,tam,hipMemcpyHostToDevice); // AGREGA LOS ARGUMENTOS QUE CORRESPONDAN
Matriz d_B;
d_B.nRen = d_B.salto = B.nRen;
d_B.nCol = B.nCol;
tam= B.nRen * B.nCol * sizeof(int);
hipMalloc((void **)&(d_B.elementos),tam); // AGREGA LOS ARGUMENTOS QUE CORRESPONDAN
hipMemcpy(d_B.elementos,B.elementos,tam,hipMemcpyHostToDevice); // AGREGA LOS ARGUMENTOS QUE CORRESPONDAN
// Asigna espacio para C en GPU
Matriz d_C;
d_C.nRen = d_C.salto = C.nRen;
d_C.nCol = C.nCol;
tam = C.nRen * C.nCol * sizeof(int);
hipMalloc((void **)&(d_C.elementos),tam); // AGREGA LOS ARGUMENTOS QUE CORRESPONDAN
// Llama al kernel
dim3 dimBlock(TAM_BLOQUE, TAM_BLOQUE);
dim3 dimGrid(B.nRen / dimBlock.x, A.nCol / dimBlock.y);
// Descomenta y AGREGA LOS ARGUMENTOS QUE CORRESPONDAN
hipLaunchKernelGGL(( MatMultKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A,d_B,d_C);
// Espera a que todos terminen
hipDeviceSynchronize();
// Lee C from del GPU
hipMemcpy(C.elementos,d_C.elementos,tam,hipMemcpyDeviceToHost);// AGREGA LOS ARGUMENTOS QUE CORRESPONDAN
// Libera memoria GPU
hipFree(d_A.elementos);// AGREGA LOS ARGUMENTOS QUE CORRESPONDAN
hipFree(d_B.elementos);// AGREGA LOS ARGUMENTOS QUE CORRESPONDAN
hipFree(d_C.elementos);// AGREGA LOS ARGUMENTOS QUE CORRESPONDAN
}
// Toma un elemento de la matriz
__device__ int GetElement(const Matriz A, int ren, int col) {
return A.elementos[ren* A.salto + col];
}
// Pon un elemento en la matriz
__device__ void SetElement(Matriz A, int ren, int col, int value) {
A.elementos[ren* A.salto + col] = value;
}
// Toma una submatriz de A de tamaño TAM_BLOQUExTAM_BLOQUE
// localizada col sub-matrices a la derecha y ren sub-matrices abajo
// desde la esquina superior izquierda
__device__ Matriz LeeSubMatriz(Matriz A, int ren, int col) {
Matriz Asub;
Asub.nRen = TAM_BLOQUE;
Asub.nCol = TAM_BLOQUE;
Asub.salto = A.salto;
Asub.elementos = &A.elementos[A.salto * TAM_BLOQUE * ren+ TAM_BLOQUE * col];
return Asub;
}
// Kernel multiplicación de Matriz
__global__ void MatMultKernel(Matriz A, Matriz B, Matriz C) {
// Renglon y columna del bloque
int blockRen = blockIdx.y;
int blockCol = blockIdx.x;
// Cada bloque calcula una submatriz Csub de C
Matriz Csub = LeeSubMatriz(C, blockRen, blockCol);
// Cada thread calcula un elemento de Csub
// acumulando elementos en valorC
int valorC= 0;
// Thread ren y col dentro de Csub
int ren = threadIdx.y;
int col = threadIdx.x;
// Loop sobre todas las sub-matrices de A y B necesarias
// para calcular Csub
// Multiplica cada par de sub-matrices y acumula resultados
for (int m = 0; m < (A.nRen / TAM_BLOQUE); ++m) {
// Toma sub-Matriz Asub de A
Matriz Asub = LeeSubMatriz(A,blockRen,m);// AGREGA LOS ARGUMENTOS QUE CORRESPONDAN
// Toma sub-Matriz Bsub de B
Matriz Bsub = LeeSubMatriz(B,m,blockCol);// AGREGA LOS ARGUMENTOS QUE CORRESPONDAN
// La memoria compartida donde se almacenan Asub y Bsub
__shared__ int As[TAM_BLOQUE][TAM_BLOQUE];
__shared__ int Bs[TAM_BLOQUE][TAM_BLOQUE];
// Transfiere Asub y Bsub de memoria global a shared
// Cada thread carga un elemento de cada submatriz
As[ren][col] = GetElement(Asub, ren, col);
Bs[ren][col] = GetElement(Bsub, ren, col);
    // Punto de sincronización: Espera a que todas las
// sub-matrices se hayan cargado antes de continuar
__syncthreads();
// Multiplica Asub y Bsub
for (int e = 0; e < TAM_BLOQUE; ++e)
      // Descomenta y agrega la operación apropiada
valorC += As[ren][e]*Bs[e][col];
    // Punto de sincronización antes de iniciar otra iteración
__syncthreads();
}
// Escribe Csub a memoria global
// Cada thread escribe un elemento
SetElement(Csub, ren, col, valorC);
}
int main(int argc, char* argv[]){
  clock_t begin=clock(); // Para medir cuánto tarda
char *verbose;
if(argc > 2) verbose = argv[2];
else verbose = NULL;
Matriz A, B, C;
int a1, a2, b1, b2; // Solo matrices cuadradas
a1 = atoi(argv[1]); /* nCol de A */
// a2 = atoi(argv[2]); /* nRen de A */
// b1 = a2; /* nCol de B */
// b2 = atoi(argv[3]); /* nRen de B */
a2 = a1; /* nRen de A */
b1 = a1; /* nCol de B */
b2 = a1; /* nRen de B */
if(argc > 2) verbose = argv[2];
else verbose = NULL;
A.nCol = a1;
A.nRen = a2;
A.elementos = (int*)malloc(A.nRen * A.nCol * sizeof(int));
B.nCol = b1;
B.nRen = b2;
B.elementos = (int*)malloc(B.nRen * B.nCol * sizeof(int));
C.nCol = A.nCol;
C.nRen = B.nRen;
C.elementos = (int*)malloc(C.nRen * C.nCol * sizeof(int));
// Llena las matrices con 1's
for(int i = 0; i < A.nCol; i++)
for(int j = 0; j < A.nRen; j++)
// A.elementos[i*A.nRen + j] = (rand() % 3);
A.elementos[i*A.nRen + j] = 1;
for(int i = 0; i < B.nCol; i++)
for(int j = 0; j < B.nRen; j++)
// B.elementos[i*B.nRen + j] = (rand() % 2);
B.elementos[i*B.nRen + j] = 1;
MatMult(A, B, C);
  clock_t end=clock(); // Checa el tiempo inmediatamente después de terminar
double diffticks=end-begin;
  double diffms=(diffticks*1000)/CLOCKS_PER_SEC; // ms = ticks * 1000 / CLOCKS_PER_SEC
// Imprime hasta porciones de 10x10 de las tres matrices
if(verbose != NULL && verbose[1] == 'v'){
for(int i = 0; i < min(10, C.nCol); i++){
for(int j = 0; j < min(10, C.nRen); j++)
printf("%d ", C.elementos[i*C.nRen + j]);
printf("\n");
}
printf("\n");
}
printf("Tiempo usado: %f mSeg\n\n", diffms);
} | 71c20a00d22c2ac68e9e279e526b59dfea902951.cu | /*
* Plantilla para la multiplicación de matrices
* con memoria compartida
* Jose Incera. Adaptado del código
* de Robert Hochberg
* Abril 2016
*
* Based nearly entirely on the code from the CUDA C Programming Guide
*/
#include <stdio.h>
#include <sys/time.h>
#include <sys/resource.h>
// Estructura Matriz.
typedef struct{
int nRen;
int nCol;
int *elementos;
int salto; // stride para recorrer columnas
} Matriz;
// dimensión de un bloque
// El tamaño es TAM_BLOQUE * TAM_BLOQUE
#define TAM_BLOQUE 16
// Prototipo de función
__global__ void MatMultKernel(const Matriz, const Matriz, Matriz);
// Por facilidad, las dimensiones de la matriz son múltiplos de TAM_BLOQUE
void MatMult(const Matriz A, const Matriz B, Matriz C) {
// Carga A y B en memoria GPU
Matriz d_A;
d_A.nRen = d_A.salto = A.nRen;
d_A.nCol = A.nCol;
size_t tam= A.nRen * A.nCol * sizeof(int);
cudaError_t err = cudaMalloc((void **)&(d_A.elementos),tam); // AGREGA LOS ARGUMENTOS QUE CORRESPONDAN
cudaMemcpy(d_A.elementos,A.elementos,tam,cudaMemcpyHostToDevice); // AGREGA LOS ARGUMENTOS QUE CORRESPONDAN
Matriz d_B;
d_B.nRen = d_B.salto = B.nRen;
d_B.nCol = B.nCol;
tam= B.nRen * B.nCol * sizeof(int);
cudaMalloc((void **)&(d_B.elementos),tam); // AGREGA LOS ARGUMENTOS QUE CORRESPONDAN
cudaMemcpy(d_B.elementos,B.elementos,tam,cudaMemcpyHostToDevice); // AGREGA LOS ARGUMENTOS QUE CORRESPONDAN
// Asigna espacio para C en GPU
Matriz d_C;
d_C.nRen = d_C.salto = C.nRen;
d_C.nCol = C.nCol;
tam = C.nRen * C.nCol * sizeof(int);
cudaMalloc((void **)&(d_C.elementos),tam); // AGREGA LOS ARGUMENTOS QUE CORRESPONDAN
// Llama al kernel
dim3 dimBlock(TAM_BLOQUE, TAM_BLOQUE);
dim3 dimGrid(B.nRen / dimBlock.x, A.nCol / dimBlock.y);
// Descomenta y AGREGA LOS ARGUMENTOS QUE CORRESPONDAN
MatMultKernel<<<dimGrid,dimBlock>>>(d_A,d_B,d_C);
// Espera a que todos terminen
cudaThreadSynchronize();
// Lee C from del GPU
cudaMemcpy(C.elementos,d_C.elementos,tam,cudaMemcpyDeviceToHost);// AGREGA LOS ARGUMENTOS QUE CORRESPONDAN
// Libera memoria GPU
cudaFree(d_A.elementos);// AGREGA LOS ARGUMENTOS QUE CORRESPONDAN
cudaFree(d_B.elementos);// AGREGA LOS ARGUMENTOS QUE CORRESPONDAN
cudaFree(d_C.elementos);// AGREGA LOS ARGUMENTOS QUE CORRESPONDAN
}
// Toma un elemento de la matriz
__device__ int GetElement(const Matriz A, int ren, int col) {
return A.elementos[ren* A.salto + col];
}
// Pon un elemento en la matriz
__device__ void SetElement(Matriz A, int ren, int col, int value) {
A.elementos[ren* A.salto + col] = value;
}
// Toma una submatriz de A de tamaño TAM_BLOQUExTAM_BLOQUE
// localizada col sub-matrices a la derecha y ren sub-matrices abajo
// desde la esquina superior izquierda
__device__ Matriz LeeSubMatriz(Matriz A, int ren, int col) {
Matriz Asub;
Asub.nRen = TAM_BLOQUE;
Asub.nCol = TAM_BLOQUE;
Asub.salto = A.salto;
Asub.elementos = &A.elementos[A.salto * TAM_BLOQUE * ren+ TAM_BLOQUE * col];
return Asub;
}
// Kernel multiplicación de Matriz
__global__ void MatMultKernel(Matriz A, Matriz B, Matriz C) {
// Renglon y columna del bloque
int blockRen = blockIdx.y;
int blockCol = blockIdx.x;
// Cada bloque calcula una submatriz Csub de C
Matriz Csub = LeeSubMatriz(C, blockRen, blockCol);
// Cada thread calcula un elemento de Csub
// acumulando elementos en valorC
int valorC= 0;
// Thread ren y col dentro de Csub
int ren = threadIdx.y;
int col = threadIdx.x;
// Loop sobre todas las sub-matrices de A y B necesarias
// para calcular Csub
// Multiplica cada par de sub-matrices y acumula resultados
for (int m = 0; m < (A.nRen / TAM_BLOQUE); ++m) {
// Toma sub-Matriz Asub de A
Matriz Asub = LeeSubMatriz(A,blockRen,m);// AGREGA LOS ARGUMENTOS QUE CORRESPONDAN
// Toma sub-Matriz Bsub de B
Matriz Bsub = LeeSubMatriz(B,m,blockCol);// AGREGA LOS ARGUMENTOS QUE CORRESPONDAN
// La memoria compartida donde se almacenan Asub y Bsub
__shared__ int As[TAM_BLOQUE][TAM_BLOQUE];
__shared__ int Bs[TAM_BLOQUE][TAM_BLOQUE];
// Transfiere Asub y Bsub de memoria global a shared
// Cada thread carga un elemento de cada submatriz
As[ren][col] = GetElement(Asub, ren, col);
Bs[ren][col] = GetElement(Bsub, ren, col);
// Punto de sincronización: Espera a que todas las
// sub-matrices se hayan cargado antes de continuar
__syncthreads();
// Multiplica Asub y Bsub
for (int e = 0; e < TAM_BLOQUE; ++e)
// Descomenta y agrega la operación apropiada
valorC += As[ren][e]*Bs[e][col];
// Punto de sincronización antes de iniciar otra iteración
__syncthreads();
}
// Escribe Csub a memoria global
// Cada thread escribe un elemento
SetElement(Csub, ren, col, valorC);
}
int main(int argc, char* argv[]){
clock_t begin=clock(); // Para medir cuánto tarda
char *verbose;
if(argc > 2) verbose = argv[2];
else verbose = NULL;
Matriz A, B, C;
int a1, a2, b1, b2; // Solo matrices cuadradas
a1 = atoi(argv[1]); /* nCol de A */
// a2 = atoi(argv[2]); /* nRen de A */
// b1 = a2; /* nCol de B */
// b2 = atoi(argv[3]); /* nRen de B */
a2 = a1; /* nRen de A */
b1 = a1; /* nCol de B */
b2 = a1; /* nRen de B */
if(argc > 2) verbose = argv[2];
else verbose = NULL;
A.nCol = a1;
A.nRen = a2;
A.elementos = (int*)malloc(A.nRen * A.nCol * sizeof(int));
B.nCol = b1;
B.nRen = b2;
B.elementos = (int*)malloc(B.nRen * B.nCol * sizeof(int));
C.nCol = A.nCol;
C.nRen = B.nRen;
C.elementos = (int*)malloc(C.nRen * C.nCol * sizeof(int));
// Llena las matrices con 1's
for(int i = 0; i < A.nCol; i++)
for(int j = 0; j < A.nRen; j++)
// A.elementos[i*A.nRen + j] = (rand() % 3);
A.elementos[i*A.nRen + j] = 1;
for(int i = 0; i < B.nCol; i++)
for(int j = 0; j < B.nRen; j++)
// B.elementos[i*B.nRen + j] = (rand() % 2);
B.elementos[i*B.nRen + j] = 1;
MatMult(A, B, C);
clock_t end=clock(); // Checa el tiempo inmediatamente después de terminar
double diffticks=end-begin;
  double diffms=(diffticks*1000)/CLOCKS_PER_SEC; // ms = ticks * 1000 / CLOCKS_PER_SEC
// Imprime hasta porciones de 10x10 de las tres matrices
if(verbose != NULL && verbose[1] == 'v'){
for(int i = 0; i < min(10, C.nCol); i++){
for(int j = 0; j < min(10, C.nRen); j++)
printf("%d ", C.elementos[i*C.nRen + j]);
printf("\n");
}
printf("\n");
}
printf("Tiempo usado: %f mSeg\n\n", diffms);
} |
d6f7483301e45caf2fd5b352a720f86bb325ae3a.hip | // !!! This is a file automatically generated by hipify!!!
// #include <thrust/device_vector.h>
// #include <thrust/for_each.h>
#include <thrust/sequence.h>
#include <thrust/constant_memory.h>
#include <thrust/scan.h>
#include <thrust/transform_reduce.h>
#include <thrust/shared_algorithms.h>
// #include <thrust/shared_reduce.h>
using namespace thrust;
class printFunctor
{
public:
__device__ void operator() ( float &a)
{
printf("%f\n",a);
}
};
class copyFunctor
{
public:
__device__ float operator() ( float &a)
{
return a*a;
}
};
class binaryFunctor
{
public:
__device__ float operator() ( float &a,float &b)
{
return a*b;
}
};
int main(int argc, char ** argv)
{
if(argc!=2)
exit(0);
device_vector<float> a(atoi(argv[1]));
device_vector<float> b(atoi(argv[1]));
device_vector<float> c(atoi(argv[1]));
// device_vector<int> c(1200);
//
sequence(a.begin(),a.end());
sequence(b.begin(),b.end());
// printf("%d ",reduce(cuda::shared,a.begin(),a.end()));
// printf("%d ",reduce(a.begin(),a.end()));
// exclusive_scan(cuda::shared,a.begin(),a.end(),b.begin());
// hipDeviceSynchronize();
transform(cuda::shared_first,a.begin(),a.end(),b.begin(),c.begin(),binaryFunctor());
// hipDeviceSynchronize();
// printf("\n");
for_each(cuda::shared,c.begin(),c.end(),printFunctor());
// hipDeviceSynchronize();
// printf("\n");
// printf("Thrust = %f\n",transform_reduce(a.begin(),a.end(),copyFunctor(),0.0f, thrust::plus<float>()));
// printf("Shared = %f\n",transform_reduce(cuda::shared,a.begin(),a.end(),b.begin(),binaryFunctor()));
// printf("Shared = %f \n",reduce(cuda::shared,a.begin(),a.end()-10));
}
| d6f7483301e45caf2fd5b352a720f86bb325ae3a.cu | // #include <thrust/device_vector.h>
// #include <thrust/for_each.h>
#include <thrust/sequence.h>
#include <thrust/constant_memory.h>
#include <thrust/scan.h>
#include <thrust/transform_reduce.h>
#include <thrust/shared_algorithms.h>
// #include <thrust/shared_reduce.h>
using namespace thrust;
class printFunctor
{
public:
__device__ void operator() ( float &a)
{
printf("%f\n",a);
}
};
class copyFunctor
{
public:
__device__ float operator() ( float &a)
{
return a*a;
}
};
class binaryFunctor
{
public:
__device__ float operator() ( float &a,float &b)
{
return a*b;
}
};
int main(int argc, char ** argv)
{
if(argc!=2)
exit(0);
device_vector<float> a(atoi(argv[1]));
device_vector<float> b(atoi(argv[1]));
device_vector<float> c(atoi(argv[1]));
// device_vector<int> c(1200);
//
sequence(a.begin(),a.end());
sequence(b.begin(),b.end());
// printf("%d ",reduce(cuda::shared,a.begin(),a.end()));
// printf("%d ",reduce(a.begin(),a.end()));
// exclusive_scan(cuda::shared,a.begin(),a.end(),b.begin());
// cudaDeviceSynchronize();
transform(cuda::shared_first,a.begin(),a.end(),b.begin(),c.begin(),binaryFunctor());
// cudaDeviceSynchronize();
// printf("\n");
for_each(cuda::shared,c.begin(),c.end(),printFunctor());
// cudaDeviceSynchronize();
// printf("\n");
// printf("Thrust = %f\n",transform_reduce(a.begin(),a.end(),copyFunctor(),0.0f, thrust::plus<float>()));
// printf("Shared = %f\n",transform_reduce(cuda::shared,a.begin(),a.end(),b.begin(),binaryFunctor()));
// printf("Shared = %f \n",reduce(cuda::shared,a.begin(),a.end()-10));
}
|
40b9d6ab60d4ebc8b615056891a619d19549720c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
int f = (index/spatial)%filters;
x[index] = (x[index] - mean[f])/(sqrtf(variance[f]) + .000001f);
} | 40b9d6ab60d4ebc8b615056891a619d19549720c.cu | #include "includes.h"
__global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
int f = (index/spatial)%filters;
x[index] = (x[index] - mean[f])/(sqrtf(variance[f]) + .000001f);
} |
50582d887fb9f0062edb5450fd672f8da2c7e88c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* GPU kernel to re-order data output from F-engine, to input to cuBLAS
* library routine (CUDA C)
* -- Strategy 2 --
*
* Copyright (c) 2020 Nitish Ragoomundun
* lrugratz gmail com
* @ .
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*/
/*
* NpolsxNelements: number of elements in the array x number of polarisations
* this flexibility allows to process for a single
* polarisation if needed,
* Nchannels: number of frequency channels in each spectrum,
* FOutput: array output from F-engine,
* XInput: array to be input to cuBLAS kernel.
*
* NumThreadx = (Npols*Nelements >= 32) ? 32 : (Npols*Nelements);
* NumThready = 32;
* NumThreadz = 1;
*
* NumBlockx = (Npols*Nelements)/NumThreadx + (((Npols*Nelements)%NumThreadx != 0) ? 1 : 0);
* NumBlocky = Nchannels/NumThready + ((Nchannels%NumThready != 0) ? 1 : 0);
* NumBlockz = Nspectra;
*
*/
__global__ void ReorderFOutput(int NpolsxNelements,
int Nchannels,
hipfftComplex *FOutput,
hipComplex *XInput)
{
__shared__ hipfftComplex sh_Temp[32][33];
int channelIdx, elementIdx;
/* Read data from output of F-engine */
channelIdx = blockIdx.y*blockDim.y + threadIdx.x;
elementIdx = blockIdx.x*blockDim.x + threadIdx.y;
if (channelIdx < Nchannels && elementIdx < NpolsxNelements)
sh_Temp[threadIdx.x][threadIdx.y] = FOutput[ (blockIdx.z*NpolsxNelements + elementIdx)*Nchannels + channelIdx ];
/* Make sure that all data reads are completed before proceeding */
__syncthreads();
/* Write data to input array for X-engine */
channelIdx = channelIdx - threadIdx.x + threadIdx.y;
elementIdx = elementIdx - threadIdx.y + threadIdx.x;
if (channelIdx < Nchannels && elementIdx < NpolsxNelements)
XInput[ (channelIdx*gridDim.z + blockIdx.z)*NpolsxNelements + elementIdx ] = sh_Temp[threadIdx.y][threadIdx.x];
}
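/*
 * Editor's note -- launch sketch (illustrative addition, not from the
 * original source). Plugging a made-up configuration into the block/grid
 * formulas documented above: Npols*Nelements = 64, Nchannels = 1024 and
 * Nspectra = 8 give
 *
 *   NumThreadx = 32, NumThready = 32, NumThreadz = 1
 *   NumBlockx  =   64 / 32 = 2
 *   NumBlocky  = 1024 / 32 = 32
 *   NumBlockz  = Nspectra  = 8
 *
 * so a host-side call would look roughly like (d_FOutput and d_XInput are
 * hypothetical device buffers of the right sizes):
 *
 *   dim3 block(32, 32, 1);
 *   dim3 grid(2, 32, 8);
 *   hipLaunchKernelGGL(ReorderFOutput, grid, block, 0, 0,
 *                      64, 1024, d_FOutput, d_XInput);
 */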
| 50582d887fb9f0062edb5450fd672f8da2c7e88c.cu | /*
* GPU kernel to re-order data output from F-engine, to input to cuBLAS
* library routine (CUDA C)
* -- Strategy 2 --
*
* Copyright (c) 2020 Nitish Ragoomundun
* lrugratz gmail com
* @ .
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*/
/*
* NpolsxNelements: number of elements in the array x number of polarisations
* this flexibility allows to process for a single
* polarisation if needed,
* Nchannels: number of frequency channels in each spectrum,
* FOutput: array output from F-engine,
* XInput: array to be input to cuBLAS kernel.
*
* NumThreadx = (Npols*Nelements >= 32) ? 32 : (Npols*Nelements);
* NumThready = 32;
* NumThreadz = 1;
*
* NumBlockx = (Npols*Nelements)/NumThreadx + (((Npols*Nelements)%NumThreadx != 0) ? 1 : 0);
* NumBlocky = Nchannels/NumThready + ((Nchannels%NumThready != 0) ? 1 : 0);
* NumBlockz = Nspectra;
*
*/
__global__ void ReorderFOutput(int NpolsxNelements,
int Nchannels,
cufftComplex *FOutput,
cuComplex *XInput)
{
__shared__ cufftComplex sh_Temp[32][33];
int channelIdx, elementIdx;
/* Read data from output of F-engine */
channelIdx = blockIdx.y*blockDim.y + threadIdx.x;
elementIdx = blockIdx.x*blockDim.x + threadIdx.y;
if (channelIdx < Nchannels && elementIdx < NpolsxNelements)
sh_Temp[threadIdx.x][threadIdx.y] = FOutput[ (blockIdx.z*NpolsxNelements + elementIdx)*Nchannels + channelIdx ];
/* Make sure that all data reads are completed before proceeding */
__syncthreads();
/* Write data to input array for X-engine */
channelIdx = channelIdx - threadIdx.x + threadIdx.y;
elementIdx = elementIdx - threadIdx.y + threadIdx.x;
if (channelIdx < Nchannels && elementIdx < NpolsxNelements)
XInput[ (channelIdx*gridDim.z + blockIdx.z)*NpolsxNelements + elementIdx ] = sh_Temp[threadIdx.y][threadIdx.x];
}
|
d7385ab1d3977d330390f3fe026a781959c926e8.hip | // !!! This is a file automatically generated by hipify!!!
#pragma once
#include <stdio.h>
#include <math.h>
#include <GL/freeglut.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
#include "Field.h"
#define BLOCK_SIZE 256
//#define TIMER
__device__ inline float distanceSqrd(short x1, short y1, short x2, short y2)
{
return (float)((x2 - x1)*(x2 - x1) + (y2 - y1)*(y2 - y1));
}
// funkcje używane do ewentualnego lepszego dostępu do pamięci dzielonej
//__device__ inline bool getSign(bool* arr, int i)
//{
// //return arr[128 * (i / 128) + ((4 * (i % 128)) % 128) + ((i / 32) % 4)];
// return arr[4*i];
//}
//
//__device__ inline void setSign(bool* arr, int i, bool val)
//{
// //arr[128 * (i / 128) + ((4 * (i % 128)) % 128) + ((i / 32) % 4)] = val;
// arr[4*i] = val;
//}
__global__ void ColorPixel_kernel(int N, ushort2* d_positions, bool* d_sign, int k, GLubyte* d_pixels, float2* d_field, int width, int height)
{
short x = blockIdx.x * blockDim.x + threadIdx.x;
short y = blockIdx.y * blockDim.y + threadIdx.y;
	float Potential = 0; // agregat na potencjał w danym pixelu
	float2 F = make_float2(0.f, 0.f); // agregat na wektor natężenia w danym pixelu
	short maxP = N / BLOCK_SIZE; // licznik przebiegów "największego" fora
if (N % BLOCK_SIZE != 0)
maxP++;
for (short p = 0; p < maxP; p++)
{
__shared__ ushort2 position[BLOCK_SIZE];
__shared__ bool sign[BLOCK_SIZE];
//__shared__ bool sign[4*BLOCK_SIZE];
		short th_ind = threadIdx.y * blockDim.x + threadIdx.x; // index wątku wew. bloku
		short p_ind = p*BLOCK_SIZE + th_ind; // index kopiowanej cząstki
// kopiowanie do shared memory
if (p_ind < N)
{
position[th_ind] = d_positions[p_ind];
sign[th_ind] = d_sign[p_ind];
//setSign(sign, th_ind, d_sign[p_ind]);
}
__syncthreads();
if (x < width && y < height)
{
short I;
if (p == maxP - 1 && N < maxP*BLOCK_SIZE)
I = N - (p*BLOCK_SIZE);
else
I = BLOCK_SIZE;
float flen, distSqrd, dist;
			// właściwe obliczenia dla cząstki pod indeksem i
for (int i = 0; i < I; i++)
{
if (x == position[i].x && y == position[i].y)
continue;
distSqrd = distanceSqrd(x, y, position[i].x, position[i].y);
dist = sqrt(distSqrd);
flen = 1.f / distSqrd;
//if (getSign(sign, i))
if (sign[i])
{
					Potential += 1.f / dist; // przyczynek do potencjału pola w punkcie. Potencjał jest używany do wizualizacji.
					F.x -= (flen / dist) * (position[i].x - x); // przyczynek do natężenia pola w punkcie, składowa x. Wektor natężenia jest używany do przemieszczania się cząstek.
					F.y -= (flen / dist) * (position[i].y - y); // przyczynek do natężenia pola w punkcie, składowa y
}
else
{
Potential -= 1.f / dist;
F.x += (flen / dist) * (position[i].x - x);
F.y += (flen / dist) * (position[i].y - y);
}
}
}
__syncthreads();
}
	// końcowe zapisywanie danych
if (x < width && y < height)
{
Potential = Potential * k;
int mult = 10;
F.x *= mult;
F.y *= mult;
int pixel_idx = y * width + x;
d_field[pixel_idx] = F;
if (Potential > 0)
{
((uchar3*)d_pixels)[pixel_idx] = make_uchar3(255, 255 - Potential, 255 - Potential); // R G B
}
else
{
((uchar3*)d_pixels)[pixel_idx] = make_uchar3(255 + Potential, 255 + Potential, 255); // R G B
}
}
}
void ColorPixel_caller(cudaGraphicsResource* pixelsPBO, Field& f)
{
	// mnoznik potencjału pola
int k = 1000;
int s = 16, t = 16;
dim3 threadsPerBlock(s, t);
dim3 blocks(f.width / s, f.height / t);
if (f.width % s != 0)
blocks.x += 1;
if (f.height % t != 0)
blocks.y += 1;
GLubyte* d_pixels;
size_t num_bytes;
hipGraphicsMapResources(1, &pixelsPBO, 0);
hipGraphicsResourceGetMappedPointer((void**)&d_pixels, &num_bytes, pixelsPBO);
#ifdef TIMER
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
#endif
hipLaunchKernelGGL(( ColorPixel_kernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, f.N, f.d_positions, f.d_sign, k, d_pixels, f.d_fieldForce, f.width, f.height);
#ifdef TIMER
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
printf("%f\n", time);
#endif
hipError_t cudaStatus;
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "ColorPixel_kernel launch failed: %s\n", hipGetErrorString(cudaStatus));
exit(EXIT_FAILURE);
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching ColorPixel_kernel!\n", cudaStatus);
fprintf(stderr, "%s\n", hipGetErrorString(cudaStatus));
exit(EXIT_FAILURE);
}
hipGraphicsUnmapResources(1, &pixelsPBO, 0);
}
| d7385ab1d3977d330390f3fe026a781959c926e8.cu | #pragma once
#include <stdio.h>
#include <math.h>
#include <GL/freeglut.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include "Field.h"
#define BLOCK_SIZE 256
//#define TIMER
__device__ inline float distanceSqrd(short x1, short y1, short x2, short y2)
{
return (float)((x2 - x1)*(x2 - x1) + (y2 - y1)*(y2 - y1));
}
// funkcje używane do ewentualnego lepszego dostępu do pamiędzi dzielonej
//__device__ inline bool getSign(bool* arr, int i)
//{
// //return arr[128 * (i / 128) + ((4 * (i % 128)) % 128) + ((i / 32) % 4)];
// return arr[4*i];
//}
//
//__device__ inline void setSign(bool* arr, int i, bool val)
//{
// //arr[128 * (i / 128) + ((4 * (i % 128)) % 128) + ((i / 32) % 4)] = val;
// arr[4*i] = val;
//}
__global__ void ColorPixel_kernel(int N, ushort2* d_positions, bool* d_sign, int k, GLubyte* d_pixels, float2* d_field, int width, int height)
{
short x = blockIdx.x * blockDim.x + threadIdx.x;
short y = blockIdx.y * blockDim.y + threadIdx.y;
float Potential = 0; // agregat na potencjał w danym pixelu
float2 F = make_float2(0.f, 0.f); // agregat na wektor natężenia w danym pixelu
short maxP = N / BLOCK_SIZE; // licznik przebiegów "największego" fora
if (N % BLOCK_SIZE != 0)
maxP++;
for (short p = 0; p < maxP; p++)
{
__shared__ ushort2 position[BLOCK_SIZE];
__shared__ bool sign[BLOCK_SIZE];
//__shared__ bool sign[4*BLOCK_SIZE];
short th_ind = threadIdx.y * blockDim.x + threadIdx.x; // index wątku wew. bloku
short p_ind = p*BLOCK_SIZE + th_ind; // index kopiowanej cząstki
// kopiowanie do shared memory
if (p_ind < N)
{
position[th_ind] = d_positions[p_ind];
sign[th_ind] = d_sign[p_ind];
//setSign(sign, th_ind, d_sign[p_ind]);
}
__syncthreads();
if (x < width && y < height)
{
short I;
if (p == maxP - 1 && N < maxP*BLOCK_SIZE)
I = N - (p*BLOCK_SIZE);
else
I = BLOCK_SIZE;
float flen, distSqrd, dist;
// właściwe obliczenia dla cząstki pod indeksem i
for (int i = 0; i < I; i++)
{
if (x == position[i].x && y == position[i].y)
continue;
distSqrd = distanceSqrd(x, y, position[i].x, position[i].y);
dist = sqrt(distSqrd);
flen = 1.f / distSqrd;
//if (getSign(sign, i))
if (sign[i])
{
Potential += 1.f / dist; // przyczynek do potencjału pola w punkcie. Potencjał jest używany do wizualizacji.
F.x -= (flen / dist) * (position[i].x - x); // przyczynek do natężenia pola w punkcie, składowa x. Wektor natężenia jest używany do przemieszczania się cząstek.
F.y -= (flen / dist) * (position[i].y - y); // przyczynek do natężenia pola w punkcie, składowa y
}
else
{
Potential -= 1.f / dist;
F.x += (flen / dist) * (position[i].x - x);
F.y += (flen / dist) * (position[i].y - y);
}
}
}
__syncthreads();
}
// końcowe zapisywanie danych
if (x < width && y < height)
{
Potential = Potential * k;
int mult = 10;
F.x *= mult;
F.y *= mult;
int pixel_idx = y * width + x;
d_field[pixel_idx] = F;
if (Potential > 0)
{
((uchar3*)d_pixels)[pixel_idx] = make_uchar3(255, 255 - Potential, 255 - Potential); // R G B
}
else
{
((uchar3*)d_pixels)[pixel_idx] = make_uchar3(255 + Potential, 255 + Potential, 255); // R G B
}
}
}
void ColorPixel_caller(cudaGraphicsResource* pixelsPBO, Field& f)
{
// mnoznik potencjału pola
int k = 1000;
int s = 16, t = 16;
dim3 threadsPerBlock(s, t);
dim3 blocks(f.width / s, f.height / t);
if (f.width % s != 0)
blocks.x += 1;
if (f.height % t != 0)
blocks.y += 1;
GLubyte* d_pixels;
size_t num_bytes;
cudaGraphicsMapResources(1, &pixelsPBO, 0);
cudaGraphicsResourceGetMappedPointer((void**)&d_pixels, &num_bytes, pixelsPBO);
#ifdef TIMER
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
#endif
ColorPixel_kernel<<<blocks, threadsPerBlock>>>(f.N, f.d_positions, f.d_sign, k, d_pixels, f.d_fieldForce, f.width, f.height);
#ifdef TIMER
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("%f\n", time);
#endif
cudaError_t cudaStatus;
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "ColorPixel_kernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
exit(EXIT_FAILURE);
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching ColorPixel_kernel!\n", cudaStatus);
fprintf(stderr, "%s\n", cudaGetErrorString(cudaStatus));
exit(EXIT_FAILURE);
}
cudaGraphicsUnmapResources(1, &pixelsPBO, 0);
}
|
796e58de2a4cd44a9a8b79f99d64f8a8b854764b.hip | // !!! This is a file automatically generated by hipify!!!
// MP 1
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <wb.h> // required for the wbArg_*, wbImport, wbTime_* and wbSolution helpers used below
__global__ void vecAdd(float * in1, float * in2, float * out, int len) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if(idx < len) {
out[idx] = in1[idx] + in2[idx];
}
}
int main(int argc, char ** argv) {
wbArg_t args;
int inputLength;
float * hostInput1;
float * hostInput2;
float * hostOutput;
float * deviceInput1;
float * deviceInput2;
float * deviceOutput;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput1 = (float *) wbImport(wbArg_getInputFile(args, 0), &inputLength);
hostInput2 = (float *) wbImport(wbArg_getInputFile(args, 1), &inputLength);
hostOutput = (float *) malloc(inputLength * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", inputLength);
wbTime_start(GPU, "Allocating GPU memory.");
int inputSize = inputLength * sizeof(float);
hipMalloc((void **) &deviceInput1, inputSize);
hipMalloc((void **) &deviceInput2, inputSize);
hipMalloc((void **) &deviceOutput, inputSize);
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
hipMemcpy(deviceInput1, hostInput1, inputSize, hipMemcpyHostToDevice);
hipMemcpy(deviceInput2, hostInput2, inputSize, hipMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
int blockSize = 256;
struct dim3 DimGrid((inputLength - 1)/blockSize + 1, 1, 1);
struct dim3 DimBlock(blockSize, 1, 1);
wbTime_start(Compute, "Performing CUDA computation");
hipLaunchKernelGGL(( vecAdd), dim3(DimGrid), dim3(DimBlock), 0, 0, deviceInput1, deviceInput2, deviceOutput, inputLength);
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
hipMemcpy(hostOutput, deviceOutput, inputSize, hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
hipFree(deviceInput1);
hipFree(deviceInput2);
hipFree(deviceOutput);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostOutput, inputLength);
free(hostInput1);
free(hostInput2);
free(hostOutput);
return 0;
}
| 796e58de2a4cd44a9a8b79f99d64f8a8b854764b.cu | // MP 1
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <wb.h> // required for the wbArg_*, wbImport, wbTime_* and wbSolution helpers used below
__global__ void vecAdd(float * in1, float * in2, float * out, int len) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if(idx < len) {
out[idx] = in1[idx] + in2[idx];
}
}
int main(int argc, char ** argv) {
wbArg_t args;
int inputLength;
float * hostInput1;
float * hostInput2;
float * hostOutput;
float * deviceInput1;
float * deviceInput2;
float * deviceOutput;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput1 = (float *) wbImport(wbArg_getInputFile(args, 0), &inputLength);
hostInput2 = (float *) wbImport(wbArg_getInputFile(args, 1), &inputLength);
hostOutput = (float *) malloc(inputLength * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", inputLength);
wbTime_start(GPU, "Allocating GPU memory.");
int inputSize = inputLength * sizeof(float);
cudaMalloc((void **) &deviceInput1, inputSize);
cudaMalloc((void **) &deviceInput2, inputSize);
cudaMalloc((void **) &deviceOutput, inputSize);
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
cudaMemcpy(deviceInput1, hostInput1, inputSize, cudaMemcpyHostToDevice);
cudaMemcpy(deviceInput2, hostInput2, inputSize, cudaMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
int blockSize = 256;
struct dim3 DimGrid((inputLength - 1)/blockSize + 1, 1, 1);
struct dim3 DimBlock(blockSize, 1, 1);
wbTime_start(Compute, "Performing CUDA computation");
vecAdd<<<DimGrid, DimBlock>>>(deviceInput1, deviceInput2, deviceOutput, inputLength);
cudaThreadSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
cudaMemcpy(hostOutput, deviceOutput, inputSize, cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
cudaFree(deviceInput1);
cudaFree(deviceInput2);
cudaFree(deviceOutput);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostOutput, inputLength);
free(hostInput1);
free(hostInput2);
free(hostOutput);
return 0;
}
|
4da8c23e819730c03e644cf5691211fa6bb5b067.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "helper_timer.h" // helper functions for timers
#include "helper_cuda.h" // helper functions (cuda error checking and initialization)
#include "newuoa_h.h"
#include <hiprand/hiprand_kernel.h>
// Thread block size
#define THREAD_N 128
__device__ double4 reduce_sum(double4 in, int n) {
extern __shared__ double4 sdata[];
// Perform first level of reduction:
// - Write to shared memory
int ltid = threadIdx.x;
sdata[ltid] = in;
__syncthreads();
// Do reduction in shared mem
for (int s = blockDim.x / 2 ; s > 0 ; s >>= 1) {
if (ltid < s) {
double d = sdata[ltid + s].x - sdata[ltid].x, dn = d / 2, dn2 = dn * dn, d2 = d * dn;
sdata[ltid].w += sdata[ltid + s].w + d2 * dn2 * n + 6 * dn2 * (sdata[ltid].y + sdata[ltid + s].y) - 4 * dn * (sdata[ltid].z - sdata[ltid + s].z);
sdata[ltid].z += sdata[ltid + s].z - 3 * dn * (sdata[ltid].y - sdata[ltid + s].y);
sdata[ltid].y += sdata[ltid + s].y + d2 * n;
sdata[ltid].x += dn;
n <<= 1;
}
__syncthreads();
}
return sdata[0];
}
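/*
 * Editor's note (added commentary, not in the original source): the update in
 * reduce_sum is the equal-count case of the pairwise central-moment merge
 * identities (Chan/Golub/LeVeque style). For two groups of n samples each,
 * with means mA, mB, second central sums M2A, M2B and d = mB - mA:
 *
 *   mean_AB = mA + d/2
 *   M2_AB   = M2A + M2B + d^2 * n / 2
 *
 * which is exactly what the code computes with dn = d/2 and d2*n = d^2*n/2;
 * the .z and .w updates are the analogous third- and fourth-moment merges,
 * matching the single-sample updates in computeMoments below.
 */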
__device__ double4 reduce_fractions(double4 in) {
extern __shared__ double4 sdata[];
// Perform first level of reduction:
// - Write to shared memory
int ltid = threadIdx.x;
sdata[ltid] = in;
__syncthreads();
// Do reduction in shared mem
for (int s = blockDim.x / 2 ; s > 0 ; s >>= 1) {
if (ltid < s) {
sdata[ltid].x += (sdata[ltid + s].x - sdata[ltid].x) / 2;
sdata[ltid].y += (sdata[ltid + s].y - sdata[ltid].y) / 2;
sdata[ltid].z += (sdata[ltid + s].z - sdata[ltid].z) / 2;
sdata[ltid].w += (sdata[ltid + s].w - sdata[ltid].w) / 2;
}
__syncthreads();
}
return sdata[0];
}
__device__ inline void computeMoments(double4 *m, double x, int n) {
double d, d2, dn, dn2;
d = x - m->x;
dn = d / (n + 1);
dn2 = dn * dn;
d2 = d * dn * n;
m->w += d2 * dn2 * (n*n - n + 1) + 6 * dn2 * m->y - 4 * dn * m->z;
m->z += d2 * dn * (n - 1) - 3 * dn * m->y;
m->y += d2;
m->x += dn;
}
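/*
 * Editor's note -- illustrative helper, not part of the original file. The
 * double4 accumulator packs x = running mean, y = M2 (sum of squared
 * deviations), z = M3, w = M4, so after n samples the usual summary
 * statistics can be recovered roughly as below (the function name and
 * signature are invented here for illustration).
 */
__host__ __device__ static inline void momentsToStats(double4 m, int n,
                                                      double *mean, double *var,
                                                      double *skew, double *kurt) {
  *mean = m.x;
  *var  = m.y / n;                            // population variance
  *skew = sqrt((double)n) * m.z / pow(m.y, 1.5);
  *kurt = (double)n * m.w / (m.y * m.y);      // kurtosis (not excess)
}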
__device__ inline void computeFractions(double4 *m, double x, int n) {
m->x += ((x < 0.05) - m->x) / (n + 1);
m->y += ((x < 0.1) - m->y) / (n + 1);
m->z += ((x < 0.2) - m->z) / (n + 1);
m->w += ((x < 0.5) - m->w) / (n + 1);
}
// Simulation kernel
__launch_bounds__(1024)
__global__ void simulate(hiprandState_t *const rngStates1, curandStatePhilox4_32_10 *const rngStates2,
double4* moments, const int nsim, const double2 lambda, const double2 sigma, const double2 delta) {
// Determine thread ID
int bid = blockIdx.x;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int step = gridDim.x * blockDim.x;
double4 m[4] = { make_double4(0,0,0,0) };
// Initialise the RNG
hiprandState_t state1 = rngStates1[tid];
curandStatePhilox4_32_10 state2 = rngStates2[tid];
for (int i = tid; i < nsim; i += step) {
// draw initial from normal distribution with same mean and variance
double2 z = hiprand_normal2_double(&state1);
z.x = sigma.x/sqrt(1+2*delta.x/lambda.x)*z.x;
z.y = sigma.y/sqrt(1+2*delta.y/lambda.y)*z.y;
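    // Editor's note (added explanation): the scaling above appears to seed z at
    // its stationary distribution. Between jumps z decays at rate delta, and at
    // Poisson rate lambda it is redrawn as N(0, sigma^2); averaging the decayed
    // variance sigma^2 * E[exp(-2*delta*S)] over the exponential time-since-jump
    // S ~ Exp(lambda) gives sigma^2 * lambda/(lambda + 2*delta), i.e. a standard
    // deviation of sigma/sqrt(1 + 2*delta/lambda), consistent with the code.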
// simulate income path in dt increments
double zann[5] = { 0.0 };
for (int t=-25; t<5; t++) // burn 25 years, only need 5 years
for (int q=0; q<16; q++) {
// Generate pseudo-random numbers
double2 rand = hiprand_normal2_double(&state1);
double2 jumprand = hiprand_uniform2_double(&state2);
z.x = jumprand.x > 1 - lambda.x/4 ? sigma.x*rand.x : (1 - delta.x/4) * z.x;
z.y = jumprand.y > 1 - lambda.y/4 ? sigma.y*rand.y : (1 - delta.y/4) * z.y;
if (t >= 0) zann[t] += exp(z.x + z.y); // aggregate to annual income
}
//if (tid == 0) printf("%d/%d% d/%d: %.15g %.15g %.15g\n",threadIdx.x,blockDim.x,blockIdx.x,gridDim.x,log(zann[0]),log(zann[1]/zann[0]),log(zann[4]/zann[0]));
// Compute central moments
computeMoments(&m[0],log(zann[0]),i/step); // logs
computeMoments(&m[1],log(zann[1]/zann[0]),i/step); // 1 year log changes
computeMoments(&m[2],log(zann[4]/zann[0]),i/step); // 5 year log changes
computeFractions(&m[3],abs(log(zann[1]/zann[0])),i/step); // fraction 1 year log changes in ranges
}
//if (blockIdx.x==0) printf("%03d: %.15g %.15g %.15g %.15g %.15g %.15g %.15g %.15g %.15g %.15g %.15g %.15g\n",tid,m[0].x,m[0].y,m[0].z,m[0].w,m[1].x,m[1].y,m[1].z,m[1].w,m[2].x,m[2].y,m[2].z,m[2].w);
// Copy RNG state back to global memory
// rngStates1[tid] = state1;
// rngStates2[tid] = state2;
// Reduce within the block
m[0] = reduce_sum(m[0],nsim/step);
m[1] = reduce_sum(m[1],nsim/step);
m[2] = reduce_sum(m[2],nsim/step);
m[3] = reduce_fractions(m[3]);
// Store the result
if (threadIdx.x == 0) {
moments[bid*4] = m[0];
moments[bid*4+1] = m[1];
moments[bid*4+2] = m[2];
moments[bid*4+3] = m[3];
//printf("%03d: %.15g %.15g %.15g %.15g %.15g %.15g %.15g %.15g %.15g %.15g %.15g %.15g\n",tid,m[0].x,m[0].y,m[0].z,m[0].w,m[1].x,m[1].y,m[1].z,m[1].w,m[2].x,m[2].y,m[2].z,m[2].w);
}
}
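// Data flow of the kernel above, as read from the code: each thread walks every step-th of the
// nsim simulation paths (a grid-stride loop), folds log income levels and 1/5-year log changes
// into its private running moments m[0..3], and the block then tree-reduces those into a single
// double4 per statistic. The reduction needs THREAD_N * sizeof(double4) bytes of dynamic shared
// memory, which matches the shared-memory size passed at the launch site in dfovec below.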
// RNG init kernel
static __global__ void rngSetupStates(hiprandState_t *const rngStates1, curandStatePhilox4_32_10 *const rngStates2, int device_id) {
// Determine global thread ID
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// Each threadblock gets a different seed,
// Threads within a threadblock get different sequence numbers
const int seed = blockIdx.x + gridDim.x * device_id; //(int)clock64();
// Initialise the RNG
hiprand_init(seed, tid, 0, &rngStates1[tid]);
hiprand_init(seed, tid, 0, &rngStates2[tid]);
}
typedef struct PlanType
{
// Device ID for multi-GPU version
int device;
// Simulation path count for this plan
int nsim;
int gridSize;
// Stream handle and event object for this plan
hipStream_t stream;
hipEvent_t event;
// Device- and host-side intermediate results
double4 *d_moments;
double4 *h_moments;
// Random number generator states
hiprandState_t *d_rngStates1;
curandStatePhilox4_32_10 *d_rngStates2;
} PlanType;
typedef struct UserParamsType {
int nf;
int nPlans;
PlanType *plan;
// Host-side target moments and result destination
double4 targets[4];
double4 moments[4];
} UserParamsType;
static void dfovec(const long int nx, const long int mv, const double *x, double *v_err, const void * userParams) {
UserParamsType *pUserParams = (UserParamsType *) userParams;
PlanType *plan = pUserParams->plan;
int nPlans = pUserParams->nPlans;
double4 *targets = pUserParams->targets;
double4 *moments = pUserParams->moments;
double2 lambda = make_double2(2 / (1 + exp(-x[0])), 2 / (1 + exp(-x[1])));
double2 sigma = make_double2(2 / (1 + exp(-x[2])), 2 / (1 + exp(-x[3])));
double2 delta = make_double2(1 / (1 + exp(-x[4])), 1 / (1 + exp(-x[5])));
if (nx != 6 || mv != 8) {
fprintf(stderr,"*** dfovec incorrectly called with n=%d and mv=%d\n",nx,mv);
return;
}
for (int i=0; i<nPlans; i++) {
// Simulate the process and compute moments
checkCudaErrors(hipSetDevice(plan[i].device));
hipLaunchKernelGGL(( simulate), dim3(plan[i].gridSize), dim3(THREAD_N), THREAD_N*sizeof(double4), plan[i].stream, plan[i].d_rngStates1, plan[i].d_rngStates2, plan[i].d_moments, plan[i].nsim, lambda, sigma, delta);
getLastCudaError("Failed to launch simulate kernel\n");
// Copy partial results to host
checkCudaErrors(hipMemcpyAsync(plan[i].h_moments, plan[i].d_moments, plan[i].gridSize*4*sizeof(double4), hipMemcpyDeviceToHost, plan[i].stream));
checkCudaErrors(hipEventRecord(plan[i].event, plan[i].stream));
}
for (int i=0; i<nPlans; i++) {
checkCudaErrors(hipSetDevice(plan[i].device));
hipEventSynchronize(plan[i].event);
}
// Complete reduction on host
for (int j=0; j<3; j++) {
double m1 = 0, m2 = 0, m3 = 0, m4 = 0;
int nsim = 0;
for (int i=0; i<nPlans; i++) {
int nb = plan[i].nsim / plan[i].gridSize;
for (int n=0; n<plan[i].gridSize; n++) {
double4 m = plan[i].h_moments[n*4+j];
double d = m.x - m1, dn = d / (nsim + nb), dn2 = dn * dn, d2 = d * dn * nb * nsim;
m4 += m.w + d2 * dn2 * (nsim*nsim - nsim*nb + nb*nb) + 6 * dn2 * (nsim*nsim*m.y + nb*nb*m2) + 4 * dn * (nsim*m.z - nb*m3);
m3 += m.z + d2 * dn * (nsim - nb) + 3 * dn * (nsim*m.y - nb*m2);
m2 += m.y + d2;
m1 += dn * nb;
nsim += nb;
//printf("++ %.15g %.15g %.15g %.15g\n",m.x,m.y,m.z,m.w);
}
//printf("%.15g %.15g %.15g %.15g\n",m1,m2,m3,m4);
}
// Compute standardised moments
m2 /= nsim;
m3 /= nsim*m2*sqrt(m2);
m4 /= nsim*m2*m2;
moments[j].x = m1; //mean
moments[j].y = m2; // variance
moments[j].z = m3; // skewness
moments[j].w = m4; // kurtosis
//printf("%.15g %.15g %.15g %.15g\n",moments[j].x,moments[j].y,moments[j].z,moments[j].w);
}
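// The loop above finishes the reduction on the host: per-block partial moments are folded in with
// the same pairwise-merge formulas as reduce_sum, but for unequal counts (running total nsim vs.
// block count nb), and the raw sums are then standardised as
//   variance = M2/nsim, skewness = M3/(nsim*variance^1.5), kurtosis = M4/(nsim*variance^2).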
// Compute fraction of dy1 less than 5%, 10%, 20% and 50%
moments[3] = make_double4(0.0,0.0,0.0,0.0);
int nsim = 0;
for (int i=0; i<nPlans; i++) {
int nb = plan[i].nsim / plan[i].gridSize;
for (int n=0; n<plan[i].gridSize; n++) {
double4 m = plan[i].h_moments[n*4+3];
moments[3].x += (m.x - moments[3].x) * nb / (nsim + nb);
moments[3].y += (m.y - moments[3].y) * nb / (nsim + nb);
moments[3].z += (m.z - moments[3].z) * nb / (nsim + nb);
moments[3].w += (m.w - moments[3].w) * nb / (nsim + nb);
nsim += nb;
}
}
//printf("%.15g %.15g %.15g %.15g\n",moments[3].x,moments[3].y,moments[3].z,moments[3].w);
// printf("%.15g\t%.15g\t%.15g\t%.15g\t%.15g\t%.15g\t%.15g\n",obj,lambda.x,lambda.y,sigma.x,sigma.y,delta.x,delta.y);
v_err[0] = moments[0].y/targets[0].y-1;
v_err[1] = moments[1].y/targets[1].y-1;
v_err[2] = moments[1].w/targets[1].w-1;
v_err[3] = moments[2].y/targets[2].y-1;
v_err[4] = moments[2].w/targets[2].w-1;
v_err[5] = moments[3].y/targets[3].y-1;
v_err[6] = moments[3].z/targets[3].z-1;
v_err[7] = moments[3].w/targets[3].w-1;
v_err[2] *= sqrt(0.5);
v_err[4] *= sqrt(0.5);
++pUserParams->nf;
}
int main(int argc, char *argv[]) {
// Get number of available devices
int GPU_N = 0;
checkCudaErrors(hipGetDeviceCount(&GPU_N));
if (!GPU_N) {
fprintf(stderr,"There are no CUDA devices.\n");
exit(EXIT_FAILURE);
}
printf("CUDA-capable device count: %i\n", GPU_N);
long NSIM = 1;
if (argc<=1) {
fprintf(stderr,"Usage: estimate N, where N is the exponent of two in the number of simulation paths.\n");
exit(EXIT_FAILURE);
} else
NSIM <<= atoi(argv[1]);
if (((NSIM/GPU_N) % THREAD_N) | (NSIM < GPU_N)) {
fprintf(stderr,"The number of simulation paths per GPU must be a multiple of block size %d.\n",THREAD_N);
exit(EXIT_FAILURE);
}
UserParamsType userParams;
userParams.nf = 0;
userParams.nPlans = GPU_N;
userParams.plan = new PlanType[GPU_N];
for (int device=0; device<GPU_N; device++) {
// Attach to GPU
checkCudaErrors(hipSetDevice(device));
// Get device properties
struct hipDeviceProp_t deviceProperties;
checkCudaErrors(hipGetDeviceProperties(&deviceProperties, device));
// Check precision is valid
if (deviceProperties.major < 1 || (deviceProperties.major == 1 && deviceProperties.minor < 3)) {
printf("Device %d does not have double precision support.\n", device);
exit(EXIT_FAILURE);
}
PlanType *p = &userParams.plan[device];
p->device = device;
// Initialize stream handle and event object for the current device
checkCudaErrors(hipStreamCreate(&p->stream));
checkCudaErrors(hipEventCreate(&p->event));
// Divide the work between GPUs equally
p->nsim = NSIM / GPU_N;
if (device < (NSIM % GPU_N)) p->nsim++;
p->gridSize = p->nsim / THREAD_N;
// Aim to launch around ten to twenty times as many blocks as there
// are multiprocessors on the target device.
// read more on grid-stride loops: https://devblogs.nvidia.com/cuda-pro-tip-write-flexible-kernels-grid-stride-loops/
while (p->gridSize > 20 * deviceProperties.multiProcessorCount) p->gridSize >>= 1;
printf("GPU Device #%i: %s\n", p->device, deviceProperties.name);
printf("Simulation paths: %i\n", p->nsim);
printf("Grid size: %i\n", p->gridSize);
// Allocate intermediate memory for MC results
// Each thread block will produce four double4 results
checkCudaErrors(hipHostMalloc(&p->h_moments,p->gridSize*4*sizeof(double4)));
checkCudaErrors(hipMalloc(&p->d_moments, p->gridSize*4*sizeof(double4)));
// Allocate memory for RNG states
checkCudaErrors(hipMalloc(&p->d_rngStates1, p->gridSize * THREAD_N * sizeof(hiprandState_t)));
checkCudaErrors(hipMalloc(&p->d_rngStates2, p->gridSize * THREAD_N * sizeof(curandStatePhilox4_32_10)));
// Initialise RNG states: each block on each device gets a distinct seed, and each thread a distinct subsequence
hipLaunchKernelGGL(( rngSetupStates), dim3(p->gridSize), dim3(THREAD_N), 0, 0, p->d_rngStates1, p->d_rngStates2, p->device);
getLastCudaError("rngSetupStates kernel failed.\n");
checkCudaErrors(hipDeviceSynchronize());
}
// Target moments for USA: 0.7,0.23,17.8,0.46,11.55,0.54,0.71,0.86
// Target moments for Canada: 0.760,0.217,13.377,0.437,8.782,0.51,0.68,0.85
userParams.targets[0] = make_double4(NAN, 0.760, NAN, NAN); // LogY: Mean,Var,Skew,Kurt
userParams.targets[1] = make_double4(NAN, 0.217, NAN, 13.377); // D1LogY: Mean,Var,Skew,Kurt
userParams.targets[2] = make_double4(NAN, 0.437, NAN, 8.782); // D5LogY: Mean,Var,Skew,Kurt
userParams.targets[3] = make_double4(NAN, 0.51, 0.68, 0.85); // FracD1: <5%,<10%,<20%,<50%
long int n=6, mv=8, npt=2*n+1, maxfun=500*(n+1), iprint=1;
double v_err[8], rhobeg=5.0, rhoend=1e-4, *w;
double xmax[6] = {2,2,2,2,1,1}, xmin[6] = {0};
// double x[6] = {0.0972241396763905, 0.014312611368279, 1.60304896242711, 0.892309166034993, 0.947420941274568, 0.00117609031021279};
double x[6] = {.08,.007,1.6,1.6,.7,.01};
// double x[6] = {0.0611244618471226,0.000613274511999765,1.46320215181056,1.999691573564,0.224227629475885,0.0018853181294203};
int wsize = (npt+11)*(npt+n)+n*(5*n+11)/2+mv*(npt+n*(n+7)/2+7);
checkCudaErrors(hipHostMalloc(&w,wsize*sizeof(double)));
for (int i = 0; i<6; i++)
x[i] = -log(xmax[i] / (x[i] - xmin[i]) - 1); // invlogistic
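// The optimiser works on unconstrained variables: a parameter p in (xmin, xmin+xmax) is mapped to
// the real line by the inverse logistic above and recovered after the solve via
//   p = xmin + xmax/(1 + exp(-x)).
// For example (values purely illustrative), xmin=0, xmax=2, p=0.08 gives x = -log(2/0.08 - 1) ~ -3.18,
// and 2/(1 + exp(3.18)) ~ 0.08 maps it back.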
//Start the timer
StopWatchInterface *hTimer = NULL;
sdkCreateTimer(&hTimer);
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
newuoa_h(n, npt, dfovec, &userParams, x, rhobeg, rhoend, iprint, maxfun, w, mv);
dfovec(n,mv,x,v_err,&userParams);
//Stop the timer
sdkStopTimer(&hTimer);
float time = sdkGetTimerValue(&hTimer)/userParams.nf;
sdkDeleteTimer(&hTimer);
double obj = 0;
for (int i=0; i<mv; i++)
obj += v_err[i]*v_err[i];
for (int i=0; i<6; i++)
x[i] = xmin[i]+xmax[i]/(1+exp(-x[i])); // logistic
printf("\nTime per function evaluation (ms.): %f\n", time);
printf("\nFinal objective function value: %.15g\n",obj);//sqrt(obj*2/7));
printf("\nThe returned solution is:\n");
printf(" lambda: %.15g %.15g\n",x[0],x[1]);
printf(" sigma: %.15g %.15g\n",x[2],x[3]);
printf(" delta: %.15g %.15g\n",x[4],x[5]);
printf("\n Moment: Target:\tModel:\n");
printf(" MeanLogY %.15g\t%.15g\n",userParams.targets[0].x,userParams.moments[0].x);
printf(" VarLogY %.15g\t%.15g\n",userParams.targets[0].y,userParams.moments[0].y);
printf(" SkewLogY %.15g\t%.15g\n",userParams.targets[0].z,userParams.moments[0].z);
printf(" KurtLogY %.15g\t%.15g\n",userParams.targets[0].w,userParams.moments[0].w);
printf(" MeanD1LogY %.15g\t%.15g\n",userParams.targets[1].x,userParams.moments[1].x);
printf(" VarD1LogY %.15g\t%.15g\n",userParams.targets[1].y,userParams.moments[1].y);
printf(" SkewD1LogY %.15g\t%.15g\n",userParams.targets[1].z,userParams.moments[1].z);
printf(" KurtD1LogY %.15g\t%.15g\n",userParams.targets[1].w,userParams.moments[1].w);
printf(" MeanD5LogY %.15g\t%.15g\n",userParams.targets[2].x,userParams.moments[2].x);
printf(" VarD5LogY %.15g\t%.15g\n",userParams.targets[2].y,userParams.moments[2].y);
printf(" SkewD5LogY %.15g\t%.15g\n",userParams.targets[2].z,userParams.moments[2].z);
printf(" KurtD5LogY %.15g\t%.15g\n",userParams.targets[2].w,userParams.moments[2].w);
printf(" FracD1Less5 %.15g\t%.15g\n",userParams.targets[3].x,userParams.moments[3].x);
printf(" FracD1Less10 %.15g\t%.15g\n",userParams.targets[3].y,userParams.moments[3].y);
printf(" FracD1Less20 %.15g\t%.15g\n",userParams.targets[3].z,userParams.moments[3].z);
printf(" FracD1Less50 %.15g\t%.15g\n",userParams.targets[3].w,userParams.moments[3].w);
// Cleanup
for (int device=0; device<GPU_N; device++) {
PlanType *p = &userParams.plan[device];
checkCudaErrors(hipSetDevice(p->device));
checkCudaErrors(hipStreamDestroy(p->stream));
checkCudaErrors(hipEventDestroy(p->event));
checkCudaErrors(hipHostFree(p->h_moments));
checkCudaErrors(hipFree(p->d_moments));
checkCudaErrors(hipFree(p->d_rngStates1));
checkCudaErrors(hipFree(p->d_rngStates2));
}
checkCudaErrors(hipHostFree(w));
delete[] userParams.plan;
return(0);
}
| 4da8c23e819730c03e644cf5691211fa6bb5b067.cu | #include "helper_timer.h" // helper functions for timers
#include "helper_cuda.h" // helper functions (cuda error checking and initialization)
#include "newuoa_h.h"
#include <curand_kernel.h>
// Thread block size
#define THREAD_N 128
__device__ double4 reduce_sum(double4 in, int n) {
extern __shared__ double4 sdata[];
// Perform first level of reduction:
// - Write to shared memory
int ltid = threadIdx.x;
sdata[ltid] = in;
__syncthreads();
// Do reduction in shared mem
for (int s = blockDim.x / 2 ; s > 0 ; s >>= 1) {
if (ltid < s) {
double d = sdata[ltid + s].x - sdata[ltid].x, dn = d / 2, dn2 = dn * dn, d2 = d * dn;
sdata[ltid].w += sdata[ltid + s].w + d2 * dn2 * n + 6 * dn2 * (sdata[ltid].y + sdata[ltid + s].y) - 4 * dn * (sdata[ltid].z - sdata[ltid + s].z);
sdata[ltid].z += sdata[ltid + s].z - 3 * dn * (sdata[ltid].y - sdata[ltid + s].y);
sdata[ltid].y += sdata[ltid + s].y + d2 * n;
sdata[ltid].x += dn;
n <<= 1;
}
__syncthreads();
}
return sdata[0];
}
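// Pairwise merge of central moments (Chan et al. / Pebay) for two partial results of equal size n,
// e.g. M2 = M2_A + M2_B + delta^2 * n/2 with delta = mean_B - mean_A; n is doubled after each level
// of the tree reduction.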
__device__ double4 reduce_fractions(double4 in) {
extern __shared__ double4 sdata[];
// Perform first level of reduction:
// - Write to shared memory
int ltid = threadIdx.x;
sdata[ltid] = in;
__syncthreads();
// Do reduction in shared mem
for (int s = blockDim.x / 2 ; s > 0 ; s >>= 1) {
if (ltid < s) {
sdata[ltid].x += (sdata[ltid + s].x - sdata[ltid].x) / 2;
sdata[ltid].y += (sdata[ltid + s].y - sdata[ltid].y) / 2;
sdata[ltid].z += (sdata[ltid + s].z - sdata[ltid].z) / 2;
sdata[ltid].w += (sdata[ltid + s].w - sdata[ltid].w) / 2;
}
__syncthreads();
}
return sdata[0];
}
__device__ inline void computeMoments(double4 *m, double x, int n) {
double d, d2, dn, dn2;
d = x - m->x;
dn = d / (n + 1);
dn2 = dn * dn;
d2 = d * dn * n;
m->w += d2 * dn2 * (n*n - n + 1) + 6 * dn2 * m->y - 4 * dn * m->z;
m->z += d2 * dn * (n - 1) - 3 * dn * m->y;
m->y += d2;
m->x += dn;
}
__device__ inline void computeFractions(double4 *m, double x, int n) {
m->x += ((x < 0.05) - m->x) / (n + 1);
m->y += ((x < 0.1) - m->y) / (n + 1);
m->z += ((x < 0.2) - m->z) / (n + 1);
m->w += ((x < 0.5) - m->w) / (n + 1);
}
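// One-pass (Welford/Pebay-style) update of running mean/M2/M3/M4 with a new observation x after n
// previous ones, e.g. M2' = M2 + delta^2 * n/(n+1) with delta = x - mean; computeFractions tracks
// running indicator means the same way.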
// Simulation kernel
__launch_bounds__(1024)
__global__ void simulate(curandState *const rngStates1, curandStatePhilox4_32_10 *const rngStates2,
double4* moments, const int nsim, const double2 lambda, const double2 sigma, const double2 delta) {
// Determine thread ID
int bid = blockIdx.x;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int step = gridDim.x * blockDim.x;
double4 m[4] = { make_double4(0,0,0,0) };
// Initialise the RNG
curandState state1 = rngStates1[tid];
curandStatePhilox4_32_10 state2 = rngStates2[tid];
for (int i = tid; i < nsim; i += step) {
// draw initial from normal distribution with same mean and variance
double2 z = curand_normal2_double(&state1);
z.x = sigma.x/sqrt(1+2*delta.x/lambda.x)*z.x;
z.y = sigma.y/sqrt(1+2*delta.y/lambda.y)*z.y;
// simulate income path in dt increments
double zann[5] = { 0.0 };
for (int t=-25; t<5; t++) // burn 25 years, only need 5 years
for (int q=0; q<16; q++) {
// Generate pseudo-random numbers
double2 rand = curand_normal2_double(&state1);
double2 jumprand = curand_uniform2_double(&state2);
z.x = jumprand.x > 1 - lambda.x/4 ? sigma.x*rand.x : (1 - delta.x/4) * z.x;
z.y = jumprand.y > 1 - lambda.y/4 ? sigma.y*rand.y : (1 - delta.y/4) * z.y;
if (t >= 0) zann[t] += exp(z.x + z.y); // aggregate to annual income
}
//if (tid == 0) printf("%d/%d% d/%d: %.15g %.15g %.15g\n",threadIdx.x,blockDim.x,blockIdx.x,gridDim.x,log(zann[0]),log(zann[1]/zann[0]),log(zann[4]/zann[0]));
// Compute central moments
computeMoments(&m[0],log(zann[0]),i/step); // logs
computeMoments(&m[1],log(zann[1]/zann[0]),i/step); // 1 year log changes
computeMoments(&m[2],log(zann[4]/zann[0]),i/step); // 5 year log changes
computeFractions(&m[3],abs(log(zann[1]/zann[0])),i/step); // fraction 1 year log changes in ranges
}
//if (blockIdx.x==0) printf("%03d: %.15g %.15g %.15g %.15g %.15g %.15g %.15g %.15g %.15g %.15g %.15g %.15g\n",tid,m[0].x,m[0].y,m[0].z,m[0].w,m[1].x,m[1].y,m[1].z,m[1].w,m[2].x,m[2].y,m[2].z,m[2].w);
// Copy RNG state back to global memory
// rngStates1[tid] = state1;
// rngStates2[tid] = state2;
// Reduce within the block
m[0] = reduce_sum(m[0],nsim/step);
m[1] = reduce_sum(m[1],nsim/step);
m[2] = reduce_sum(m[2],nsim/step);
m[3] = reduce_fractions(m[3]);
// Store the result
if (threadIdx.x == 0) {
moments[bid*4] = m[0];
moments[bid*4+1] = m[1];
moments[bid*4+2] = m[2];
moments[bid*4+3] = m[3];
//printf("%03d: %.15g %.15g %.15g %.15g %.15g %.15g %.15g %.15g %.15g %.15g %.15g %.15g\n",tid,m[0].x,m[0].y,m[0].z,m[0].w,m[1].x,m[1].y,m[1].z,m[1].w,m[2].x,m[2].y,m[2].z,m[2].w);
}
}
// RNG init kernel
static __global__ void rngSetupStates(curandState *const rngStates1, curandStatePhilox4_32_10 *const rngStates2, int device_id) {
// Determine global thread ID
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// Each threadblock gets a different seed,
// Threads within a threadblock get different sequence numbers
const int seed = blockIdx.x + gridDim.x * device_id; //(int)clock64();
// Initialise the RNG
curand_init(seed, tid, 0, &rngStates1[tid]);
curand_init(seed, tid, 0, &rngStates2[tid]);
}
typedef struct PlanType
{
// Device ID for multi-GPU version
int device;
// Simulation path count for this plan
int nsim;
int gridSize;
// Stream handle and event object for this plan
cudaStream_t stream;
cudaEvent_t event;
// Device- and host-side intermediate results
double4 *d_moments;
double4 *h_moments;
// Random number generator states
curandState *d_rngStates1;
curandStatePhilox4_32_10 *d_rngStates2;
} PlanType;
typedef struct UserParamsType {
int nf;
int nPlans;
PlanType *plan;
// Host-side target moments and result destination
double4 targets[4];
double4 moments[4];
} UserParamsType;
static void dfovec(const long int nx, const long int mv, const double *x, double *v_err, const void * userParams) {
UserParamsType *pUserParams = (UserParamsType *) userParams;
PlanType *plan = pUserParams->plan;
int nPlans = pUserParams->nPlans;
double4 *targets = pUserParams->targets;
double4 *moments = pUserParams->moments;
double2 lambda = make_double2(2 / (1 + exp(-x[0])), 2 / (1 + exp(-x[1])));
double2 sigma = make_double2(2 / (1 + exp(-x[2])), 2 / (1 + exp(-x[3])));
double2 delta = make_double2(1 / (1 + exp(-x[4])), 1 / (1 + exp(-x[5])));
if (nx != 6 || mv != 8) {
fprintf(stderr,"*** dfovec incorrectly called with n=%d and mv=%d\n",nx,mv);
return;
}
for (int i=0; i<nPlans; i++) {
// Simulate the process and compute moments
checkCudaErrors(cudaSetDevice(plan[i].device));
simulate<<<plan[i].gridSize, THREAD_N, THREAD_N*sizeof(double4), plan[i].stream>>>(plan[i].d_rngStates1, plan[i].d_rngStates2, plan[i].d_moments, plan[i].nsim, lambda, sigma, delta);
getLastCudaError("Failed to launch simulate kernel\n");
// Copy partial results to host
checkCudaErrors(cudaMemcpyAsync(plan[i].h_moments, plan[i].d_moments, plan[i].gridSize*4*sizeof(double4), cudaMemcpyDeviceToHost, plan[i].stream));
checkCudaErrors(cudaEventRecord(plan[i].event, plan[i].stream));
}
for (int i=0; i<nPlans; i++) {
checkCudaErrors(cudaSetDevice(plan[i].device));
cudaEventSynchronize(plan[i].event);
}
// Complete reduction on host
for (int j=0; j<3; j++) {
double m1 = 0, m2 = 0, m3 = 0, m4 = 0;
int nsim = 0;
for (int i=0; i<nPlans; i++) {
int nb = plan[i].nsim / plan[i].gridSize;
for (int n=0; n<plan[i].gridSize; n++) {
double4 m = plan[i].h_moments[n*4+j];
double d = m.x - m1, dn = d / (nsim + nb), dn2 = dn * dn, d2 = d * dn * nb * nsim;
m4 += m.w + d2 * dn2 * (nsim*nsim - nsim*nb + nb*nb) + 6 * dn2 * (nsim*nsim*m.y + nb*nb*m2) + 4 * dn * (nsim*m.z - nb*m3);
m3 += m.z + d2 * dn * (nsim - nb) + 3 * dn * (nsim*m.y - nb*m2);
m2 += m.y + d2;
m1 += dn * nb;
nsim += nb;
//printf("++ %.15g %.15g %.15g %.15g\n",m.x,m.y,m.z,m.w);
}
//printf("%.15g %.15g %.15g %.15g\n",m1,m2,m3,m4);
}
// Compute standardised moments
m2 /= nsim;
m3 /= nsim*m2*sqrt(m2);
m4 /= nsim*m2*m2;
moments[j].x = m1; //mean
moments[j].y = m2; // variance
moments[j].z = m3; // skewness
moments[j].w = m4; // kurtosis
//printf("%.15g %.15g %.15g %.15g\n",moments[j].x,moments[j].y,moments[j].z,moments[j].w);
}
// Compute fraction of dy1 less than 5%, 10%, 20% and 50%
moments[3] = make_double4(0.0,0.0,0.0,0.0);
int nsim = 0;
for (int i=0; i<nPlans; i++) {
int nb = plan[i].nsim / plan[i].gridSize;
for (int n=0; n<plan[i].gridSize; n++) {
double4 m = plan[i].h_moments[n*4+3];
moments[3].x += (m.x - moments[3].x) * nb / (nsim + nb);
moments[3].y += (m.y - moments[3].y) * nb / (nsim + nb);
moments[3].z += (m.z - moments[3].z) * nb / (nsim + nb);
moments[3].w += (m.w - moments[3].w) * nb / (nsim + nb);
nsim += nb;
}
}
//printf("%.15g %.15g %.15g %.15g\n",moments[3].x,moments[3].y,moments[3].z,moments[3].w);
// printf("%.15g\t%.15g\t%.15g\t%.15g\t%.15g\t%.15g\t%.15g\n",obj,lambda.x,lambda.y,sigma.x,sigma.y,delta.x,delta.y);
v_err[0] = moments[0].y/targets[0].y-1;
v_err[1] = moments[1].y/targets[1].y-1;
v_err[2] = moments[1].w/targets[1].w-1;
v_err[3] = moments[2].y/targets[2].y-1;
v_err[4] = moments[2].w/targets[2].w-1;
v_err[5] = moments[3].y/targets[3].y-1;
v_err[6] = moments[3].z/targets[3].z-1;
v_err[7] = moments[3].w/targets[3].w-1;
v_err[2] *= sqrt(0.5);
v_err[4] *= sqrt(0.5);
++pUserParams->nf;
}
int main(int argc, char *argv[]) {
// Get number of available devices
int GPU_N = 0;
checkCudaErrors(cudaGetDeviceCount(&GPU_N));
if (!GPU_N) {
fprintf(stderr,"There are no CUDA devices.\n");
exit(EXIT_FAILURE);
}
printf("CUDA-capable device count: %i\n", GPU_N);
long NSIM = 1;
if (argc<=1) {
fprintf(stderr,"Usage: estimate N, where N is the exponent of two in the number of simulation paths.\n");
exit(EXIT_FAILURE);
} else
NSIM <<= atoi(argv[1]);
if (((NSIM/GPU_N) % THREAD_N) | (NSIM < GPU_N)) {
fprintf(stderr,"The number of simulation paths per GPU must be a multiple of block size %d.\n",THREAD_N);
exit(EXIT_FAILURE);
}
UserParamsType userParams;
userParams.nf = 0;
userParams.nPlans = GPU_N;
userParams.plan = new PlanType[GPU_N];
for (int device=0; device<GPU_N; device++) {
// Attach to GPU
checkCudaErrors(cudaSetDevice(device));
// Get device properties
struct cudaDeviceProp deviceProperties;
checkCudaErrors(cudaGetDeviceProperties(&deviceProperties, device));
// Check precision is valid
if (deviceProperties.major < 1 || (deviceProperties.major == 1 && deviceProperties.minor < 3)) {
printf("Device %d does not have double precision support.\n", device);
exit(EXIT_FAILURE);
}
PlanType *p = &userParams.plan[device];
p->device = device;
// Initialize stream handle and event object for the current device
checkCudaErrors(cudaStreamCreate(&p->stream));
checkCudaErrors(cudaEventCreate(&p->event));
// Divide the work between GPUs equally
p->nsim = NSIM / GPU_N;
if (device < (NSIM % GPU_N)) p->nsim++;
p->gridSize = p->nsim / THREAD_N;
// Aim to launch around ten to twenty times as many blocks as there
// are multiprocessors on the target device.
// read more on grid-stride loops: https://devblogs.nvidia.com/cuda-pro-tip-write-flexible-kernels-grid-stride-loops/
while (p->gridSize > 20 * deviceProperties.multiProcessorCount) p->gridSize >>= 1;
printf("GPU Device #%i: %s\n", p->device, deviceProperties.name);
printf("Simulation paths: %i\n", p->nsim);
printf("Grid size: %i\n", p->gridSize);
// Allocate intermediate memory for MC results
// Each thread block will produce four double4 results
checkCudaErrors(cudaMallocHost(&p->h_moments,p->gridSize*4*sizeof(double4)));
checkCudaErrors(cudaMalloc(&p->d_moments, p->gridSize*4*sizeof(double4)));
// Allocate memory for RNG states
checkCudaErrors(cudaMalloc(&p->d_rngStates1, p->gridSize * THREAD_N * sizeof(curandState)));
checkCudaErrors(cudaMalloc(&p->d_rngStates2, p->gridSize * THREAD_N * sizeof(curandStatePhilox4_32_10)));
// Initialise RNG states: each block on each device gets a distinct seed, and each thread a distinct subsequence
rngSetupStates<<<p->gridSize, THREAD_N>>>(p->d_rngStates1, p->d_rngStates2, p->device);
getLastCudaError("rngSetupStates kernel failed.\n");
checkCudaErrors(cudaDeviceSynchronize());
}
// Target moments for USA: 0.7,0.23,17.8,0.46,11.55,0.54,0.71,0.86
// Target moments for Canada: 0.760,0.217,13.377,0.437,8.782,0.51,0.68,0.85
userParams.targets[0] = make_double4(NAN, 0.760, NAN, NAN); // LogY: Mean,Var,Skew,Kurt
userParams.targets[1] = make_double4(NAN, 0.217, NAN, 13.377); // D1LogY: Mean,Var,Skew,Kurt
userParams.targets[2] = make_double4(NAN, 0.437, NAN, 8.782); // D5LogY: Mean,Var,Skew,Kurt
userParams.targets[3] = make_double4(NAN, 0.51, 0.68, 0.85); // FracD1: <5%,<10%,<20%,<50%
long int n=6, mv=8, npt=2*n+1, maxfun=500*(n+1), iprint=1;
double v_err[8], rhobeg=5.0, rhoend=1e-4, *w;
double xmax[6] = {2,2,2,2,1,1}, xmin[6] = {0};
// double x[6] = {0.0972241396763905, 0.014312611368279, 1.60304896242711, 0.892309166034993, 0.947420941274568, 0.00117609031021279};
double x[6] = {.08,.007,1.6,1.6,.7,.01};
// double x[6] = {0.0611244618471226,0.000613274511999765,1.46320215181056,1.999691573564,0.224227629475885,0.0018853181294203};
int wsize = (npt+11)*(npt+n)+n*(5*n+11)/2+mv*(npt+n*(n+7)/2+7);
checkCudaErrors(cudaMallocHost(&w,wsize*sizeof(double)));
for (int i = 0; i<6; i++)
x[i] = -log(xmax[i] / (x[i] - xmin[i]) - 1); // invlogistic
//Start the timer
StopWatchInterface *hTimer = NULL;
sdkCreateTimer(&hTimer);
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
newuoa_h(n, npt, dfovec, &userParams, x, rhobeg, rhoend, iprint, maxfun, w, mv);
dfovec(n,mv,x,v_err,&userParams);
//Stop the timer
sdkStopTimer(&hTimer);
float time = sdkGetTimerValue(&hTimer)/userParams.nf;
sdkDeleteTimer(&hTimer);
double obj = 0;
for (int i=0; i<mv; i++)
obj += v_err[i]*v_err[i];
for (int i=0; i<6; i++)
x[i] = xmin[i]+xmax[i]/(1+exp(-x[i])); // logistic
printf("\nTime per function evaluation (ms.): %f\n", time);
printf("\nFinal objective function value: %.15g\n",obj);//sqrt(obj*2/7));
printf("\nThe returned solution is:\n");
printf(" lambda: %.15g %.15g\n",x[0],x[1]);
printf(" sigma: %.15g %.15g\n",x[2],x[3]);
printf(" delta: %.15g %.15g\n",x[4],x[5]);
printf("\n Moment: Target:\tModel:\n");
printf(" MeanLogY %.15g\t%.15g\n",userParams.targets[0].x,userParams.moments[0].x);
printf(" VarLogY %.15g\t%.15g\n",userParams.targets[0].y,userParams.moments[0].y);
printf(" SkewLogY %.15g\t%.15g\n",userParams.targets[0].z,userParams.moments[0].z);
printf(" KurtLogY %.15g\t%.15g\n",userParams.targets[0].w,userParams.moments[0].w);
printf(" MeanD1LogY %.15g\t%.15g\n",userParams.targets[1].x,userParams.moments[1].x);
printf(" VarD1LogY %.15g\t%.15g\n",userParams.targets[1].y,userParams.moments[1].y);
printf(" SkewD1LogY %.15g\t%.15g\n",userParams.targets[1].z,userParams.moments[1].z);
printf(" KurtD1LogY %.15g\t%.15g\n",userParams.targets[1].w,userParams.moments[1].w);
printf(" MeanD5LogY %.15g\t%.15g\n",userParams.targets[2].x,userParams.moments[2].x);
printf(" VarD5LogY %.15g\t%.15g\n",userParams.targets[2].y,userParams.moments[2].y);
printf(" SkewD5LogY %.15g\t%.15g\n",userParams.targets[2].z,userParams.moments[2].z);
printf(" KurtD5LogY %.15g\t%.15g\n",userParams.targets[2].w,userParams.moments[2].w);
printf(" FracD1Less5 %.15g\t%.15g\n",userParams.targets[3].x,userParams.moments[3].x);
printf(" FracD1Less10 %.15g\t%.15g\n",userParams.targets[3].y,userParams.moments[3].y);
printf(" FracD1Less20 %.15g\t%.15g\n",userParams.targets[3].z,userParams.moments[3].z);
printf(" FracD1Less50 %.15g\t%.15g\n",userParams.targets[3].w,userParams.moments[3].w);
// Cleanup
for (int device=0; device<GPU_N; device++) {
PlanType *p = &userParams.plan[device];
checkCudaErrors(cudaSetDevice(p->device));
checkCudaErrors(cudaStreamDestroy(p->stream));
checkCudaErrors(cudaEventDestroy(p->event));
checkCudaErrors(cudaFreeHost(p->h_moments));
checkCudaErrors(cudaFree(p->d_moments));
checkCudaErrors(cudaFree(p->d_rngStates1));
checkCudaErrors(cudaFree(p->d_rngStates2));
}
checkCudaErrors(cudaFreeHost(w));
delete[] userParams.plan;
return(0);
}
|
bdd654dfdc9c846fc2b56cbbe69515ba83717ce8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../TFCudaCommon.h"
__global__
void computeVoxelIdxKernel(
float *pts,
unsigned int *voxel_idxs,
int pt_num,
int pt_stride,
float voxel_len,
float min_x,
float min_y,
float min_z
)
{
int pt_index = threadIdx.y + blockIdx.y*blockDim.y;
if(pt_index>=pt_num)
return;
float x=pts[pt_index*pt_stride];
float y=pts[pt_index*pt_stride+1];
float z=pts[pt_index*pt_stride+2];
voxel_idxs[pt_index*3] = floor((x-min_x)/voxel_len);
voxel_idxs[pt_index*3+1] = floor((y-min_y)/voxel_len);
voxel_idxs[pt_index*3+2] = floor((z-min_z)/voxel_len);
}
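// Example with illustrative values: for (min_x, min_y, min_z) = (0, 0, 0) and voxel_len = 0.05,
// a point at (0.12, 0.26, 0.031) is assigned voxel index (2, 5, 0). Coordinates below the given
// minimum would wrap around in the unsigned index, so the minima are expected to bound the point
// cloud from below.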
void computeVoxelIdxImpl(
float* pts,
unsigned int* voxel_idxs,
int pt_num,
int pt_stride,
float voxel_len,
float min_x,
float min_y,
float min_z
)
{
int block_num=pt_num/1024;
if(pt_num%1024>0) block_num++;
dim3 block_dim(1,block_num);
dim3 thread_dim(1,1024);
hipLaunchKernelGGL(( computeVoxelIdxKernel), dim3(block_dim),dim3(thread_dim), 0, 0,
pts,voxel_idxs,pt_num,pt_stride,voxel_len,min_x,min_y,min_z
);
gpuErrchk(hipGetLastError())
} | bdd654dfdc9c846fc2b56cbbe69515ba83717ce8.cu | #include "../TFCudaCommon.h"
__global__
void computeVoxelIdxKernel(
float *pts,
unsigned int *voxel_idxs,
int pt_num,
int pt_stride,
float voxel_len,
float min_x,
float min_y,
float min_z
)
{
int pt_index = threadIdx.y + blockIdx.y*blockDim.y;
if(pt_index>=pt_num)
return;
float x=pts[pt_index*pt_stride];
float y=pts[pt_index*pt_stride+1];
float z=pts[pt_index*pt_stride+2];
voxel_idxs[pt_index*3] = floor((x-min_x)/voxel_len);
voxel_idxs[pt_index*3+1] = floor((y-min_y)/voxel_len);
voxel_idxs[pt_index*3+2] = floor((z-min_z)/voxel_len);
}
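// Example with illustrative values: min = (0, 0, 0), voxel_len = 0.05, point (0.12, 0.26, 0.031)
// maps to voxel index (2, 5, 0); the minima must bound the point cloud from below to keep the
// unsigned indices from wrapping.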
void computeVoxelIdxImpl(
float* pts,
unsigned int* voxel_idxs,
int pt_num,
int pt_stride,
float voxel_len,
float min_x,
float min_y,
float min_z
)
{
int block_num=pt_num/1024;
if(pt_num%1024>0) block_num++;
dim3 block_dim(1,block_num);
dim3 thread_dim(1,1024);
computeVoxelIdxKernel<<<block_dim,thread_dim>>>(
pts,voxel_idxs,pt_num,pt_stride,voxel_len,min_x,min_y,min_z
);
gpuErrchk(cudaGetLastError())
} |
5c10b745c0bca4814487cad7c559fcecce7354b4.hip | // !!! This is a file automatically generated by hipify!!!
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected])
//
#include <rocblas.h>
#include <hip/hip_runtime.h>
#include <cusolverDn.h>
#include <exceptions/cuda_exception.h>
#include <helpers/PointersManager.h>
#include <helpers/ShapeUtils.h>
#include <helpers/svd.h>
#include <system/op_boilerplate.h>
namespace sd {
namespace ops {
namespace helpers {
// FIXME -> we should optimize these helpers for the case when input matrices have c order (perform transpositions
// appropriately)
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
static void svdQR(sd::LaunchContext* context, const NDArray* A, NDArray* S, NDArray* U, NDArray* VT, const bool fullUV,
const bool calcUV) {
// since the cuda api hipsolverDnDgesvd/hipsolverDnSgesvd has the following constraint on input matrix A: A_rows >= A_columns &&
// A_order = 'f', we make this function deal with 2 valid cases only: 1) A_rows >= A_columns and A_corder = 'f'
// 2) A_rows <= A_columns and A_corder = 'c' - in this case perform transposition to get f order
// if 1) or 2) are not met then throw exception
// A [m, n]
// S [n]
// U [m, m] or [m, n] if fullUV = false and m > n
// VT [n, n] or [m, n] if fullUV = false and m < n
if (A->rankOf() != 2) THROW_EXCEPTION("svdQR: rank of A array is not equal 2 !");
auto m = A->sizeAt(0);
auto n = A->sizeAt(1);
const int minDim = m < n ? m : n;
const char orderA = A->ordering();
if (m < n) THROW_EXCEPTION("svdQR: due to cuda api input constrains given shape of A array are not valid !");
if (std::vector<sd::LongType>({minDim}) != S->getShapeAsVector())
THROW_EXCEPTION("svdQR: wrong shape of S array !");
if (calcUV) {
if (fullUV && std::vector<sd::LongType>({m, m}) != U->getShapeAsVector()) {
THROW_EXCEPTION("svdQR: wrong shape of U array !");
} else if (!fullUV && std::vector<sd::LongType>({m, minDim}) != U->getShapeAsVector()) {
THROW_EXCEPTION("svdQR: wrong shape of U array !");
}
if (fullUV && std::vector<sd::LongType>({n, n}) != VT->getShapeAsVector()) {
THROW_EXCEPTION("svdQR: wrong shape of VT array !");
}
else if (!fullUV && std::vector<sd::LongType>({minDim, n}) != VT->getShapeAsVector()) {
THROW_EXCEPTION("svdQR: wrong shape of VT array !");
}
}
NDArray* pA = const_cast<NDArray*>(A);
NDArray* pS = S;
NDArray* pU = U;
NDArray* pVT = VT;
std::vector<NDArray*> toDelete;
if (pA->ews() != 1 || pA->ordering() == 'c') {
pA = new NDArray(A->dup('f'));
toDelete.push_back(pA);
}
if (S->ews() != 1) {
pS = new NDArray(S->dup('f'));
toDelete.push_back(pS);
}
if (calcUV) {
if (pU->ews() != 1 || pU->ordering() == 'c') {
pU = new NDArray(U->dup('f'));
toDelete.push_back(pU);
}
if (pVT->ews() != 1 || pVT->ordering() == 'c') {
pVT = new NDArray(VT->dup('f'));
toDelete.push_back(pVT);
}
}
std::lock_guard<std::mutex> lock(*LaunchContext::deviceMutex());
// create cusolverDn handle
hipsolverDnHandle_t* handle = (hipsolverDnHandle_t*)context->getCusolverHandle(); // nullptr;
if (handle == nullptr) throw cuda_exception::build("svdQR: cuda failed !", -1);
// stream
auto status = hipsolverDnSetStream(*handle, *context->getCudaStream());
if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdQR: cuda failed !", status);
// query working space of SVD
int lwork = 0;
if (A->dataType() == DataType::DOUBLE)
status = hipsolverDnDgesvd_bufferSize(*handle, m, n, &lwork);
else if (A->dataType() == DataType::FLOAT32)
status = hipsolverDnSgesvd_bufferSize(*handle, m, n, &lwork);
else
THROW_EXCEPTION("svdQR: given data type is unsupported !");
if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdQR: cuda failed !", status);
// allocate memory for dWork
void* dWork = nullptr;
hipError_t status2 = hipMalloc((void**)&dWork, A->sizeOfT() * lwork);
if (status2 != hipSuccess) throw cuda_exception::build("svdQR: cuda failed !", status2);
signed char jobu, jobvt;
if (calcUV) {
if (fullUV)
jobu = jobvt = 'A';
else
jobu = jobvt = 'S';
} else {
jobu = jobvt = 'N';
}
int* devInfo = nullptr;
void* rWork = nullptr;
int lda(m), ldu, ldvt;
if (calcUV) {
ldu = pU->sizeAt(0);
ldvt = pVT->sizeAt(0);
}
PointersManager manager(context, "svdQR");
NDArray::prepareSpecialUse({pS, pU, pVT}, {pA});
// choose appropriate cuda gemm api depending on data types
if (A->dataType() == DataType::DOUBLE) {
status = hipsolverDnDgesvd(*handle, jobu, jobvt, m, n, reinterpret_cast<double*>(pA->specialBuffer()), lda,
reinterpret_cast<double*>(pS->specialBuffer()),
calcUV ? reinterpret_cast<double*>(pU->specialBuffer()) : nullptr, ldu,
calcUV ? reinterpret_cast<double*>(pVT->specialBuffer()) : nullptr, ldvt,
reinterpret_cast<double*>(dWork), lwork, reinterpret_cast<double*>(rWork), devInfo);
} else if (A->dataType() == DataType::FLOAT32) {
status = hipsolverDnSgesvd(*handle, jobu, jobvt, m, n, reinterpret_cast<float*>(pA->specialBuffer()), lda,
reinterpret_cast<float*>(pS->specialBuffer()),
calcUV ? reinterpret_cast<float*>(pU->specialBuffer()) : nullptr, ldu,
calcUV ? reinterpret_cast<float*>(pVT->specialBuffer()) : nullptr, ldvt,
reinterpret_cast<float*>(dWork), lwork, reinterpret_cast<float*>(rWork), devInfo);
} else
THROW_EXCEPTION("svdQR: given data type is unsupported !");
if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdQR: cuda failed !", status);
manager.synchronize();
NDArray::registerSpecialUse({pS, pU, pVT}, {pA});
S->assign(pS);
if (calcUV) {
U->assign(pU);
VT->assign(pVT);
}
for (int i = toDelete.size() - 1; i >= 0; --i) delete toDelete[i];
if (devInfo) hipFree(devInfo);
if (dWork) hipFree(dWork);
if (rWork) hipFree(rWork);
}
//////////////////////////////////////////////////////////////////////////
static void svdJcb(sd::LaunchContext* context, const NDArray* A, NDArray* S, NDArray* U, NDArray* V, const bool fullUV,
const bool calcUV) {
// A [m, n]
// S [n]
// U [m, m] or [m, n] if fullUV = false and m > n
// V [n, n] or [n, m] if fullUV = false and m < n
if (A->rankOf() != 2) THROW_EXCEPTION("svdJcb: rank of A array is not equal 2 !");
int m = A->sizeAt(0);
int n = A->sizeAt(1);
const int minDim = m < n ? m : n;
if (std::vector<sd::LongType>({minDim}) != S->getShapeAsVector()) THROW_EXCEPTION("svdJcb: wrong shape of S array !");
if (fullUV && U != nullptr && std::vector<sd::LongType>({m, m}) != U->getShapeAsVector()) {
THROW_EXCEPTION("svdJcb: wrong shape of U array !");
} else if (!fullUV && U != nullptr && std::vector<sd::LongType>({m, minDim}) != U->getShapeAsVector()) {
THROW_EXCEPTION("svdJcb: wrong shape of U array !");
}
if (fullUV && V != nullptr && std::vector<sd::LongType>({n, n}) != V->getShapeAsVector()) {
THROW_EXCEPTION("svdJcb: wrong shape of V array !");
} else if (!fullUV && V != nullptr && std::vector<sd::LongType>({n, minDim}) != V->getShapeAsVector()) {
THROW_EXCEPTION("svdJcb: wrong shape of V array !");
}
NDArray* pA = const_cast<NDArray*>(A);
const bool aForder = m == 1 || A->strideAt(0) == 1;
const bool aCorder = n == 1 || A->strideAt(1) == 1;
const bool transA = !aForder && aCorder;
const bool dupA = !aForder && !aCorder;
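  // If A is contiguous in row-major order only (aCorder && !aForder), the same buffer interpreted
  // column-major is A^T, so rather than copying, the code computes the SVD of A^T and swaps the
  // roles of U and V below (pU/pV); a Fortran-order copy is made only when A is contiguous in
  // neither order.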
std::vector<NDArray*> toDelete;
if (dupA) {
pA = new NDArray(A->dup('f'));
toDelete.push_back(pA);
}
NDArray* pS = S;
if (S->ews() != 1) {
pS = new NDArray(S->dup('f'));
toDelete.push_back(pS);
}
NDArray *pU(nullptr), *pV(nullptr);
int lda = transA ? pA->strideAt(0) : pA->strideAt(1);
int ldu(transA ? n : m), ldv(transA ? m : n);
bool uForder(true), vForder(true);
if (calcUV) {
pU = transA ? V : U;
pV = transA ? U : V;
uForder = pU->sizeAt(0) == 1 || pU->strideAt(0) == 1;
vForder = pV->sizeAt(0) == 1 || pV->strideAt(0) == 1;
if (!uForder) {
pU = new NDArray(pU->dup('f'));
toDelete.push_back(pU);
}
if (!vForder) {
pV = new NDArray(pV->dup('f'));
toDelete.push_back(pV);
}
ldu = pU->strideAt(1);
ldv = pV->strideAt(1);
}
std::lock_guard<std::mutex> lock(*LaunchContext::deviceMutex());
// create cusolverDn handle
hipsolverDnHandle_t* handle = (hipsolverDnHandle_t*)context->getCusolverHandle();
if (handle == nullptr) throw cuda_exception::build("svdJcb: cuda failed !", -1);
// stream
auto status = hipsolverDnSetStream(*handle, *context->getCudaStream());
if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdJcb: cuda failed !", status);
// set parameters
hipsolverGesvdjInfo_t gesvdjParams = nullptr;
status = hipsolverDnCreateGesvdjInfo(&gesvdjParams);
if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdJcb: cuda failed !", status);
status = hipsolverDnXgesvdjSetTolerance(gesvdjParams, 1.e-7); // tolerance
if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdJcb: cuda failed !", status);
status = hipsolverDnXgesvdjSetMaxSweeps(gesvdjParams, 15); // max_sweeps
if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdJcb: cuda failed !", status);
int* devInfo = nullptr;
const hipsolverEigMode_t jobz = calcUV ? HIPSOLVER_EIG_MODE_VECTOR : HIPSOLVER_EIG_MODE_NOVECTOR;
const int econ = !fullUV;
if (transA) math::sd_swap<int>(m, n);
// *** avoid bug in cuda API ***
void* nullPtr = nullptr;
NDArray* arrToAvoidBugInAPI = nullptr;
if (!calcUV && m != n) {
int maxDim = m > n ? m : n;
arrToAvoidBugInAPI = new NDArray('c', {maxDim, maxDim}, pA->dataType(), context);
nullPtr = arrToAvoidBugInAPI->specialBuffer();
}
// ******************
NDArray::prepareSpecialUse({pS, pU, pV}, {pA});
// query working space of SVD
int lwork = 0;
if (A->dataType() == DataType::DOUBLE)
status = hipsolverDnDgesvdj_bufferSize(
*handle, jobz, econ, m, n, reinterpret_cast<double*>(pA->specialBuffer()), lda,
reinterpret_cast<double*>(pS->specialBuffer()),
calcUV ? reinterpret_cast<double*>(pU->specialBuffer()) : reinterpret_cast<double*>(nullPtr), ldu,
calcUV ? reinterpret_cast<double*>(pV->specialBuffer()) : reinterpret_cast<double*>(nullPtr), ldv, &lwork,
gesvdjParams);
else if (A->dataType() == DataType::FLOAT32)
status = hipsolverDnSgesvdj_bufferSize(
*handle, jobz, econ, m, n, reinterpret_cast<float*>(pA->specialBuffer()), lda,
reinterpret_cast<float*>(pS->specialBuffer()),
calcUV ? reinterpret_cast<float*>(pU->specialBuffer()) : reinterpret_cast<float*>(nullPtr), ldu,
calcUV ? reinterpret_cast<float*>(pV->specialBuffer()) : reinterpret_cast<float*>(nullPtr), ldv, &lwork,
gesvdjParams);
else
THROW_EXCEPTION("svdJcb: given data type is unsupported !");
if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdJcb: cuda failed !", status);
// allocate memory dWork
void* dWork = nullptr;
auto status2 = hipMalloc((void**)&dWork, A->sizeOfT() * lwork);
if (status2 != hipSuccess) throw cuda_exception::build("svdJcb: cuda failed !", status2);
PointersManager manager(context, "svdJcb");
// choose appropriate cuda gemm api depending on data types
if (A->dataType() == DataType::DOUBLE) {
status = hipsolverDnDgesvdj(
*handle, jobz, econ, m, n, reinterpret_cast<double*>(pA->specialBuffer()), lda,
reinterpret_cast<double*>(pS->specialBuffer()),
calcUV ? reinterpret_cast<double*>(pU->specialBuffer()) : reinterpret_cast<double*>(nullPtr), ldu,
calcUV ? reinterpret_cast<double*>(pV->specialBuffer()) : reinterpret_cast<double*>(nullPtr), ldv,
reinterpret_cast<double*>(dWork), lwork, devInfo, gesvdjParams);
} else if (A->dataType() == DataType::FLOAT32) {
status = hipsolverDnSgesvdj(
*handle, jobz, econ, m, n, reinterpret_cast<float*>(pA->specialBuffer()), lda,
reinterpret_cast<float*>(pS->specialBuffer()),
calcUV ? reinterpret_cast<float*>(pU->specialBuffer()) : reinterpret_cast<float*>(nullPtr), ldu,
calcUV ? reinterpret_cast<float*>(pV->specialBuffer()) : reinterpret_cast<float*>(nullPtr), ldv,
reinterpret_cast<float*>(dWork), lwork, devInfo, gesvdjParams);
} else
THROW_EXCEPTION("svdJcb: given data type is unsupported !");
if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdJcb: cuda failed !", status);
manager.synchronize();
NDArray::registerSpecialUse({pS, pU, pV}, {pA});
if (S->ews() != 1) S->assign(pS);
if (calcUV) {
if (!uForder) U->assign(transA ? pV : pU);
if (!vForder) V->assign(transA ? pU : pV);
}
if (!calcUV && m != n) delete arrToAvoidBugInAPI;
for (int i = toDelete.size() - 1; i >= 0; --i) delete toDelete[i];
if (devInfo) hipFree(devInfo);
if (dWork) hipFree(dWork);
if (gesvdjParams) hipsolverDnDestroyGesvdjInfo(gesvdjParams);
}
//////////////////////////////////////////////////////////////////////////
static void svdBatched(sd::LaunchContext* context, const NDArray* A, NDArray* S, NDArray* U, NDArray* V,
const bool fullUV, const bool calcUV) {
// A [..., m, n]
// S [..., n]
// U [..., m, m] or [..., m, n] if fullUV = false and m > n
// V [..., n, n] or [..., n, m] if fullUV = false and m < n
auto m = A->sizeAt(-2);
auto n = A->sizeAt(-1);
const int minDim = m < n ? m : n;
const sd::LongType bS = A->lengthOf() / (m * n);
if (m > 32 || n > 32) THROW_EXCEPTION("svdBatched: numbers of rows and columns should be <= 32 !");
if (minDim != S->sizeAt(-1)) THROW_EXCEPTION("svdBatched: wrong shape of S array !");
if (calcUV) {
if (U->sizeAt(-2) != m) THROW_EXCEPTION("svdBatched: wrong shape of U array !");
if (U->sizeAt(-1) != (fullUV ? m : minDim)) THROW_EXCEPTION("svdBatched: wrong shape of U array !");
if (U->lengthOf() / (U->sizeAt(-2) * U->sizeAt(-1)) != bS)
THROW_EXCEPTION("svdBatched: wrong shape of U array !");
if (V->sizeAt(-2) != n) THROW_EXCEPTION("svdBatched: wrong shape of V array !");
if (V->sizeAt(-1) != (fullUV ? n : minDim)) THROW_EXCEPTION("svdBatched: wrong shape of V array !");
if (V->lengthOf() / (V->sizeAt(-2) * V->sizeAt(-1)) != bS)
THROW_EXCEPTION("svdBatched: wrong shape of V array !");
}
NDArray* pA = const_cast<NDArray*>(A);
NDArray* pS = S;
NDArray* pU = U;
NDArray* pV = V;
std::vector<NDArray*> toDelete;
if (pA->ews() != 1 || pA->ordering() == 'c') {
pA = new NDArray(A->dup('f'));
toDelete.push_back(pA);
}
if (S->ews() != 1) {
pS = new NDArray(S->dup('f'));
toDelete.push_back(pS);
}
if (calcUV) {
if (pU->ews() != 1 || pU->ordering() == 'c') {
pU = new NDArray(U->dup('f'));
toDelete.push_back(pU);
}
if (pV->ews() != 1 || pV->ordering() == 'c') {
pV = new NDArray(V->dup('f'));
toDelete.push_back(pV);
}
}
// create cusolverDn handle
hipsolverDnHandle_t handle = nullptr;
cusolverStatus_t status = hipsolverDnCreate(&handle);
if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdBatched: cuda failed !", status);
// stream
status = hipsolverDnSetStream(handle, *context->getCudaStream());
if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdBatched: cuda failed !", status);
// set parameters
hipsolverGesvdjInfo_t gesvdjParams = nullptr;
status = hipsolverDnCreateGesvdjInfo(&gesvdjParams);
if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdBatched: cuda failed !", status);
status = hipsolverDnXgesvdjSetTolerance(gesvdjParams, 1.e-7); // tolerance
if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdBatched: cuda failed !", status);
status = hipsolverDnXgesvdjSetMaxSweeps(gesvdjParams, 15); // max_sweeps
if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdBatched: cuda failed !", status);
// devInfo
int* devInfo = nullptr;
auto status2 = hipMalloc((void**)&devInfo, sizeof(sd::LongType) * bS);
if (status2 != hipSuccess) throw cuda_exception::build("svdBatched: cuda failed !", status2);
status2 = hipDeviceSynchronize();
if (status2 != hipSuccess) throw cuda_exception::build("svdJcb: cuda failed !", status2);
const hipsolverEigMode_t jobz = calcUV ? HIPSOLVER_EIG_MODE_VECTOR : HIPSOLVER_EIG_MODE_NOVECTOR;
int lda(m), ldu, ldv;
if (calcUV) {
ldu = pU->sizeAt(-2);
ldv = pV->sizeAt(-2);
}
// Ak (i,j) = A[i + 5*j + 25*k]
// query working space of SVD
int lwork = 0;
if (A->dataType() == DataType::DOUBLE)
status = hipsolverDnDgesvdjBatched_bufferSize(handle, jobz, m, n, reinterpret_cast<double*>(pA->specialBuffer()),
lda, reinterpret_cast<double*>(pS->specialBuffer()),
calcUV ? reinterpret_cast<double*>(pU->specialBuffer()) : nullptr, ldu,
calcUV ? reinterpret_cast<double*>(pV->specialBuffer()) : nullptr, ldv,
&lwork, gesvdjParams, bS);
else if (A->dataType() == DataType::FLOAT32)
status = hipsolverDnSgesvdjBatched_bufferSize(
handle, jobz, m, n, reinterpret_cast<float*>(pA->specialBuffer()), lda,
reinterpret_cast<float*>(pS->specialBuffer()), calcUV ? reinterpret_cast<float*>(pU->specialBuffer()) : nullptr,
ldu, calcUV ? reinterpret_cast<float*>(pV->specialBuffer()) : nullptr, ldv, &lwork, gesvdjParams, bS);
else
THROW_EXCEPTION("svdBatched: given data type is unsupported !");
if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdBatched: cuda failed !", status);
// allocate memory dWork
void* dWork = nullptr;
status2 = hipMalloc((void**)&dWork, A->sizeOfT() * lwork);
if (status2 != hipSuccess) throw cuda_exception::build("svdBatched: cuda failed !", status2);
status2 = hipDeviceSynchronize();
if (status2 != hipSuccess) throw cuda_exception::build("svdBatched: cuda failed !", status2);
PointersManager manager(context, "svdBatched");
NDArray::prepareSpecialUse({pS, pU, pV}, {pA});
// choose appropriate cuda gemm api depending on data types
if (A->dataType() == DataType::DOUBLE) {
status = hipsolverDnDgesvdjBatched(handle, jobz, m, n, reinterpret_cast<double*>(pA->specialBuffer()), lda,
reinterpret_cast<double*>(pS->specialBuffer()),
calcUV ? reinterpret_cast<double*>(pU->specialBuffer()) : nullptr, ldu,
calcUV ? reinterpret_cast<double*>(pV->specialBuffer()) : nullptr, ldv,
reinterpret_cast<double*>(dWork), lwork, devInfo, gesvdjParams, bS);
} else if (A->dataType() == DataType::FLOAT32) {
status = hipsolverDnSgesvdjBatched(handle, jobz, m, n, reinterpret_cast<float*>(pA->specialBuffer()), lda,
reinterpret_cast<float*>(pS->specialBuffer()),
calcUV ? reinterpret_cast<float*>(pU->specialBuffer()) : nullptr, ldu,
calcUV ? reinterpret_cast<float*>(pV->specialBuffer()) : nullptr, ldv,
reinterpret_cast<float*>(dWork), lwork, devInfo, gesvdjParams, bS);
} else
THROW_EXCEPTION("svdBatched: given data type is unsupported !");
if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdBatched: cuda failed !", status);
manager.synchronize();
NDArray::registerSpecialUse({pS, pU, pV}, {pA});
S->assign(pS);
if (calcUV) {
U->assign(pU);
V->assign(pV);
}
for (int i = toDelete.size() - 1; i >= 0; --i) delete toDelete[i];
if (devInfo) hipFree(devInfo);
if (dWork) hipFree(dWork);
if (handle) hipsolverDnDestroy(handle);
if (gesvdjParams) hipsolverDnDestroyGesvdjInfo(gesvdjParams);
}
////////////////////////////////////////////////////////////////////
void svd(sd::LaunchContext* context, const NDArray* x, const std::vector<NDArray*>& outArrs, const bool fullUV,
const bool calcUV, const int switchNum) {
NDArray* S = outArrs[0];
NDArray* U = outArrs[1];
NDArray* V = outArrs[2];
NDArray::prepareSpecialUse({S, U, V}, {x});
if (x->rankOf() == 2) {
svdJcb(context, x, S, U, V, fullUV, calcUV);
} else {
ResultSet *tadsU(nullptr), *tadsV(nullptr);
auto tadsX = x->allTensorsAlongDimension({x->rankOf() - 2, x->rankOf() - 1});
auto tadsS = S->allTensorsAlongDimension({S->rankOf() - 1});
if (calcUV) {
tadsU = new ResultSet(U->allTensorsAlongDimension({U->rankOf() - 2, U->rankOf() - 1}));
tadsV = new ResultSet(V->allTensorsAlongDimension({V->rankOf() - 2, V->rankOf() - 1}));
}
for (int i = 0; i < tadsX.size(); ++i)
svdJcb(context, tadsX.at(i), tadsS.at(i), calcUV ? tadsU->at(i) : nullptr, calcUV ? tadsV->at(i) : nullptr,
fullUV, calcUV);
if (calcUV) {
delete tadsU;
delete tadsV;
}
}
NDArray::registerSpecialUse({S, U, V}, {x});
}
} // namespace helpers
} // namespace ops
} // namespace sd
| 5c10b745c0bca4814487cad7c559fcecce7354b4.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected])
//
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <cusolverDn.h>
#include <exceptions/cuda_exception.h>
#include <helpers/PointersManager.h>
#include <helpers/ShapeUtils.h>
#include <helpers/svd.h>
#include <system/op_boilerplate.h>
namespace sd {
namespace ops {
namespace helpers {
// FIXME -> we should optimize these helpers for the case when input matrices have c order (perform transpositions
// appropriately)
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
static void svdQR(sd::LaunchContext* context, const NDArray* A, NDArray* S, NDArray* U, NDArray* VT, const bool fullUV,
const bool calcUV) {
// since the cuda api cusolverDnDgesvd/cusolverDnSgesvd has the following constraint on input matrix A: A_rows >= A_columns &&
// A_order = 'f', we make this function deal with 2 valid cases only: 1) A_rows >= A_columns and A_corder = 'f'
// 2) A_rows <= A_columns and A_corder = 'c' - in this case perform transposition to get f order
// if 1) or 2) are not met then throw exception
// A [m, n]
// S [n]
// U [m, m] or [m, n] if fullUV = false and m > n
// VT [n, n] or [m, n] if fullUV = false and m < n
if (A->rankOf() != 2) THROW_EXCEPTION("svdQR: rank of A array is not equal 2 !");
auto m = A->sizeAt(0);
auto n = A->sizeAt(1);
const int minDim = m < n ? m : n;
const char orderA = A->ordering();
if (m < n) THROW_EXCEPTION("svdQR: due to cuda api input constrains given shape of A array are not valid !");
if (std::vector<sd::LongType>({minDim}) != S->getShapeAsVector())
THROW_EXCEPTION("svdQR: wrong shape of S array !");
if (calcUV) {
if (fullUV && std::vector<sd::LongType>({m, m}) != U->getShapeAsVector()) {
THROW_EXCEPTION("svdQR: wrong shape of U array !");
} else if (!fullUV && std::vector<sd::LongType>({m, minDim}) != U->getShapeAsVector()) {
THROW_EXCEPTION("svdQR: wrong shape of U array !");
}
if (fullUV && std::vector<sd::LongType>({n, n}) != VT->getShapeAsVector()) {
THROW_EXCEPTION("svdQR: wrong shape of VT array !");
}
else if (!fullUV && std::vector<sd::LongType>({minDim, n}) != VT->getShapeAsVector()) {
THROW_EXCEPTION("svdQR: wrong shape of VT array !");
}
}
NDArray* pA = const_cast<NDArray*>(A);
NDArray* pS = S;
NDArray* pU = U;
NDArray* pVT = VT;
std::vector<NDArray*> toDelete;
if (pA->ews() != 1 || pA->ordering() == 'c') {
pA = new NDArray(A->dup('f'));
toDelete.push_back(pA);
}
if (S->ews() != 1) {
pS = new NDArray(S->dup('f'));
toDelete.push_back(pS);
}
if (calcUV) {
if (pU->ews() != 1 || pU->ordering() == 'c') {
pU = new NDArray(U->dup('f'));
toDelete.push_back(pU);
}
if (pVT->ews() != 1 || pVT->ordering() == 'c') {
pVT = new NDArray(VT->dup('f'));
toDelete.push_back(pVT);
}
}
std::lock_guard<std::mutex> lock(*LaunchContext::deviceMutex());
// create cusolverDn handle
cusolverDnHandle_t* handle = (cusolverDnHandle_t*)context->getCusolverHandle(); // nullptr;
if (handle == nullptr) throw cuda_exception::build("svdQR: cuda failed !", -1);
// stream
auto status = cusolverDnSetStream(*handle, *context->getCudaStream());
if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdQR: cuda failed !", status);
// query working space of SVD
int lwork = 0;
if (A->dataType() == DataType::DOUBLE)
status = cusolverDnDgesvd_bufferSize(*handle, m, n, &lwork);
else if (A->dataType() == DataType::FLOAT32)
status = cusolverDnSgesvd_bufferSize(*handle, m, n, &lwork);
else
THROW_EXCEPTION("svdQR: given data type is unsupported !");
if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdQR: cuda failed !", status);
// allocate memory for dWork
void* dWork = nullptr;
cudaError_t status2 = cudaMalloc((void**)&dWork, A->sizeOfT() * lwork);
if (status2 != cudaSuccess) throw cuda_exception::build("svdQR: cuda failed !", status2);
signed char jobu, jobvt;
if (calcUV) {
if (fullUV)
jobu = jobvt = 'A';
else
jobu = jobvt = 'S';
} else {
jobu = jobvt = 'N';
}
int* devInfo = nullptr;
void* rWork = nullptr;
int lda(m), ldu, ldvt;
if (calcUV) {
ldu = pU->sizeAt(0);
ldvt = pVT->sizeAt(0);
}
PointersManager manager(context, "svdQR");
NDArray::prepareSpecialUse({pS, pU, pVT}, {pA});
// choose appropriate cuda gemm api depending on data types
if (A->dataType() == DataType::DOUBLE) {
status = cusolverDnDgesvd(*handle, jobu, jobvt, m, n, reinterpret_cast<double*>(pA->specialBuffer()), lda,
reinterpret_cast<double*>(pS->specialBuffer()),
calcUV ? reinterpret_cast<double*>(pU->specialBuffer()) : nullptr, ldu,
calcUV ? reinterpret_cast<double*>(pVT->specialBuffer()) : nullptr, ldvt,
reinterpret_cast<double*>(dWork), lwork, reinterpret_cast<double*>(rWork), devInfo);
} else if (A->dataType() == DataType::FLOAT32) {
status = cusolverDnSgesvd(*handle, jobu, jobvt, m, n, reinterpret_cast<float*>(pA->specialBuffer()), lda,
reinterpret_cast<float*>(pS->specialBuffer()),
calcUV ? reinterpret_cast<float*>(pU->specialBuffer()) : nullptr, ldu,
calcUV ? reinterpret_cast<float*>(pVT->specialBuffer()) : nullptr, ldvt,
reinterpret_cast<float*>(dWork), lwork, reinterpret_cast<float*>(rWork), devInfo);
} else
THROW_EXCEPTION("svdQR: given data type is unsupported !");
if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdQR: cuda failed !", status);
manager.synchronize();
NDArray::registerSpecialUse({pS, pU, pVT}, {pA});
S->assign(pS);
if (calcUV) {
U->assign(pU);
VT->assign(pVT);
}
for (int i = toDelete.size() - 1; i >= 0; --i) delete toDelete[i];
if (devInfo) cudaFree(devInfo);
if (dWork) cudaFree(dWork);
if (rWork) cudaFree(rWork);
}
//////////////////////////////////////////////////////////////////////////
static void svdJcb(sd::LaunchContext* context, const NDArray* A, NDArray* S, NDArray* U, NDArray* V, const bool fullUV,
const bool calcUV) {
// A [m, n]
// S [n]
// U [m, m] or [m, n] if fullUV = false and m > n
// V [n, n] or [n, m] if fullUV = false and m < n
if (A->rankOf() != 2) THROW_EXCEPTION("svdJcb: rank of A array is not equal 2 !");
int m = A->sizeAt(0);
int n = A->sizeAt(1);
const int minDim = m < n ? m : n;
if (std::vector<sd::LongType>({minDim}) != S->getShapeAsVector()) THROW_EXCEPTION("svdJcb: wrong shape of S array !");
if (fullUV && U != nullptr && std::vector<sd::LongType>({m, m}) != U->getShapeAsVector()) {
THROW_EXCEPTION("svdJcb: wrong shape of U array !");
} else if (!fullUV && U != nullptr && std::vector<sd::LongType>({m, minDim}) != U->getShapeAsVector()) {
THROW_EXCEPTION("svdJcb: wrong shape of U array !");
}
if (fullUV && V != nullptr && std::vector<sd::LongType>({n, n}) != V->getShapeAsVector()) {
THROW_EXCEPTION("svdJcb: wrong shape of V array !");
} else if (!fullUV && V != nullptr && std::vector<sd::LongType>({n, minDim}) != V->getShapeAsVector()) {
THROW_EXCEPTION("svdJcb: wrong shape of V array !");
}
NDArray* pA = const_cast<NDArray*>(A);
const bool aForder = m == 1 || A->strideAt(0) == 1;
const bool aCorder = n == 1 || A->strideAt(1) == 1;
const bool transA = !aForder && aCorder;
const bool dupA = !aForder && !aCorder;
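// Layout handling: a column-major ('f') A is used as is; a row-major ('c') A is treated as its transpose
// in column-major form (transA); if neither stride pattern is contiguous, A is duplicated into 'f' order (dupA).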
std::vector<NDArray*> toDelete;
if (dupA) {
pA = new NDArray(A->dup('f'));
toDelete.push_back(pA);
}
NDArray* pS = S;
if (S->ews() != 1) {
pS = new NDArray(S->dup('f'));
toDelete.push_back(pS);
}
NDArray *pU(nullptr), *pV(nullptr);
int lda = transA ? pA->strideAt(0) : pA->strideAt(1);
int ldu(transA ? n : m), ldv(transA ? m : n);
bool uForder(true), vForder(true);
if (calcUV) {
pU = transA ? V : U;
pV = transA ? U : V;
uForder = pU->sizeAt(0) == 1 || pU->strideAt(0) == 1;
vForder = pV->sizeAt(0) == 1 || pV->strideAt(0) == 1;
if (!uForder) {
pU = new NDArray(pU->dup('f'));
toDelete.push_back(pU);
}
if (!vForder) {
pV = new NDArray(pV->dup('f'));
toDelete.push_back(pV);
}
ldu = pU->strideAt(1);
ldv = pV->strideAt(1);
}
std::lock_guard<std::mutex> lock(*LaunchContext::deviceMutex());
// create cusolverDn handle
cusolverDnHandle_t* handle = (cusolverDnHandle_t*)context->getCusolverHandle();
if (handle == nullptr) throw cuda_exception::build("svdJcb: cuda failed !", -1);
// stream
auto status = cusolverDnSetStream(*handle, *context->getCudaStream());
if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdJcb: cuda failed !", status);
// set parameters
gesvdjInfo_t gesvdjParams = nullptr;
status = cusolverDnCreateGesvdjInfo(&gesvdjParams);
if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdJcb: cuda failed !", status);
status = cusolverDnXgesvdjSetTolerance(gesvdjParams, 1.e-7); // tolerance
if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdJcb: cuda failed !", status);
status = cusolverDnXgesvdjSetMaxSweeps(gesvdjParams, 15); // max_sweeps
if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdJcb: cuda failed !", status);
int* devInfo = nullptr;
const cusolverEigMode_t jobz = calcUV ? CUSOLVER_EIG_MODE_VECTOR : CUSOLVER_EIG_MODE_NOVECTOR;
const int econ = !fullUV;
if (transA) math::sd_swap<int>(m, n);
// *** avoid bug in cuda API ***
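// (the dummy square array below is passed in place of real null pointers because the gesvdj calls
// appear to touch the U/V buffers even in NOVECTOR mode when m != n)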
void* nullPtr = nullptr;
NDArray* arrToAvoidBugInAPI = nullptr;
if (!calcUV && m != n) {
int maxDim = m > n ? m : n;
arrToAvoidBugInAPI = new NDArray('c', {maxDim, maxDim}, pA->dataType(), context);
nullPtr = arrToAvoidBugInAPI->specialBuffer();
}
// ******************
NDArray::prepareSpecialUse({pS, pU, pV}, {pA});
// query working space of SVD
int lwork = 0;
if (A->dataType() == DataType::DOUBLE)
status = cusolverDnDgesvdj_bufferSize(
*handle, jobz, econ, m, n, reinterpret_cast<double*>(pA->specialBuffer()), lda,
reinterpret_cast<double*>(pS->specialBuffer()),
calcUV ? reinterpret_cast<double*>(pU->specialBuffer()) : reinterpret_cast<double*>(nullPtr), ldu,
calcUV ? reinterpret_cast<double*>(pV->specialBuffer()) : reinterpret_cast<double*>(nullPtr), ldv, &lwork,
gesvdjParams);
else if (A->dataType() == DataType::FLOAT32)
status = cusolverDnSgesvdj_bufferSize(
*handle, jobz, econ, m, n, reinterpret_cast<float*>(pA->specialBuffer()), lda,
reinterpret_cast<float*>(pS->specialBuffer()),
calcUV ? reinterpret_cast<float*>(pU->specialBuffer()) : reinterpret_cast<float*>(nullPtr), ldu,
calcUV ? reinterpret_cast<float*>(pV->specialBuffer()) : reinterpret_cast<float*>(nullPtr), ldv, &lwork,
gesvdjParams);
else
THROW_EXCEPTION("svdJcb: given data type is unsupported !");
if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdJcb: cuda failed !", status);
// allocate memory dWork
void* dWork = nullptr;
auto status2 = cudaMalloc((void**)&dWork, A->sizeOfT() * lwork);
if (status2 != cudaSuccess) throw cuda_exception::build("svdJcb: cuda failed !", status2);
PointersManager manager(context, "svdJcb");
// choose appropriate cuda gemm api depending on data types
if (A->dataType() == DataType::DOUBLE) {
status = cusolverDnDgesvdj(
*handle, jobz, econ, m, n, reinterpret_cast<double*>(pA->specialBuffer()), lda,
reinterpret_cast<double*>(pS->specialBuffer()),
calcUV ? reinterpret_cast<double*>(pU->specialBuffer()) : reinterpret_cast<double*>(nullPtr), ldu,
calcUV ? reinterpret_cast<double*>(pV->specialBuffer()) : reinterpret_cast<double*>(nullPtr), ldv,
reinterpret_cast<double*>(dWork), lwork, devInfo, gesvdjParams);
} else if (A->dataType() == DataType::FLOAT32) {
status = cusolverDnSgesvdj(
*handle, jobz, econ, m, n, reinterpret_cast<float*>(pA->specialBuffer()), lda,
reinterpret_cast<float*>(pS->specialBuffer()),
calcUV ? reinterpret_cast<float*>(pU->specialBuffer()) : reinterpret_cast<float*>(nullPtr), ldu,
calcUV ? reinterpret_cast<float*>(pV->specialBuffer()) : reinterpret_cast<float*>(nullPtr), ldv,
reinterpret_cast<float*>(dWork), lwork, devInfo, gesvdjParams);
} else
THROW_EXCEPTION("svdJcb: given data type is unsupported !");
if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdJcb: cuda failed !", status);
manager.synchronize();
NDArray::registerSpecialUse({pS, pU, pV}, {pA});
if (S->ews() != 1) S->assign(pS);
if (calcUV) {
if (!uForder) U->assign(transA ? pV : pU);
if (!vForder) V->assign(transA ? pU : pV);
}
if (!calcUV && m != n) delete arrToAvoidBugInAPI;
for (int i = toDelete.size() - 1; i >= 0; --i) delete toDelete[i];
if (devInfo) cudaFree(devInfo);
if (dWork) cudaFree(dWork);
if (gesvdjParams) cusolverDnDestroyGesvdjInfo(gesvdjParams);
}
//////////////////////////////////////////////////////////////////////////
static void svdBatched(sd::LaunchContext* context, const NDArray* A, NDArray* S, NDArray* U, NDArray* V,
const bool fullUV, const bool calcUV) {
// A [..., m, n]
// S [..., n]
// U [..., m, m] or [..., m, n] if fullUV = false and m > n
// V [..., n, n] or [..., n, m] if fullUV = false and m < n
auto m = A->sizeAt(-2);
auto n = A->sizeAt(-1);
const int minDim = m < n ? m : n;
const sd::LongType bS = A->lengthOf() / (m * n);
if (m > 32 || n > 32) THROW_EXCEPTION("svdBatched: numbers of rows and columns should be <= 32 !");
if (minDim != S->sizeAt(-1)) THROW_EXCEPTION("svdBatched: wrong shape of S array !");
if (calcUV) {
if (U->sizeAt(-2) != m) THROW_EXCEPTION("svdBatched: wrong shape of U array !");
if (U->sizeAt(-1) != (fullUV ? m : minDim)) THROW_EXCEPTION("svdBatched: wrong shape of U array !");
if (U->lengthOf() / (U->sizeAt(-2) * U->sizeAt(-1)) != bS)
THROW_EXCEPTION("svdBatched: wrong shape of U array !");
if (V->sizeAt(-2) != n) THROW_EXCEPTION("svdBatched: wrong shape of V array !");
if (V->sizeAt(-1) != (fullUV ? n : minDim)) THROW_EXCEPTION("svdBatched: wrong shape of V array !");
if (V->lengthOf() / (V->sizeAt(-2) * V->sizeAt(-1)) != bS)
THROW_EXCEPTION("svdBatched: wrong shape of V array !");
}
NDArray* pA = const_cast<NDArray*>(A);
NDArray* pS = S;
NDArray* pU = U;
NDArray* pV = V;
std::vector<NDArray*> toDelete;
if (pA->ews() != 1 || pA->ordering() == 'c') {
pA = new NDArray(A->dup('f'));
toDelete.push_back(pA);
}
if (S->ews() != 1) {
pS = new NDArray(S->dup('f'));
toDelete.push_back(pS);
}
if (calcUV) {
if (pU->ews() != 1 || pU->ordering() == 'c') {
pU = new NDArray(U->dup('f'));
toDelete.push_back(pU);
}
if (pV->ews() != 1 || pV->ordering() == 'c') {
pV = new NDArray(V->dup('f'));
toDelete.push_back(pV);
}
}
// create cusolverDn handle
cusolverDnHandle_t handle = nullptr;
cusolverStatus_t status = cusolverDnCreate(&handle);
if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdBatched: cuda failed !", status);
// stream
status = cusolverDnSetStream(handle, *context->getCudaStream());
if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdBatched: cuda failed !", status);
// set parameters
gesvdjInfo_t gesvdjParams = nullptr;
status = cusolverDnCreateGesvdjInfo(&gesvdjParams);
if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdBatched: cuda failed !", status);
status = cusolverDnXgesvdjSetTolerance(gesvdjParams, 1.e-7); // tolerance
if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdBatched: cuda failed !", status);
status = cusolverDnXgesvdjSetMaxSweeps(gesvdjParams, 15); // max_sweeps
if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdBatched: cuda failed !", status);
// devInfo
int* devInfo = nullptr;
auto status2 = cudaMalloc((void**)&devInfo, sizeof(sd::LongType) * bS);
if (status2 != cudaSuccess) throw cuda_exception::build("svdBatched: cuda failed !", status2);
status2 = cudaDeviceSynchronize();
if (status2 != cudaSuccess) throw cuda_exception::build("svdBatched: cuda failed !", status2);
const cusolverEigMode_t jobz = calcUV ? CUSOLVER_EIG_MODE_VECTOR : CUSOLVER_EIG_MODE_NOVECTOR;
int lda(m), ldu, ldv;
if (calcUV) {
ldu = pU->sizeAt(-2);
ldv = pV->sizeAt(-2);
}
// batched column-major storage: Ak(i, j) = A[i + lda*j + lda*n*k]
// query working space of SVD
int lwork = 0;
if (A->dataType() == DataType::DOUBLE)
status = cusolverDnDgesvdjBatched_bufferSize(handle, jobz, m, n, reinterpret_cast<double*>(pA->specialBuffer()),
lda, reinterpret_cast<double*>(pS->specialBuffer()),
calcUV ? reinterpret_cast<double*>(pU->specialBuffer()) : nullptr, ldu,
calcUV ? reinterpret_cast<double*>(pV->specialBuffer()) : nullptr, ldv,
&lwork, gesvdjParams, bS);
else if (A->dataType() == DataType::FLOAT32)
status = cusolverDnSgesvdjBatched_bufferSize(
handle, jobz, m, n, reinterpret_cast<float*>(pA->specialBuffer()), lda,
reinterpret_cast<float*>(pS->specialBuffer()), calcUV ? reinterpret_cast<float*>(pU->specialBuffer()) : nullptr,
ldu, calcUV ? reinterpret_cast<float*>(pV->specialBuffer()) : nullptr, ldv, &lwork, gesvdjParams, bS);
else
THROW_EXCEPTION("svdBatched: given data type is unsupported !");
if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdBatched: cuda failed !", status);
// allocate memory dWork
void* dWork = nullptr;
status2 = cudaMalloc((void**)&dWork, A->sizeOfT() * lwork);
if (status2 != cudaSuccess) throw cuda_exception::build("svdBatched: cuda failed !", status2);
status2 = cudaDeviceSynchronize();
if (status2 != cudaSuccess) throw cuda_exception::build("svdBatched: cuda failed !", status2);
PointersManager manager(context, "svdBatched");
NDArray::prepareSpecialUse({pS, pU, pV}, {pA});
// choose appropriate cuda gemm api depending on data types
if (A->dataType() == DataType::DOUBLE) {
status = cusolverDnDgesvdjBatched(handle, jobz, m, n, reinterpret_cast<double*>(pA->specialBuffer()), lda,
reinterpret_cast<double*>(pS->specialBuffer()),
calcUV ? reinterpret_cast<double*>(pU->specialBuffer()) : nullptr, ldu,
calcUV ? reinterpret_cast<double*>(pV->specialBuffer()) : nullptr, ldv,
reinterpret_cast<double*>(dWork), lwork, devInfo, gesvdjParams, bS);
} else if (A->dataType() == DataType::FLOAT32) {
status = cusolverDnSgesvdjBatched(handle, jobz, m, n, reinterpret_cast<float*>(pA->specialBuffer()), lda,
reinterpret_cast<float*>(pS->specialBuffer()),
calcUV ? reinterpret_cast<float*>(pU->specialBuffer()) : nullptr, ldu,
calcUV ? reinterpret_cast<float*>(pV->specialBuffer()) : nullptr, ldv,
reinterpret_cast<float*>(dWork), lwork, devInfo, gesvdjParams, bS);
} else
THROW_EXCEPTION("svdBatched: given data type is unsupported !");
if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdBatched: cuda failed !", status);
manager.synchronize();
NDArray::registerSpecialUse({pS, pU, pV}, {pA});
S->assign(pS);
if (calcUV) {
U->assign(pU);
V->assign(pV);
}
for (int i = toDelete.size() - 1; i >= 0; --i) delete toDelete[i];
if (devInfo) cudaFree(devInfo);
if (dWork) cudaFree(dWork);
if (handle) cusolverDnDestroy(handle);
if (gesvdjParams) cusolverDnDestroyGesvdjInfo(gesvdjParams);
}
////////////////////////////////////////////////////////////////////
void svd(sd::LaunchContext* context, const NDArray* x, const std::vector<NDArray*>& outArrs, const bool fullUV,
const bool calcUV, const int switchNum) {
NDArray* S = outArrs[0];
NDArray* U = outArrs[1];
NDArray* V = outArrs[2];
NDArray::prepareSpecialUse({S, U, V}, {x});
if (x->rankOf() == 2) {
svdJcb(context, x, S, U, V, fullUV, calcUV);
} else {
ResultSet *tadsU(nullptr), *tadsV(nullptr);
auto tadsX = x->allTensorsAlongDimension({x->rankOf() - 2, x->rankOf() - 1});
auto tadsS = S->allTensorsAlongDimension({S->rankOf() - 1});
if (calcUV) {
tadsU = new ResultSet(U->allTensorsAlongDimension({U->rankOf() - 2, U->rankOf() - 1}));
tadsV = new ResultSet(V->allTensorsAlongDimension({V->rankOf() - 2, V->rankOf() - 1}));
}
for (int i = 0; i < tadsX.size(); ++i)
svdJcb(context, tadsX.at(i), tadsS.at(i), calcUV ? tadsU->at(i) : nullptr, calcUV ? tadsV->at(i) : nullptr,
fullUV, calcUV);
if (calcUV) {
delete tadsU;
delete tadsV;
}
}
NDArray::registerSpecialUse({S, U, V}, {x});
}
} // namespace helpers
} // namespace ops
} // namespace sd
|
6f775995f7fd20aced72ac04e2253426fcf60d8f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// The translation unit for reduction `variance`
#include <cudf/detail/reduction_functions.hpp>
#include "compound.cuh"
// @param[in] ddof Delta Degrees of Freedom used for `std`, `var`.
// The divisor used in calculations is N - ddof, where N
// represents the number of elements.
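// For example, with ddof = 1 (sample variance) the divisor is N - 1; with ddof = 0 it is N.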
std::unique_ptr<cudf::scalar> cudf::reduction::variance(column_view const& col,
cudf::data_type const output_dtype,
cudf::size_type ddof,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
// TODO: add cuda version check when the fix is available
#if !defined(__CUDACC_DEBUG__)
using reducer = cudf::reduction::compound::element_type_dispatcher<cudf::reduction::op::variance>;
return cudf::type_dispatcher(col.type(), reducer(), col, output_dtype, ddof, mr, stream);
#else
// workaround for bug 200529165, which causes a compilation error only in device
// debug builds; the bug will be fixed in CUDA 10.2
CUDF_FAIL("var/std reductions are not supported at debug build.");
#endif
}
| 6f775995f7fd20aced72ac04e2253426fcf60d8f.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// The translation unit for reduction `variance`
#include <cudf/detail/reduction_functions.hpp>
#include "compound.cuh"
// @param[in] ddof Delta Degrees of Freedom used for `std`, `var`.
// The divisor used in calculations is N - ddof, where N
// represents the number of elements.
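// For example, with ddof = 1 (sample variance) the divisor is N - 1; with ddof = 0 it is N.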
std::unique_ptr<cudf::scalar> cudf::reduction::variance(column_view const& col,
cudf::data_type const output_dtype,
cudf::size_type ddof,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
// TODO: add cuda version check when the fix is available
#if !defined(__CUDACC_DEBUG__)
using reducer = cudf::reduction::compound::element_type_dispatcher<cudf::reduction::op::variance>;
return cudf::type_dispatcher(col.type(), reducer(), col, output_dtype, ddof, mr, stream);
#else
// workaround for bug 200529165, which causes a compilation error only in device
// debug builds; the bug will be fixed in CUDA 10.2
CUDF_FAIL("var/std reductions are not supported at debug build.");
#endif
}
|
b8442bd9c02c02ea0057a67928c66c8e11b4f9bd.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
int main(void){
} | b8442bd9c02c02ea0057a67928c66c8e11b4f9bd.cu | #include <cuda.h>
int main(void){
} |
7eac58eda745cd36b2d77d23137bfe95e2026151.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**********************************************************************************
Filename : cuda_ProcKernels.cu
Authors : Jing Xu, Kevin Wong, Yifan Jian, Marinko Sarunic
Published : January 6th, 2014
Copyright (C) 2014 Biomedical Optics Research Group - Simon Fraser University
This software contains source code provided by NVIDIA Corporation.
This file is part of an Open Source software package. Details of this software have been described
in the papers titled:
"Jing Xu, Kevin Wong, Yifan Jian, and Marinko V. Sarunic.
'Real-time acquisition and display of flow contrast with speckle variance OCT using GPU'
In press (JBO)
and
"Jian, Yifan, Kevin Wong, and Marinko V. Sarunic. 'GPU accelerated OCT processing at
megahertz axial scan rate and high resolution video rate volumetric rendering.'
In SPIE BiOS, pp. 85710Z-85710Z. International Society for Optics and Photonics, 2013."
Please refer to these papers for further information about this software. Redistribution
and modification of this code is restricted to academic purposes ONLY, provided that
the following conditions are met:
- Redistribution of this code must retain the above copyright notice, this list of
conditions and the following disclaimer
- Any use, disclosure, reproduction, or redistribution of this software outside of
academic purposes is strictly prohibited
*DISCLAIMER*
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT OWNERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are
those of the authors and should not be interpreted as representing official
policies, either expressed or implied.
**********************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include "cuda_ProcHeader.cuh"
typedef float2 Complex;
/******** DEVICE FUNCTIONS **********/
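// Standard complex multiplication: (a + b*i) * (c + d*i) = (a*c - b*d) + (a*d + b*c)*i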
__device__ Complex ComplexMul(Complex srcA, Complex srcB)
{
Complex output;
output.x = srcA.x * srcB.x - srcA.y * srcB.y;
output.y = srcA.x * srcB.y + srcA.y * srcB.x;
return output;
}
__device__ float complexAbs(Complex input)
{
float output;
output = sqrt( pow(input.x, 2) + pow(input.y, 2) );
return output;
}
/******** GLOBAL FUNCTIONS **********/
////This kernel subtracts the averaged DC line from the raw data and zero-pads it into a complex array
__global__ void subDC_PadComplex( unsigned short *Src,
Complex *DstComplex,
float *dcArray,
int width,
int fftWidth)
{
//get total number of threads and current thread number
//blockDim and gridDim are 1 dimensional vectors (y dim = 1)
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int dstIdx = int(idx/width)*fftWidth + idx%width;
int dcIdx = idx%width;
// This 'short' cast is NOT Necessary.
// But some data may be saved as short data, and therefore short allows for both unsigned and signed to work.
DstComplex[dstIdx].x = (float)(unsigned short)(Src[idx]) - dcArray[dcIdx];
//DstComplex[dstIdx].x = (float)Src[idx] - dcArray[dcIdx];
DstComplex[dstIdx].y = 0;
//The following loop performs zero padding
//The if-statement must be added to avoid unnecessary for-loop reads
//if width=fftwidth, such as the case when padding is not required
//In fact, the following for loop can be commented out for the
//case where padding is not required, which can help save a bit of time
//The advantage of having the following is that it becomes dynamic
//when dealing with fftwidth>width
if (fftWidth>width) {
int newDstIdx = dstIdx+width;
DstComplex[newDstIdx].x = 0;
DstComplex[newDstIdx].y = 0;
}
}
//This is the DC Acquisition Kernel
//Takes the average of many A-scans to obtain a general averaged DC line
__global__ void dcAcquireKernel ( unsigned short *Src, float *Dst,
int width,
int imageheight)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
Dst[idx] = 0;
//Sum up all columns of accross Ascans
for (unsigned int n=0; n<imageheight; n++)
Dst[idx] += (float)( unsigned short)(Src[idx + n*width]);
Dst[idx] /= (float)imageheight;
}
//DownSizing post fft kernel
__global__ void downsizeMLS(float *floatArray, Complex *complexArray, int width,
int height, int fftWidth, float minVal, float maxVal,
float coeff, int frameIdx, int reduction)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
//Calculating the indices beforehand is much faster than calculating these indices during the function,
//Therefore it would make sense to do all of it here first before mapping the values
int newWidth = width/reduction;
int newHeight = height/reduction;
int newFftWidth = fftWidth/reduction;
int cmplxIdx = idx%newWidth + int(idx/newWidth)*newFftWidth;
int buffFrameNum = cmplxIdx/(newFftWidth*newHeight);
int rowNum = (cmplxIdx%(newFftWidth*newHeight))/newFftWidth;
int rowIdx = (cmplxIdx%(newFftWidth*newHeight))%newFftWidth;
int mapFloatIdx = frameIdx*newWidth*newHeight + idx;
int mapCmpIdx = buffFrameNum*(fftWidth*height) + (rowNum*fftWidth + rowIdx)*reduction;
floatArray[mapFloatIdx] =
__saturatef( (logf( (complexAbs(complexArray[mapCmpIdx])+ 1)) - minVal)*coeff);
}
//Crop Post FFT Method
//ALS = Absolute, Log, and Scaling
//This method of post FFT crops out a certain portion of the data, and copies into buffer
//As opposed to the other method which downsizes the whole volume
__global__ void cropMLS(float *floatArray, Complex *complexArray, int width,
int height, int fftWidth, float minVal, float maxVal,
float coeff, int frameIdx, int offset, int range)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
//Calculating the indices beforehand is much faster than calculating these indices during the function,
//Therefore it would make sense to do all of it here first before mapping the values
int mapFloatIdx = frameIdx*range*height + idx;
int mapCmpIdx = int(idx/range)*fftWidth + idx%range + offset;
floatArray[mapFloatIdx] =
__saturatef(
(logf((complexAbs(complexArray[mapCmpIdx])+ 1)) - minVal)*coeff
);
}
__global__ void copySingleFrameFloat(float *Src, float *Dst, int frameNum, int frameSize)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
Dst[idx] = Src[frameSize*frameNum + idx];
}
__global__ void avgKernel(float *src_Buffer, float *dst_Buffer, int frameNum, int frames, int frameSize)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
float tempVal = 0; // per-thread accumulator (a single __shared__ scalar here would race between threads)
for(int i=0; i<frames; i++)
tempVal += src_Buffer[(frameNum+i)*frameSize + idx];
dst_Buffer[idx] = tempVal/frames;
}
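// Warp-synchronous reduction: the last 32 partial sums are combined without __syncthreads(),
// relying on implicit warp lockstep plus 'volatile'; on architectures with independent thread
// scheduling (Volta and newer) this pattern would normally need __syncwarp() or __shfl_down_sync().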
__device__ void warpReduce(volatile float *sdata, unsigned int tid, int blockSize)
{
if (blockSize >= 64) sdata[tid] += sdata[tid + 32];
if (blockSize >= 32) sdata[tid] += sdata[tid + 16];
if (blockSize >= 16) sdata[tid] += sdata[tid + 8];
if (blockSize >= 8) sdata[tid] += sdata[tid + 4];
if (blockSize >= 4) sdata[tid] += sdata[tid + 2];
if (blockSize >= 2) sdata[tid] += sdata[tid + 1];
}
__global__ void renderFundus(float *g_idata, float *g_odata, unsigned int width, float scaleCoeff, int inputOffset, int outputOffset, int funoff, int funwid)
{
//The declaration for 1024 elements is arbitrary
//As long as this is larger than blockSize, it is fine
__shared__ float sdata[1024];
int blockSize = blockDim.x;
// each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
int rowIdx = blockIdx.x + inputOffset;
int outputRowIdx = blockIdx.x + outputOffset;
sdata[tid] = 0;
for (int j=0; (tid+j)<funwid; j+=blockDim.x)
sdata[tid] += g_idata[rowIdx*width + tid + j+funoff];
sdata[tid]=sdata[tid]/width;
__syncthreads();
if (blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); }
if (tid < 32) warpReduce(sdata, tid, blockSize);
if (tid == 0) g_odata[outputRowIdx] = sdata[0]*scaleCoeff; //Equivalent to 7.0f/1024.0f, multiplication much faster than division
}
__global__ void syncKernel()
{
//This Kernel is Purposely Empty
//By calling a non-streamed empty kernel, the whole system will be synchronized together
//This Kernel will NOT affect any CPU threads, therefore it should not pose any problems
}
__global__ void Variance(float *src_Buffer, float *dst_Buffer, float *dst_svBuffer, int numF, int frameNum, int frameSize, float coefficient)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
int zIdx = idx/(frameSize); //0-9
int inFrameIdx = idx%(frameSize);
float tempVal;
tempVal = 0;
for (int i=0;i<numF;i++)
tempVal += src_Buffer[(zIdx*numF + frameNum+i)*frameSize + inFrameIdx]; //zIdx*numF = 0:3:6:9:12...:27
float mean = tempVal/numF;
float var = 0.0;
for (int i=0;i<numF;i++)
var += pow(src_Buffer[(zIdx*numF + frameNum+i)*frameSize + inFrameIdx]-mean,2);
tempVal = var/numF*coefficient; //'coefficient' is a scaling factor chosen for display purposes
src_Buffer[(zIdx*numF + frameNum)*frameSize + inFrameIdx] = tempVal;
}
__global__ void avgKernel2(float *src_Buffer, float *dst_Buffer, int frameNum, int frames, int frameSize)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
int zIdx = idx/(frameSize); //0-9
int inFrameIdx = idx%(frameSize);
float tempVal;
tempVal = 0;
if (zIdx <8 && zIdx >0){
for(int i=-1; i<frames-1; i++)
tempVal += src_Buffer[(zIdx*frames+ frameNum+i*3)*frameSize + inFrameIdx];
src_Buffer[(zIdx*frames + frameNum)*frameSize + inFrameIdx] = tempVal/frames;
}
else if (zIdx <1)
{
for(int i=0; i<frames; i++)
tempVal += src_Buffer[(zIdx*frames+ frameNum+i*3)*frameSize + inFrameIdx];
src_Buffer[(zIdx*frames + frameNum)*frameSize + inFrameIdx] = tempVal/frames;
}
else
{
for(int i=-2; i<1; i++)
tempVal += src_Buffer[(zIdx*frames+ frameNum+i*3)*frameSize + inFrameIdx];
src_Buffer[(zIdx*frames + frameNum)*frameSize + inFrameIdx] = tempVal/frames;
}
}
//registration section//
__global__ void complexMulConj(Complex *Src, Complex *Dst, int frameNum, int frames, int width, int height, int subPixelFactor)
{
int frameSize = width*height;
int idx = threadIdx.x + blockIdx.x*blockDim.x;
int zIdx = idx/(frameSize);
int inFrameIdx = idx%(frameSize);
Complex temp;
int a = (zIdx*frames+frameNum)*frameSize;
int b = (zIdx*frames+frameNum+1)*frameSize;
temp.x = Src[a + inFrameIdx].x * Src[b+inFrameIdx].x - Src[a+inFrameIdx].y * (-1)*Src[b+inFrameIdx].y;
temp.y = Src[a + inFrameIdx].x * Src[b+inFrameIdx].y*(-1) + Src[a + inFrameIdx].y * Src[b+inFrameIdx].x;
int outFrameIdx = 0;
if (subPixelFactor == 1)
outFrameIdx = idx;
else
outFrameIdx = (inFrameIdx/width+height/2)* width*subPixelFactor +(inFrameIdx%width+width/2) +zIdx*frameSize*4;
Dst[outFrameIdx] = temp;
}
__global__ void batchComplexAbs( Complex *Src, float *Dst, int offset)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
Dst[offset + idx] = complexAbs(Src[idx]);
}
__global__ void copyToComplex( float *input, Complex *output)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
output[idx].x = input[idx];
output[idx].y = 0.0f;
}
__global__ void normData(Complex *input, float norm)
{ int idx = threadIdx.x + blockIdx.x*blockDim.x;
input[idx].x *= norm;
input[idx].y *= norm;
}
__device__ void MaxWarpReduce(volatile float *sdata, unsigned int tid, int blockSize,int* loc)
{
if (blockSize >= 64) {
if (sdata[tid] < sdata[tid +32]){
sdata[tid] = sdata[tid +32];
loc[tid] = loc[tid +32];
}
}
if (blockSize >= 32) {
if (sdata[tid] < sdata[tid +16]){
sdata[tid] = sdata[tid +16];
loc[tid] = loc[tid +16];
}
}
if (blockSize >= 16) {
if (sdata[tid] < sdata[tid +8]){
sdata[tid] = sdata[tid +8];
loc[tid] = loc[tid +8];
}
}
if (blockSize >= 8){
if (sdata[tid] < sdata[tid +4]){
sdata[tid] = sdata[tid +4];
loc[tid] = loc[tid +4];
}
}
if (blockSize >= 4) {
if (sdata[tid] < sdata[tid +2]){
sdata[tid] = sdata[tid +2];
loc[tid] = loc[tid +2];
}
}
if (blockSize >= 2) {
if (sdata[tid] < sdata[tid +1]){
sdata[tid] = sdata[tid +1];
loc[tid] = loc[tid +1];
}
}
}
__global__ void maxReductionBatch(float *g_idata, float *maxV, unsigned int width,int height, int* loc)
{
//The declaration for 1024 elements is arbitrary
//As long as this is larger than blockSize, it is fine
__shared__ float sdata[1024];
__shared__ int sloc[1024];
int blockSize = blockDim.x;
// each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
sdata[tid] = 0;
for (int j=tid; j<width; j+=blockSize){
if (sdata[tid] < g_idata[(blockIdx.x)*width + j]){
sdata[tid] = g_idata[(blockIdx.x)*width + j];
sloc[tid] = j;
}
}
__syncthreads();
if (blockSize >= 128) {
if (tid < 64) {
if (sdata[tid] < sdata[tid + 64]){
sdata[tid] = sdata[tid + 64];
sloc[tid] = sloc[tid + 64];
}
}
__syncthreads();
if (tid < 32) MaxWarpReduce(sdata, tid, blockSize,sloc);
if (tid == 0) {
maxV[blockIdx.x] = sdata[tid];
loc[blockIdx.x] = sloc[tid];
}
}
}
__global__ void computeShift(float *RegMaxV, int *RegLoc, int width,
int height,int offsetFrame,int framesPerBuffer, float *MaxV,float *diffphase, Complex *data, int *shift, int subPixelFactor)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
int zIdx = idx*height;
MaxV[idx] = RegMaxV[zIdx];
int hloc;
int wloc;
hloc = 0;
wloc = RegLoc[zIdx];
for (int j=1; j<height; j++){
if (MaxV[idx] < RegMaxV[zIdx+j]){
MaxV[idx] = RegMaxV[zIdx+j];
hloc = j;
wloc = RegLoc[zIdx+j];
}
}
int md2 = width/2;
int nd2 = height/2;
if (wloc > md2)
shift[idx] = wloc - width+1;
else
shift[idx] = wloc;
if (hloc > nd2)
shift[idx+framesPerBuffer/3] = hloc - height+1;
else
shift[idx+framesPerBuffer/3] = hloc;
shift[idx] /=subPixelFactor;
shift[idx+framesPerBuffer/3] /= subPixelFactor;
// diffphase ---> Global phase difference between the two images (should be zero if images are non-negative).
// diffphase[idx] = atan2(data[(idx*3 + offsetFrame)*width/subPixelFactor*height/subPixelFactor+ hloc/subPixelFactor*width/subPixelFactor +wloc/subPixelFactor].y,data[(idx*3 + offsetFrame)*width/subPixelFactor*height/subPixelFactor+hloc/subPixelFactor*width/subPixelFactor +wloc/subPixelFactor].x);
// For our OCT processing pipeline, the intensity of processed images are only from 0-1.
diffphase[idx] = 0;
}
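// getMeshgrid fills Nw/Nh with standard FFT frequency indices; e.g. for width = 8 each row of Nw
// becomes {0, 1, 2, 3, -4, -3, -2, -1}.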
__global__ void getMeshgrid( int *Nw, int *Nh, int width,int height)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
int dstIdx1 = int(idx/width);
int dstIdx2 = idx%width;
if (dstIdx2 < (width/2))
Nw[idx] = dstIdx2;
else
Nw[idx] = dstIdx2 - width;
if (dstIdx1 < (height/2))
Nh[idx] = dstIdx1;
else
Nh[idx] = dstIdx1 - height;
}
__global__ void ImagExpB (Complex *Src, int *Nw, int *Nh, int width, int height, int frameNum, int framesPerBuffer,int *shift,float *diffphase)
{
float theta;
Complex r;
Complex s;
int idx = threadIdx.x + blockIdx.x*blockDim.x;
int zIdx = idx/(width*height);
int InframeIdx = idx%(width*height);
theta = 2*_PI*((-1)*(float(shift[zIdx])*float(Nw[InframeIdx])/width + float(shift[zIdx+framesPerBuffer/3])*float(Nh[InframeIdx])/height));
r.x = cosf(theta);
r.y = sinf(theta);
s.x = cosf(diffphase[zIdx]);
s.y = sinf(diffphase[zIdx]);
Src[(zIdx*3+frameNum)*width*height+InframeIdx] = ComplexMul(Src[(zIdx*3+frameNum)*width*height+InframeIdx],ComplexMul(r,s));
}
__device__ void MIPwarpReduce(volatile float *sdata, unsigned int tid, int blockSize)
{
if (blockSize >= 64) {
if (sdata[tid] < sdata[tid +32]){
sdata[tid] = sdata[tid +32];
}
}
if (blockSize >= 32) {
if (sdata[tid] < sdata[tid +16]){
sdata[tid] = sdata[tid +16];
}
}
if (blockSize >= 16) {
if (sdata[tid] < sdata[tid +8]){
sdata[tid] = sdata[tid +8];
}
}
if (blockSize >= 8){
if (sdata[tid] < sdata[tid +4]){
sdata[tid] = sdata[tid +4];
}
}
if (blockSize >= 4) {
if (sdata[tid] < sdata[tid +2]){
sdata[tid] = sdata[tid +2];
}
}
if (blockSize >= 2) {
if (sdata[tid] < sdata[tid +1]){
sdata[tid] = sdata[tid +1];
}
}
}
__global__ void MIPrenderFundus(float *g_idata, float *g_odata, unsigned int width, float scaleCoeff, int inputOffset, int outputOffset, int funoff, int funwid)
{
//The declaration for 1024 elements is arbitrary
//As long as this is larger than blockSize, it is fine
__shared__ float sdata[1024];
int rowIdx = blockIdx.x + inputOffset;
int outputRowIdx = blockIdx.x + outputOffset;
// each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
sdata[tid] = 0;
for (int j=0; (tid+j)<funwid; j+=blockDim.x){
if (sdata[tid] < g_idata[rowIdx*width + tid + j+funoff]){
sdata[tid] = g_idata[rowIdx*width + tid + j+funoff];
}
}
__syncthreads();
if (blockDim.x >= 128) {
if (tid < 64) {
if (sdata[tid] < sdata[tid + 64]){
sdata[tid] = sdata[tid + 64];
}
}
__syncthreads();
if (tid < 32) MIPwarpReduce(sdata, tid, blockDim.x);
if (tid == 0) {
g_odata[outputRowIdx] = sdata[0]*scaleCoeff;
}
}
}
__global__ void MIPrenderFundusSV(float *g_idata, float *g_odata, unsigned int width, float scaleCoeff, int inputOffset, int outputOffset, int funoff, int funwid, int height)
{
//The declaration for 1024 elements is arbitrary
//As long as this is larger than blockSize, it is fine
__shared__ float sdata[1024];
int svZIdx = (blockIdx.x/height) * 3;
int svInFrameIdx = blockIdx.x%height;
int svBlockIdx = svZIdx*height + svInFrameIdx;
int rowIdx = svBlockIdx + inputOffset;
int outputRowIdx = blockIdx.x + outputOffset;
// each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
sdata[tid] = 0;
for (int j=0; (tid+j)<funwid; j+=blockDim.x){
if (sdata[tid] < g_idata[rowIdx*width + tid + j+funoff]){
sdata[tid] = g_idata[rowIdx*width + tid + j+funoff];
}
}
__syncthreads();
if (blockDim.x >= 128) {
if (tid < 64) {
if (sdata[tid] < sdata[tid + 64]){
sdata[tid] = sdata[tid + 64];
}
}
__syncthreads();
if (tid < 32) MIPwarpReduce(sdata, tid, blockDim.x);
if (tid == 0) {
g_odata[outputRowIdx] = sdata[0]*scaleCoeff;
}
}
}
__global__ void renderFundusSV(float *g_idata, float *g_odata, unsigned int width, float scaleCoeff, int inputOffset, int outputOffset, int funoff, int funwid, int height)
{
//The declaration for 1024 elements is arbitrary
//As long as this is larger than blockSize, it is fine
__shared__ float sdata[1024];
int blockSize = blockDim.x;
// each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
int svZIdx = (blockIdx.x/height) * 3;
int svInFrameIdx = blockIdx.x%height;
int svBlockIdx = svZIdx*height + svInFrameIdx;
int rowIdx = svBlockIdx + inputOffset;
int outputRowIdx = blockIdx.x + outputOffset;
sdata[tid] = 0;
for (int j=0; (tid+j)<funwid; j+=blockDim.x)
sdata[tid] += g_idata[rowIdx*width + tid + j+funoff];
sdata[tid]=sdata[tid]/width;
__syncthreads();
if (blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); }
if (tid < 32) warpReduce(sdata, tid, blockSize);
if (tid == 0) g_odata[outputRowIdx] = sdata[0]*scaleCoeff; //Equivalent to 7.0f/1024.0f, multiplication much faster than division
}
__global__ void setColor(float *g_odata,float *g_idata,int index)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
g_odata[idx*3+index] = g_idata[idx];
}
__global__ void fftshift2D( Complex *input, Complex *output, int width, int height)
{
int frameSize = width*height;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int zIdx = idx/(frameSize); //0-9
int inFrameIdx = idx%(frameSize);
int x1 = inFrameIdx/width;
int y1 = inFrameIdx%width;
int outIdx = ((y1+width/2)%width) + ((x1+height/2)%height)*width+ zIdx*frameSize;
output[outIdx] = input[idx];
}
| 7eac58eda745cd36b2d77d23137bfe95e2026151.cu | /**********************************************************************************
Filename : cuda_ProcKernels.cu
Authors : Jing Xu, Kevin Wong, Yifan Jian, Marinko Sarunic
Published : January 6th, 2014
Copyright (C) 2014 Biomedical Optics Research Group - Simon Fraser University
This software contains source code provided by NVIDIA Corporation.
This file is part of an Open Source software package. Details of this software have been described
in the papers titled:
"Jing Xu, Kevin Wong, Yifan Jian, and Marinko V. Sarunic.
'Real-time acquisition and display of flow contrast with speckle variance OCT using GPU'
In press (JBO)
and
"Jian, Yifan, Kevin Wong, and Marinko V. Sarunic. 'GPU accelerated OCT processing at
megahertz axial scan rate and high resolution video rate volumetric rendering.'
In SPIE BiOS, pp. 85710Z-85710Z. International Society for Optics and Photonics, 2013."
Please refer to these papers for further information about this software. Redistribution
and modification of this code is restricted to academic purposes ONLY, provided that
the following conditions are met:
- Redistribution of this code must retain the above copyright notice, this list of
conditions and the following disclaimer
- Any use, disclosure, reproduction, or redistribution of this software outside of
academic purposes is strictly prohibited
*DISCLAIMER*
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT OWNERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are
those of the authors and should not be interpreted as representing official
policies, either expressed or implied.
**********************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include "cuda_ProcHeader.cuh"
typedef float2 Complex;
/******** DEVICE FUNCTIONS **********/
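// Standard complex multiplication: (a + b*i) * (c + d*i) = (a*c - b*d) + (a*d + b*c)*i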
__device__ Complex ComplexMul(Complex srcA, Complex srcB)
{
Complex output;
output.x = srcA.x * srcB.x - srcA.y * srcB.y;
output.y = srcA.x * srcB.y + srcA.y * srcB.x;
return output;
}
__device__ float complexAbs(Complex input)
{
float output;
output = sqrt( pow(input.x, 2) + pow(input.y, 2) );
return output;
}
/******** GLOBAL FUNCTIONS **********/
////This kernel subtracts the averaged DC line from the raw data and zero-pads it into a complex array
__global__ void subDC_PadComplex( unsigned short *Src,
Complex *DstComplex,
float *dcArray,
int width,
int fftWidth)
{
//get total number of threads and current thread number
//blockDim and gridDim are 1 dimensional vectors (y dim = 1)
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int dstIdx = int(idx/width)*fftWidth + idx%width;
int dcIdx = idx%width;
// This 'short' cast is NOT Necessary.
// But some data may be saved as short data, and therefore short allows for both unsigned and signed to work.
DstComplex[dstIdx].x = (float)(unsigned short)(Src[idx]) - dcArray[dcIdx];
//DstComplex[dstIdx].x = (float)Src[idx] - dcArray[dcIdx];
DstComplex[dstIdx].y = 0;
//The following loop performs zero padding
//The if-statement must be added to avoid unecessary for-loop reads
//if width=fftwidth, such as the case when padding is not required
//In fact, the following for loop can be commented out for the
//case where padding is not required, which can help save a bit of time
//The advantage of having the following is that it becomes dynamic
//when dealing with fftwidth>width
if (fftWidth>width) {
int newDstIdx = dstIdx+width;
DstComplex[newDstIdx].x = 0;
DstComplex[newDstIdx].y = 0;
}
}
//This is the DC Acquisition Kernel
//Takes the average of many A-scans to obtain a general averaged DC line
__global__ void dcAcquireKernel ( unsigned short *Src, float *Dst,
int width,
int imageheight)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
Dst[idx] = 0;
//Sum up all columns of accross Ascans
for (unsigned int n=0; n<imageheight; n++)
Dst[idx] += (float)( unsigned short)(Src[idx + n*width]);
Dst[idx] /= (float)imageheight;
}
//DownSizing post fft kernel
__global__ void downsizeMLS(float *floatArray, Complex *complexArray, int width,
int height, int fftWidth, float minVal, float maxVal,
float coeff, int frameIdx, int reduction)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
//Calculating the indices beforehand is much faster than calculating these indices during the function,
//Therefore it would make sense to do all of it here first before mapping the values
int newWidth = width/reduction;
int newHeight = height/reduction;
int newFftWidth = fftWidth/reduction;
int cmplxIdx = idx%newWidth + int(idx/newWidth)*newFftWidth;
int buffFrameNum = cmplxIdx/(newFftWidth*newHeight);
int rowNum = (cmplxIdx%(newFftWidth*newHeight))/newFftWidth;
int rowIdx = (cmplxIdx%(newFftWidth*newHeight))%newFftWidth;
int mapFloatIdx = frameIdx*newWidth*newHeight + idx;
int mapCmpIdx = buffFrameNum*(fftWidth*height) + (rowNum*fftWidth + rowIdx)*reduction;
floatArray[mapFloatIdx] =
__saturatef( (logf( (complexAbs(complexArray[mapCmpIdx])+ 1)) - minVal)*coeff);
}
//Crop Post FFT Method
//ALS = Absolute, Log, and Scaling
//This method of post FFT crops out a certain portion of the data, and copies into buffer
//As opposed to the other method which downsizes the whole volume
__global__ void cropMLS(float *floatArray, Complex *complexArray, int width,
int height, int fftWidth, float minVal, float maxVal,
float coeff, int frameIdx, int offset, int range)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
//Calculating the indices beforehand is much faster than calculating these indices during the function,
//Therefore it would make sense to do all of it here first before mapping the values
int mapFloatIdx = frameIdx*range*height + idx;
int mapCmpIdx = int(idx/range)*fftWidth + idx%range + offset;
floatArray[mapFloatIdx] =
__saturatef(
(logf((complexAbs(complexArray[mapCmpIdx])+ 1)) - minVal)*coeff
);
}
__global__ void copySingleFrameFloat(float *Src, float *Dst, int frameNum, int frameSize)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
Dst[idx] = Src[frameSize*frameNum + idx];
}
__global__ void avgKernel(float *src_Buffer, float *dst_Buffer, int frameNum, int frames, int frameSize)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
float tempVal = 0; // per-thread accumulator (a single __shared__ scalar here would race between threads)
for(int i=0; i<frames; i++)
tempVal += src_Buffer[(frameNum+i)*frameSize + idx];
dst_Buffer[idx] = tempVal/frames;
}
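// Warp-synchronous reduction: the last 32 partial sums are combined without __syncthreads(),
// relying on implicit warp lockstep plus 'volatile'; on architectures with independent thread
// scheduling (Volta and newer) this pattern would normally need __syncwarp() or __shfl_down_sync().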
__device__ void warpReduce(volatile float *sdata, unsigned int tid, int blockSize)
{
if (blockSize >= 64) sdata[tid] += sdata[tid + 32];
if (blockSize >= 32) sdata[tid] += sdata[tid + 16];
if (blockSize >= 16) sdata[tid] += sdata[tid + 8];
if (blockSize >= 8) sdata[tid] += sdata[tid + 4];
if (blockSize >= 4) sdata[tid] += sdata[tid + 2];
if (blockSize >= 2) sdata[tid] += sdata[tid + 1];
}
__global__ void renderFundus(float *g_idata, float *g_odata, unsigned int width, float scaleCoeff, int inputOffset, int outputOffset, int funoff, int funwid)
{
//The declaration for 1024 elements is arbitrary
//As long as this is larger than blockSize, it is fine
__shared__ float sdata[1024];
int blockSize = blockDim.x;
// each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
int rowIdx = blockIdx.x + inputOffset;
int outputRowIdx = blockIdx.x + outputOffset;
sdata[tid] = 0;
for (int j=0; (tid+j)<funwid; j+=blockDim.x)
sdata[tid] += g_idata[rowIdx*width + tid + j+funoff];
sdata[tid]=sdata[tid]/width;
__syncthreads();
if (blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); }
if (tid < 32) warpReduce(sdata, tid, blockSize);
if (tid == 0) g_odata[outputRowIdx] = sdata[0]*scaleCoeff; //Equivalent to 7.0f/1024.0f, multiplication much faster than division
}
__global__ void syncKernel()
{
//This Kernel is Purposely Empty
//By calling a non-streamed empty kernel, the whole system will be synchronized together
//This Kernel will NOT affect any CPU threads, therefore it should not pose any problems
}
__global__ void Variance(float *src_Buffer, float *dst_Buffer, float *dst_svBuffer, int numF, int frameNum, int frameSize, float coefficient)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
int zIdx = idx/(frameSize); //0-9
int inFrameIdx = idx%(frameSize);
float tempVal;
tempVal = 0;
for (int i=0;i<numF;i++)
tempVal += src_Buffer[(zIdx*numF + frameNum+i)*frameSize + inFrameIdx]; //zIdx*numF = 0:3:6:9:12...:27
float mean = tempVal/numF;
float var = 0.0;
for (int i=0;i<numF;i++)
var += pow(src_Buffer[(zIdx*numF + frameNum+i)*frameSize + inFrameIdx]-mean,2);
tempVal = var/numF*coefficient; //'coefficient' is a scaling factor chosen for display purposes
src_Buffer[(zIdx*numF + frameNum)*frameSize + inFrameIdx] = tempVal;
}
__global__ void avgKernel2(float *src_Buffer, float *dst_Buffer, int frameNum, int frames, int frameSize)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
int zIdx = idx/(frameSize); //0-9
int inFrameIdx = idx%(frameSize);
float tempVal;
tempVal = 0;
if (zIdx <8 && zIdx >0){
for(int i=-1; i<frames-1; i++)
tempVal += src_Buffer[(zIdx*frames+ frameNum+i*3)*frameSize + inFrameIdx];
src_Buffer[(zIdx*frames + frameNum)*frameSize + inFrameIdx] = tempVal/frames;
}
else if (zIdx <1)
{
for(int i=0; i<frames; i++)
tempVal += src_Buffer[(zIdx*frames+ frameNum+i*3)*frameSize + inFrameIdx];
src_Buffer[(zIdx*frames + frameNum)*frameSize + inFrameIdx] = tempVal/frames;
}
else
{
for(int i=-2; i<1; i++)
tempVal += src_Buffer[(zIdx*frames+ frameNum+i*3)*frameSize + inFrameIdx];
src_Buffer[(zIdx*frames + frameNum)*frameSize + inFrameIdx] = tempVal/frames;
}
}
//registration section//
__global__ void complexMulConj(Complex *Src, Complex *Dst, int frameNum, int frames, int width, int height, int subPixelFactor)
{
int frameSize = width*height;
int idx = threadIdx.x + blockIdx.x*blockDim.x;
int zIdx = idx/(frameSize);
int inFrameIdx = idx%(frameSize);
Complex temp;
int a = (zIdx*frames+frameNum)*frameSize;
int b = (zIdx*frames+frameNum+1)*frameSize;
temp.x = Src[a + inFrameIdx].x * Src[b+inFrameIdx].x - Src[a+inFrameIdx].y * (-1)*Src[b+inFrameIdx].y;
temp.y = Src[a + inFrameIdx].x * Src[b+inFrameIdx].y*(-1) + Src[a + inFrameIdx].y * Src[b+inFrameIdx].x;
int outFrameIdx = 0;
if (subPixelFactor == 1)
outFrameIdx = idx;
else
outFrameIdx = (inFrameIdx/width+height/2)* width*subPixelFactor +(inFrameIdx%width+width/2) +zIdx*frameSize*4;
Dst[outFrameIdx] = temp;
}
__global__ void batchComplexAbs( Complex *Src, float *Dst, int offset)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
Dst[offset + idx] = complexAbs(Src[idx]);
}
__global__ void copyToComplex( float *input, Complex *output)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
output[idx].x = input[idx];
output[idx].y = 0.0f;
}
__global__ void normData(Complex *input, float norm)
{ int idx = threadIdx.x + blockIdx.x*blockDim.x;
input[idx].x *= norm;
input[idx].y *= norm;
}
__device__ void MaxWarpReduce(volatile float *sdata, unsigned int tid, int blockSize,int* loc)
{
if (blockSize >= 64) {
if (sdata[tid] < sdata[tid +32]){
sdata[tid] = sdata[tid +32];
loc[tid] = loc[tid +32];
}
}
if (blockSize >= 32) {
if (sdata[tid] < sdata[tid +16]){
sdata[tid] = sdata[tid +16];
loc[tid] = loc[tid +16];
}
}
if (blockSize >= 16) {
if (sdata[tid] < sdata[tid +8]){
sdata[tid] = sdata[tid +8];
loc[tid] = loc[tid +8];
}
}
if (blockSize >= 8){
if (sdata[tid] < sdata[tid +4]){
sdata[tid] = sdata[tid +4];
loc[tid] = loc[tid +4];
}
}
if (blockSize >= 4) {
if (sdata[tid] < sdata[tid +2]){
sdata[tid] = sdata[tid +2];
loc[tid] = loc[tid +2];
}
}
if (blockSize >= 2) {
if (sdata[tid] < sdata[tid +1]){
sdata[tid] = sdata[tid +1];
loc[tid] = loc[tid +1];
}
}
}
__global__ void maxReductionBatch(float *g_idata, float *maxV, unsigned int width,int height, int* loc)
{
//The declaration for 1024 elements is arbitrary
//As long as this is larger than blockSize, it is fine
__shared__ float sdata[1024];
__shared__ int sloc[1024];
int blockSize = blockDim.x;
// each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
sdata[tid] = 0;
for (int j=tid; j<width; j+=blockSize){
if (sdata[tid] < g_idata[(blockIdx.x)*width + j]){
sdata[tid] = g_idata[(blockIdx.x)*width + j];
sloc[tid] = j;
}
}
__syncthreads();
if (blockSize >= 128) {
if (tid < 64) {
if (sdata[tid] < sdata[tid + 64]){
sdata[tid] = sdata[tid + 64];
sloc[tid] = sloc[tid + 64];
}
}
__syncthreads();
if (tid < 32) MaxWarpReduce(sdata, tid, blockSize,sloc);
if (tid == 0) {
maxV[blockIdx.x] = sdata[tid];
loc[blockIdx.x] = sloc[tid];
}
}
}
__global__ void computeShift(float *RegMaxV, int *RegLoc, int width,
int height,int offsetFrame,int framesPerBuffer, float *MaxV,float *diffphase, Complex *data, int *shift, int subPixelFactor)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
int zIdx = idx*height;
MaxV[idx] = RegMaxV[zIdx];
int hloc;
int wloc;
hloc = 0;
wloc = RegLoc[zIdx];
for (int j=1; j<height; j++){
if (MaxV[idx] < RegMaxV[zIdx+j]){
MaxV[idx] = RegMaxV[zIdx+j];
hloc = j;
wloc = RegLoc[zIdx+j];
}
}
int md2 = width/2;
int nd2 = height/2;
if (wloc > md2)
shift[idx] = wloc - width+1;
else
shift[idx] = wloc;
if (hloc > nd2)
shift[idx+framesPerBuffer/3] = hloc - height+1;
else
shift[idx+framesPerBuffer/3] = hloc;
shift[idx] /=subPixelFactor;
shift[idx+framesPerBuffer/3] /= subPixelFactor;
// diffphase ---> Global phase difference between the two images (should be zero if images are non-negative).
// diffphase[idx] = atan2(data[(idx*3 + offsetFrame)*width/subPixelFactor*height/subPixelFactor+ hloc/subPixelFactor*width/subPixelFactor +wloc/subPixelFactor].y,data[(idx*3 + offsetFrame)*width/subPixelFactor*height/subPixelFactor+hloc/subPixelFactor*width/subPixelFactor +wloc/subPixelFactor].x);
// For our OCT processing pipeline, the intensity of processed images are only from 0-1.
diffphase[idx] = 0;
}
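// getMeshgrid fills Nw/Nh with standard FFT frequency indices; e.g. for width = 8 each row of Nw
// becomes {0, 1, 2, 3, -4, -3, -2, -1}.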
__global__ void getMeshgrid( int *Nw, int *Nh, int width,int height)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
int dstIdx1 = int(idx/width);
int dstIdx2 = idx%width;
if (dstIdx2 < (width/2))
Nw[idx] = dstIdx2;
else
Nw[idx] = dstIdx2 - width;
if (dstIdx1 < (height/2))
Nh[idx] = dstIdx1;
else
Nh[idx] = dstIdx1 - height;
}
__global__ void ImagExpB (Complex *Src, int *Nw, int *Nh, int width, int height, int frameNum, int framesPerBuffer,int *shift,float *diffphase)
{
float theta;
Complex r;
Complex s;
int idx = threadIdx.x + blockIdx.x*blockDim.x;
int zIdx = idx/(width*height);
int InframeIdx = idx%(width*height);
theta = 2*_PI*((-1)*(float(shift[zIdx])*float(Nw[InframeIdx])/width + float(shift[zIdx+framesPerBuffer/3])*float(Nh[InframeIdx])/height));
r.x = cosf(theta);
r.y = sinf(theta);
s.x = cosf(diffphase[zIdx]);
s.y = sinf(diffphase[zIdx]);
Src[(zIdx*3+frameNum)*width*height+InframeIdx] = ComplexMul(Src[(zIdx*3+frameNum)*width*height+InframeIdx],ComplexMul(r,s));
}
__device__ void MIPwarpReduce(volatile float *sdata, unsigned int tid, int blockSize)
{
if (blockSize >= 64) {
if (sdata[tid] < sdata[tid +32]){
sdata[tid] = sdata[tid +32];
}
}
if (blockSize >= 32) {
if (sdata[tid] < sdata[tid +16]){
sdata[tid] = sdata[tid +16];
}
}
if (blockSize >= 16) {
if (sdata[tid] < sdata[tid +8]){
sdata[tid] = sdata[tid +8];
}
}
if (blockSize >= 8){
if (sdata[tid] < sdata[tid +4]){
sdata[tid] = sdata[tid +4];
}
}
if (blockSize >= 4) {
if (sdata[tid] < sdata[tid +2]){
sdata[tid] = sdata[tid +2];
}
}
if (blockSize >= 2) {
if (sdata[tid] < sdata[tid +1]){
sdata[tid] = sdata[tid +1];
}
}
}
__global__ void MIPrenderFundus(float *g_idata, float *g_odata, unsigned int width, float scaleCoeff, int inputOffset, int outputOffset, int funoff, int funwid)
{
//The declaration for 1024 elements is arbitrary
//As long as this is larger than blockSize, it is fine
__shared__ float sdata[1024];
int rowIdx = blockIdx.x + inputOffset;
int outputRowIdx = blockIdx.x + outputOffset;
// each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
sdata[tid] = 0;
for (int j=0; (tid+j)<funwid; j+=blockDim.x){
if (sdata[tid] < g_idata[rowIdx*width + tid + j+funoff]){
sdata[tid] = g_idata[rowIdx*width + tid + j+funoff];
}
}
__syncthreads();
if (blockDim.x >= 128) {
if (tid < 64) {
if (sdata[tid] < sdata[tid + 64]){
sdata[tid] = sdata[tid + 64];
}
}
__syncthreads();
if (tid < 32) MIPwarpReduce(sdata, tid, blockDim.x);
if (tid == 0) {
g_odata[outputRowIdx] = sdata[0]*scaleCoeff;
}
}
}
__global__ void MIPrenderFundusSV(float *g_idata, float *g_odata, unsigned int width, float scaleCoeff, int inputOffset, int outputOffset, int funoff, int funwid, int height)
{
//The declaration for 1024 elements is arbitrary
//As long as this is larger than blockSize, it is fine
__shared__ float sdata[1024];
int svZIdx = (blockIdx.x/height) * 3;
int svInFrameIdx = blockIdx.x%height;
int svBlockIdx = svZIdx*height + svInFrameIdx;
int rowIdx = svBlockIdx + inputOffset;
int outputRowIdx = blockIdx.x + outputOffset;
// each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
sdata[tid] = 0;
for (int j=0; (tid+j)<funwid; j+=blockDim.x){
if (sdata[tid] < g_idata[rowIdx*width + tid + j+funoff]){
sdata[tid] = g_idata[rowIdx*width + tid + j+funoff];
}
}
__syncthreads();
if (blockDim.x >= 128) {
if (tid < 64) {
if (sdata[tid] < sdata[tid + 64]){
sdata[tid] = sdata[tid + 64];
}
}
__syncthreads();
if (tid < 32) MIPwarpReduce(sdata, tid, blockDim.x);
if (tid == 0) {
g_odata[outputRowIdx] = sdata[0]*scaleCoeff;
}
}
}
__global__ void renderFundusSV(float *g_idata, float *g_odata, unsigned int width, float scaleCoeff, int inputOffset, int outputOffset, int funoff, int funwid, int height)
{
//The declaration for 1024 elements is arbitrary
//As long as this is larger than blockSize, it is fine
__shared__ float sdata[1024];
int blockSize = blockDim.x;
// each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
int svZIdx = (blockIdx.x/height) * 3;
int svInFrameIdx = blockIdx.x%height;
int svBlockIdx = svZIdx*height + svInFrameIdx;
int rowIdx = svBlockIdx + inputOffset;
int outputRowIdx = blockIdx.x + outputOffset;
sdata[tid] = 0;
for (int j=0; (tid+j)<funwid; j+=blockDim.x)
sdata[tid] += g_idata[rowIdx*width + tid + j+funoff];
sdata[tid]=sdata[tid]/width;
__syncthreads();
if (blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); }
if (tid < 32) warpReduce(sdata, tid, blockSize);
if (tid == 0) g_odata[outputRowIdx] = sdata[0]*scaleCoeff; //apply the scale factor as a multiplication, which is much faster than a division
}
__global__ void setColor(float *g_odata,float *g_idata,int index)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
g_odata[idx*3+index] = g_idata[idx];
}
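// fftshift2D: circularly shifts each width x height frame by half its size in
// both dimensions (swapping quadrants, as after an FFT), for a whole stack of
// frames in one launch; the caller must cover frames*width*height threads.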
__global__ void fftshift2D( Complex *input, Complex *output, int width, int height)
{
int frameSize = width*height;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int zIdx = idx/(frameSize); //0-9
int inFrameIdx = idx%(frameSize);
int x1 = inFrameIdx/width;
int y1 = inFrameIdx%width;
int outIdx = ((y1+width/2)%width) + ((x1+height/2)%height)*width+ zIdx*frameSize;
output[outIdx] = input[idx];
}
|
1147a38338935af9c1a16a2110ffb3394a2a9efa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <CUDATemplate.hpp>
#include <iostream>
#include <CUDATemplate_Kernels.cu>
#include <helper_cuda.h>
#include <helper_functions.h> // helper functions for SDK examples
using namespace std;
CUDATemplate::CUDATemplate(unsigned int pWidth, PRECISION pMultiplier): Width(pWidth), Multiplier(pMultiplier), input(NULL), output(NULL), tStart(0LL), tEnd(0LL), tDelta(0LL), tPaused(true)
{
// Printing simulation size.
cout << "Width = " << Width << endl;
cout << "Multiplier = " << Multiplier << endl;
}
// Allocate memory for data arrays.
int CUDATemplate::AllocateMemoryCPU()
{
input = new PRECISION[Width];
output = new PRECISION[Width];
return 0;
}
// Initialise CPU data.
int CUDATemplate::InitialiseCPU()
{
for (unsigned int i=0; i<Width; i++)
input[i] = (PRECISION)(rand()%100);
return 0;
}
int CUDATemplate::AllocateMemoryGPU()
{
// Device memory allocation
checkCudaErrors(hipMalloc((void **)&d_input, sizeof(PRECISION)*Width));
checkCudaErrors(hipMalloc((void **)&d_output, sizeof(PRECISION)*Width));
return 0;
}
int CUDATemplate::CopyDataCPUtoGPU()
{
checkCudaErrors(hipMemcpy(d_input, input, sizeof(PRECISION)*Width, hipMemcpyHostToDevice));
return 0;
}
int CUDATemplate::RunSimulationCPU()
{
for (unsigned int i=0; i<Width; i++)
output[i] = Multiplier * input[i];
// Display results.
cout << "Multiplier is " << Multiplier << endl;
cout << "Input array is: " << endl;
SafeCall(DisplayArray(Width, input), "Error: Displaying input array.");
cout << "Output array is: " << endl;
SafeCall(DisplayArray(Width, output), "Error: Displaying output array.");
return 0;
}
int CUDATemplate::RunSimulationGPU()
{
// Total local threads in a block. Can be thought of as Block dimensions.
const unsigned int ThreadsX = 256;
const unsigned int ThreadsY = 1;
// Total blocks in simulation grid. Can be thought of as no. of blocks in grid.
// Size should be divisible by 256.
unsigned int BlocksX = Width/ThreadsX;
unsigned int BlocksY = 1;
// Kernel parameters.
dim3 Blocks(BlocksX, BlocksY);
dim3 Threads(ThreadsX, ThreadsY);
cout << "Simulation (GPU) started..." << endl;
cout << "Block dimensions: " << ThreadsX << "x" << ThreadsY << endl;
cout << "Grid dimensions: " << BlocksX << "x" << BlocksY << endl;
StopWatchInterface *Timer = 0;
sdkCreateTimer(&Timer);
sdkResetTimer(&Timer);
sdkStartTimer(&Timer);
// Kernel call.
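// ThreadsX and ThreadsY are compile-time constants, so they are also passed to
// CUDATemplateKernel as template arguments in addition to defining the launch geometry.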
hipLaunchKernelGGL(( CUDATemplateKernel <ThreadsX, ThreadsY>) , dim3(Blocks), dim3(Threads), 0, 0, d_input, d_output, Multiplier);
// Error checking.
getLastCudaError("Kernel execution failed");
sdkStopTimer(&Timer);
checkCudaErrors(hipMemcpy(output, d_output, sizeof(PRECISION)*Width, hipMemcpyDeviceToHost));
// Display results.
cout << "Multiplier is " << Multiplier << endl;
cout << "Input array is: " << endl;
SafeCall(DisplayArray(Width, input), "Error: Displaying input array.");
cout << "Output array is: " << endl;
SafeCall(DisplayArray(Width, output), "Error: Displaying output array.");
cout << "\r" << "kernel execution time = " << sdkGetTimerValue(&Timer) << " ms." << endl;
sdkDeleteTimer(&Timer);
return 0;
}
int CUDATemplate::CompleteRunCPU()
{
SafeCall(AllocateMemoryCPU(), "Error: Allocating memory on CPU.");
SafeCall(InitialiseCPU(), "Error: Initialising data on CPU.");
SafeCall(RunSimulationCPU(), "Error: Running on CPU.");
SafeCall(CleanupCPU(), "Error: Cleaning up CPU.");
return 0;
}
int CUDATemplate::CompleteRunGPU()
{
SafeCall(AllocateMemoryCPU(), "Error: Allocating memory on CPU.");
SafeCall(InitialiseCPU(), "Error: Initialising data on CPU.");
SafeCall(AllocateMemoryGPU(), "Error: Allocating memory on GPU.");
SafeCall(CopyDataCPUtoGPU(), "Error: Copying data from CPU to GPU.");
SafeCall(RunSimulationGPU(), "Error: Running on GPU.");
SafeCall(CleanupCPU(), "Error: Cleaning up CPU.");
SafeCall(CleanupGPU(), "Error: Cleaning up GPU.");
return 0;
}
// Display array.
int CUDATemplate::DisplayArray(const unsigned int Size, PRECISION* Array)
{
for (unsigned int i=0; i<Size; i++)
cout << Array[i] << " ";
cout << endl;
return 0;
}
// Timing.
void CUDATemplate::StartTimer()
{
if (tPaused == true)
{
tStart = GetTimeus64();
tPaused = false;
}
}
void CUDATemplate::StopTimer()
{
if (tPaused == false)
{
tEnd = GetTimeus64();
tDelta += tEnd - tStart;
tStart = tEnd;
tPaused = true;
}
}
void CUDATemplate::ResetTimer()
{
if (tPaused == true)
tStart = tEnd;
else
tStart = GetTimeus64();
tDelta = 0UL;
}
double CUDATemplate::GetElapsedTime()
{
if (tPaused == false)
tEnd = GetTimeus64();
return ((double)(tEnd-tStart+tDelta))/(1000000.);
}
int CUDATemplate::SafeCall(int Status, const char *Error)
{
if (Status != 0)
{
if (Error!=NULL) cout << Error << endl;
exit(Status);
}
return Status;
}
template<typename T> void DeleteArray(T *&ptr)
{
if (ptr != NULL)
{
delete[] ptr;
ptr = NULL;
}
}
int CUDATemplate::CleanupCPU()
{
// Host cleanup.
DeleteArray(input);
DeleteArray(output);
return 0;
}
int CUDATemplate::CleanupGPU()
{
// Device cleanup.
checkCudaErrors(hipFree(d_input));
checkCudaErrors(hipFree(d_output));
hipDeviceReset();
return 0;
}
CUDATemplate::~CUDATemplate ()
{
// Cleanup.
DeleteArray(input);
DeleteArray(output);
}
| 1147a38338935af9c1a16a2110ffb3394a2a9efa.cu | #include <CUDATemplate.hpp>
#include <iostream>
#include <CUDATemplate_Kernels.cu>
#include <helper_cuda.h>
#include <helper_functions.h> // helper functions for SDK examples
using namespace std;
CUDATemplate::CUDATemplate(unsigned int pWidth, PRECISION pMultiplier): Width(pWidth), Multiplier(pMultiplier), input(NULL), output(NULL), tStart(0LL), tEnd(0LL), tDelta(0LL), tPaused(true)
{
// Printing simulation size.
cout << "Width = " << Width << endl;
cout << "Multiplier = " << Multiplier << endl;
}
// Allocate memory for data arrays.
int CUDATemplate::AllocateMemoryCPU()
{
input = new PRECISION[Width];
output = new PRECISION[Width];
return 0;
}
// Initialise CPU data.
int CUDATemplate::InitialiseCPU()
{
for (unsigned int i=0; i<Width; i++)
input[i] = (PRECISION)(rand()%100);
return 0;
}
int CUDATemplate::AllocateMemoryGPU()
{
// Device memory allocation
checkCudaErrors(cudaMalloc((void **)&d_input, sizeof(PRECISION)*Width));
checkCudaErrors(cudaMalloc((void **)&d_output, sizeof(PRECISION)*Width));
return 0;
}
int CUDATemplate::CopyDataCPUtoGPU()
{
checkCudaErrors(cudaMemcpy(d_input, input, sizeof(PRECISION)*Width, cudaMemcpyHostToDevice));
return 0;
}
int CUDATemplate::RunSimulationCPU()
{
for (unsigned int i=0; i<Width; i++)
output[i] = Multiplier * input[i];
// Display results.
cout << "Multiplier is " << Multiplier << endl;
cout << "Input array is: " << endl;
SafeCall(DisplayArray(Width, input), "Error: Displaying input array.");
cout << "Output array is: " << endl;
SafeCall(DisplayArray(Width, output), "Error: Displaying output array.");
return 0;
}
int CUDATemplate::RunSimulationGPU()
{
// Total local threads in a block. Can be thought of as Block dimensions.
const unsigned int ThreadsX = 256;
const unsigned int ThreadsY = 1;
// Total blocks in simulation grid. Can be thought of as no. of blocks in grid.
// Size should be divisible by 256.
unsigned int BlocksX = Width/ThreadsX;
unsigned int BlocksY = 1;
// Kernel parameters.
dim3 Blocks(BlocksX, BlocksY);
dim3 Threads(ThreadsX, ThreadsY);
cout << "Simulation (GPU) started..." << endl;
cout << "Block dimensions: " << ThreadsX << "x" << ThreadsY << endl;
cout << "Grid dimensions: " << BlocksX << "x" << BlocksY << endl;
StopWatchInterface *Timer = 0;
sdkCreateTimer(&Timer);
sdkResetTimer(&Timer);
sdkStartTimer(&Timer);
// Kernel call.
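// ThreadsX and ThreadsY are compile-time constants, so they are also passed to
// CUDATemplateKernel as template arguments in addition to defining the launch geometry.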
CUDATemplateKernel <ThreadsX, ThreadsY> <<<Blocks, Threads>>>(d_input, d_output, Multiplier);
// Error checking.
getLastCudaError("Kernel execution failed");
sdkStopTimer(&Timer);
checkCudaErrors(cudaMemcpy(output, d_output, sizeof(PRECISION)*Width, cudaMemcpyDeviceToHost));
// Display results.
cout << "Multiplier is " << Multiplier << endl;
cout << "Input array is: " << endl;
SafeCall(DisplayArray(Width, input), "Error: Displaying input array.");
cout << "Output array is: " << endl;
SafeCall(DisplayArray(Width, output), "Error: Displaying output array.");
cout << "\r" << "kernel execution time = " << sdkGetTimerValue(&Timer) << " ms." << endl;
sdkDeleteTimer(&Timer);
return 0;
}
int CUDATemplate::CompleteRunCPU()
{
SafeCall(AllocateMemoryCPU(), "Error: Allocating memory on CPU.");
SafeCall(InitialiseCPU(), "Error: Initialising data on CPU.");
SafeCall(RunSimulationCPU(), "Error: Running on CPU.");
SafeCall(CleanupCPU(), "Error: Cleaning up CPU.");
return 0;
}
int CUDATemplate::CompleteRunGPU()
{
SafeCall(AllocateMemoryCPU(), "Error: Allocating memory on CPU.");
SafeCall(InitialiseCPU(), "Error: Initialising data on CPU.");
SafeCall(AllocateMemoryGPU(), "Error: Allocating memory on GPU.");
SafeCall(CopyDataCPUtoGPU(), "Error: Copying data from CPU to GPU.");
SafeCall(RunSimulationGPU(), "Error: Running on GPU.");
SafeCall(CleanupCPU(), "Error: Cleaning up CPU.");
SafeCall(CleanupGPU(), "Error: Cleaning up GPU.");
return 0;
}
// Display array.
int CUDATemplate::DisplayArray(const unsigned int Size, PRECISION* Array)
{
for (unsigned int i=0; i<Size; i++)
cout << Array[i] << " ";
cout << endl;
return 0;
}
// Timing.
void CUDATemplate::StartTimer()
{
if (tPaused == true)
{
tStart = GetTimeus64();
tPaused = false;
}
}
void CUDATemplate::StopTimer()
{
if (tPaused == false)
{
tEnd = GetTimeus64();
tDelta += tEnd - tStart;
tStart = tEnd;
tPaused = true;
}
}
void CUDATemplate::ResetTimer()
{
if (tPaused == true)
tStart = tEnd;
else
tStart = GetTimeus64();
tDelta = 0UL;
}
double CUDATemplate::GetElapsedTime()
{
if (tPaused == false)
tEnd = GetTimeus64();
return ((double)(tEnd-tStart+tDelta))/(1000000.);
}
int CUDATemplate::SafeCall(int Status, const char *Error)
{
if (Status != 0)
{
if (Error!=NULL) cout << Error << endl;
exit(Status);
}
return Status;
}
template<typename T> void DeleteArray(T *&ptr)
{
if (ptr != NULL)
{
delete[] ptr;
ptr = NULL;
}
}
int CUDATemplate::CleanupCPU()
{
// Host cleanup.
DeleteArray(input);
DeleteArray(output);
return 0;
}
int CUDATemplate::CleanupGPU()
{
// Device cleanup.
checkCudaErrors(cudaFree(d_input));
checkCudaErrors(cudaFree(d_output));
cudaDeviceReset();
return 0;
}
CUDATemplate::~CUDATemplate ()
{
// Cleanup.
DeleteArray(input);
DeleteArray(output);
}
|
322f6cd9490d1fd33f4b3ae4230f058a7329a5d5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by root on 23/03/2020.
//
#include "../Matrix.cuh"
#include <iostream>
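// Element-wise logit: c = log(a / (1 - a)), one thread per matrix element;
// threads that fall outside the cr x cc matrix do nothing.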
__global__ void matrixLogit(double *a, double *c, int cr, int cc){
int x = blockIdx.x * blockDim.x + threadIdx.x; // col
int y = blockIdx.y * blockDim.y + threadIdx.y; // row
if(x < cc && y < cr){
c[y * cc + x] = log(a[y * cc + x] / (1-a[y * cc + x]));
}
}
Matrix Matrix::logit(){
static double* c;
c = (double*) calloc(this->Rows*this->Columns,sizeof(double));
//Device (GPU) memory pointers
double *d_a, *d_c;
//Size in bytes of the input and output matrices
long aSize = this->Rows*this->Columns*sizeof(double);
long cSize = this->Rows*this->Columns*sizeof(double);
//Allocate device memory
hipMalloc((void**)&d_a, aSize);
hipMalloc((void**)&d_c, cSize);
//Copy the input matrix into the allocated device memory
hipMemcpy(d_a, this->Value, aSize, hipMemcpyHostToDevice);
//Set the launch dimensions: enough 32x32 blocks to cover every element
dim3 dimBlock(32,32); // 32x32 -> 1024 Threads
dim3 dimGrid((this->Columns + dimBlock.x - 1) / dimBlock.x, (this->Rows + dimBlock.y - 1) / dimBlock.y);
//Apply the element-wise logit
hipLaunchKernelGGL(( matrixLogit), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a, d_c, this->Rows, this->Columns);
//Copy the result back
hipMemcpy(c, d_c, cSize, hipMemcpyDeviceToHost);
//Free the device memory
hipFree(d_a);
hipFree(d_c);
//Return the result
return {this->Columns, this->Rows, c};
}
| 322f6cd9490d1fd33f4b3ae4230f058a7329a5d5.cu | //
// Created by root on 23/03/2020.
//
#include "../Matrix.cuh"
#include <iostream>
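// Element-wise logit: c = log(a / (1 - a)), one thread per matrix element;
// threads that fall outside the cr x cc matrix do nothing.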
__global__ void matrixLogit(double *a, double *c, int cr, int cc){
int x = blockIdx.x * blockDim.x + threadIdx.x; // col
int y = blockIdx.y * blockDim.y + threadIdx.y; // row
if(x < cc && y < cr){
c[y * cc + x] = log(a[y * cc + x] / (1-a[y * cc + x]));
}
}
Matrix Matrix::logit(){
static double* c;
c = (double*) calloc(this->Rows*this->Columns,sizeof(double));
//Device (GPU) memory pointers
double *d_a, *d_c;
//Size in bytes of the input and output matrices
long aSize = this->Rows*this->Columns*sizeof(double);
long cSize = this->Rows*this->Columns*sizeof(double);
//Allocate device memory
cudaMalloc((void**)&d_a, aSize);
cudaMalloc((void**)&d_c, cSize);
//Copy the input matrix into the allocated device memory
cudaMemcpy(d_a, this->Value, aSize, cudaMemcpyHostToDevice);
//Set the launch dimensions: enough 32x32 blocks to cover every element
dim3 dimBlock(32,32); // 32x32 -> 1024 Threads
dim3 dimGrid((this->Columns + dimBlock.x - 1) / dimBlock.x, (this->Rows + dimBlock.y - 1) / dimBlock.y);
//Apply the element-wise logit
matrixLogit<<<dimGrid, dimBlock>>>(d_a, d_c, this->Rows, this->Columns);
//Copy the result back
cudaMemcpy(c, d_c, cSize, cudaMemcpyDeviceToHost);
//Free the device memory
cudaFree(d_a);
cudaFree(d_c);
//Return the result
return {this->Columns, this->Rows, c};
}
|
7bb161538d3c84c0e4db3f4029df7c1e30572358.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stereo.h"
__global__
void SolveSmoothDualTVGlobalKernel (float* duhat, float* dvhat,
float* pu1, float* pu2,
float* pv1, float* pv2,
int width, int height, int stride,
float tau, float theta,
float *pu1s, float *pu2s,
float *pv1s, float* pv2s)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
// int left = (ix - 1) + iy * stride;
int right = (ix + 1) + iy * stride;
int down = ix + (iy - 1) * stride;
int up = ix + (iy + 1) * stride;
//solve derivatives of duhat and dvhat
float dux, duy, dvx, dvy;
if ((ix + 1) >= width) {
//dux = duhat[pos] - duhat[left];
//dvx = dvhat[pos] - dvhat[left];
dux = 0;
dvx = 0;
}
else {
dux = duhat[right] - duhat[pos];
dvx = dvhat[right] - dvhat[pos];
}
if ((iy + 1) >= height) {
//duy = duhat[pos] - duhat[down];
//dvy = dvhat[pos] - dvhat[down];
duy = 0;
dvy = 0;
}
else {
duy = duhat[up] - duhat[pos];
dvy = dvhat[up] - dvhat[pos];
}
float magdu = sqrt(dux*dux + duy*duy);
float magdv = sqrt(dvx*dvx + dvy*dvy);
float fac = tau / theta;
float pu1sub = pu1[pos];
float pu2sub = pu2[pos];
float pv1sub = pv1[pos];
float pv2sub = pv2[pos];
for (int k = 0; k < 1; k++) {
pu1sub = (pu1sub + fac*dux) / (1 + fac*magdu);
pu2sub = (pu2sub + fac*duy) / (1 + fac*magdu);
pv1sub = (pv1sub + fac*dvx) / (1 + fac*magdv);
pv2sub = (pv2sub + fac*dvy) / (1 + fac*magdv);
}
pu1s[pos] = pu1sub;
pu2s[pos] = pu2sub;
pv1s[pos] = pv1sub;
pv2s[pos] = pv2sub;
}
void Stereo::SolveSmoothDualTVGlobal(float *duhat, float *dvhat,
float *pu1, float *pu2, float *pv1, float *pv2,
int w, int h, int s,
float tau, float theta,
float *pu1s, float*pu2s,
float *pv1s, float *pv2s
)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
hipLaunchKernelGGL(( SolveSmoothDualTVGlobalKernel) , dim3(blocks), dim3(threads) , 0, 0, duhat, dvhat,
pu1, pu2, pv1, pv2,
w, h, s,
tau, theta,
pu1s, pu2s, pv1s, pv2s);
}
// ***************************
// Dual TV Stereo
// ***************************
__global__
void SolveSmoothDualTVGlobalStereoKernel(float* dwhat,
float* pw1, float* pw2,
int width, int height, int stride,
float tau, float theta,
float *pw1s, float *pw2s)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
// const int pos = ix + iy * stride;
float desiredRadius = (float)width / 2.20f;
float halfWidth = (float)width / 2.0f;
float halfHeight = (float)height / 2.0f;
float radius = sqrtf((iy - halfHeight) * (iy - halfHeight) + (ix - halfWidth) * (ix - halfWidth));
if ((iy < height) && (ix < width))
{
int pos = ix + iy * stride;// current pixel index
if (radius >= halfWidth)
{
pw1s[pos] = 0.0f;
pw2s[pos] = 0.0f;
}
else
{
//int left = (ix - 1) + iy * stride;
int right = (ix + 1) + iy * stride;
//int down = ix + (iy - 1) * stride;
int up = ix + (iy + 1) * stride;
//solve derivatives of duhat and dvhat
float dwx, dwy;
if ((ix + 1) >= width) {
//dux = duhat[pos] - duhat[left];
//dvx = dvhat[pos] - dvhat[left];
dwx = 0;
}
else {
dwx = dwhat[right] - dwhat[pos];
}
if ((iy + 1) >= height) {
//duy = duhat[pos] - duhat[down];
//dvy = dvhat[pos] - dvhat[down];
dwy = 0;
}
else {
dwy = dwhat[up] - dwhat[pos];
}
float magdu = sqrt(dwx*dwx + dwy * dwy);
float fac = tau / theta;
float pw1sub = pw1[pos];
float pw2sub = pw2[pos];
for (int k = 0; k < 1; k++) {
pw1sub = (pw1sub + fac * dwx) / (1 + fac * magdu);
pw2sub = (pw2sub + fac * dwy) / (1 + fac * magdu);
}
pw1s[pos] = pw1sub;
pw2s[pos] = pw2sub;
}
}
}
void Stereo::SolveSmoothDualTVGlobalStereo(float *duhat,
float *pw1, float *pw2,
int w, int h, int s,
float tau, float theta,
float *pw1s, float*pw2s
)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
hipLaunchKernelGGL(( SolveSmoothDualTVGlobalStereoKernel), dim3(blocks), dim3(threads), 0, 0, duhat,
pw1, pw2,
w, h, s,
tau, theta,
pw1s, pw2s);
}
| 7bb161538d3c84c0e4db3f4029df7c1e30572358.cu | #include "stereo.h"
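// Dual-variable update of the TV (total variation) smoothness term: forward
// differences of the current flow increments (duhat, dvhat) are taken, with a
// zero gradient assumed at the right/bottom image borders, and the dual fields
// p are advanced with step tau/theta and renormalized by 1/(1 + (tau/theta)*|grad|).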
__global__
void SolveSmoothDualTVGlobalKernel (float* duhat, float* dvhat,
float* pu1, float* pu2,
float* pv1, float* pv2,
int width, int height, int stride,
float tau, float theta,
float *pu1s, float *pu2s,
float *pv1s, float* pv2s)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
// int left = (ix - 1) + iy * stride;
int right = (ix + 1) + iy * stride;
int down = ix + (iy - 1) * stride;
int up = ix + (iy + 1) * stride;
//solve derivatives of duhat and dvhat
float dux, duy, dvx, dvy;
if ((ix + 1) >= width) {
//dux = duhat[pos] - duhat[left];
//dvx = dvhat[pos] - dvhat[left];
dux = 0;
dvx = 0;
}
else {
dux = duhat[right] - duhat[pos];
dvx = dvhat[right] - dvhat[pos];
}
if ((iy + 1) >= height) {
//duy = duhat[pos] - duhat[down];
//dvy = dvhat[pos] - dvhat[down];
duy = 0;
dvy = 0;
}
else {
duy = duhat[up] - duhat[pos];
dvy = dvhat[up] - dvhat[pos];
}
float magdu = sqrt(dux*dux + duy*duy);
float magdv = sqrt(dvx*dvx + dvy*dvy);
float fac = tau / theta;
float pu1sub = pu1[pos];
float pu2sub = pu2[pos];
float pv1sub = pv1[pos];
float pv2sub = pv2[pos];
for (int k = 0; k < 1; k++) {
pu1sub = (pu1sub + fac*dux) / (1 + fac*magdu);
pu2sub = (pu2sub + fac*duy) / (1 + fac*magdu);
pv1sub = (pv1sub + fac*dvx) / (1 + fac*magdv);
pv2sub = (pv2sub + fac*dvy) / (1 + fac*magdv);
}
pu1s[pos] = pu1sub;
pu2s[pos] = pu2sub;
pv1s[pos] = pv1sub;
pv2s[pos] = pv2sub;
}
void Stereo::SolveSmoothDualTVGlobal(float *duhat, float *dvhat,
float *pu1, float *pu2, float *pv1, float *pv2,
int w, int h, int s,
float tau, float theta,
float *pu1s, float*pu2s,
float *pv1s, float *pv2s
)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
SolveSmoothDualTVGlobalKernel <<< blocks, threads >>> (duhat, dvhat,
pu1, pu2, pv1, pv2,
w, h, s,
tau, theta,
pu1s, pu2s, pv1s, pv2s);
}
// ***************************
// Dual TV Stereo
// ***************************
__global__
void SolveSmoothDualTVGlobalStereoKernel(float* dwhat,
float* pw1, float* pw2,
int width, int height, int stride,
float tau, float theta,
float *pw1s, float *pw2s)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
// const int pos = ix + iy * stride;
float desiredRadius = (float)width / 2.20f;
float halfWidth = (float)width / 2.0f;
float halfHeight = (float)height / 2.0f;
float radius = sqrtf((iy - halfHeight) * (iy - halfHeight) + (ix - halfWidth) * (ix - halfWidth));
if ((iy < height) && (ix < width))
{
int pos = ix + iy * stride;// current pixel index
if (radius >= halfWidth)
{
pw1s[pos] = 0.0f;
pw2s[pos] = 0.0f;
}
else
{
//int left = (ix - 1) + iy * stride;
int right = (ix + 1) + iy * stride;
//int down = ix + (iy - 1) * stride;
int up = ix + (iy + 1) * stride;
//solve derivatives of duhat and dvhat
float dwx, dwy;
if ((ix + 1) >= width) {
//dux = duhat[pos] - duhat[left];
//dvx = dvhat[pos] - dvhat[left];
dwx = 0;
}
else {
dwx = dwhat[right] - dwhat[pos];
}
if ((iy + 1) >= height) {
//duy = duhat[pos] - duhat[down];
//dvy = dvhat[pos] - dvhat[down];
dwy = 0;
}
else {
dwy = dwhat[up] - dwhat[pos];
}
float magdu = sqrt(dwx*dwx + dwy * dwy);
float fac = tau / theta;
float pw1sub = pw1[pos];
float pw2sub = pw2[pos];
for (int k = 0; k < 1; k++) {
pw1sub = (pw1sub + fac * dwx) / (1 + fac * magdu);
pw2sub = (pw2sub + fac * dwy) / (1 + fac * magdu);
}
pw1s[pos] = pw1sub;
pw2s[pos] = pw2sub;
}
}
}
void Stereo::SolveSmoothDualTVGlobalStereo(float *duhat,
float *pw1, float *pw2,
int w, int h, int s,
float tau, float theta,
float *pw1s, float*pw2s
)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
SolveSmoothDualTVGlobalStereoKernel << < blocks, threads >> > (duhat,
pw1, pw2,
w, h, s,
tau, theta,
pw1s, pw2s);
}
|
8f06c76b997b3b1c712f905cdab7f77b8777b9ae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include "lodepng.h"
/******************************************************************************
Compile the program:
nvcc -o ImageBlur ImageBlur.cu
Run the program:
./ImageBlur
******************************************************************************/
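// blur: box-style blur of each pixel with its in-bounds neighbours. The launch
// in main uses one block per image row and one thread per pixel of that row,
// so the image width must not exceed the per-block thread limit (1024 on most GPUs).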
__global__ void blur(unsigned char *gpu_image_output, unsigned char *gpu_image_input, int width, int height)
{
int count = 1;
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int i = blockIdx.x;   // row index
int j = threadIdx.x;  // column index
// Start the sums from the centre pixel, then accumulate every neighbour that
// actually lies inside the image.
int t_red = gpu_image_input[idx * 4];
int t_green = gpu_image_input[idx * 4 + 1];
int t_blue = gpu_image_input[idx * 4 + 2];
int t_alpha = gpu_image_input[idx * 4 + 3];
if (i + 1 < height && j - 1 >= 0)
{
int pos = blockDim.x * (blockIdx.x + 1) + threadIdx.x - 1;
int pixel = pos * 4;
t_red += gpu_image_input[pixel];
t_green += gpu_image_input[1 + pixel];
t_blue += gpu_image_input[2 + pixel];
t_alpha += gpu_image_input[3 + pixel];
count++;
}
if (j + 1 < width)
{
int pos = blockDim.x * (blockIdx.x) + threadIdx.x + 1;
int pixel = pos * 4;
t_red += gpu_image_input[pixel];
t_green += gpu_image_input[1 + pixel];
t_blue += gpu_image_input[2 + pixel];
t_alpha += gpu_image_input[3 + pixel];
count++;
}
if (i + 1 < height && j + 1 < width)
{
int pos = blockDim.x * (blockIdx.x + 1) + threadIdx.x + 1;
int pixel = pos * 4;
t_red += gpu_image_input[pixel];
t_green += gpu_image_input[1 + pixel];
t_blue += gpu_image_input[2 + pixel];
t_alpha += gpu_image_input[3 + pixel];
count++;
}
if (i + 1 < height)
{
int pos = blockDim.x * (blockIdx.x + 1) + threadIdx.x;
int pixel = pos * 4;
t_red += gpu_image_input[pixel];
t_green += gpu_image_input[1 + pixel];
t_blue += gpu_image_input[2 + pixel];
t_alpha += gpu_image_input[3 + pixel];
count++;
}
if (j - 1 >= 0)
{
int pos = blockDim.x * (blockIdx.x) + threadIdx.x - 1;
int pixel = pos * 4;
t_red += gpu_image_input[pixel];
t_green += gpu_image_input[1 + pixel];
t_blue += gpu_image_input[2 + pixel];
t_alpha += gpu_image_input[3 + pixel];
count++;
}
if (i - 1 >= 0)
{
int pos = blockDim.x * (blockIdx.x - 1) + threadIdx.x;
int pixel = pos * 4;
t_red += gpu_image_input[pixel];
t_green += gpu_image_input[1 + pixel];
t_blue += gpu_image_input[2 + pixel];
t_alpha += gpu_image_input[3 + pixel];
count++;
}
int current_pixel = idx * 4;
gpu_image_output[current_pixel] = t_red / count;
gpu_image_output[1 + current_pixel] = t_green / count;
gpu_image_output[2 + current_pixel] = t_blue / count;
gpu_image_output[3 + current_pixel] = gpu_image_input[3 + current_pixel];
}
int main(int argc, char **argv)
{
unsigned int error;
unsigned int encError;
unsigned char *image;
unsigned int width;
unsigned int height;
const char *filename = "cat.png";
const char *new_file_name = "blurred_cat.png";
error = lodepng_decode32_file(&image, &width, &height, filename);
if (error)
{
printf("error %u: %s\n", error, lodepng_error_text(error));
}
const int ARRAY_SIZE = width * height * 4;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(unsigned char);
unsigned char *host_image_input = (unsigned char *)malloc(ARRAY_SIZE * 4);
unsigned char *host_image_output = (unsigned char *)malloc(ARRAY_SIZE * 4);
for (int i = 0; i < ARRAY_SIZE; i++)
{
host_image_input[i] = image[i];
}
// declare GPU memory pointers
unsigned char *d_in;
unsigned char *d_out;
// allocate GPU memory
hipMalloc((void **)&d_in, ARRAY_BYTES);
hipMalloc((void **)&d_out, ARRAY_BYTES);
hipMemcpy(d_in, host_image_input, ARRAY_BYTES, hipMemcpyHostToDevice);
// launch the kernel
hipLaunchKernelGGL(( blur), dim3(height), dim3(width), 0, 0, d_out, d_in, width, height);
// copy back the result array to the CPU
hipMemcpy(host_image_output, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
encError = lodepng_encode32_file(new_file_name, host_image_output, width, height);
if (encError)
{
printf("error %u: %s\n", error, lodepng_error_text(encError));
}
//free(image);
//free(host_image_input);
hipFree(d_in);
hipFree(d_out);
return 0;
}
| 8f06c76b997b3b1c712f905cdab7f77b8777b9ae.cu | #include <stdio.h>
#include <stdlib.h>
#include "lodepng.h"
/******************************************************************************
Compile the program:
nvcc -o ImageBlur ImageBlur.cu
Run the program:
./ImageBlur
******************************************************************************/
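// blur: box-style blur of each pixel with its in-bounds neighbours. The launch
// in main uses one block per image row and one thread per pixel of that row,
// so the image width must not exceed the per-block thread limit (1024 on most GPUs).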
__global__ void blur(unsigned char *gpu_image_output, unsigned char *gpu_image_input, int width, int height)
{
int count = 1;
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int i = blockIdx.x;   // row index
int j = threadIdx.x;  // column index
// Start the sums from the centre pixel, then accumulate every neighbour that
// actually lies inside the image.
int t_red = gpu_image_input[idx * 4];
int t_green = gpu_image_input[idx * 4 + 1];
int t_blue = gpu_image_input[idx * 4 + 2];
int t_alpha = gpu_image_input[idx * 4 + 3];
if (i + 1 < height && j - 1 >= 0)
{
int pos = blockDim.x * (blockIdx.x + 1) + threadIdx.x - 1;
int pixel = pos * 4;
t_red += gpu_image_input[pixel];
t_green += gpu_image_input[1 + pixel];
t_blue += gpu_image_input[2 + pixel];
t_alpha += gpu_image_input[3 + pixel];
count++;
}
if (j + 1 < width)
{
int pos = blockDim.x * (blockIdx.x) + threadIdx.x + 1;
int pixel = pos * 4;
t_red += gpu_image_input[pixel];
t_green += gpu_image_input[1 + pixel];
t_blue += gpu_image_input[2 + pixel];
t_alpha += gpu_image_input[3 + pixel];
count++;
}
if (i + 1 < height && j + 1 < width)
{
int pos = blockDim.x * (blockIdx.x + 1) + threadIdx.x + 1;
int pixel = pos * 4;
t_red += gpu_image_input[pixel];
t_green += gpu_image_input[1 + pixel];
t_blue += gpu_image_input[2 + pixel];
t_alpha += gpu_image_input[3 + pixel];
count++;
}
if (i + 1 < height)
{
int pos = blockDim.x * (blockIdx.x + 1) + threadIdx.x;
int pixel = pos * 4;
t_red += gpu_image_input[pixel];
t_green += gpu_image_input[1 + pixel];
t_blue += gpu_image_input[2 + pixel];
t_alpha += gpu_image_input[3 + pixel];
count++;
}
if (j - 1 >= 0)
{
int pos = blockDim.x * (blockIdx.x) + threadIdx.x - 1;
int pixel = pos * 4;
t_red += gpu_image_input[pixel];
t_green += gpu_image_input[1 + pixel];
t_blue += gpu_image_input[2 + pixel];
t_alpha += gpu_image_input[3 + pixel];
count++;
}
if (i - 1 >= 0)
{
int pos = blockDim.x * (blockIdx.x - 1) + threadIdx.x;
int pixel = pos * 4;
t_red += gpu_image_input[pixel];
t_green += gpu_image_input[1 + pixel];
t_blue += gpu_image_input[2 + pixel];
t_alpha += gpu_image_input[3 + pixel];
count++;
}
int current_pixel = idx * 4;
gpu_image_output[current_pixel] = t_red / count;
gpu_image_output[1 + current_pixel] = t_green / count;
gpu_image_output[2 + current_pixel] = t_blue / count;
gpu_image_output[3 + current_pixel] = gpu_image_input[3 + current_pixel];
}
int main(int argc, char **argv)
{
unsigned int error;
unsigned int encError;
unsigned char *image;
unsigned int width;
unsigned int height;
const char *filename = "cat.png";
const char *new_file_name = "blurred_cat.png";
error = lodepng_decode32_file(&image, &width, &height, filename);
if (error)
{
printf("error %u: %s\n", error, lodepng_error_text(error));
}
const int ARRAY_SIZE = width * height * 4;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(unsigned char);
unsigned char *host_image_input = (unsigned char *)malloc(ARRAY_SIZE * 4);
unsigned char *host_image_output = (unsigned char *)malloc(ARRAY_SIZE * 4);
for (int i = 0; i < ARRAY_SIZE; i++)
{
host_image_input[i] = image[i];
}
// declare GPU memory pointers
unsigned char *d_in;
unsigned char *d_out;
// allocate GPU memory
cudaMalloc((void **)&d_in, ARRAY_BYTES);
cudaMalloc((void **)&d_out, ARRAY_BYTES);
cudaMemcpy(d_in, host_image_input, ARRAY_BYTES, cudaMemcpyHostToDevice);
// launch the kernel
blur<<<height, width>>>(d_out, d_in, width, height);
// copy back the result array to the CPU
cudaMemcpy(host_image_output, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
encError = lodepng_encode32_file(new_file_name, host_image_output, width, height);
if (encError)
{
printf("error %u: %s\n", error, lodepng_error_text(encError));
}
//free(image);
//free(host_image_input);
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
010eba62425ee3caf3d1a2837e9a5499d9e334f4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <string>
#include <xtensor/xarray.hpp>
#include <xtensor/xio.hpp>
#include <xtensor/xview.hpp>
#include <xtensor/xnpy.hpp>
#include <boost/filesystem.hpp>
#define BLOCK_HEIGHT 64
#define BLOCK_WIDTH 98
// GLOBAL VARIABLES
uint LAYER_WIDTH = 512;
uint MODEL_SEED = 52233264;
// // Matrix-vector multiplication using CUDA
// // Using shared memory and avoiding banking conflicts
template<typename T>
__global__ void MatMulKernel(T *out, T *in, T *a,
const int matrixHeight,
const int matrixWidth)
{
// get variables for loop
// copy section of b into shared mem
// go through the threads vertically and sum them into a variable
// atomic add these variables to the corresponding c index
// looping is happening horizontally on the matrix
// BLOCK_WIDTH is again horizontal
// BLOCK_HEIGHT is going vertical
// n_cols / BLOCK_WIDTH blocks horizontally
// n_rows / BLOCK_HEIGHT block vertically
// get variables for loop
// variable for loop length: blockEltHeight
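// Worked example (assuming the 512x512 transposed weight matrix loaded in main
// below): blockCols = ceil(512/98) = 6, so blocks 0..4 each cover 98 columns and
// the rightmost block covers 512 % 98 = 22 columns; blockRows = ceil(512/64) = 8,
// giving a 6 x 8 grid of thread blocks.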
__shared__ int blockElt;
__shared__ int blockxInd;
__shared__ int blockyInd;
if (threadIdx.x == 0) // only the first thread of the entire block initializes the shared variables blockElt, blockxInd, blockyInd.
{
if ((blockIdx.x + 1) * BLOCK_WIDTH <= matrixWidth)
blockElt = BLOCK_WIDTH; // NOT the rightmost block so width of block = BLOCK_WIDTH
else blockElt = matrixWidth % BLOCK_WIDTH; // rightmost block so width of block = matrixWidth % BLOCK_WIDTH
blockxInd = blockIdx.x * BLOCK_WIDTH; // top left thread x-index of the block
blockyInd = blockIdx.y * BLOCK_HEIGHT; // top left thread y-index of the block
}
__syncthreads(); //all threads have value of blockElt, blockxInd, blockyInd
// copy section of b into shared mem
// https://stackoverflow.com/questions/24419822/efficiently-initializing-shared-memory-array-in-cuda/24419969#24419969
// use threads to write into independent locations of b[] from in []
__shared__ T b[BLOCK_WIDTH];
__shared__ T in_sub[BLOCK_HEIGHT][BLOCK_WIDTH + 31];
// __shared__ T in_sub[BLOCK_HEIGHT][BLOCK_WIDTH];
int threads_per_block = BLOCK_HEIGHT;
int lidx = threadIdx.x;
while (lidx < BLOCK_WIDTH)
{
b[lidx] = in[lidx + blockIdx.x * BLOCK_WIDTH];
lidx += threads_per_block;
}
__syncthreads();
for (int i=0; i<blockElt; i++) //each thread loads one sub-row of matrix a[].
{
in_sub[threadIdx.x][i] = a[(blockyInd + threadIdx.x) * matrixWidth + blockxInd + i];
}
__syncthreads();
// summing variable
T cSum = (T) 0.0;
int threadyInd = blockyInd + threadIdx.x;
// make sure we are inside the matrix verticallly
if (threadyInd < matrixHeight)
{
// each thread computes one element of a block segment of the output vector
for (int i=0; i<blockElt; i++)
{
// row R of matrix a[] --> (blockIdx.y * BLOCK_HEIGHT + threadIdx.x) * matrixWidth = (blockyInd + threadIdx.x) * matrixWidth
// col C of row R of matrix a[] --> blockIdx.x * BLOCK_WIDTH = blockxInd
// element E of col C of row R of matrix a[] --> i
// b[i] is accessed by all threads and therefore it is broadcast without any banking conflicts.
// cSum += b[i] * a[(blockyInd + threadIdx.x) * matrixWidth + blockxInd + i]; //working version
cSum += in_sub[threadIdx.x][i] * b[i];
// if (i==blockElt-1)
// printf("blockxInd = %d, blockyInd = %d, threadIdx.x = %d, csum = %f\n", blockxInd, blockyInd, threadIdx.x, cSum);
}
// atomic add these variables to the corresponding c index
atomicAdd(out + threadyInd, cSum);
}
}
template <class _Tp>
xt::xarray<_Tp> matvec_banking (xt::xarray<_Tp> matrix_A,
xt::xarray<_Tp> vector_B)
{
unsigned int n_rows = matrix_A.shape()[0];
unsigned int n_cols = matrix_A.shape()[1];
unsigned int size_A = n_rows * n_cols;
unsigned int size_B = n_cols;
assert (vector_B.shape()[0] == size_B && "matrix A and vector B shape mismatch.");
assert (vector_B.shape()[1] == 1 && "vector B no. of columns != 1");
unsigned int size_C = n_rows;
// declare matrices for GPU and allocate memory
// A, B and C are allocated as unified memory below, so no separate host
// copies are needed (allocating them with new here would only leak).
_Tp *A = nullptr;
_Tp *B = nullptr;
_Tp *C = nullptr;
// Allocate Unified Memory accessible from CPU or GPU
hipMallocManaged(&A, size_A*sizeof(_Tp));
hipMallocManaged(&B, size_B*sizeof(_Tp));
hipMallocManaged(&C, size_C*sizeof(_Tp));
// Fill the matrix values from xtensor to C++ array
for (int i = 0; i < size_A; i++)
A[i] = matrix_A.flat(i);
for (int i = 0; i < size_B; i++)
B[i] = vector_B.flat(i);
//run mat-vec multiplication
// set up threading and blocking variables
// Block Grid for MatMulKernel<<< >>>
int blockCols = (int) ceil(n_cols / (double) BLOCK_WIDTH);
int blockRows = (int) ceil(n_rows / (double) BLOCK_HEIGHT);
dim3 dimBlock(BLOCK_HEIGHT); // BLOCK_HEIGHT directly corresponds to no. of threads per block i.e., one thread per row of the block.
dim3 dimGrid(blockCols, blockRows);
int sharedMem = 3 * sizeof (int) + BLOCK_WIDTH * sizeof(_Tp) + BLOCK_HEIGHT*(BLOCK_WIDTH + 31) * sizeof(_Tp);
// 31 is for padding s.t. (98+31) mod 32 = 1
// 3 * sizeof (int) -> to store blockElt, blockxInd, blockyInd;
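// The +31 matches the padding of in_sub in MatMulKernel: its row stride becomes
// 98 + 31 = 129 elements, and since 129 mod 32 = 1, threads of a warp that read
// the same column of consecutive rows fall into different shared-memory banks
// (no bank conflicts). Note that the kernel declares its shared arrays
// statically, so the dynamically sized allocation requested here is not the
// storage those arrays actually use.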
// initialize vector C to zero
hipMemset(C, 0, n_rows*sizeof(_Tp));
// execute kernels
hipLaunchKernelGGL(( MatMulKernel<_Tp>), dim3(dimGrid), dim3(dimBlock), sharedMem, 0, C, B, A, n_rows, n_cols);
hipDeviceSynchronize();
// Convert product vector to xtensor
xt::xarray<double>::shape_type C_shape = {size_C, 1};
xt::xarray<_Tp> vec_C = xt::adapt(C, size_C, xt::no_ownership(), C_shape);
hipFree(A);
hipFree(B);
hipFree(C);
return vec_C;
}
int main()
{
// load weights from npy files
boost::filesystem::path weight_folder("../weights");
const std::string dense_weights_folder = "../weights/mnist_dense-w" +
std::to_string(LAYER_WIDTH) +
"x" +
std::to_string(LAYER_WIDTH) +
"-" +
std::to_string(MODEL_SEED);
const std::string dense_weights_file = dense_weights_folder + "/mnist_dense-w" +
std::to_string(LAYER_WIDTH) +
"x" +
std::to_string(LAYER_WIDTH) +
"-" +
std::to_string(MODEL_SEED) + "_dense_weights.npy";
// std::cout << "******************************" << std::endl;
// std::cout << "Weights: " << dense_weights_file << std::endl;
xt::xarray<float> dense_weights = xt::load_npy<float>(dense_weights_file);
xt::xarray<float> tr_dense_weights = xt::transpose(dense_weights);
// load input vector from npy file
uint image_no = 69999;
const std::string input_vector_file = "../data/vector_" + std::to_string(image_no) + ".npy";
// std::cout << "Input: " << input_vector_file << std::endl;
xt::xarray<float> input_vector = xt::load_npy<float>(input_vector_file);
// std::cout << "******************************" << std::endl;
// std::cout << "Transposed Weight Matrix Shape: "<< xt::adapt(tr_dense_weights.shape()) << std::endl;
// std::cout << "Input Vector Shape: "<< xt::adapt(input_vector.shape()) << std::endl;
// std::cout << "******************************" << std::endl;
// for (int i = 0; i < 10; ++i)
// {
// matvec_banking(tr_dense_weights, input_vector);
// }
// std::cout << "******************************" << std::endl;
// Display Output
auto matvecproduct = matvec_banking(tr_dense_weights, input_vector);
// std::cout << "Matrix-Vector Product Shape: " << xt::adapt(matvecproduct.shape()) << std::endl;
// std::cout << "Matrix-Vector Product" << std::endl;
// std::cout << matvecproduct << std::endl;
// std::cout << "******************************" << std::endl;
return 0;
}
| 010eba62425ee3caf3d1a2837e9a5499d9e334f4.cu | #include <iostream>
#include <string>
#include <xtensor/xarray.hpp>
#include <xtensor/xio.hpp>
#include <xtensor/xview.hpp>
#include <xtensor/xnpy.hpp>
#include <boost/filesystem.hpp>
#define BLOCK_HEIGHT 64
#define BLOCK_WIDTH 98
// GLOBAL VARIABLES
uint LAYER_WIDTH = 512;
uint MODEL_SEED = 52233264;
// // Matrix-vector multiplication using CUDA
// // Using shared memory and avoiding banking conflicts
template<typename T>
__global__ void MatMulKernel(T *out, T *in, T *a,
const int matrixHeight,
const int matrixWidth)
{
// get variables for loop
// copy section of b into shared mem
// go through the threads vertically and sum them into a variable
// atomic add these variables to the corresponding c index
// looping is happening horizontally on the matrix
// BLOCK_WIDTH is again horizontal
// BLOCK_HEIGHT is going vertical
// n_cols / BLOCK_WIDTH blocks horizontally
// n_rows / BLOCK_HEIGHT block vertically
// get variables for loop
// variable for loop length: blockEltHeight
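// Worked example (assuming the 512x512 transposed weight matrix loaded in main
// below): blockCols = ceil(512/98) = 6, so blocks 0..4 each cover 98 columns and
// the rightmost block covers 512 % 98 = 22 columns; blockRows = ceil(512/64) = 8,
// giving a 6 x 8 grid of thread blocks.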
__shared__ int blockElt;
__shared__ int blockxInd;
__shared__ int blockyInd;
if (threadIdx.x == 0) // only the first thread of the entire block initializes the shared variables blockElt, blockxInd, blockyInd.
{
if ((blockIdx.x + 1) * BLOCK_WIDTH <= matrixWidth)
blockElt = BLOCK_WIDTH; // NOT the rightmost block so width of block = BLOCK_WIDTH
else blockElt = matrixWidth % BLOCK_WIDTH; // rightmost block so width of block = matrixWidth % BLOCK_WIDTH
blockxInd = blockIdx.x * BLOCK_WIDTH; // top left thread x-index of the block
blockyInd = blockIdx.y * BLOCK_HEIGHT; // top left thread y-index of the block
}
__syncthreads(); //all threads have value of blockElt, blockxInd, blockyInd
// copy section of b into shared mem
// https://stackoverflow.com/questions/24419822/efficiently-initializing-shared-memory-array-in-cuda/24419969#24419969
// use threads to write into independent locations of b[] from in []
__shared__ T b[BLOCK_WIDTH];
__shared__ T in_sub[BLOCK_HEIGHT][BLOCK_WIDTH + 31];
// __shared__ T in_sub[BLOCK_HEIGHT][BLOCK_WIDTH];
int threads_per_block = BLOCK_HEIGHT;
int lidx = threadIdx.x;
while (lidx < BLOCK_WIDTH)
{
b[lidx] = in[lidx + blockIdx.x * BLOCK_WIDTH];
lidx += threads_per_block;
}
__syncthreads();
for (int i=0; i<blockElt; i++) //each thread loads one sub-row of matrix a[].
{
in_sub[threadIdx.x][i] = a[(blockyInd + threadIdx.x) * matrixWidth + blockxInd + i];
}
__syncthreads();
// summing variable
T cSum = (T) 0.0;
int threadyInd = blockyInd + threadIdx.x;
// make sure we are inside the matrix verticallly
if (threadyInd < matrixHeight)
{
// each thread computes one element of a block segment of the output vector
for (int i=0; i<blockElt; i++)
{
// row R of matrix a[] --> (blockIdx.y * BLOCK_HEIGHT + threadIdx.x) * matrixWidth = (blockyInd + threadIdx.x) * matrixWidth
// col C of row R of matrix a[] --> blockIdx.x * BLOCK_WIDTH = blockxInd
// element E of col C of row R of matrix a[] --> i
// b[i] is accessed by all threads and therefore it is broadcast without any banking conflicts.
// cSum += b[i] * a[(blockyInd + threadIdx.x) * matrixWidth + blockxInd + i]; //working version
cSum += in_sub[threadIdx.x][i] * b[i];
// if (i==blockElt-1)
// printf("blockxInd = %d, blockyInd = %d, threadIdx.x = %d, csum = %f\n", blockxInd, blockyInd, threadIdx.x, cSum);
}
// atomic add these variables to the corresponding c index
atomicAdd(out + threadyInd, cSum);
}
}
template <class _Tp>
xt::xarray<_Tp> matvec_banking (xt::xarray<_Tp> matrix_A,
xt::xarray<_Tp> vector_B)
{
unsigned int n_rows = matrix_A.shape()[0];
unsigned int n_cols = matrix_A.shape()[1];
unsigned int size_A = n_rows * n_cols;
unsigned int size_B = n_cols;
assert (vector_B.shape()[0] == size_B && "matrix A and vector B shape mismatch.");
assert (vector_B.shape()[1] == 1 && "vector B no. of columns != 1");
unsigned int size_C = n_rows;
// declare matrices for GPU and allocate memory
// A, B and C are allocated as unified memory below, so no separate host
// copies are needed (allocating them with new here would only leak).
_Tp *A = nullptr;
_Tp *B = nullptr;
_Tp *C = nullptr;
// Allocate Unified Memory – accessible from CPU or GPU
cudaMallocManaged(&A, size_A*sizeof(_Tp));
cudaMallocManaged(&B, size_B*sizeof(_Tp));
cudaMallocManaged(&C, size_C*sizeof(_Tp));
// Fill the matrix values from xtensor to C++ array
for (int i = 0; i < size_A; i++)
A[i] = matrix_A.flat(i);
for (int i = 0; i < size_B; i++)
B[i] = vector_B.flat(i);
//run mat-vec multiplication
// set up threading and blocking variables
// Block Grid for MatMulKernel<<< >>>
int blockCols = (int) ceil(n_cols / (double) BLOCK_WIDTH);
int blockRows = (int) ceil(n_rows / (double) BLOCK_HEIGHT);
dim3 dimBlock(BLOCK_HEIGHT); // BLOCK_HEIGHT directly corresponds to no. of threads per block i.e., one thread per row of the block.
dim3 dimGrid(blockCols, blockRows);
int sharedMem = 3 * sizeof (int) + BLOCK_WIDTH * sizeof(_Tp) + BLOCK_HEIGHT*(BLOCK_WIDTH + 31) * sizeof(_Tp);
// 31 is for padding s.t. (98+31) mod 32 = 1
// 3 * sizeof (int) -> to store blockElt, blockxInd, blockyInd;
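// The +31 matches the padding of in_sub in MatMulKernel: its row stride becomes
// 98 + 31 = 129 elements, and since 129 mod 32 = 1, threads of a warp that read
// the same column of consecutive rows fall into different shared-memory banks
// (no bank conflicts). Note that the kernel declares its shared arrays
// statically, so the dynamically sized allocation requested here is not the
// storage those arrays actually use.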
// initialize vector C to zero
cudaMemset(C, 0, n_rows*sizeof(_Tp));
// execute kernels
MatMulKernel<_Tp><<<dimGrid, dimBlock, sharedMem>>>(C, B, A, n_rows, n_cols);
cudaDeviceSynchronize();
// Convert product vector to xtensor
xt::xarray<double>::shape_type C_shape = {size_C, 1};
xt::xarray<_Tp> vec_C = xt::adapt(C, size_C, xt::no_ownership(), C_shape);
cudaFree(A);
cudaFree(B);
cudaFree(C);
return vec_C;
}
int main()
{
// load weights from npy files
boost::filesystem::path weight_folder("../weights");
const std::string dense_weights_folder = "../weights/mnist_dense-w" +
std::to_string(LAYER_WIDTH) +
"x" +
std::to_string(LAYER_WIDTH) +
"-" +
std::to_string(MODEL_SEED);
const std::string dense_weights_file = dense_weights_folder + "/mnist_dense-w" +
std::to_string(LAYER_WIDTH) +
"x" +
std::to_string(LAYER_WIDTH) +
"-" +
std::to_string(MODEL_SEED) + "_dense_weights.npy";
// std::cout << "******************************" << std::endl;
// std::cout << "Weights: " << dense_weights_file << std::endl;
xt::xarray<float> dense_weights = xt::load_npy<float>(dense_weights_file);
xt::xarray<float> tr_dense_weights = xt::transpose(dense_weights);
// load input vector from npy file
uint image_no = 69999;
const std::string input_vector_file = "../data/vector_" + std::to_string(image_no) + ".npy";
// std::cout << "Input: " << input_vector_file << std::endl;
xt::xarray<float> input_vector = xt::load_npy<float>(input_vector_file);
// std::cout << "******************************" << std::endl;
// std::cout << "Transposed Weight Matrix Shape: "<< xt::adapt(tr_dense_weights.shape()) << std::endl;
// std::cout << "Input Vector Shape: "<< xt::adapt(input_vector.shape()) << std::endl;
// std::cout << "******************************" << std::endl;
// for (int i = 0; i < 10; ++i)
// {
// matvec_banking(tr_dense_weights, input_vector);
// }
// std::cout << "******************************" << std::endl;
// Display Output
auto matvecproduct = matvec_banking(tr_dense_weights, input_vector);
// std::cout << "Matrix-Vector Product Shape: " << xt::adapt(matvecproduct.shape()) << std::endl;
// std::cout << "Matrix-Vector Product" << std::endl;
// std::cout << matvecproduct << std::endl;
// std::cout << "******************************" << std::endl;
return 0;
}
|
b9e99c0d72b276fcd8d553b288889bfba44b46d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "../common/common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
int main(int argc, char **argv)
{
int dev = 0;
CHECK(hipSetDevice(dev));
unsigned int isize = 1 << 22;
unsigned int nbytes = isize * sizeof(float);
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
if (!deviceProp.canMapHostMemory)
{
printf("Device %d does not support mapping CPU host memory!\n", dev);
CHECK(hipDeviceReset());
exit(EXIT_SUCCESS);
}
printf("%s starting at ", argv[0]);
printf("device %d: %s memory size %d nbyte %5.2fMB canMap %d\n", dev,
deviceProp.name, isize, nbytes / (1024.0f * 1024.0f),
deviceProp.canMapHostMemory);
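// h_a is allocated below with hipHostMalloc as pinned (page-locked) memory, so
// the host<->device copies can be DMA'd directly and are typically faster than
// copies from pageable allocations.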
float *h_a;
CHECK(hipHostMalloc ((float **)&h_a, nbytes));
float *d_a;
CHECK(hipMalloc((float **)&d_a, nbytes));
memset(h_a, 0, nbytes);
for (int i = 0; i < isize; i++) h_a[i] = 100.10f;
CHECK(hipMemcpy(d_a, h_a, nbytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(h_a, d_a, nbytes, hipMemcpyDeviceToHost));
CHECK(hipFree(d_a));
CHECK(hipHostFree(h_a));
CHECK(hipDeviceReset());
return EXIT_SUCCESS;
}
| b9e99c0d72b276fcd8d553b288889bfba44b46d4.cu | #include "../common/common.h"
#include <cuda_runtime.h>
#include <stdio.h>
int main(int argc, char **argv)
{
int dev = 0;
CHECK(cudaSetDevice(dev));
unsigned int isize = 1 << 22;
unsigned int nbytes = isize * sizeof(float);
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
if (!deviceProp.canMapHostMemory)
{
printf("Device %d does not support mapping CPU host memory!\n", dev);
CHECK(cudaDeviceReset());
exit(EXIT_SUCCESS);
}
printf("%s starting at ", argv[0]);
printf("device %d: %s memory size %d nbyte %5.2fMB canMap %d\n", dev,
deviceProp.name, isize, nbytes / (1024.0f * 1024.0f),
deviceProp.canMapHostMemory);
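// h_a is allocated below with cudaMallocHost as pinned (page-locked) memory, so
// the host<->device copies can be DMA'd directly and are typically faster than
// copies from pageable allocations.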
float *h_a;
CHECK(cudaMallocHost ((float **)&h_a, nbytes));
float *d_a;
CHECK(cudaMalloc((float **)&d_a, nbytes));
memset(h_a, 0, nbytes);
for (int i = 0; i < isize; i++) h_a[i] = 100.10f;
CHECK(cudaMemcpy(d_a, h_a, nbytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(h_a, d_a, nbytes, cudaMemcpyDeviceToHost));
CHECK(cudaFree(d_a));
CHECK(cudaFreeHost(h_a));
CHECK(cudaDeviceReset());
return EXIT_SUCCESS;
}
|
8488a24ebe43a4ae1ac8dca226e35079949edd7f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "bcnn_op_cuda_clamp_grad_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
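// Benchmark driver: for every matrix size in matrices_ and every block shape in
// blocks_, the clamp-gradient kernel is launched 10 times as a warm-up and then
// 1000 times in the timed loop. The timed loop has no final device synchronize,
// so the reported microseconds mainly reflect launch/queueing overhead.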
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE*sizeof(float));
float *dx = NULL;
hipMalloc(&dx, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((bcnn_op_cuda_clamp_grad_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, n, x, dx);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((bcnn_op_cuda_clamp_grad_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, n, x, dx);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((bcnn_op_cuda_clamp_grad_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, n, x, dx);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 8488a24ebe43a4ae1ac8dca226e35079949edd7f.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "bcnn_op_cuda_clamp_grad_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
float *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE*sizeof(float));
float *dx = NULL;
cudaMalloc(&dx, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
bcnn_op_cuda_clamp_grad_kernel<<<gridBlock,threadBlock>>>(n,x,dx);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
bcnn_op_cuda_clamp_grad_kernel<<<gridBlock,threadBlock>>>(n,x,dx);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
bcnn_op_cuda_clamp_grad_kernel<<<gridBlock,threadBlock>>>(n,x,dx);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
287eaa962209f199927c16fe077d30dec8b0cde4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from magmablas/zlaswp_batched.cu, normal z -> c, Wed Jan 2 14:18:51 2019
@author Azzam Haidar
@author Tingxing Dong
*/
#include "magma_internal.h"
#include "batched_kernel_param.h"
#define BLK_SIZE 256
#define CLASWP_COL_NTH 32
// SWP_WIDTH is number of threads in a block
// 64 and 256 are better on Kepler;
extern __shared__ magmaFloatComplex shared_data[];
/******************************************************************************/
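// Row-parallel LASWP helper: pivinfo gives, for each row of the k1:k2 panel, the
// (1-based) row it should be gathered from once all pivots are applied, so every
// thread can fetch its destination row independently instead of applying the
// swaps serially; the swapped panel is staged in shared memory and written to dout.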
static __device__
void claswp_rowparallel_devfunc(
int n, int width, int height,
magmaFloatComplex *dA, int lda,
magmaFloatComplex *dout, int ldo,
magma_int_t* pivinfo)
{
//int height = k2- k1;
//int height = blockDim.x;
unsigned int tid = threadIdx.x;
dA += SWP_WIDTH * blockIdx.x * lda;
dout += SWP_WIDTH * blockIdx.x * ldo;
magmaFloatComplex *sdata = shared_data;
if (blockIdx.x == gridDim.x -1)
{
width = n - blockIdx.x * SWP_WIDTH;
}
if (tid < height)
{
int mynewroworig = pivinfo[tid]-1; //-1 to get the index in C
int itsreplacement = pivinfo[mynewroworig] -1; //-1 to get the index in C
//printf("%d: mynewroworig = %d, itsreplacement = %d\n", tid, mynewroworig, itsreplacement);
#pragma unroll
for (int i=0; i < width; i++)
{
sdata[ tid + i * height ] = dA[ mynewroworig + i * lda ];
dA[ mynewroworig + i * lda ] = dA[ itsreplacement + i * lda ];
}
}
__syncthreads();
if (tid < height)
{
// copy back the upper swapped portion of A to dout
#pragma unroll
for (int i=0; i < width; i++)
{
dout[tid + i * ldo] = sdata[tid + i * height];
}
}
}
/******************************************************************************/
// parallel swap the swaped dA(1:nb,i:n) is stored in dout
__global__
void claswp_rowparallel_kernel(
int n, int width, int height,
magmaFloatComplex *dinput, int ldi,
magmaFloatComplex *doutput, int ldo,
magma_int_t* pivinfo)
{
claswp_rowparallel_devfunc(n, width, height, dinput, ldi, doutput, ldo, pivinfo);
}
/******************************************************************************/
__global__
void claswp_rowparallel_kernel_batched(
int n, int width, int height,
magmaFloatComplex **input_array, int input_i, int input_j, int ldi,
magmaFloatComplex **output_array, int output_i, int output_j, int ldo,
magma_int_t** pivinfo_array)
{
int batchid = blockIdx.z;
claswp_rowparallel_devfunc( n, width, height,
input_array[batchid] + input_j * ldi + input_i, ldi,
output_array[batchid] + output_j * ldo + output_i, ldo,
pivinfo_array[batchid]);
}
/******************************************************************************/
extern "C" void
magma_claswp_rowparallel_batched( magma_int_t n,
magmaFloatComplex** input_array, magma_int_t input_i, magma_int_t input_j, magma_int_t ldi,
magmaFloatComplex** output_array, magma_int_t output_i, magma_int_t output_j, magma_int_t ldo,
magma_int_t k1, magma_int_t k2,
magma_int_t **pivinfo_array,
magma_int_t batchCount, magma_queue_t queue)
{
#define input_array(i,j) input_array, i, j
#define output_array(i,j) output_array, i, j
if (n == 0 ) return;
int height = k2-k1;
if ( height > 1024)
{
fprintf( stderr, "%s: n=%lld > 1024, not supported\n", __func__, (long long) n );
}
int blocks = magma_ceildiv( n, SWP_WIDTH );
dim3 grid(blocks, 1, batchCount);
if ( n < SWP_WIDTH)
{
size_t shmem = sizeof(magmaFloatComplex) * height * n;
hipLaunchKernelGGL(( claswp_rowparallel_kernel_batched)
, dim3(grid), dim3(height), shmem, queue->cuda_stream() ,
n, n, height, input_array, input_i, input_j, ldi, output_array, output_i, output_j, ldo, pivinfo_array );
}
else
{
size_t shmem = sizeof(magmaFloatComplex) * height * SWP_WIDTH;
hipLaunchKernelGGL(( claswp_rowparallel_kernel_batched)
, dim3(grid), dim3(height), shmem, queue->cuda_stream() ,
n, SWP_WIDTH, height, input_array, input_i, input_j, ldi, output_array, output_i, output_j, ldo, pivinfo_array );
}
#undef input_array
#undef output_array
}
/******************************************************************************/
extern "C" void
magma_claswp_rowparallel_native(
magma_int_t n,
magmaFloatComplex* input, magma_int_t ldi,
magmaFloatComplex* output, magma_int_t ldo,
magma_int_t k1, magma_int_t k2,
magma_int_t *pivinfo,
magma_queue_t queue)
{
if (n == 0 ) return;
int height = k2-k1;
if ( height > MAX_NTHREADS)
{
fprintf( stderr, "%s: height=%lld > %lld, magma_claswp_rowparallel_q not supported\n",
                 __func__, (long long) height, (long long) MAX_NTHREADS );
}
int blocks = magma_ceildiv( n, SWP_WIDTH );
dim3 grid(blocks, 1, 1);
if ( n < SWP_WIDTH)
{
size_t shmem = sizeof(magmaFloatComplex) * height * n;
hipLaunchKernelGGL(( claswp_rowparallel_kernel)
, dim3(grid), dim3(height), shmem, queue->cuda_stream() ,
n, n, height, input, ldi, output, ldo, pivinfo );
}
else
{
size_t shmem = sizeof(magmaFloatComplex) * height * SWP_WIDTH;
hipLaunchKernelGGL(( claswp_rowparallel_kernel)
, dim3(grid), dim3(height), shmem, queue->cuda_stream() ,
n, SWP_WIDTH, height, input, ldi, output, ldo, pivinfo );
}
}
/******************************************************************************/
// serial swap: swaps one row at a time
__global__ void claswp_rowserial_kernel_batched( int n, magmaFloatComplex **dA_array, int lda, int k1, int k2, magma_int_t** ipiv_array )
{
magmaFloatComplex* dA = dA_array[blockIdx.z];
magma_int_t *dipiv = ipiv_array[blockIdx.z];
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
k1--;
k2--;
if (tid < n) {
magmaFloatComplex A1;
for (int i1 = k1; i1 < k2; i1++)
{
int i2 = dipiv[i1] - 1; // Fortran index, switch i1 and i2
if ( i2 != i1)
{
A1 = dA[i1 + tid * lda];
dA[i1 + tid * lda] = dA[i2 + tid * lda];
dA[i2 + tid * lda] = A1;
}
}
}
}
/******************************************************************************/
// serial swap: swaps one row at a time
__global__ void claswp_rowserial_kernel_native( int n, magmaFloatComplex_ptr dA, int lda, int k1, int k2, magma_int_t* dipiv )
{
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
//k1--;
//k2--;
if (tid < n) {
magmaFloatComplex A1;
for (int i1 = k1; i1 < k2; i1++)
{
int i2 = dipiv[i1] - 1; // Fortran index, switch i1 and i2
if ( i2 != i1)
{
A1 = dA[i1 + tid * lda];
dA[i1 + tid * lda] = dA[i2 + tid * lda];
dA[i2 + tid * lda] = A1;
}
}
}
}
/******************************************************************************/
// serial swap: swaps one row at a time, similar to LAPACK
// K1, K2 are in Fortran indexing
extern "C" void
magma_claswp_rowserial_batched(magma_int_t n, magmaFloatComplex** dA_array, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
magma_int_t **ipiv_array,
magma_int_t batchCount, magma_queue_t queue)
{
if (n == 0) return;
int blocks = magma_ceildiv( n, BLK_SIZE );
dim3 grid(blocks, 1, batchCount);
hipLaunchKernelGGL(( claswp_rowserial_kernel_batched)
, dim3(grid), dim3(max(BLK_SIZE, n)), 0, queue->cuda_stream() ,
n, dA_array, lda, k1, k2, ipiv_array);
}
/******************************************************************************/
// serial swap: swaps one row at a time, similar to LAPACK
// K1, K2 are in Fortran indexing
extern "C" void
magma_claswp_rowserial_native(magma_int_t n, magmaFloatComplex_ptr dA, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
magma_int_t* dipiv, magma_queue_t queue)
{
if (n == 0) return;
int blocks = magma_ceildiv( n, BLK_SIZE );
dim3 grid(blocks, 1, 1);
hipLaunchKernelGGL(( claswp_rowserial_kernel_native)
, dim3(grid), dim3(max(BLK_SIZE, n)), 0, queue->cuda_stream() ,
n, dA, lda, k1, k2, dipiv);
}
/******************************************************************************/
// serial swap: swaps one column at a time
__device__ void claswp_columnserial_devfunc(int n, magmaFloatComplex_ptr dA, int lda, int k1, int k2, magma_int_t* dipiv )
{
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
k1--;
k2--;
if ( k1 < 0 || k2 < 0 ) return;
if ( tid < n) {
magmaFloatComplex A1;
if (k1 <= k2)
{
for (int i1 = k1; i1 <= k2; i1++)
{
int i2 = dipiv[i1] - 1; // Fortran index, switch i1 and i2
if ( i2 != i1)
{
A1 = dA[i1 * lda + tid];
dA[i1 * lda + tid] = dA[i2 * lda + tid];
dA[i2 * lda + tid] = A1;
}
}
} else
{
for (int i1 = k1; i1 >= k2; i1--)
{
int i2 = dipiv[i1] - 1; // Fortran index, switch i1 and i2
if ( i2 != i1)
{
A1 = dA[i1 * lda + tid];
dA[i1 * lda + tid] = dA[i2 * lda + tid];
dA[i2 * lda + tid] = A1;
}
}
}
}
}
__global__ void claswp_columnserial_kernel_batched( int n, magmaFloatComplex **dA_array, int lda, int k1, int k2, magma_int_t** ipiv_array )
{
magmaFloatComplex* dA = dA_array[blockIdx.z];
magma_int_t *dipiv = ipiv_array[blockIdx.z];
claswp_columnserial_devfunc(n, dA, lda, k1, k2, dipiv);
}
__global__ void claswp_columnserial_kernel( int n, magmaFloatComplex_ptr dA, int lda, int k1, int k2, magma_int_t* dipiv )
{
claswp_columnserial_devfunc(n, dA, lda, k1, k2, dipiv);
}
/******************************************************************************/
// serial swap: swaps one column at a time
// K1, K2 are in Fortran indexing
extern "C" void
magma_claswp_columnserial(
magma_int_t n, magmaFloatComplex_ptr dA, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
magma_int_t *dipiv, magma_queue_t queue)
{
if (n == 0 ) return;
int blocks = magma_ceildiv( n, CLASWP_COL_NTH );
dim3 grid(blocks, 1, 1);
hipLaunchKernelGGL(( claswp_columnserial_kernel), dim3(grid), dim3(CLASWP_COL_NTH), 0, queue->cuda_stream() ,
n, dA, lda, k1, k2, dipiv);
}
extern "C" void
magma_claswp_columnserial_batched(magma_int_t n, magmaFloatComplex** dA_array, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
magma_int_t **ipiv_array,
magma_int_t batchCount, magma_queue_t queue)
{
if (n == 0 ) return;
int blocks = magma_ceildiv( n, CLASWP_COL_NTH );
dim3 grid(blocks, 1, batchCount);
hipLaunchKernelGGL(( claswp_columnserial_kernel_batched)
, dim3(grid), dim3(min(CLASWP_COL_NTH,n)), 0, queue->cuda_stream() ,
n, dA_array, lda, k1, k2, ipiv_array);
}
| 287eaa962209f199927c16fe077d30dec8b0cde4.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from magmablas/zlaswp_batched.cu, normal z -> c, Wed Jan 2 14:18:51 2019
@author Azzam Haidar
@author Tingxing Dong
*/
#include "magma_internal.h"
#include "batched_kernel_param.h"
#define BLK_SIZE 256
#define CLASWP_COL_NTH 32
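// BLK_SIZE is the thread-block size used by the row-serial swap kernels,
// CLASWP_COL_NTH the block size used by the column-serial swap kernels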
// SWP_WIDTH is the number of threads in a block
// 64 and 256 perform better on Kepler
extern __shared__ magmaFloatComplex shared_data[];
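// dynamically sized shared buffer used by the row-parallel kernels; the launchers below
// allocate height * min(n, SWP_WIDTH) complex elements per thread block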
/******************************************************************************/
static __device__
void claswp_rowparallel_devfunc(
int n, int width, int height,
magmaFloatComplex *dA, int lda,
magmaFloatComplex *dout, int ldo,
magma_int_t* pivinfo)
{
//int height = k2- k1;
//int height = blockDim.x;
unsigned int tid = threadIdx.x;
dA += SWP_WIDTH * blockIdx.x * lda;
dout += SWP_WIDTH * blockIdx.x * ldo;
magmaFloatComplex *sdata = shared_data;
if (blockIdx.x == gridDim.x -1)
{
width = n - blockIdx.x * SWP_WIDTH;
}
if (tid < height)
{
int mynewroworig = pivinfo[tid]-1; //-1 to get the index in C
int itsreplacement = pivinfo[mynewroworig] -1; //-1 to get the index in C
//printf("%d: mynewroworig = %d, itsreplacement = %d\n", tid, mynewroworig, itsreplacement);
#pragma unroll
for (int i=0; i < width; i++)
{
sdata[ tid + i * height ] = dA[ mynewroworig + i * lda ];
dA[ mynewroworig + i * lda ] = dA[ itsreplacement + i * lda ];
}
}
__syncthreads();
if (tid < height)
{
// copy back the upper swapped portion of A to dout
#pragma unroll
for (int i=0; i < width; i++)
{
dout[tid + i * ldo] = sdata[tid + i * height];
}
}
}
/******************************************************************************/
// parallel swap: the swapped dA(1:nb,i:n) is stored in dout
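// pivinfo holds 1-based (Fortran-style) row indices: thread tid gathers row pivinfo[tid]-1 of dA
// into shared memory (and ultimately into row tid of dout), then overwrites that row of dA with
// row pivinfo[pivinfo[tid]-1]-1, as done in the device function above.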
__global__
void claswp_rowparallel_kernel(
int n, int width, int height,
magmaFloatComplex *dinput, int ldi,
magmaFloatComplex *doutput, int ldo,
magma_int_t* pivinfo)
{
claswp_rowparallel_devfunc(n, width, height, dinput, ldi, doutput, ldo, pivinfo);
}
/******************************************************************************/
__global__
void claswp_rowparallel_kernel_batched(
int n, int width, int height,
magmaFloatComplex **input_array, int input_i, int input_j, int ldi,
magmaFloatComplex **output_array, int output_i, int output_j, int ldo,
magma_int_t** pivinfo_array)
{
int batchid = blockIdx.z;
claswp_rowparallel_devfunc( n, width, height,
input_array[batchid] + input_j * ldi + input_i, ldi,
output_array[batchid] + output_j * ldo + output_i, ldo,
pivinfo_array[batchid]);
}
/******************************************************************************/
extern "C" void
magma_claswp_rowparallel_batched( magma_int_t n,
magmaFloatComplex** input_array, magma_int_t input_i, magma_int_t input_j, magma_int_t ldi,
magmaFloatComplex** output_array, magma_int_t output_i, magma_int_t output_j, magma_int_t ldo,
magma_int_t k1, magma_int_t k2,
magma_int_t **pivinfo_array,
magma_int_t batchCount, magma_queue_t queue)
{
#define input_array(i,j) input_array, i, j
#define output_array(i,j) output_array, i, j
if (n == 0 ) return;
int height = k2-k1;
if ( height > 1024)
{
        fprintf( stderr, "%s: height=%lld > 1024, not supported\n", __func__, (long long) height );
}
int blocks = magma_ceildiv( n, SWP_WIDTH );
dim3 grid(blocks, 1, batchCount);
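    // one thread block per SWP_WIDTH-wide column panel; blockIdx.z selects the matrix within the batch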
if ( n < SWP_WIDTH)
{
size_t shmem = sizeof(magmaFloatComplex) * height * n;
claswp_rowparallel_kernel_batched
<<< grid, height, shmem, queue->cuda_stream() >>>
( n, n, height, input_array, input_i, input_j, ldi, output_array, output_i, output_j, ldo, pivinfo_array );
}
else
{
size_t shmem = sizeof(magmaFloatComplex) * height * SWP_WIDTH;
claswp_rowparallel_kernel_batched
<<< grid, height, shmem, queue->cuda_stream() >>>
( n, SWP_WIDTH, height, input_array, input_i, input_j, ldi, output_array, output_i, output_j, ldo, pivinfo_array );
}
#undef input_array
#undef output_array
}
/******************************************************************************/
extern "C" void
magma_claswp_rowparallel_native(
magma_int_t n,
magmaFloatComplex* input, magma_int_t ldi,
magmaFloatComplex* output, magma_int_t ldo,
magma_int_t k1, magma_int_t k2,
magma_int_t *pivinfo,
magma_queue_t queue)
{
if (n == 0 ) return;
int height = k2-k1;
if ( height > MAX_NTHREADS)
{
fprintf( stderr, "%s: height=%lld > %lld, magma_claswp_rowparallel_q not supported\n",
                 __func__, (long long) height, (long long) MAX_NTHREADS );
}
int blocks = magma_ceildiv( n, SWP_WIDTH );
dim3 grid(blocks, 1, 1);
if ( n < SWP_WIDTH)
{
size_t shmem = sizeof(magmaFloatComplex) * height * n;
claswp_rowparallel_kernel
<<< grid, height, shmem, queue->cuda_stream() >>>
( n, n, height, input, ldi, output, ldo, pivinfo );
}
else
{
size_t shmem = sizeof(magmaFloatComplex) * height * SWP_WIDTH;
claswp_rowparallel_kernel
<<< grid, height, shmem, queue->cuda_stream() >>>
( n, SWP_WIDTH, height, input, ldi, output, ldo, pivinfo );
}
}
/******************************************************************************/
// serial swap: swaps one row at a time
__global__ void claswp_rowserial_kernel_batched( int n, magmaFloatComplex **dA_array, int lda, int k1, int k2, magma_int_t** ipiv_array )
{
magmaFloatComplex* dA = dA_array[blockIdx.z];
magma_int_t *dipiv = ipiv_array[blockIdx.z];
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
k1--;
k2--;
if (tid < n) {
magmaFloatComplex A1;
for (int i1 = k1; i1 < k2; i1++)
{
int i2 = dipiv[i1] - 1; // Fortran index, switch i1 and i2
if ( i2 != i1)
{
A1 = dA[i1 + tid * lda];
dA[i1 + tid * lda] = dA[i2 + tid * lda];
dA[i2 + tid * lda] = A1;
}
}
}
}
/******************************************************************************/
// serial swap: swaps one row at a time
__global__ void claswp_rowserial_kernel_native( int n, magmaFloatComplex_ptr dA, int lda, int k1, int k2, magma_int_t* dipiv )
{
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
//k1--;
//k2--;
if (tid < n) {
magmaFloatComplex A1;
for (int i1 = k1; i1 < k2; i1++)
{
int i2 = dipiv[i1] - 1; // Fortran index, switch i1 and i2
if ( i2 != i1)
{
A1 = dA[i1 + tid * lda];
dA[i1 + tid * lda] = dA[i2 + tid * lda];
dA[i2 + tid * lda] = A1;
}
}
}
}
/******************************************************************************/
// serial swap: swaps one row at a time, similar to LAPACK
// K1, K2 are in Fortran indexing
extern "C" void
magma_claswp_rowserial_batched(magma_int_t n, magmaFloatComplex** dA_array, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
magma_int_t **ipiv_array,
magma_int_t batchCount, magma_queue_t queue)
{
if (n == 0) return;
int blocks = magma_ceildiv( n, BLK_SIZE );
dim3 grid(blocks, 1, batchCount);
claswp_rowserial_kernel_batched
<<< grid, max(BLK_SIZE, n), 0, queue->cuda_stream() >>>
(n, dA_array, lda, k1, k2, ipiv_array);
}
/******************************************************************************/
// serial swap: swaps one row at a time, similar to LAPACK
// K1, K2 are in Fortran indexing
extern "C" void
magma_claswp_rowserial_native(magma_int_t n, magmaFloatComplex_ptr dA, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
magma_int_t* dipiv, magma_queue_t queue)
{
if (n == 0) return;
int blocks = magma_ceildiv( n, BLK_SIZE );
dim3 grid(blocks, 1, 1);
claswp_rowserial_kernel_native
<<< grid, max(BLK_SIZE, n), 0, queue->cuda_stream() >>>
(n, dA, lda, k1, k2, dipiv);
}
/******************************************************************************/
// serial swap: swaps one column at a time
__device__ void claswp_columnserial_devfunc(int n, magmaFloatComplex_ptr dA, int lda, int k1, int k2, magma_int_t* dipiv )
{
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
k1--;
k2--;
if ( k1 < 0 || k2 < 0 ) return;
if ( tid < n) {
magmaFloatComplex A1;
if (k1 <= k2)
{
for (int i1 = k1; i1 <= k2; i1++)
{
int i2 = dipiv[i1] - 1; // Fortran index, switch i1 and i2
if ( i2 != i1)
{
A1 = dA[i1 * lda + tid];
dA[i1 * lda + tid] = dA[i2 * lda + tid];
dA[i2 * lda + tid] = A1;
}
}
} else
{
for (int i1 = k1; i1 >= k2; i1--)
{
int i2 = dipiv[i1] - 1; // Fortran index, switch i1 and i2
if ( i2 != i1)
{
A1 = dA[i1 * lda + tid];
dA[i1 * lda + tid] = dA[i2 * lda + tid];
dA[i2 * lda + tid] = A1;
}
}
}
}
}
__global__ void claswp_columnserial_kernel_batched( int n, magmaFloatComplex **dA_array, int lda, int k1, int k2, magma_int_t** ipiv_array )
{
magmaFloatComplex* dA = dA_array[blockIdx.z];
magma_int_t *dipiv = ipiv_array[blockIdx.z];
claswp_columnserial_devfunc(n, dA, lda, k1, k2, dipiv);
}
__global__ void claswp_columnserial_kernel( int n, magmaFloatComplex_ptr dA, int lda, int k1, int k2, magma_int_t* dipiv )
{
claswp_columnserial_devfunc(n, dA, lda, k1, k2, dipiv);
}
/******************************************************************************/
// serial swap: swaps one column at a time
// K1, K2 are in Fortran indexing
extern "C" void
magma_claswp_columnserial(
magma_int_t n, magmaFloatComplex_ptr dA, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
magma_int_t *dipiv, magma_queue_t queue)
{
if (n == 0 ) return;
int blocks = magma_ceildiv( n, CLASWP_COL_NTH );
dim3 grid(blocks, 1, 1);
claswp_columnserial_kernel<<< grid, CLASWP_COL_NTH, 0, queue->cuda_stream() >>>
(n, dA, lda, k1, k2, dipiv);
}
extern "C" void
magma_claswp_columnserial_batched(magma_int_t n, magmaFloatComplex** dA_array, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
magma_int_t **ipiv_array,
magma_int_t batchCount, magma_queue_t queue)
{
if (n == 0 ) return;
int blocks = magma_ceildiv( n, CLASWP_COL_NTH );
dim3 grid(blocks, 1, batchCount);
claswp_columnserial_kernel_batched
<<< grid, min(CLASWP_COL_NTH,n), 0, queue->cuda_stream() >>>
(n, dA_array, lda, k1, k2, ipiv_array);
}
|
bfa59e1a506c0cd3f736410c29bd65d97fe9fc95.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" {
__global__ void gamma_transform_2(float3* d_idata, float3* d_odata, int width, int height, float gamma)
{
int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
int yIndex = threadIdx.y + blockIdx.y * blockDim.y;
int idx = yIndex * width + xIndex;
if (xIndex < width && yIndex < height){
float3 rgb = d_idata[idx];
d_odata[idx].x = powf(rgb.x, gamma);
d_odata[idx].y = powf(rgb.y, gamma);
d_odata[idx].z = powf(rgb.z, gamma);
}
}
} | bfa59e1a506c0cd3f736410c29bd65d97fe9fc95.cu | extern "C" {
__global__ void gamma_transform_2(float3* d_idata, float3* d_odata, int width, int height, float gamma)
{
int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
int yIndex = threadIdx.y + blockIdx.y * blockDim.y;
int idx = yIndex * width + xIndex;
if (xIndex < width && yIndex < height){
float3 rgb = d_idata[idx];
d_odata[idx].x = powf(rgb.x, gamma);
d_odata[idx].y = powf(rgb.y, gamma);
d_odata[idx].z = powf(rgb.z, gamma);
}
}
} |
057b2acd493efbd224fb66a08ce156ef999090a1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright 2017-2019 XGBoost contributors
*/
#include <thrust/copy.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/reduce.h>
#include <xgboost/tree_updater.h>
#include <algorithm>
#include <cmath>
#include <memory>
#include <limits>
#include <queue>
#include <utility>
#include <vector>
#include "xgboost/host_device_vector.h"
#include "xgboost/span.h"
#include "../common/common.h"
#include "../common/compressed_iterator.h"
#include "../common/device_helpers.cuh"
#include "../common/hist_util.h"
#include "../common/timer.h"
#include "../data/ellpack_page.cuh"
#include "param.h"
#include "updater_gpu_common.cuh"
#include "constraints.cuh"
#include "gpu_hist/row_partitioner.cuh"
namespace xgboost {
namespace tree {
#if !defined(GTEST_TEST)
DMLC_REGISTRY_FILE_TAG(updater_gpu_hist);
#endif // !defined(GTEST_TEST)
// training parameters specific to this algorithm
struct GPUHistMakerTrainParam
: public dmlc::Parameter<GPUHistMakerTrainParam> {
bool single_precision_histogram;
// number of rows in a single GPU batch
int gpu_batch_nrows;
bool debug_synchronize;
// declare parameters
DMLC_DECLARE_PARAMETER(GPUHistMakerTrainParam) {
DMLC_DECLARE_FIELD(single_precision_histogram).set_default(false).describe(
"Use single precision to build histograms.");
DMLC_DECLARE_FIELD(gpu_batch_nrows)
.set_lower_bound(-1)
.set_default(0)
.describe("Number of rows in a GPU batch, used for finding quantiles on GPU; "
"-1 to use all rows assignted to a GPU, and 0 to auto-deduce");
DMLC_DECLARE_FIELD(debug_synchronize).set_default(false).describe(
"Check if all distributed tree are identical after tree construction.");
}
};
#if !defined(GTEST_TEST)
DMLC_REGISTER_PARAMETER(GPUHistMakerTrainParam);
#endif // !defined(GTEST_TEST)
struct ExpandEntry {
int nid;
int depth;
DeviceSplitCandidate split;
uint64_t timestamp;
ExpandEntry() = default;
ExpandEntry(int nid, int depth, DeviceSplitCandidate split,
uint64_t timestamp)
: nid(nid), depth(depth), split(std::move(split)), timestamp(timestamp) {}
bool IsValid(const TrainParam& param, int num_leaves) const {
if (split.loss_chg <= kRtEps) return false;
if (split.left_sum.GetHess() == 0 || split.right_sum.GetHess() == 0) {
return false;
}
if (split.loss_chg < param.min_split_loss) { return false; }
if (param.max_depth > 0 && depth == param.max_depth) {return false; }
if (param.max_leaves > 0 && num_leaves == param.max_leaves) { return false; }
return true;
}
static bool ChildIsValid(const TrainParam& param, int depth, int num_leaves) {
if (param.max_depth > 0 && depth >= param.max_depth) return false;
if (param.max_leaves > 0 && num_leaves >= param.max_leaves) return false;
return true;
}
friend std::ostream& operator<<(std::ostream& os, const ExpandEntry& e) {
os << "ExpandEntry: \n";
os << "nidx: " << e.nid << "\n";
os << "depth: " << e.depth << "\n";
os << "loss: " << e.split.loss_chg << "\n";
os << "left_sum: " << e.split.left_sum << "\n";
os << "right_sum: " << e.split.right_sum << "\n";
return os;
}
};
inline static bool DepthWise(const ExpandEntry& lhs, const ExpandEntry& rhs) {
if (lhs.depth == rhs.depth) {
return lhs.timestamp > rhs.timestamp; // favor small timestamp
} else {
return lhs.depth > rhs.depth; // favor small depth
}
}
inline static bool LossGuide(const ExpandEntry& lhs, const ExpandEntry& rhs) {
if (lhs.split.loss_chg == rhs.split.loss_chg) {
return lhs.timestamp > rhs.timestamp; // favor small timestamp
} else {
return lhs.split.loss_chg < rhs.split.loss_chg; // favor large loss_chg
}
}
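// Comparators for the expansion priority queue: DepthWise expands shallower nodes first
// (ties broken by earlier timestamp), while LossGuide expands the candidate with the largest
// loss reduction first.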
// With constraints
template <typename GradientPairT>
XGBOOST_DEVICE float inline LossChangeMissing(
const GradientPairT& scan, const GradientPairT& missing, const GradientPairT& parent_sum,
const float& parent_gain, const GPUTrainingParam& param, int constraint,
const ValueConstraint& value_constraint,
bool& missing_left_out) { // NOLINT
float missing_left_gain = value_constraint.CalcSplitGain(
param, constraint, GradStats(scan + missing),
GradStats(parent_sum - (scan + missing)));
float missing_right_gain = value_constraint.CalcSplitGain(
param, constraint, GradStats(scan), GradStats(parent_sum - scan));
if (missing_left_gain >= missing_right_gain) {
missing_left_out = true;
return missing_left_gain - parent_gain;
} else {
missing_left_out = false;
return missing_right_gain - parent_gain;
}
}
/*!
 * \brief Sum all histogram bins of a single feature into one gradient sum
 *        using a block-wide reduction.
 *
 * \tparam ReduceT BlockReduce type.
 * \tparam TempStorageT Cub shared memory for the reduction.
 * \tparam GradientSumT Histogram entry type.
 *
 * \param feature_histogram Histogram bins belonging to this feature.
 * \param temp_storage Shared memory for intermediate result.
 */
template <int BLOCK_THREADS, typename ReduceT, typename TempStorageT, typename GradientSumT>
__device__ GradientSumT ReduceFeature(common::Span<const GradientSumT> feature_histogram,
TempStorageT* temp_storage) {
__shared__ cub::Uninitialized<GradientSumT> uninitialized_sum;
GradientSumT& shared_sum = uninitialized_sum.Alias();
GradientSumT local_sum = GradientSumT();
// For loop sums features into one block size
auto begin = feature_histogram.data();
auto end = begin + feature_histogram.size();
for (auto itr = begin; itr < end; itr += BLOCK_THREADS) {
bool thread_active = itr + threadIdx.x < end;
// Scan histogram
GradientSumT bin = thread_active ? *(itr + threadIdx.x) : GradientSumT();
local_sum += bin;
}
local_sum = ReduceT(temp_storage->sum_reduce).Reduce(local_sum, hipcub::Sum());
// Reduction result is stored in thread 0.
if (threadIdx.x == 0) {
shared_sum = local_sum;
}
__syncthreads();
return shared_sum;
}
/*! \brief Find the thread with best gain. */
template <int BLOCK_THREADS, typename ReduceT, typename ScanT,
typename MaxReduceT, typename TempStorageT, typename GradientSumT>
__device__ void EvaluateFeature(
int fidx, common::Span<const GradientSumT> node_histogram,
const xgboost::ELLPackMatrix& matrix,
DeviceSplitCandidate* best_split, // shared memory storing best split
const DeviceNodeStats& node, const GPUTrainingParam& param,
TempStorageT* temp_storage, // temp memory for cub operations
int constraint, // monotonic_constraints
const ValueConstraint& value_constraint) {
// Use pointer from cut to indicate begin and end of bins for each feature.
  uint32_t gidx_begin = matrix.feature_segments[fidx];  // beginning bin
uint32_t gidx_end =
matrix.feature_segments[fidx + 1]; // end bin for i^th feature
// Sum histogram bins for current feature
GradientSumT const feature_sum = ReduceFeature<BLOCK_THREADS, ReduceT>(
node_histogram.subspan(gidx_begin, gidx_end - gidx_begin), temp_storage);
GradientSumT const parent_sum = GradientSumT(node.sum_gradients);
GradientSumT const missing = parent_sum - feature_sum;
float const null_gain = -std::numeric_limits<bst_float>::infinity();
SumCallbackOp<GradientSumT> prefix_op =
SumCallbackOp<GradientSumT>();
for (int scan_begin = gidx_begin; scan_begin < gidx_end;
scan_begin += BLOCK_THREADS) {
bool thread_active = (scan_begin + threadIdx.x) < gidx_end;
// Gradient value for current bin.
GradientSumT bin =
thread_active ? node_histogram[scan_begin + threadIdx.x] : GradientSumT();
ScanT(temp_storage->scan).ExclusiveScan(bin, bin, hipcub::Sum(), prefix_op);
// Whether the gradient of missing values is put to the left side.
bool missing_left = true;
float gain = null_gain;
if (thread_active) {
gain = LossChangeMissing(bin, missing, parent_sum, node.root_gain, param,
constraint, value_constraint, missing_left);
}
__syncthreads();
// Find thread with best gain
hipcub::KeyValuePair<int, float> tuple(threadIdx.x, gain);
hipcub::KeyValuePair<int, float> best =
MaxReduceT(temp_storage->max_reduce).Reduce(tuple, hipcub::ArgMax());
__shared__ hipcub::KeyValuePair<int, float> block_max;
if (threadIdx.x == 0) {
block_max = best;
}
__syncthreads();
// Best thread updates split
if (threadIdx.x == block_max.key) {
int split_gidx = (scan_begin + threadIdx.x) - 1;
float fvalue;
if (split_gidx < static_cast<int>(gidx_begin)) {
fvalue = matrix.min_fvalue[fidx];
} else {
fvalue = matrix.gidx_fvalue_map[split_gidx];
}
GradientSumT left = missing_left ? bin + missing : bin;
GradientSumT right = parent_sum - left;
best_split->Update(gain, missing_left ? kLeftDir : kRightDir, fvalue,
fidx, GradientPair(left), GradientPair(right), param);
}
__syncthreads();
}
}
template <int BLOCK_THREADS, typename GradientSumT>
__global__ void EvaluateSplitKernel(
common::Span<const GradientSumT> node_histogram, // histogram for gradients
common::Span<const int> feature_set, // Selected features
DeviceNodeStats node,
xgboost::ELLPackMatrix matrix,
GPUTrainingParam gpu_param,
common::Span<DeviceSplitCandidate> split_candidates, // resulting split
ValueConstraint value_constraint,
common::Span<int> d_monotonic_constraints) {
// KeyValuePair here used as threadIdx.x -> gain_value
using ArgMaxT = hipcub::KeyValuePair<int, float>;
using BlockScanT =
hipcub::BlockScan<GradientSumT, BLOCK_THREADS, cub::BLOCK_SCAN_WARP_SCANS>;
using MaxReduceT = hipcub::BlockReduce<ArgMaxT, BLOCK_THREADS>;
using SumReduceT = hipcub::BlockReduce<GradientSumT, BLOCK_THREADS>;
union TempStorage {
typename BlockScanT::TempStorage scan;
typename MaxReduceT::TempStorage max_reduce;
typename SumReduceT::TempStorage sum_reduce;
};
// Aligned && shared storage for best_split
__shared__ cub::Uninitialized<DeviceSplitCandidate> uninitialized_split;
DeviceSplitCandidate& best_split = uninitialized_split.Alias();
__shared__ TempStorage temp_storage;
if (threadIdx.x == 0) {
best_split = DeviceSplitCandidate();
}
__syncthreads();
// One block for each feature. Features are sampled, so fidx != blockIdx.x
int fidx = feature_set[blockIdx.x];
int constraint = d_monotonic_constraints[fidx];
EvaluateFeature<BLOCK_THREADS, SumReduceT, BlockScanT, MaxReduceT>(
fidx, node_histogram, matrix, &best_split, node, gpu_param, &temp_storage,
constraint, value_constraint);
__syncthreads();
if (threadIdx.x == 0) {
// Record best loss for each feature
split_candidates[blockIdx.x] = best_split;
}
}
/**
* \struct DeviceHistogram
*
* \summary Data storage for node histograms on device. Automatically expands.
*
* \tparam GradientSumT histogram entry type.
* \tparam kStopGrowingSize Do not grow beyond this size
*
* \author Rory
* \date 28/07/2018
*/
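// Once the backing buffer has reached kStopGrowingSize entries it stops growing: a new node
// reuses remaining free space if available, otherwise it recycles (zeroes and takes over) the
// slot of the first node recorded in nidx_map_.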
template <typename GradientSumT, size_t kStopGrowingSize = 1 << 26>
class DeviceHistogram {
private:
/*! \brief Map nidx to starting index of its histogram. */
std::map<int, size_t> nidx_map_;
dh::device_vector<typename GradientSumT::ValueT> data_;
int n_bins_;
int device_id_;
static constexpr size_t kNumItemsInGradientSum =
sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT);
static_assert(kNumItemsInGradientSum == 2,
"Number of items in gradient type should be 2.");
public:
void Init(int device_id, int n_bins) {
this->n_bins_ = n_bins;
this->device_id_ = device_id;
}
void Reset() {
dh::safe_cuda(hipMemsetAsync(
data_.data().get(), 0,
data_.size() * sizeof(typename decltype(data_)::value_type)));
nidx_map_.clear();
}
bool HistogramExists(int nidx) const {
return nidx_map_.find(nidx) != nidx_map_.cend();
}
size_t HistogramSize() const {
return n_bins_ * kNumItemsInGradientSum;
}
dh::device_vector<typename GradientSumT::ValueT>& Data() {
return data_;
}
void AllocateHistogram(int nidx) {
if (HistogramExists(nidx)) return;
// Number of items currently used in data
const size_t used_size = nidx_map_.size() * HistogramSize();
const size_t new_used_size = used_size + HistogramSize();
dh::safe_cuda(hipSetDevice(device_id_));
if (data_.size() >= kStopGrowingSize) {
// Recycle histogram memory
if (new_used_size <= data_.size()) {
// no need to remove old node, just insert the new one.
nidx_map_[nidx] = used_size;
// memset histogram size in bytes
dh::safe_cuda(hipMemsetAsync(data_.data().get() + used_size, 0,
n_bins_ * sizeof(GradientSumT)));
} else {
std::pair<int, size_t> old_entry = *nidx_map_.begin();
nidx_map_.erase(old_entry.first);
dh::safe_cuda(hipMemsetAsync(data_.data().get() + old_entry.second, 0,
n_bins_ * sizeof(GradientSumT)));
nidx_map_[nidx] = old_entry.second;
}
} else {
// Append new node histogram
nidx_map_[nidx] = used_size;
size_t new_required_memory = ::max(data_.size() * 2, HistogramSize());
if (data_.size() < new_required_memory) {
data_.resize(new_required_memory);
}
}
}
/**
* \summary Return pointer to histogram memory for a given node.
* \param nidx Tree node index.
* \return hist pointer.
*/
common::Span<GradientSumT> GetNodeHistogram(int nidx) {
CHECK(this->HistogramExists(nidx));
auto ptr = data_.data().get() + nidx_map_[nidx];
return common::Span<GradientSumT>(
reinterpret_cast<GradientSumT*>(ptr), n_bins_);
}
};
struct CalcWeightTrainParam {
float min_child_weight;
float reg_alpha;
float reg_lambda;
float max_delta_step;
float learning_rate;
XGBOOST_DEVICE explicit CalcWeightTrainParam(const TrainParam& p)
: min_child_weight(p.min_child_weight),
reg_alpha(p.reg_alpha),
reg_lambda(p.reg_lambda),
max_delta_step(p.max_delta_step),
learning_rate(p.learning_rate) {}
};
template <typename GradientSumT>
__global__ void SharedMemHistKernel(xgboost::ELLPackMatrix matrix,
common::Span<const RowPartitioner::RowIndexT> d_ridx,
GradientSumT* d_node_hist,
const GradientPair* d_gpair, size_t n_elements,
bool use_shared_memory_histograms) {
extern __shared__ char smem[];
GradientSumT* smem_arr = reinterpret_cast<GradientSumT*>(smem); // NOLINT
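  // When the whole histogram fits in shared memory, each block accumulates into its own shared
  // copy and merges it into the global histogram once at the end, which reduces the number of
  // atomic updates issued to global memory.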
if (use_shared_memory_histograms) {
dh::BlockFill(smem_arr, matrix.BinCount(), GradientSumT());
__syncthreads();
}
for (auto idx : dh::GridStrideRange(static_cast<size_t>(0), n_elements)) {
int ridx = d_ridx[idx / matrix.row_stride ];
int gidx =
matrix.gidx_iter[ridx * matrix.row_stride + idx % matrix.row_stride];
if (gidx != matrix.null_gidx_value) {
// If we are not using shared memory, accumulate the values directly into
// global memory
GradientSumT* atomic_add_ptr =
use_shared_memory_histograms ? smem_arr : d_node_hist;
AtomicAddGpair(atomic_add_ptr + gidx, d_gpair[ridx]);
}
}
if (use_shared_memory_histograms) {
// Write shared memory back to global memory
__syncthreads();
for (auto i :
dh::BlockStrideRange(static_cast<size_t>(0), matrix.BinCount())) {
AtomicAddGpair(d_node_hist + i, smem_arr[i]);
}
}
}
// Manage memory for a single GPU
template <typename GradientSumT>
struct GPUHistMakerDevice {
int device_id;
EllpackPageImpl* page;
dh::BulkAllocator ba;
std::unique_ptr<RowPartitioner> row_partitioner;
DeviceHistogram<GradientSumT> hist{};
/*! \brief Gradient pair for each row. */
common::Span<GradientPair> gpair;
common::Span<int> monotone_constraints;
common::Span<bst_float> prediction_cache;
/*! \brief Sum gradient for each node. */
std::vector<GradientPair> node_sum_gradients;
common::Span<GradientPair> node_sum_gradients_d;
bst_uint n_rows;
TrainParam param;
bool prediction_cache_initialised;
bool use_shared_memory_histograms {false};
dh::CubMemory temp_memory;
dh::PinnedMemory pinned_memory;
std::vector<hipStream_t> streams{};
common::Monitor monitor;
std::vector<ValueConstraint> node_value_constraints;
common::ColumnSampler column_sampler;
FeatureInteractionConstraint interaction_constraints;
using ExpandQueue =
std::priority_queue<ExpandEntry, std::vector<ExpandEntry>,
std::function<bool(ExpandEntry, ExpandEntry)>>;
std::unique_ptr<ExpandQueue> qexpand;
GPUHistMakerDevice(int _device_id,
EllpackPageImpl* _page,
bst_uint _n_rows,
TrainParam _param,
uint32_t column_sampler_seed,
uint32_t n_features)
: device_id(_device_id),
page(_page),
n_rows(_n_rows),
param(std::move(_param)),
prediction_cache_initialised(false),
column_sampler(column_sampler_seed),
interaction_constraints(param, n_features) {
monitor.Init(std::string("GPUHistMakerDevice") + std::to_string(device_id));
}
void InitHistogram();
~GPUHistMakerDevice() { // NOLINT
dh::safe_cuda(hipSetDevice(device_id));
for (auto& stream : streams) {
dh::safe_cuda(hipStreamDestroy(stream));
}
}
// Get vector of at least n initialised streams
std::vector<hipStream_t>& GetStreams(int n) {
if (n > streams.size()) {
for (auto& stream : streams) {
dh::safe_cuda(hipStreamDestroy(stream));
}
streams.clear();
streams.resize(n);
for (auto& stream : streams) {
dh::safe_cuda(hipStreamCreate(&stream));
}
}
return streams;
}
// Reset values for each update iteration
// Note that the column sampler must be passed by value because it is not
// thread safe
void Reset(HostDeviceVector<GradientPair>* dh_gpair, int64_t num_columns) {
if (param.grow_policy == TrainParam::kLossGuide) {
qexpand.reset(new ExpandQueue(LossGuide));
} else {
qexpand.reset(new ExpandQueue(DepthWise));
}
this->column_sampler.Init(num_columns, param.colsample_bynode,
param.colsample_bylevel, param.colsample_bytree);
dh::safe_cuda(hipSetDevice(device_id));
this->interaction_constraints.Reset();
std::fill(node_sum_gradients.begin(), node_sum_gradients.end(),
GradientPair());
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(device_id, n_rows));
dh::safe_cuda(hipMemcpyAsync(
gpair.data(), dh_gpair->ConstDevicePointer(),
gpair.size() * sizeof(GradientPair), hipMemcpyHostToHost));
SubsampleGradientPair(device_id, gpair, param.subsample);
hist.Reset();
}
std::vector<DeviceSplitCandidate> EvaluateSplits(
std::vector<int> nidxs, const RegTree& tree,
size_t num_columns) {
dh::safe_cuda(hipSetDevice(device_id));
auto result_all = pinned_memory.GetSpan<DeviceSplitCandidate>(nidxs.size());
// Work out cub temporary memory requirement
GPUTrainingParam gpu_param(param);
DeviceSplitCandidateReduceOp op(gpu_param);
size_t temp_storage_bytes = 0;
DeviceSplitCandidate*dummy = nullptr;
hipcub::DeviceReduce::Reduce(
nullptr, temp_storage_bytes, dummy,
dummy, num_columns, op,
DeviceSplitCandidate());
// size in terms of DeviceSplitCandidate
size_t cub_memory_size =
::ceil(static_cast<double>(temp_storage_bytes) /
sizeof(DeviceSplitCandidate));
// Allocate enough temporary memory
// Result for each nidx
// + intermediate result for each column
// + cub reduce memory
auto temp_span = temp_memory.GetSpan<DeviceSplitCandidate>(
nidxs.size() + nidxs.size() * num_columns +cub_memory_size*nidxs.size());
auto d_result_all = temp_span.subspan(0, nidxs.size());
auto d_split_candidates_all =
temp_span.subspan(d_result_all.size(), nidxs.size() * num_columns);
auto d_cub_memory_all =
temp_span.subspan(d_result_all.size() + d_split_candidates_all.size(),
cub_memory_size * nidxs.size());
auto& streams = this->GetStreams(nidxs.size());
for (auto i = 0ull; i < nidxs.size(); i++) {
auto nidx = nidxs[i];
auto p_feature_set = column_sampler.GetFeatureSet(tree.GetDepth(nidx));
p_feature_set->SetDevice(device_id);
auto d_sampled_features = p_feature_set->DeviceSpan();
common::Span<int32_t> d_feature_set =
interaction_constraints.Query(d_sampled_features, nidx);
auto d_split_candidates =
d_split_candidates_all.subspan(i * num_columns, d_feature_set.size());
DeviceNodeStats node(node_sum_gradients[nidx], nidx, param);
auto d_result = d_result_all.subspan(i, 1);
if (d_feature_set.empty()) {
// Acting as a device side constructor for DeviceSplitCandidate.
// DeviceSplitCandidate::IsValid is false so that ApplySplit can reject this
// candidate.
auto worst_candidate = DeviceSplitCandidate();
dh::safe_cuda(hipMemcpyAsync(d_result.data(), &worst_candidate,
sizeof(DeviceSplitCandidate),
hipMemcpyHostToDevice));
continue;
}
// One block for each feature
int constexpr kBlockThreads = 256;
hipLaunchKernelGGL(( EvaluateSplitKernel<kBlockThreads, GradientSumT>)
, dim3(uint32_t(d_feature_set.size())), dim3(kBlockThreads), 0, streams[i],
hist.GetNodeHistogram(nidx), d_feature_set, node, page->ellpack_matrix,
gpu_param, d_split_candidates, node_value_constraints[nidx],
monotone_constraints);
// Reduce over features to find best feature
auto d_cub_memory =
d_cub_memory_all.subspan(i * cub_memory_size, cub_memory_size);
size_t cub_bytes = d_cub_memory.size() * sizeof(DeviceSplitCandidate);
hipcub::DeviceReduce::Reduce(reinterpret_cast<void*>(d_cub_memory.data()),
cub_bytes, d_split_candidates.data(),
d_result.data(), d_split_candidates.size(), op,
DeviceSplitCandidate(), streams[i]);
}
dh::safe_cuda(hipMemcpy(result_all.data(), d_result_all.data(),
sizeof(DeviceSplitCandidate) * d_result_all.size(),
hipMemcpyDeviceToHost));
return std::vector<DeviceSplitCandidate>(result_all.begin(), result_all.end());
}
void BuildHist(int nidx) {
hist.AllocateHistogram(nidx);
auto d_node_hist = hist.GetNodeHistogram(nidx);
auto d_ridx = row_partitioner->GetRows(nidx);
auto d_gpair = gpair.data();
auto n_elements = d_ridx.size() * page->ellpack_matrix.row_stride;
const size_t smem_size =
use_shared_memory_histograms
? sizeof(GradientSumT) * page->ellpack_matrix.BinCount()
: 0;
const int items_per_thread = 8;
const int block_threads = 256;
const int grid_size = static_cast<int>(
common::DivRoundUp(n_elements, items_per_thread * block_threads));
if (grid_size <= 0) {
return;
}
hipLaunchKernelGGL(( SharedMemHistKernel), dim3(grid_size), dim3(block_threads), smem_size, 0,
page->ellpack_matrix, d_ridx, d_node_hist.data(), d_gpair, n_elements,
use_shared_memory_histograms);
}
void SubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
auto d_node_hist_parent = hist.GetNodeHistogram(nidx_parent);
auto d_node_hist_histogram = hist.GetNodeHistogram(nidx_histogram);
auto d_node_hist_subtraction = hist.GetNodeHistogram(nidx_subtraction);
dh::LaunchN(device_id, page->n_bins, [=] __device__(size_t idx) {
d_node_hist_subtraction[idx] =
d_node_hist_parent[idx] - d_node_hist_histogram[idx];
});
}
bool CanDoSubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
// Make sure histograms are already allocated
hist.AllocateHistogram(nidx_subtraction);
return hist.HistogramExists(nidx_histogram) &&
hist.HistogramExists(nidx_parent);
}
void UpdatePosition(int nidx, RegTree::Node split_node) {
auto d_matrix = page->ellpack_matrix;
row_partitioner->UpdatePosition(
nidx, split_node.LeftChild(), split_node.RightChild(),
[=] __device__(bst_uint ridx) {
// given a row index, returns the node id it belongs to
bst_float cut_value =
d_matrix.GetElement(ridx, split_node.SplitIndex());
// Missing value
int new_position = 0;
if (isnan(cut_value)) {
new_position = split_node.DefaultChild();
} else {
if (cut_value <= split_node.SplitCond()) {
new_position = split_node.LeftChild();
} else {
new_position = split_node.RightChild();
}
}
return new_position;
});
}
// After tree update is finished, update the position of all training
// instances to their final leaf This information is used later to update the
// prediction cache
void FinalisePosition(RegTree* p_tree) {
const auto d_nodes =
temp_memory.GetSpan<RegTree::Node>(p_tree->GetNodes().size());
dh::safe_cuda(hipMemcpy(d_nodes.data(), p_tree->GetNodes().data(),
d_nodes.size() * sizeof(RegTree::Node),
hipMemcpyHostToDevice));
auto d_matrix = page->ellpack_matrix;
row_partitioner->FinalisePosition(
[=] __device__(bst_uint ridx, int position) {
auto node = d_nodes[position];
while (!node.IsLeaf()) {
bst_float element = d_matrix.GetElement(ridx, node.SplitIndex());
// Missing value
if (isnan(element)) {
position = node.DefaultChild();
} else {
if (element <= node.SplitCond()) {
position = node.LeftChild();
} else {
position = node.RightChild();
}
}
node = d_nodes[position];
}
return position;
});
}
void UpdatePredictionCache(bst_float* out_preds_d) {
dh::safe_cuda(hipSetDevice(device_id));
if (!prediction_cache_initialised) {
dh::safe_cuda(hipMemcpyAsync(prediction_cache.data(), out_preds_d,
prediction_cache.size() * sizeof(bst_float),
hipMemcpyDefault));
}
prediction_cache_initialised = true;
CalcWeightTrainParam param_d(param);
dh::safe_cuda(
hipMemcpyAsync(node_sum_gradients_d.data(), node_sum_gradients.data(),
sizeof(GradientPair) * node_sum_gradients.size(),
hipMemcpyHostToDevice));
auto d_position = row_partitioner->GetPosition();
auto d_ridx = row_partitioner->GetRows();
auto d_node_sum_gradients = node_sum_gradients_d.data();
auto d_prediction_cache = prediction_cache.data();
dh::LaunchN(
device_id, prediction_cache.size(), [=] __device__(int local_idx) {
int pos = d_position[local_idx];
bst_float weight = CalcWeight(param_d, d_node_sum_gradients[pos]);
d_prediction_cache[d_ridx[local_idx]] +=
weight * param_d.learning_rate;
});
dh::safe_cuda(hipMemcpy(
out_preds_d, prediction_cache.data(),
prediction_cache.size() * sizeof(bst_float), hipMemcpyDefault));
row_partitioner.reset();
}
void AllReduceHist(int nidx, dh::AllReducer* reducer) {
monitor.StartCuda("AllReduce");
auto d_node_hist = hist.GetNodeHistogram(nidx).data();
reducer->AllReduceSum(
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
page->ellpack_matrix.BinCount() *
(sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT)));
reducer->Synchronize();
monitor.StopCuda("AllReduce");
}
/**
* \brief Build GPU local histograms for the left and right child of some parent node
*/
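  // Histograms are additive (parent == left + right, bin by bin), so only the child with fewer
  // rows gets its histogram built directly; the other child's histogram is derived by subtracting
  // it from the parent's ("subtraction trick") whenever the parent's histogram is still cached.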
void BuildHistLeftRight(int nidx_parent, int nidx_left, int nidx_right, dh::AllReducer* reducer) {
auto build_hist_nidx = nidx_left;
auto subtraction_trick_nidx = nidx_right;
auto left_node_rows = row_partitioner->GetRows(nidx_left).size();
auto right_node_rows = row_partitioner->GetRows(nidx_right).size();
// Decide whether to build the left histogram or right histogram
// Find the largest number of training instances on any given device
// Assume this will be the bottleneck and avoid building this node if
// possible
std::vector<size_t> max_reduce;
max_reduce.push_back(left_node_rows);
max_reduce.push_back(right_node_rows);
reducer->HostMaxAllReduce(&max_reduce);
bool fewer_right = max_reduce[1] < max_reduce[0];
if (fewer_right) {
std::swap(build_hist_nidx, subtraction_trick_nidx);
}
this->BuildHist(build_hist_nidx);
this->AllReduceHist(build_hist_nidx, reducer);
// Check whether we can use the subtraction trick to calculate the other
bool do_subtraction_trick = this->CanDoSubtractionTrick(
nidx_parent, build_hist_nidx, subtraction_trick_nidx);
if (do_subtraction_trick) {
// Calculate other histogram using subtraction trick
this->SubtractionTrick(nidx_parent, build_hist_nidx,
subtraction_trick_nidx);
} else {
// Calculate other histogram manually
this->BuildHist(subtraction_trick_nidx);
this->AllReduceHist(subtraction_trick_nidx, reducer);
}
}
void ApplySplit(const ExpandEntry& candidate, RegTree* p_tree) {
RegTree& tree = *p_tree;
GradStats left_stats{};
left_stats.Add(candidate.split.left_sum);
GradStats right_stats{};
right_stats.Add(candidate.split.right_sum);
GradStats parent_sum{};
parent_sum.Add(left_stats);
parent_sum.Add(right_stats);
node_value_constraints.resize(tree.GetNodes().size());
auto base_weight = node_value_constraints[candidate.nid].CalcWeight(param, parent_sum);
auto left_weight =
node_value_constraints[candidate.nid].CalcWeight(param, left_stats)*param.learning_rate;
auto right_weight =
node_value_constraints[candidate.nid].CalcWeight(param, right_stats)*param.learning_rate;
tree.ExpandNode(candidate.nid, candidate.split.findex,
candidate.split.fvalue, candidate.split.dir == kLeftDir,
base_weight, left_weight, right_weight,
candidate.split.loss_chg, parent_sum.sum_hess);
// Set up child constraints
node_value_constraints.resize(tree.GetNodes().size());
node_value_constraints[candidate.nid].SetChild(
param, tree[candidate.nid].SplitIndex(), left_stats, right_stats,
&node_value_constraints[tree[candidate.nid].LeftChild()],
&node_value_constraints[tree[candidate.nid].RightChild()]);
node_sum_gradients[tree[candidate.nid].LeftChild()] =
candidate.split.left_sum;
node_sum_gradients[tree[candidate.nid].RightChild()] =
candidate.split.right_sum;
interaction_constraints.Split(candidate.nid, tree[candidate.nid].SplitIndex(),
tree[candidate.nid].LeftChild(),
tree[candidate.nid].RightChild());
}
void InitRoot(RegTree* p_tree, HostDeviceVector<GradientPair>* gpair_all,
dh::AllReducer* reducer, int64_t num_columns) {
constexpr int kRootNIdx = 0;
const auto &gpair = gpair_all->DeviceSpan();
dh::SumReduction(temp_memory, gpair, node_sum_gradients_d,
gpair.size());
reducer->AllReduceSum(
reinterpret_cast<float*>(node_sum_gradients_d.data()),
reinterpret_cast<float*>(node_sum_gradients_d.data()), 2);
reducer->Synchronize();
dh::safe_cuda(hipMemcpy(node_sum_gradients.data(),
node_sum_gradients_d.data(), sizeof(GradientPair),
hipMemcpyDeviceToHost));
this->BuildHist(kRootNIdx);
this->AllReduceHist(kRootNIdx, reducer);
// Remember root stats
p_tree->Stat(kRootNIdx).sum_hess = node_sum_gradients[kRootNIdx].GetHess();
auto weight = CalcWeight(param, node_sum_gradients[kRootNIdx]);
p_tree->Stat(kRootNIdx).base_weight = weight;
(*p_tree)[kRootNIdx].SetLeaf(param.learning_rate * weight);
// Initialise root constraint
node_value_constraints.resize(p_tree->GetNodes().size());
// Generate first split
auto split = this->EvaluateSplits({kRootNIdx}, *p_tree, num_columns);
qexpand->push(
ExpandEntry(kRootNIdx, p_tree->GetDepth(kRootNIdx), split.at(0), 0));
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair_all, DMatrix* p_fmat,
RegTree* p_tree, dh::AllReducer* reducer) {
auto& tree = *p_tree;
monitor.StartCuda("Reset");
this->Reset(gpair_all, p_fmat->Info().num_col_);
monitor.StopCuda("Reset");
monitor.StartCuda("InitRoot");
this->InitRoot(p_tree, gpair_all, reducer, p_fmat->Info().num_col_);
monitor.StopCuda("InitRoot");
auto timestamp = qexpand->size();
auto num_leaves = 1;
while (!qexpand->empty()) {
ExpandEntry candidate = qexpand->top();
qexpand->pop();
if (!candidate.IsValid(param, num_leaves)) {
continue;
}
this->ApplySplit(candidate, p_tree);
num_leaves++;
int left_child_nidx = tree[candidate.nid].LeftChild();
int right_child_nidx = tree[candidate.nid].RightChild();
// Only create child entries if needed
if (ExpandEntry::ChildIsValid(param, tree.GetDepth(left_child_nidx),
num_leaves)) {
monitor.StartCuda("UpdatePosition");
this->UpdatePosition(candidate.nid, (*p_tree)[candidate.nid]);
monitor.StopCuda("UpdatePosition");
monitor.StartCuda("BuildHist");
this->BuildHistLeftRight(candidate.nid, left_child_nidx, right_child_nidx, reducer);
monitor.StopCuda("BuildHist");
monitor.StartCuda("EvaluateSplits");
auto splits = this->EvaluateSplits({left_child_nidx, right_child_nidx},
*p_tree, p_fmat->Info().num_col_);
monitor.StopCuda("EvaluateSplits");
qexpand->push(ExpandEntry(left_child_nidx,
tree.GetDepth(left_child_nidx), splits.at(0),
timestamp++));
qexpand->push(ExpandEntry(right_child_nidx,
tree.GetDepth(right_child_nidx),
splits.at(1), timestamp++));
}
}
monitor.StartCuda("FinalisePosition");
this->FinalisePosition(p_tree);
monitor.StopCuda("FinalisePosition");
}
};
template <typename GradientSumT>
inline void GPUHistMakerDevice<GradientSumT>::InitHistogram() {
CHECK(!(param.max_leaves == 0 && param.max_depth == 0))
<< "Max leaves and max depth cannot both be unconstrained for "
"gpu_hist.";
int max_nodes =
param.max_leaves > 0 ? param.max_leaves * 2 : MaxNodesDepth(param.max_depth);
ba.Allocate(device_id,
&gpair, n_rows,
&prediction_cache, n_rows,
&node_sum_gradients_d, max_nodes,
&monotone_constraints, param.monotone_constraints.size());
dh::CopyVectorToDeviceSpan(monotone_constraints, param.monotone_constraints);
node_sum_gradients.resize(max_nodes);
// check if we can use shared memory for building histograms
  // (assuming we need at least 2 CTAs per SM to maintain decent latency
// hiding)
auto histogram_size = sizeof(GradientSumT) * page->n_bins;
auto max_smem = dh::MaxSharedMemory(device_id);
if (histogram_size <= max_smem) {
use_shared_memory_histograms = true;
}
// Init histogram
hist.Init(device_id, page->n_bins);
}
template <typename GradientSumT>
class GPUHistMakerSpecialised {
public:
GPUHistMakerSpecialised() : initialised_{false}, p_last_fmat_{nullptr} {}
void Configure(const Args& args, GenericParameter const* generic_param) {
param_.InitAllowUnknown(args);
generic_param_ = generic_param;
hist_maker_param_.InitAllowUnknown(args);
device_ = generic_param_->gpu_id;
CHECK_GE(device_, 0) << "Must have at least one device";
dh::CheckComputeCapability();
monitor_.Init("updater_gpu_hist");
}
~GPUHistMakerSpecialised() { // NOLINT
dh::GlobalMemoryLogger().Log();
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) {
monitor_.StartCuda("Update");
// rescale learning rate according to size of trees
float lr = param_.learning_rate;
param_.learning_rate = lr / trees.size();
ValueConstraint::Init(¶m_, dmat->Info().num_col_);
// build tree
try {
for (xgboost::RegTree* tree : trees) {
this->UpdateTree(gpair, dmat, tree);
}
dh::safe_cuda(hipGetLastError());
} catch (const std::exception& e) {
LOG(FATAL) << "Exception in gpu_hist: " << e.what() << std::endl;
}
param_.learning_rate = lr;
monitor_.StopCuda("Update");
}
void InitDataOnce(DMatrix* dmat) {
info_ = &dmat->Info();
reducer_.Init({device_});
// Synchronise the column sampling seed
uint32_t column_sampling_seed = common::GlobalRandom()();
rabit::Broadcast(&column_sampling_seed, sizeof(column_sampling_seed), 0);
// TODO(rongou): support multiple Ellpack pages.
EllpackPageImpl* page{};
for (auto& batch : dmat->GetBatches<EllpackPage>()) {
page = batch.Impl();
page->Init(device_, param_.max_bin, hist_maker_param_.gpu_batch_nrows);
}
dh::safe_cuda(hipSetDevice(device_));
maker_.reset(new GPUHistMakerDevice<GradientSumT>(device_,
page,
info_->num_row_,
param_,
column_sampling_seed,
info_->num_col_));
monitor_.StartCuda("InitHistogram");
dh::safe_cuda(hipSetDevice(device_));
maker_->InitHistogram();
monitor_.StopCuda("InitHistogram");
p_last_fmat_ = dmat;
initialised_ = true;
}
void InitData(DMatrix* dmat) {
if (!initialised_) {
monitor_.StartCuda("InitDataOnce");
this->InitDataOnce(dmat);
monitor_.StopCuda("InitDataOnce");
}
}
// Only call this method for testing
void CheckTreesSynchronized(const std::vector<RegTree>& local_trees) const {
std::string s_model;
common::MemoryBufferStream fs(&s_model);
int rank = rabit::GetRank();
if (rank == 0) {
local_trees.front().Save(&fs);
}
fs.Seek(0);
rabit::Broadcast(&s_model, 0);
RegTree reference_tree{};
reference_tree.Load(&fs);
for (const auto& tree : local_trees) {
CHECK(tree == reference_tree);
}
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat,
RegTree* p_tree) {
monitor_.StartCuda("InitData");
this->InitData(p_fmat);
monitor_.StopCuda("InitData");
gpair->SetDevice(device_);
maker_->UpdateTree(gpair, p_fmat, p_tree, &reducer_);
}
bool UpdatePredictionCache(
const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) {
if (maker_ == nullptr || p_last_fmat_ == nullptr || p_last_fmat_ != data) {
return false;
}
monitor_.StartCuda("UpdatePredictionCache");
p_out_preds->SetDevice(device_);
maker_->UpdatePredictionCache(p_out_preds->DevicePointer());
monitor_.StopCuda("UpdatePredictionCache");
return true;
}
TrainParam param_; // NOLINT
MetaInfo* info_{}; // NOLINT
std::unique_ptr<GPUHistMakerDevice<GradientSumT>> maker_; // NOLINT
private:
bool initialised_;
GPUHistMakerTrainParam hist_maker_param_;
GenericParameter const* generic_param_;
dh::AllReducer reducer_;
DMatrix* p_last_fmat_;
int device_{-1};
common::Monitor monitor_;
};
class GPUHistMaker : public TreeUpdater {
public:
void Configure(const Args& args) override {
hist_maker_param_.InitAllowUnknown(args);
float_maker_.reset();
double_maker_.reset();
if (hist_maker_param_.single_precision_histogram) {
float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>());
float_maker_->Configure(args, tparam_);
} else {
double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>());
double_maker_->Configure(args, tparam_);
}
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) override {
if (hist_maker_param_.single_precision_histogram) {
float_maker_->Update(gpair, dmat, trees);
} else {
double_maker_->Update(gpair, dmat, trees);
}
}
bool UpdatePredictionCache(
const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) override {
if (hist_maker_param_.single_precision_histogram) {
return float_maker_->UpdatePredictionCache(data, p_out_preds);
} else {
return double_maker_->UpdatePredictionCache(data, p_out_preds);
}
}
char const* Name() const override {
return "grow_gpu_hist";
}
private:
GPUHistMakerTrainParam hist_maker_param_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPair>> float_maker_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPairPrecise>> double_maker_;
};
#if !defined(GTEST_TEST)
XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
.describe("Grow tree with GPU.")
.set_body([]() { return new GPUHistMaker(); });
#endif // !defined(GTEST_TEST)
} // namespace tree
} // namespace xgboost
| 057b2acd493efbd224fb66a08ce156ef999090a1.cu | /*!
* Copyright 2017-2019 XGBoost contributors
*/
#include <thrust/copy.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/reduce.h>
#include <xgboost/tree_updater.h>
#include <algorithm>
#include <cmath>
#include <memory>
#include <limits>
#include <queue>
#include <utility>
#include <vector>
#include "xgboost/host_device_vector.h"
#include "xgboost/span.h"
#include "../common/common.h"
#include "../common/compressed_iterator.h"
#include "../common/device_helpers.cuh"
#include "../common/hist_util.h"
#include "../common/timer.h"
#include "../data/ellpack_page.cuh"
#include "param.h"
#include "updater_gpu_common.cuh"
#include "constraints.cuh"
#include "gpu_hist/row_partitioner.cuh"
namespace xgboost {
namespace tree {
#if !defined(GTEST_TEST)
DMLC_REGISTRY_FILE_TAG(updater_gpu_hist);
#endif // !defined(GTEST_TEST)
// training parameters specific to this algorithm
struct GPUHistMakerTrainParam
: public dmlc::Parameter<GPUHistMakerTrainParam> {
bool single_precision_histogram;
// number of rows in a single GPU batch
int gpu_batch_nrows;
bool debug_synchronize;
// declare parameters
DMLC_DECLARE_PARAMETER(GPUHistMakerTrainParam) {
DMLC_DECLARE_FIELD(single_precision_histogram).set_default(false).describe(
"Use single precision to build histograms.");
DMLC_DECLARE_FIELD(gpu_batch_nrows)
.set_lower_bound(-1)
.set_default(0)
.describe("Number of rows in a GPU batch, used for finding quantiles on GPU; "
"-1 to use all rows assignted to a GPU, and 0 to auto-deduce");
DMLC_DECLARE_FIELD(debug_synchronize).set_default(false).describe(
"Check if all distributed tree are identical after tree construction.");
}
};
#if !defined(GTEST_TEST)
DMLC_REGISTER_PARAMETER(GPUHistMakerTrainParam);
#endif // !defined(GTEST_TEST)
struct ExpandEntry {
int nid;
int depth;
DeviceSplitCandidate split;
uint64_t timestamp;
ExpandEntry() = default;
ExpandEntry(int nid, int depth, DeviceSplitCandidate split,
uint64_t timestamp)
: nid(nid), depth(depth), split(std::move(split)), timestamp(timestamp) {}
bool IsValid(const TrainParam& param, int num_leaves) const {
if (split.loss_chg <= kRtEps) return false;
if (split.left_sum.GetHess() == 0 || split.right_sum.GetHess() == 0) {
return false;
}
if (split.loss_chg < param.min_split_loss) { return false; }
if (param.max_depth > 0 && depth == param.max_depth) {return false; }
if (param.max_leaves > 0 && num_leaves == param.max_leaves) { return false; }
return true;
}
static bool ChildIsValid(const TrainParam& param, int depth, int num_leaves) {
if (param.max_depth > 0 && depth >= param.max_depth) return false;
if (param.max_leaves > 0 && num_leaves >= param.max_leaves) return false;
return true;
}
friend std::ostream& operator<<(std::ostream& os, const ExpandEntry& e) {
os << "ExpandEntry: \n";
os << "nidx: " << e.nid << "\n";
os << "depth: " << e.depth << "\n";
os << "loss: " << e.split.loss_chg << "\n";
os << "left_sum: " << e.split.left_sum << "\n";
os << "right_sum: " << e.split.right_sum << "\n";
return os;
}
};
inline static bool DepthWise(const ExpandEntry& lhs, const ExpandEntry& rhs) {
if (lhs.depth == rhs.depth) {
return lhs.timestamp > rhs.timestamp; // favor small timestamp
} else {
return lhs.depth > rhs.depth; // favor small depth
}
}
inline static bool LossGuide(const ExpandEntry& lhs, const ExpandEntry& rhs) {
if (lhs.split.loss_chg == rhs.split.loss_chg) {
return lhs.timestamp > rhs.timestamp; // favor small timestamp
} else {
return lhs.split.loss_chg < rhs.split.loss_chg; // favor large loss_chg
}
}
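// Comparators for the expansion priority queue: DepthWise expands shallower nodes first
// (ties broken by earlier timestamp), while LossGuide expands the candidate with the largest
// loss reduction first.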
// With constraints
template <typename GradientPairT>
XGBOOST_DEVICE float inline LossChangeMissing(
const GradientPairT& scan, const GradientPairT& missing, const GradientPairT& parent_sum,
const float& parent_gain, const GPUTrainingParam& param, int constraint,
const ValueConstraint& value_constraint,
bool& missing_left_out) { // NOLINT
float missing_left_gain = value_constraint.CalcSplitGain(
param, constraint, GradStats(scan + missing),
GradStats(parent_sum - (scan + missing)));
float missing_right_gain = value_constraint.CalcSplitGain(
param, constraint, GradStats(scan), GradStats(parent_sum - scan));
if (missing_left_gain >= missing_right_gain) {
missing_left_out = true;
return missing_left_gain - parent_gain;
} else {
missing_left_out = false;
return missing_right_gain - parent_gain;
}
}
/*!
* \brief Sum all histogram bins belonging to one feature with a block reduction.
*
* \tparam ReduceT BlockReduce Type.
* \tparam TempStorageT Cub shared memory.
*
* \param feature_histogram Histogram bins of a single feature.
* \param temp_storage Shared memory for intermediate result.
*/
template <int BLOCK_THREADS, typename ReduceT, typename TempStorageT, typename GradientSumT>
__device__ GradientSumT ReduceFeature(common::Span<const GradientSumT> feature_histogram,
TempStorageT* temp_storage) {
__shared__ cub::Uninitialized<GradientSumT> uninitialized_sum;
GradientSumT& shared_sum = uninitialized_sum.Alias();
GradientSumT local_sum = GradientSumT();
// Sum this feature's histogram bins in chunks of BLOCK_THREADS
auto begin = feature_histogram.data();
auto end = begin + feature_histogram.size();
for (auto itr = begin; itr < end; itr += BLOCK_THREADS) {
bool thread_active = itr + threadIdx.x < end;
// Scan histogram
GradientSumT bin = thread_active ? *(itr + threadIdx.x) : GradientSumT();
local_sum += bin;
}
local_sum = ReduceT(temp_storage->sum_reduce).Reduce(local_sum, cub::Sum());
// Reduction result is stored in thread 0.
if (threadIdx.x == 0) {
shared_sum = local_sum;
}
__syncthreads();
return shared_sum;
}
/*! \brief Find the thread with best gain. */
template <int BLOCK_THREADS, typename ReduceT, typename ScanT,
typename MaxReduceT, typename TempStorageT, typename GradientSumT>
__device__ void EvaluateFeature(
int fidx, common::Span<const GradientSumT> node_histogram,
const xgboost::ELLPackMatrix& matrix,
DeviceSplitCandidate* best_split, // shared memory storing best split
const DeviceNodeStats& node, const GPUTrainingParam& param,
TempStorageT* temp_storage, // temp memory for cub operations
int constraint, // monotonic_constraints
const ValueConstraint& value_constraint) {
// Use pointer from cut to indicate begin and end of bins for each feature.
uint32_t gidx_begin = matrix.feature_segments[fidx]; // beginning bin
uint32_t gidx_end =
matrix.feature_segments[fidx + 1]; // end bin for i^th feature
// Sum histogram bins for current feature
GradientSumT const feature_sum = ReduceFeature<BLOCK_THREADS, ReduceT>(
node_histogram.subspan(gidx_begin, gidx_end - gidx_begin), temp_storage);
GradientSumT const parent_sum = GradientSumT(node.sum_gradients);
GradientSumT const missing = parent_sum - feature_sum;
float const null_gain = -std::numeric_limits<bst_float>::infinity();
SumCallbackOp<GradientSumT> prefix_op =
SumCallbackOp<GradientSumT>();
for (int scan_begin = gidx_begin; scan_begin < gidx_end;
scan_begin += BLOCK_THREADS) {
bool thread_active = (scan_begin + threadIdx.x) < gidx_end;
// Gradient value for current bin.
GradientSumT bin =
thread_active ? node_histogram[scan_begin + threadIdx.x] : GradientSumT();
ScanT(temp_storage->scan).ExclusiveScan(bin, bin, cub::Sum(), prefix_op);
// Whether the gradient of missing values is put to the left side.
bool missing_left = true;
float gain = null_gain;
if (thread_active) {
gain = LossChangeMissing(bin, missing, parent_sum, node.root_gain, param,
constraint, value_constraint, missing_left);
}
__syncthreads();
// Find thread with best gain
cub::KeyValuePair<int, float> tuple(threadIdx.x, gain);
cub::KeyValuePair<int, float> best =
MaxReduceT(temp_storage->max_reduce).Reduce(tuple, cub::ArgMax());
__shared__ cub::KeyValuePair<int, float> block_max;
if (threadIdx.x == 0) {
block_max = best;
}
__syncthreads();
// Best thread updates split
if (threadIdx.x == block_max.key) {
int split_gidx = (scan_begin + threadIdx.x) - 1;
float fvalue;
if (split_gidx < static_cast<int>(gidx_begin)) {
fvalue = matrix.min_fvalue[fidx];
} else {
fvalue = matrix.gidx_fvalue_map[split_gidx];
}
GradientSumT left = missing_left ? bin + missing : bin;
GradientSumT right = parent_sum - left;
best_split->Update(gain, missing_left ? kLeftDir : kRightDir, fvalue,
fidx, GradientPair(left), GradientPair(right), param);
}
__syncthreads();
}
}
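// Kernel: one thread block per sampled feature; evaluates every split point of
// that feature and writes its best candidate to split_candidates[blockIdx.x].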
template <int BLOCK_THREADS, typename GradientSumT>
__global__ void EvaluateSplitKernel(
common::Span<const GradientSumT> node_histogram, // histogram for gradients
common::Span<const int> feature_set, // Selected features
DeviceNodeStats node,
xgboost::ELLPackMatrix matrix,
GPUTrainingParam gpu_param,
common::Span<DeviceSplitCandidate> split_candidates, // resulting split
ValueConstraint value_constraint,
common::Span<int> d_monotonic_constraints) {
// KeyValuePair here used as threadIdx.x -> gain_value
using ArgMaxT = cub::KeyValuePair<int, float>;
using BlockScanT =
cub::BlockScan<GradientSumT, BLOCK_THREADS, cub::BLOCK_SCAN_WARP_SCANS>;
using MaxReduceT = cub::BlockReduce<ArgMaxT, BLOCK_THREADS>;
using SumReduceT = cub::BlockReduce<GradientSumT, BLOCK_THREADS>;
union TempStorage {
typename BlockScanT::TempStorage scan;
typename MaxReduceT::TempStorage max_reduce;
typename SumReduceT::TempStorage sum_reduce;
};
// Aligned && shared storage for best_split
__shared__ cub::Uninitialized<DeviceSplitCandidate> uninitialized_split;
DeviceSplitCandidate& best_split = uninitialized_split.Alias();
__shared__ TempStorage temp_storage;
if (threadIdx.x == 0) {
best_split = DeviceSplitCandidate();
}
__syncthreads();
// One block for each feature. Features are sampled, so fidx != blockIdx.x
int fidx = feature_set[blockIdx.x];
int constraint = d_monotonic_constraints[fidx];
EvaluateFeature<BLOCK_THREADS, SumReduceT, BlockScanT, MaxReduceT>(
fidx, node_histogram, matrix, &best_split, node, gpu_param, &temp_storage,
constraint, value_constraint);
__syncthreads();
if (threadIdx.x == 0) {
// Record best loss for each feature
split_candidates[blockIdx.x] = best_split;
}
}
/**
* \struct DeviceHistogram
*
* \summary Data storage for node histograms on device. Automatically expands.
*
* \tparam GradientSumT histogram entry type.
* \tparam kStopGrowingSize Do not grow beyond this size
*
* \author Rory
* \date 28/07/2018
*/
template <typename GradientSumT, size_t kStopGrowingSize = 1 << 26>
class DeviceHistogram {
private:
/*! \brief Map nidx to starting index of its histogram. */
std::map<int, size_t> nidx_map_;
dh::device_vector<typename GradientSumT::ValueT> data_;
int n_bins_;
int device_id_;
static constexpr size_t kNumItemsInGradientSum =
sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT);
static_assert(kNumItemsInGradientSum == 2,
"Number of items in gradient type should be 2.");
public:
void Init(int device_id, int n_bins) {
this->n_bins_ = n_bins;
this->device_id_ = device_id;
}
void Reset() {
dh::safe_cuda(cudaMemsetAsync(
data_.data().get(), 0,
data_.size() * sizeof(typename decltype(data_)::value_type)));
nidx_map_.clear();
}
bool HistogramExists(int nidx) const {
return nidx_map_.find(nidx) != nidx_map_.cend();
}
size_t HistogramSize() const {
return n_bins_ * kNumItemsInGradientSum;
}
dh::device_vector<typename GradientSumT::ValueT>& Data() {
return data_;
}
void AllocateHistogram(int nidx) {
if (HistogramExists(nidx)) return;
// Number of items currently used in data
const size_t used_size = nidx_map_.size() * HistogramSize();
const size_t new_used_size = used_size + HistogramSize();
dh::safe_cuda(cudaSetDevice(device_id_));
if (data_.size() >= kStopGrowingSize) {
// Recycle histogram memory
if (new_used_size <= data_.size()) {
// no need to remove old node, just insert the new one.
nidx_map_[nidx] = used_size;
// Zero the histogram (memset size is given in bytes)
dh::safe_cuda(cudaMemsetAsync(data_.data().get() + used_size, 0,
n_bins_ * sizeof(GradientSumT)));
} else {
std::pair<int, size_t> old_entry = *nidx_map_.begin();
nidx_map_.erase(old_entry.first);
dh::safe_cuda(cudaMemsetAsync(data_.data().get() + old_entry.second, 0,
n_bins_ * sizeof(GradientSumT)));
nidx_map_[nidx] = old_entry.second;
}
} else {
// Append new node histogram
nidx_map_[nidx] = used_size;
size_t new_required_memory = std::max(data_.size() * 2, HistogramSize());
if (data_.size() < new_required_memory) {
data_.resize(new_required_memory);
}
}
}
/**
* \summary Return pointer to histogram memory for a given node.
* \param nidx Tree node index.
* \return hist pointer.
*/
common::Span<GradientSumT> GetNodeHistogram(int nidx) {
CHECK(this->HistogramExists(nidx));
auto ptr = data_.data().get() + nidx_map_[nidx];
return common::Span<GradientSumT>(
reinterpret_cast<GradientSumT*>(ptr), n_bins_);
}
};
struct CalcWeightTrainParam {
float min_child_weight;
float reg_alpha;
float reg_lambda;
float max_delta_step;
float learning_rate;
XGBOOST_DEVICE explicit CalcWeightTrainParam(const TrainParam& p)
: min_child_weight(p.min_child_weight),
reg_alpha(p.reg_alpha),
reg_lambda(p.reg_lambda),
max_delta_step(p.max_delta_step),
learning_rate(p.learning_rate) {}
};
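// Kernel: accumulate gradient pairs into a node's histogram, optionally staging
// the bins in shared memory before flushing them to global memory.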
template <typename GradientSumT>
__global__ void SharedMemHistKernel(xgboost::ELLPackMatrix matrix,
common::Span<const RowPartitioner::RowIndexT> d_ridx,
GradientSumT* d_node_hist,
const GradientPair* d_gpair, size_t n_elements,
bool use_shared_memory_histograms) {
extern __shared__ char smem[];
GradientSumT* smem_arr = reinterpret_cast<GradientSumT*>(smem); // NOLINT
if (use_shared_memory_histograms) {
dh::BlockFill(smem_arr, matrix.BinCount(), GradientSumT());
__syncthreads();
}
for (auto idx : dh::GridStrideRange(static_cast<size_t>(0), n_elements)) {
int ridx = d_ridx[idx / matrix.row_stride ];
int gidx =
matrix.gidx_iter[ridx * matrix.row_stride + idx % matrix.row_stride];
if (gidx != matrix.null_gidx_value) {
// If we are not using shared memory, accumulate the values directly into
// global memory
GradientSumT* atomic_add_ptr =
use_shared_memory_histograms ? smem_arr : d_node_hist;
AtomicAddGpair(atomic_add_ptr + gidx, d_gpair[ridx]);
}
}
if (use_shared_memory_histograms) {
// Write shared memory back to global memory
__syncthreads();
for (auto i :
dh::BlockStrideRange(static_cast<size_t>(0), matrix.BinCount())) {
AtomicAddGpair(d_node_hist + i, smem_arr[i]);
}
}
}
// Manage memory for a single GPU
template <typename GradientSumT>
struct GPUHistMakerDevice {
int device_id;
EllpackPageImpl* page;
dh::BulkAllocator ba;
std::unique_ptr<RowPartitioner> row_partitioner;
DeviceHistogram<GradientSumT> hist{};
/*! \brief Gradient pair for each row. */
common::Span<GradientPair> gpair;
common::Span<int> monotone_constraints;
common::Span<bst_float> prediction_cache;
/*! \brief Sum gradient for each node. */
std::vector<GradientPair> node_sum_gradients;
common::Span<GradientPair> node_sum_gradients_d;
bst_uint n_rows;
TrainParam param;
bool prediction_cache_initialised;
bool use_shared_memory_histograms {false};
dh::CubMemory temp_memory;
dh::PinnedMemory pinned_memory;
std::vector<cudaStream_t> streams{};
common::Monitor monitor;
std::vector<ValueConstraint> node_value_constraints;
common::ColumnSampler column_sampler;
FeatureInteractionConstraint interaction_constraints;
using ExpandQueue =
std::priority_queue<ExpandEntry, std::vector<ExpandEntry>,
std::function<bool(ExpandEntry, ExpandEntry)>>;
std::unique_ptr<ExpandQueue> qexpand;
GPUHistMakerDevice(int _device_id,
EllpackPageImpl* _page,
bst_uint _n_rows,
TrainParam _param,
uint32_t column_sampler_seed,
uint32_t n_features)
: device_id(_device_id),
page(_page),
n_rows(_n_rows),
param(std::move(_param)),
prediction_cache_initialised(false),
column_sampler(column_sampler_seed),
interaction_constraints(param, n_features) {
monitor.Init(std::string("GPUHistMakerDevice") + std::to_string(device_id));
}
void InitHistogram();
~GPUHistMakerDevice() { // NOLINT
dh::safe_cuda(cudaSetDevice(device_id));
for (auto& stream : streams) {
dh::safe_cuda(cudaStreamDestroy(stream));
}
}
// Get vector of at least n initialised streams
std::vector<cudaStream_t>& GetStreams(int n) {
if (n > streams.size()) {
for (auto& stream : streams) {
dh::safe_cuda(cudaStreamDestroy(stream));
}
streams.clear();
streams.resize(n);
for (auto& stream : streams) {
dh::safe_cuda(cudaStreamCreate(&stream));
}
}
return streams;
}
// Reset values for each update iteration
// Note that the column sampler must be passed by value because it is not
// thread safe
void Reset(HostDeviceVector<GradientPair>* dh_gpair, int64_t num_columns) {
if (param.grow_policy == TrainParam::kLossGuide) {
qexpand.reset(new ExpandQueue(LossGuide));
} else {
qexpand.reset(new ExpandQueue(DepthWise));
}
this->column_sampler.Init(num_columns, param.colsample_bynode,
param.colsample_bylevel, param.colsample_bytree);
dh::safe_cuda(cudaSetDevice(device_id));
this->interaction_constraints.Reset();
std::fill(node_sum_gradients.begin(), node_sum_gradients.end(),
GradientPair());
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(device_id, n_rows));
dh::safe_cuda(cudaMemcpyAsync(
gpair.data(), dh_gpair->ConstDevicePointer(),
gpair.size() * sizeof(GradientPair), cudaMemcpyDeviceToDevice));
SubsampleGradientPair(device_id, gpair, param.subsample);
hist.Reset();
}
std::vector<DeviceSplitCandidate> EvaluateSplits(
std::vector<int> nidxs, const RegTree& tree,
size_t num_columns) {
dh::safe_cuda(cudaSetDevice(device_id));
auto result_all = pinned_memory.GetSpan<DeviceSplitCandidate>(nidxs.size());
// Work out cub temporary memory requirement
GPUTrainingParam gpu_param(param);
DeviceSplitCandidateReduceOp op(gpu_param);
size_t temp_storage_bytes = 0;
DeviceSplitCandidate* dummy = nullptr;
cub::DeviceReduce::Reduce(
nullptr, temp_storage_bytes, dummy,
dummy, num_columns, op,
DeviceSplitCandidate());
// size in terms of DeviceSplitCandidate
size_t cub_memory_size =
std::ceil(static_cast<double>(temp_storage_bytes) /
sizeof(DeviceSplitCandidate));
// Allocate enough temporary memory
// Result for each nidx
// + intermediate result for each column
// + cub reduce memory
auto temp_span = temp_memory.GetSpan<DeviceSplitCandidate>(
nidxs.size() + nidxs.size() * num_columns + cub_memory_size * nidxs.size());
auto d_result_all = temp_span.subspan(0, nidxs.size());
auto d_split_candidates_all =
temp_span.subspan(d_result_all.size(), nidxs.size() * num_columns);
auto d_cub_memory_all =
temp_span.subspan(d_result_all.size() + d_split_candidates_all.size(),
cub_memory_size * nidxs.size());
auto& streams = this->GetStreams(nidxs.size());
for (auto i = 0ull; i < nidxs.size(); i++) {
auto nidx = nidxs[i];
auto p_feature_set = column_sampler.GetFeatureSet(tree.GetDepth(nidx));
p_feature_set->SetDevice(device_id);
auto d_sampled_features = p_feature_set->DeviceSpan();
common::Span<int32_t> d_feature_set =
interaction_constraints.Query(d_sampled_features, nidx);
auto d_split_candidates =
d_split_candidates_all.subspan(i * num_columns, d_feature_set.size());
DeviceNodeStats node(node_sum_gradients[nidx], nidx, param);
auto d_result = d_result_all.subspan(i, 1);
if (d_feature_set.empty()) {
// Acting as a device side constructor for DeviceSplitCandidate.
// DeviceSplitCandidate::IsValid is false so that ApplySplit can reject this
// candidate.
auto worst_candidate = DeviceSplitCandidate();
dh::safe_cuda(cudaMemcpyAsync(d_result.data(), &worst_candidate,
sizeof(DeviceSplitCandidate),
cudaMemcpyHostToDevice));
continue;
}
// One block for each feature
int constexpr kBlockThreads = 256;
EvaluateSplitKernel<kBlockThreads, GradientSumT>
<<<uint32_t(d_feature_set.size()), kBlockThreads, 0, streams[i]>>>(
hist.GetNodeHistogram(nidx), d_feature_set, node, page->ellpack_matrix,
gpu_param, d_split_candidates, node_value_constraints[nidx],
monotone_constraints);
// Reduce over features to find best feature
auto d_cub_memory =
d_cub_memory_all.subspan(i * cub_memory_size, cub_memory_size);
size_t cub_bytes = d_cub_memory.size() * sizeof(DeviceSplitCandidate);
cub::DeviceReduce::Reduce(reinterpret_cast<void*>(d_cub_memory.data()),
cub_bytes, d_split_candidates.data(),
d_result.data(), d_split_candidates.size(), op,
DeviceSplitCandidate(), streams[i]);
}
dh::safe_cuda(cudaMemcpy(result_all.data(), d_result_all.data(),
sizeof(DeviceSplitCandidate) * d_result_all.size(),
cudaMemcpyDeviceToHost));
return std::vector<DeviceSplitCandidate>(result_all.begin(), result_all.end());
}
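// Build the gradient histogram of a single node from the rows assigned to it.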
void BuildHist(int nidx) {
hist.AllocateHistogram(nidx);
auto d_node_hist = hist.GetNodeHistogram(nidx);
auto d_ridx = row_partitioner->GetRows(nidx);
auto d_gpair = gpair.data();
auto n_elements = d_ridx.size() * page->ellpack_matrix.row_stride;
const size_t smem_size =
use_shared_memory_histograms
? sizeof(GradientSumT) * page->ellpack_matrix.BinCount()
: 0;
const int items_per_thread = 8;
const int block_threads = 256;
const int grid_size = static_cast<int>(
common::DivRoundUp(n_elements, items_per_thread * block_threads));
if (grid_size <= 0) {
return;
}
SharedMemHistKernel<<<grid_size, block_threads, smem_size>>>(
page->ellpack_matrix, d_ridx, d_node_hist.data(), d_gpair, n_elements,
use_shared_memory_histograms);
}
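// Derive a child's histogram as (parent histogram - sibling histogram).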
void SubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
auto d_node_hist_parent = hist.GetNodeHistogram(nidx_parent);
auto d_node_hist_histogram = hist.GetNodeHistogram(nidx_histogram);
auto d_node_hist_subtraction = hist.GetNodeHistogram(nidx_subtraction);
dh::LaunchN(device_id, page->n_bins, [=] __device__(size_t idx) {
d_node_hist_subtraction[idx] =
d_node_hist_parent[idx] - d_node_hist_histogram[idx];
});
}
bool CanDoSubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
// Make sure histograms are already allocated
hist.AllocateHistogram(nidx_subtraction);
return hist.HistogramExists(nidx_histogram) &&
hist.HistogramExists(nidx_parent);
}
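// Partition the rows of a node between its two children according to the
// split; rows with missing values go to the default child.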
void UpdatePosition(int nidx, RegTree::Node split_node) {
auto d_matrix = page->ellpack_matrix;
row_partitioner->UpdatePosition(
nidx, split_node.LeftChild(), split_node.RightChild(),
[=] __device__(bst_uint ridx) {
// given a row index, returns the node id it belongs to
bst_float cut_value =
d_matrix.GetElement(ridx, split_node.SplitIndex());
// Missing value
int new_position = 0;
if (isnan(cut_value)) {
new_position = split_node.DefaultChild();
} else {
if (cut_value <= split_node.SplitCond()) {
new_position = split_node.LeftChild();
} else {
new_position = split_node.RightChild();
}
}
return new_position;
});
}
// After tree update is finished, update the position of all training
// instances to their final leaf. This information is used later to update
// the prediction cache.
void FinalisePosition(RegTree* p_tree) {
const auto d_nodes =
temp_memory.GetSpan<RegTree::Node>(p_tree->GetNodes().size());
dh::safe_cuda(cudaMemcpy(d_nodes.data(), p_tree->GetNodes().data(),
d_nodes.size() * sizeof(RegTree::Node),
cudaMemcpyHostToDevice));
auto d_matrix = page->ellpack_matrix;
row_partitioner->FinalisePosition(
[=] __device__(bst_uint ridx, int position) {
auto node = d_nodes[position];
while (!node.IsLeaf()) {
bst_float element = d_matrix.GetElement(ridx, node.SplitIndex());
// Missing value
if (isnan(element)) {
position = node.DefaultChild();
} else {
if (element <= node.SplitCond()) {
position = node.LeftChild();
} else {
position = node.RightChild();
}
}
node = d_nodes[position];
}
return position;
});
}
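// Add this tree's leaf weights (scaled by the learning rate) to the cached
// predictions of the rows that end up in each leaf.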
void UpdatePredictionCache(bst_float* out_preds_d) {
dh::safe_cuda(cudaSetDevice(device_id));
if (!prediction_cache_initialised) {
dh::safe_cuda(cudaMemcpyAsync(prediction_cache.data(), out_preds_d,
prediction_cache.size() * sizeof(bst_float),
cudaMemcpyDefault));
}
prediction_cache_initialised = true;
CalcWeightTrainParam param_d(param);
dh::safe_cuda(
cudaMemcpyAsync(node_sum_gradients_d.data(), node_sum_gradients.data(),
sizeof(GradientPair) * node_sum_gradients.size(),
cudaMemcpyHostToDevice));
auto d_position = row_partitioner->GetPosition();
auto d_ridx = row_partitioner->GetRows();
auto d_node_sum_gradients = node_sum_gradients_d.data();
auto d_prediction_cache = prediction_cache.data();
dh::LaunchN(
device_id, prediction_cache.size(), [=] __device__(int local_idx) {
int pos = d_position[local_idx];
bst_float weight = CalcWeight(param_d, d_node_sum_gradients[pos]);
d_prediction_cache[d_ridx[local_idx]] +=
weight * param_d.learning_rate;
});
dh::safe_cuda(cudaMemcpy(
out_preds_d, prediction_cache.data(),
prediction_cache.size() * sizeof(bst_float), cudaMemcpyDefault));
row_partitioner.reset();
}
void AllReduceHist(int nidx, dh::AllReducer* reducer) {
monitor.StartCuda("AllReduce");
auto d_node_hist = hist.GetNodeHistogram(nidx).data();
reducer->AllReduceSum(
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
page->ellpack_matrix.BinCount() *
(sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT)));
reducer->Synchronize();
monitor.StopCuda("AllReduce");
}
/**
* \brief Build GPU local histograms for the left and right child of some parent node
*/
void BuildHistLeftRight(int nidx_parent, int nidx_left, int nidx_right, dh::AllReducer* reducer) {
auto build_hist_nidx = nidx_left;
auto subtraction_trick_nidx = nidx_right;
auto left_node_rows = row_partitioner->GetRows(nidx_left).size();
auto right_node_rows = row_partitioner->GetRows(nidx_right).size();
// Decide whether to build the left histogram or right histogram
// Find the largest number of training instances on any given device
// Assume this will be the bottleneck and avoid building this node if
// possible
std::vector<size_t> max_reduce;
max_reduce.push_back(left_node_rows);
max_reduce.push_back(right_node_rows);
reducer->HostMaxAllReduce(&max_reduce);
bool fewer_right = max_reduce[1] < max_reduce[0];
if (fewer_right) {
std::swap(build_hist_nidx, subtraction_trick_nidx);
}
this->BuildHist(build_hist_nidx);
this->AllReduceHist(build_hist_nidx, reducer);
// Check whether we can use the subtraction trick to calculate the other
bool do_subtraction_trick = this->CanDoSubtractionTrick(
nidx_parent, build_hist_nidx, subtraction_trick_nidx);
if (do_subtraction_trick) {
// Calculate other histogram using subtraction trick
this->SubtractionTrick(nidx_parent, build_hist_nidx,
subtraction_trick_nidx);
} else {
// Calculate other histogram manually
this->BuildHist(subtraction_trick_nidx);
this->AllReduceHist(subtraction_trick_nidx, reducer);
}
}
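// Apply a split candidate to the tree: expand the node and set up constraints
// and gradient sums for the two new children.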
void ApplySplit(const ExpandEntry& candidate, RegTree* p_tree) {
RegTree& tree = *p_tree;
GradStats left_stats{};
left_stats.Add(candidate.split.left_sum);
GradStats right_stats{};
right_stats.Add(candidate.split.right_sum);
GradStats parent_sum{};
parent_sum.Add(left_stats);
parent_sum.Add(right_stats);
node_value_constraints.resize(tree.GetNodes().size());
auto base_weight = node_value_constraints[candidate.nid].CalcWeight(param, parent_sum);
auto left_weight =
node_value_constraints[candidate.nid].CalcWeight(param, left_stats)*param.learning_rate;
auto right_weight =
node_value_constraints[candidate.nid].CalcWeight(param, right_stats)*param.learning_rate;
tree.ExpandNode(candidate.nid, candidate.split.findex,
candidate.split.fvalue, candidate.split.dir == kLeftDir,
base_weight, left_weight, right_weight,
candidate.split.loss_chg, parent_sum.sum_hess);
// Set up child constraints
node_value_constraints.resize(tree.GetNodes().size());
node_value_constraints[candidate.nid].SetChild(
param, tree[candidate.nid].SplitIndex(), left_stats, right_stats,
&node_value_constraints[tree[candidate.nid].LeftChild()],
&node_value_constraints[tree[candidate.nid].RightChild()]);
node_sum_gradients[tree[candidate.nid].LeftChild()] =
candidate.split.left_sum;
node_sum_gradients[tree[candidate.nid].RightChild()] =
candidate.split.right_sum;
interaction_constraints.Split(candidate.nid, tree[candidate.nid].SplitIndex(),
tree[candidate.nid].LeftChild(),
tree[candidate.nid].RightChild());
}
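// Initialise the root node: reduce gradient sums across devices, build the
// root histogram and push the first split candidate onto the expand queue.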
void InitRoot(RegTree* p_tree, HostDeviceVector<GradientPair>* gpair_all,
dh::AllReducer* reducer, int64_t num_columns) {
constexpr int kRootNIdx = 0;
const auto &gpair = gpair_all->DeviceSpan();
dh::SumReduction(temp_memory, gpair, node_sum_gradients_d,
gpair.size());
reducer->AllReduceSum(
reinterpret_cast<float*>(node_sum_gradients_d.data()),
reinterpret_cast<float*>(node_sum_gradients_d.data()), 2);
reducer->Synchronize();
dh::safe_cuda(cudaMemcpy(node_sum_gradients.data(),
node_sum_gradients_d.data(), sizeof(GradientPair),
cudaMemcpyDeviceToHost));
this->BuildHist(kRootNIdx);
this->AllReduceHist(kRootNIdx, reducer);
// Remember root stats
p_tree->Stat(kRootNIdx).sum_hess = node_sum_gradients[kRootNIdx].GetHess();
auto weight = CalcWeight(param, node_sum_gradients[kRootNIdx]);
p_tree->Stat(kRootNIdx).base_weight = weight;
(*p_tree)[kRootNIdx].SetLeaf(param.learning_rate * weight);
// Initialise root constraint
node_value_constraints.resize(p_tree->GetNodes().size());
// Generate first split
auto split = this->EvaluateSplits({kRootNIdx}, *p_tree, num_columns);
qexpand->push(
ExpandEntry(kRootNIdx, p_tree->GetDepth(kRootNIdx), split.at(0), 0));
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair_all, DMatrix* p_fmat,
RegTree* p_tree, dh::AllReducer* reducer) {
auto& tree = *p_tree;
monitor.StartCuda("Reset");
this->Reset(gpair_all, p_fmat->Info().num_col_);
monitor.StopCuda("Reset");
monitor.StartCuda("InitRoot");
this->InitRoot(p_tree, gpair_all, reducer, p_fmat->Info().num_col_);
monitor.StopCuda("InitRoot");
auto timestamp = qexpand->size();
auto num_leaves = 1;
while (!qexpand->empty()) {
ExpandEntry candidate = qexpand->top();
qexpand->pop();
if (!candidate.IsValid(param, num_leaves)) {
continue;
}
this->ApplySplit(candidate, p_tree);
num_leaves++;
int left_child_nidx = tree[candidate.nid].LeftChild();
int right_child_nidx = tree[candidate.nid].RightChild();
// Only create child entries if needed
if (ExpandEntry::ChildIsValid(param, tree.GetDepth(left_child_nidx),
num_leaves)) {
monitor.StartCuda("UpdatePosition");
this->UpdatePosition(candidate.nid, (*p_tree)[candidate.nid]);
monitor.StopCuda("UpdatePosition");
monitor.StartCuda("BuildHist");
this->BuildHistLeftRight(candidate.nid, left_child_nidx, right_child_nidx, reducer);
monitor.StopCuda("BuildHist");
monitor.StartCuda("EvaluateSplits");
auto splits = this->EvaluateSplits({left_child_nidx, right_child_nidx},
*p_tree, p_fmat->Info().num_col_);
monitor.StopCuda("EvaluateSplits");
qexpand->push(ExpandEntry(left_child_nidx,
tree.GetDepth(left_child_nidx), splits.at(0),
timestamp++));
qexpand->push(ExpandEntry(right_child_nidx,
tree.GetDepth(right_child_nidx),
splits.at(1), timestamp++));
}
}
monitor.StartCuda("FinalisePosition");
this->FinalisePosition(p_tree);
monitor.StopCuda("FinalisePosition");
}
};
template <typename GradientSumT>
inline void GPUHistMakerDevice<GradientSumT>::InitHistogram() {
CHECK(!(param.max_leaves == 0 && param.max_depth == 0))
<< "Max leaves and max depth cannot both be unconstrained for "
"gpu_hist.";
int max_nodes =
param.max_leaves > 0 ? param.max_leaves * 2 : MaxNodesDepth(param.max_depth);
ba.Allocate(device_id,
&gpair, n_rows,
&prediction_cache, n_rows,
&node_sum_gradients_d, max_nodes,
&monotone_constraints, param.monotone_constraints.size());
dh::CopyVectorToDeviceSpan(monotone_constraints, param.monotone_constraints);
node_sum_gradients.resize(max_nodes);
// check if we can use shared memory for building histograms
// (assuming we need at least 2 CTAs per SM to maintain decent latency
// hiding)
auto histogram_size = sizeof(GradientSumT) * page->n_bins;
auto max_smem = dh::MaxSharedMemory(device_id);
if (histogram_size <= max_smem) {
use_shared_memory_histograms = true;
}
// Init histogram
hist.Init(device_id, page->n_bins);
}
template <typename GradientSumT>
class GPUHistMakerSpecialised {
public:
GPUHistMakerSpecialised() : initialised_{false}, p_last_fmat_{nullptr} {}
void Configure(const Args& args, GenericParameter const* generic_param) {
param_.InitAllowUnknown(args);
generic_param_ = generic_param;
hist_maker_param_.InitAllowUnknown(args);
device_ = generic_param_->gpu_id;
CHECK_GE(device_, 0) << "Must have at least one device";
dh::CheckComputeCapability();
monitor_.Init("updater_gpu_hist");
}
~GPUHistMakerSpecialised() { // NOLINT
dh::GlobalMemoryLogger().Log();
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) {
monitor_.StartCuda("Update");
// rescale learning rate according to the number of trees
float lr = param_.learning_rate;
param_.learning_rate = lr / trees.size();
ValueConstraint::Init(¶m_, dmat->Info().num_col_);
// build tree
try {
for (xgboost::RegTree* tree : trees) {
this->UpdateTree(gpair, dmat, tree);
}
dh::safe_cuda(cudaGetLastError());
} catch (const std::exception& e) {
LOG(FATAL) << "Exception in gpu_hist: " << e.what() << std::endl;
}
param_.learning_rate = lr;
monitor_.StopCuda("Update");
}
void InitDataOnce(DMatrix* dmat) {
info_ = &dmat->Info();
reducer_.Init({device_});
// Synchronise the column sampling seed
uint32_t column_sampling_seed = common::GlobalRandom()();
rabit::Broadcast(&column_sampling_seed, sizeof(column_sampling_seed), 0);
// TODO(rongou): support multiple Ellpack pages.
EllpackPageImpl* page{};
for (auto& batch : dmat->GetBatches<EllpackPage>()) {
page = batch.Impl();
page->Init(device_, param_.max_bin, hist_maker_param_.gpu_batch_nrows);
}
dh::safe_cuda(cudaSetDevice(device_));
maker_.reset(new GPUHistMakerDevice<GradientSumT>(device_,
page,
info_->num_row_,
param_,
column_sampling_seed,
info_->num_col_));
monitor_.StartCuda("InitHistogram");
dh::safe_cuda(cudaSetDevice(device_));
maker_->InitHistogram();
monitor_.StopCuda("InitHistogram");
p_last_fmat_ = dmat;
initialised_ = true;
}
void InitData(DMatrix* dmat) {
if (!initialised_) {
monitor_.StartCuda("InitDataOnce");
this->InitDataOnce(dmat);
monitor_.StopCuda("InitDataOnce");
}
}
// Only call this method for testing
void CheckTreesSynchronized(const std::vector<RegTree>& local_trees) const {
std::string s_model;
common::MemoryBufferStream fs(&s_model);
int rank = rabit::GetRank();
if (rank == 0) {
local_trees.front().Save(&fs);
}
fs.Seek(0);
rabit::Broadcast(&s_model, 0);
RegTree reference_tree{};
reference_tree.Load(&fs);
for (const auto& tree : local_trees) {
CHECK(tree == reference_tree);
}
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat,
RegTree* p_tree) {
monitor_.StartCuda("InitData");
this->InitData(p_fmat);
monitor_.StopCuda("InitData");
gpair->SetDevice(device_);
maker_->UpdateTree(gpair, p_fmat, p_tree, &reducer_);
}
bool UpdatePredictionCache(
const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) {
if (maker_ == nullptr || p_last_fmat_ == nullptr || p_last_fmat_ != data) {
return false;
}
monitor_.StartCuda("UpdatePredictionCache");
p_out_preds->SetDevice(device_);
maker_->UpdatePredictionCache(p_out_preds->DevicePointer());
monitor_.StopCuda("UpdatePredictionCache");
return true;
}
TrainParam param_; // NOLINT
MetaInfo* info_{}; // NOLINT
std::unique_ptr<GPUHistMakerDevice<GradientSumT>> maker_; // NOLINT
private:
bool initialised_;
GPUHistMakerTrainParam hist_maker_param_;
GenericParameter const* generic_param_;
dh::AllReducer reducer_;
DMatrix* p_last_fmat_;
int device_{-1};
common::Monitor monitor_;
};
class GPUHistMaker : public TreeUpdater {
public:
void Configure(const Args& args) override {
hist_maker_param_.InitAllowUnknown(args);
float_maker_.reset();
double_maker_.reset();
if (hist_maker_param_.single_precision_histogram) {
float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>());
float_maker_->Configure(args, tparam_);
} else {
double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>());
double_maker_->Configure(args, tparam_);
}
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) override {
if (hist_maker_param_.single_precision_histogram) {
float_maker_->Update(gpair, dmat, trees);
} else {
double_maker_->Update(gpair, dmat, trees);
}
}
bool UpdatePredictionCache(
const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) override {
if (hist_maker_param_.single_precision_histogram) {
return float_maker_->UpdatePredictionCache(data, p_out_preds);
} else {
return double_maker_->UpdatePredictionCache(data, p_out_preds);
}
}
char const* Name() const override {
return "grow_gpu_hist";
}
private:
GPUHistMakerTrainParam hist_maker_param_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPair>> float_maker_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPairPrecise>> double_maker_;
};
#if !defined(GTEST_TEST)
XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
.describe("Grow tree with GPU.")
.set_body([]() { return new GPUHistMaker(); });
#endif // !defined(GTEST_TEST)
} // namespace tree
} // namespace xgboost
|
65886ae71281999da6fc7d98771d1f92ac58e13e.hip | // !!! This is a file automatically generated by hipify!!!
#include "Geometry/LibraryCUDA.cuh"
#include "Geometry/Constants.hh"
#include "Simulation/ScreenFunctionsCUDA.cuh"
#include "Simulation/AngularDistributionCUDA.cuh"
#include "Simulation/Spawner.cuh"
#include "Simulation/TrackGPU.cuh"
#include <cfloat>
namespace na63 {
namespace {
__device__ __constant__ Float
ah10 = 4.67733E+00, ah11 =-6.19012E-01, ah12 = 2.02225E-02,
ah20 =-7.34101E+00, ah21 = 1.00462E+00, ah22 =-3.20985E-02,
ah30 = 2.93119E+00, ah31 =-4.03761E-01, ah32 = 1.25153E-02;
__device__ __constant__ Float
bh10 = 4.23071E+00, bh11 =-6.10995E-01, bh12 = 1.95531E-02,
bh20 =-7.12527E+00, bh21 = 9.69160E-01, bh22 =-2.74255E-02,
bh30 = 2.69925E+00, bh31 =-3.63283E-01, bh32 = 9.55316E-03;
__device__ __constant__ Float
al00 =-2.05398E+00, al01 = 2.38815E-02, al02 = 5.25483E-04,
al10 =-7.69748E-02, al11 =-6.91499E-02, al12 = 2.22453E-03,
al20 = 4.06463E-02, al21 =-1.01281E-02, al22 = 3.40919E-04;
__device__ __constant__ Float
bl00 = 1.04133E+00, bl01 =-9.43291E-03, bl02 =-4.54758E-04,
bl10 = 1.19253E-01, bl11 = 4.07467E-02, bl12 =-1.30718E-03,
bl20 =-1.59391E-02, bl21 = 7.27752E-03, bl22 =-1.94405E-04;
__device__ __constant__ Float t_low = 1.0;
__device__ __constant__ Float low_kinetic_energy = 10.0 * MeV;
__device__ __constant__ Float fac_fel_d = 5.21575064289;
__device__ __constant__ Float fac_finel_d = 7.08506429395;
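// Rejection-function value of the parametrized (GEANT4-style) bremsstrahlung
// differential cross section per atom for the given electron kinetic energy,
// photon energy and atomic number Z.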
__device__
Float ComputeParametrizedDXSectionPerAtom(Float kinetic_energy, Float gamma_energy, Float Z) {
Float lnZ = logf(Z); // 3.*(anElement->GetIonisation()->GetlogZ3());
Float FZ = lnZ* (4.- 0.55*lnZ);
Float ZZ = pow(Float(Z*(Z+1.)),Float(1.0/3.0)); // anElement->GetIonisation()->GetZZ3();
Float Z3 = pow(Z,Float(1.0/3.0)); // (anElement->GetIonisation()->GetZ3())
Float total_energy = kinetic_energy + kElectronMass;
// Float x, epsil, greject, migdal, grejmax, q;
Float epsil, greject;
Float U = logf(kinetic_energy/kElectronMass);
Float U2 = U*U;
// Precalculated parameters
Float ah, bh;
if (kinetic_energy > t_low) {
Float ah1 = ah10 + ZZ * (ah11 + ZZ * ah12);
Float ah2 = ah20 + ZZ * (ah21 + ZZ * ah22);
Float ah3 = ah30 + ZZ * (ah31 + ZZ * ah32);
Float bh1 = bh10 + ZZ * (bh11 + ZZ * bh12);
Float bh2 = bh20 + ZZ * (bh21 + ZZ * bh22);
Float bh3 = bh30 + ZZ * (bh31 + ZZ * bh32);
ah = 1.0 + (ah1*U2 + ah2*U + ah3) / (U2*U);
bh = 0.75 + (bh1*U2 + bh2*U + bh3) / (U2*U);
// Limit of the screening variable
Float screenfac = 136.0*kElectronMass/(Z3*total_energy);
// epsil = x*kinetic_energy/total_energy;
epsil = gamma_energy/total_energy;
Float screenvar = screenfac*epsil/(1.0-epsil);
Float F1 = max(CUDA_ScreenFunction1(screenvar) - FZ,Float(0.0));
Float F2 = max(CUDA_ScreenFunction2(screenvar) - FZ,Float(0.0));
greject = (F1 - epsil* (ah*F1 - bh*epsil*F2))/8.0; // 1./(42.392 - FZ);
/*
std::cout << " yy = "<<epsil<<std::endl;
std::cout << " F1/(...) "<<F1/(42.392 - FZ)<<std::endl;
std::cout << " F2/(...) "<<F2/(42.392 - FZ)<<std::endl;
std::cout << " (42.392 - FZ) " << (42.392 - FZ) <<std::endl;
*/
} else { // kinetic_energy < t_low
Float al0 = al00 + ZZ* (al01 + ZZ* al02);
Float al1 = al10 + ZZ* (al11 + ZZ* al12);
Float al2 = al20 + ZZ* (al21 + ZZ* al22);
Float bl0 = bl00 + ZZ* (bl01 + ZZ* bl02);
Float bl1 = bl10 + ZZ* (bl11 + ZZ* bl12);
Float bl2 = bl20 + ZZ* (bl21 + ZZ* bl22);
ah = al0 + al1*U + al2*U2;
bh = bl0 + bl1*U + bl2*U2;
Float x = gamma_energy/kinetic_energy;
greject = (1.0 + x* (ah + bh*x));
/*
// Compute the maximum of the rejection function
grejmax = max(1. + xmin* (ah + bh*xmin), 1.+ah+bh);
Float xm = -ah/(2.*bh);
if ( xmin < xm && xm < xmax) grejmax = max(grejmax, 1.+ xm* (ah + bh*xm));
*/
}
return greject;
}
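// Sample a bremsstrahlung photon for the given track, spawn it as a secondary
// and update (or, above the secondary threshold, replace) the primary electron.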
__device__
void SampleSecondaries(
GPUTrack* track,
// MDJ: replaces MaterialCutsCouple kinda wrong, be aware...
const ParticlePars* particle,
const MaterialPars* couple,
const int index,
const int child_index,
hiprandState_t *rng_state,
Float cut_energy = 2*kElectronMass,
Float max_energy = FLT_MAX) {
Float kin_energy = track->momentum[3] - particle->mass;
if (kin_energy < low_kinetic_energy) return;
Float cut = min(cut_energy, kin_energy);
Float emax = min(max_energy, kin_energy);
/*
printf("eKin part: %f\n", kineticEnergy);
printf("lowKinThreshold %f\n", lowKinEnergy);
printf("cut %f\n",cut);
printf("emax %f\n", emax);
*/
if (cut >= emax) return;
// CUDA_GEANT4Bremsstrahlung_SetupForMaterial(couple,kin_energy);
Float density_factor = couple->electron_density * kMigdalConstant;
// Calculate threshold for density effect
Float kinetic_energy = kin_energy;
Float total_energy = kinetic_energy + particle->mass;
Float density_correction = density_factor * total_energy * total_energy;
// in VEmModel.cc get element based on cross section
//const Element* elm = SelectRandomAtom(couple,particle,kineticEnergy,cut,emax);
Float Z = couple->atomic_number;
int iz = int(Z);
// z13 = nist->GetZ13(iz);
Float z13 = pow(iz, 0.33333333333333);
// Float z23 = z13*z13;
Float lnZ = logf(iz);
// lnZ = nist->GetLOGZ(iz);
Float fel = fac_fel_d - lnZ/3.0;
Float finel = fac_finel_d - 2.0*lnZ/3.0;
Float coulomb_correction = couple->coulomb_correction;
/*
printf("------------------------\n");
printf("fCoulomb :%f\n", fCoulomb);
printf("------------------------\n");
*/
Float f_max = fel-coulomb_correction + finel/Z + (1.0 + 1.0/Z)/12.0;
Float xmin = logf(cut*cut + density_correction);
Float xmax = logf(emax*emax + density_correction);
Float gamma_energy, f, x;
do {
x = exp(xmin + hiprand_uniform(rng_state)*(xmax - xmin)) - density_correction;
if (x < 0.0) x = 0.0;
gamma_energy = sqrt(x);
// f = CUDA_GEANT4Bremsstrahlung_ComputeDXSectionPerAtom(gamma_energy,total_energy,Z);
if (gamma_energy < 0.0) {
f = 0;
} else {
f = ComputeParametrizedDXSectionPerAtom(kinetic_energy,gamma_energy,Z);
}
} while (f < f_max * hiprand_uniform(rng_state));
// Angles of the emitted gamma. ( Z - axis along the parent particle)
// Use general interface
GPUThreeVector gamma_momentum;
CUDA_ModifiedTsai_SampleDirection(gamma_momentum,track,particle->mass,rng_state);
GPUTrack *gamma = &tracks[child_index];
gamma->particle_id = 22;
gamma->particle_index = photon_index;
gamma->charge = 0;
FourVector_Set(gamma->momentum,gamma_momentum,gamma_energy);
Float total_momentum = sqrt(kinetic_energy*(total_energy + kElectronMass));
GPUThreeVector direction;
ThreeVector_Normalized(direction,track->momentum);
ThreeVector_Extend(direction,total_momentum);
ThreeVector_Subtract(direction,gamma_momentum,direction);
ThreeVector_Normalize(direction);
// Energy and momentum of primary
Float final_energy = total_energy - gamma_energy;
ThreeVector_Extend(direction,sqrt(final_energy*final_energy - kElectronMass*kElectronMass));
GPUFourVector momentum;
FourVector_Set(momentum,direction,final_energy);
if (gamma_energy > secondary_threshold) {
// Stop tracking and create new secondary instead of primary
/*
fParticleChange->ProposeTrackStatus(fStopAndKill);
fParticleChange->SetProposedKineticEnergy(0.0);
*/
GPUTrack *electron = &tracks[child_index-1];
electron->particle_id = 11;
electron->particle_index = electron_index;
electron->charge = -1;
FourVector_Copy(electron->momentum,momentum);
SpawnChild(*track,child_index-1,kElectronMass);
// Force track to kill itself...
CUDA_SetEnergy(track->momentum,particle->mass,0.0,index);
} else {
// Just update momentum and energy...
CUDA_SetMomentum(track->momentum,particle->mass,momentum,index);
}
SpawnChild(*track,child_index);
} // End SampleSecondaries()
} // End unnamed namespace
__device__
void CUDA_GEANT4Bremsstrahlung(
GPUTrack* track,
const ParticlePars* particle,
const MaterialPars* material,
const Float dl,
hiprandState_t *rng_state,
const int index) {
if (track->particle_id != 11) return;
int child_index = CanHaveTwoChildren(index);
// Must be able to spawn two children
if (child_index == -1) {
UpdateState(index,WAITING);
return;
}
// Use radiation length for probability
Float chance_to_interact = 1 - exp(-dl/material->radiation_length);
if (hiprand_uniform(rng_state) > chance_to_interact) return;
SampleSecondaries(track,particle,material,index,child_index,rng_state);
}
} // End namespace na63 | 65886ae71281999da6fc7d98771d1f92ac58e13e.cu | #include "Geometry/LibraryCUDA.cuh"
#include "Geometry/Constants.hh"
#include "Simulation/ScreenFunctionsCUDA.cuh"
#include "Simulation/AngularDistributionCUDA.cuh"
#include "Simulation/Spawner.cuh"
#include "Simulation/TrackGPU.cuh"
#include <cfloat>
namespace na63 {
namespace {
__device__ __constant__ Float
ah10 = 4.67733E+00, ah11 =-6.19012E-01, ah12 = 2.02225E-02,
ah20 =-7.34101E+00, ah21 = 1.00462E+00, ah22 =-3.20985E-02,
ah30 = 2.93119E+00, ah31 =-4.03761E-01, ah32 = 1.25153E-02;
__device__ __constant__ Float
bh10 = 4.23071E+00, bh11 =-6.10995E-01, bh12 = 1.95531E-02,
bh20 =-7.12527E+00, bh21 = 9.69160E-01, bh22 =-2.74255E-02,
bh30 = 2.69925E+00, bh31 =-3.63283E-01, bh32 = 9.55316E-03;
__device__ __constant__ Float
al00 =-2.05398E+00, al01 = 2.38815E-02, al02 = 5.25483E-04,
al10 =-7.69748E-02, al11 =-6.91499E-02, al12 = 2.22453E-03,
al20 = 4.06463E-02, al21 =-1.01281E-02, al22 = 3.40919E-04;
__device__ __constant__ Float
bl00 = 1.04133E+00, bl01 =-9.43291E-03, bl02 =-4.54758E-04,
bl10 = 1.19253E-01, bl11 = 4.07467E-02, bl12 =-1.30718E-03,
bl20 =-1.59391E-02, bl21 = 7.27752E-03, bl22 =-1.94405E-04;
__device__ __constant__ Float t_low = 1.0;
__device__ __constant__ Float low_kinetic_energy = 10.0 * MeV;
__device__ __constant__ Float fac_fel_d = 5.21575064289;
__device__ __constant__ Float fac_finel_d = 7.08506429395;
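// Rejection-function value of the parametrized (GEANT4-style) bremsstrahlung
// differential cross section per atom for the given electron kinetic energy,
// photon energy and atomic number Z.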
__device__
Float ComputeParametrizedDXSectionPerAtom(Float kinetic_energy, Float gamma_energy, Float Z) {
Float lnZ = logf(Z); // 3.*(anElement->GetIonisation()->GetlogZ3());
Float FZ = lnZ* (4.- 0.55*lnZ);
Float ZZ = pow(Float(Z*(Z+1.)),Float(1.0/3.0)); // anElement->GetIonisation()->GetZZ3();
Float Z3 = pow(Z,Float(1.0/3.0)); // (anElement->GetIonisation()->GetZ3())
Float total_energy = kinetic_energy + kElectronMass;
// Float x, epsil, greject, migdal, grejmax, q;
Float epsil, greject;
Float U = logf(kinetic_energy/kElectronMass);
Float U2 = U*U;
// Precalculated parameters
Float ah, bh;
if (kinetic_energy > t_low) {
Float ah1 = ah10 + ZZ * (ah11 + ZZ * ah12);
Float ah2 = ah20 + ZZ * (ah21 + ZZ * ah22);
Float ah3 = ah30 + ZZ * (ah31 + ZZ * ah32);
Float bh1 = bh10 + ZZ * (bh11 + ZZ * bh12);
Float bh2 = bh20 + ZZ * (bh21 + ZZ * bh22);
Float bh3 = bh30 + ZZ * (bh31 + ZZ * bh32);
ah = 1.0 + (ah1*U2 + ah2*U + ah3) / (U2*U);
bh = 0.75 + (bh1*U2 + bh2*U + bh3) / (U2*U);
// Limit of the screening variable
Float screenfac = 136.0*kElectronMass/(Z3*total_energy);
// epsil = x*kinetic_energy/total_energy;
epsil = gamma_energy/total_energy;
Float screenvar = screenfac*epsil/(1.0-epsil);
Float F1 = max(CUDA_ScreenFunction1(screenvar) - FZ,Float(0.0));
Float F2 = max(CUDA_ScreenFunction2(screenvar) - FZ,Float(0.0));
greject = (F1 - epsil* (ah*F1 - bh*epsil*F2))/8.0; // 1./(42.392 - FZ);
/*
std::cout << " yy = "<<epsil<<std::endl;
std::cout << " F1/(...) "<<F1/(42.392 - FZ)<<std::endl;
std::cout << " F2/(...) "<<F2/(42.392 - FZ)<<std::endl;
std::cout << " (42.392 - FZ) " << (42.392 - FZ) <<std::endl;
*/
} else { // kinetic_energy < t_low
Float al0 = al00 + ZZ* (al01 + ZZ* al02);
Float al1 = al10 + ZZ* (al11 + ZZ* al12);
Float al2 = al20 + ZZ* (al21 + ZZ* al22);
Float bl0 = bl00 + ZZ* (bl01 + ZZ* bl02);
Float bl1 = bl10 + ZZ* (bl11 + ZZ* bl12);
Float bl2 = bl20 + ZZ* (bl21 + ZZ* bl22);
ah = al0 + al1*U + al2*U2;
bh = bl0 + bl1*U + bl2*U2;
Float x = gamma_energy/kinetic_energy;
greject = (1.0 + x* (ah + bh*x));
/*
// Compute the maximum of the rejection function
grejmax = max(1. + xmin* (ah + bh*xmin), 1.+ah+bh);
Float xm = -ah/(2.*bh);
if ( xmin < xm && xm < xmax) grejmax = max(grejmax, 1.+ xm* (ah + bh*xm));
*/
}
return greject;
}
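// Sample a bremsstrahlung photon for the given track, spawn it as a secondary
// and update (or, above the secondary threshold, replace) the primary electron.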
__device__
void SampleSecondaries(
GPUTrack* track,
// MDJ: replaces MaterialCutsCouple kinda wrong, be aware...
const ParticlePars* particle,
const MaterialPars* couple,
const int index,
const int child_index,
curandState *rng_state,
Float cut_energy = 2*kElectronMass,
Float max_energy = FLT_MAX) {
Float kin_energy = track->momentum[3] - particle->mass;
if (kin_energy < low_kinetic_energy) return;
Float cut = min(cut_energy, kin_energy);
Float emax = min(max_energy, kin_energy);
/*
printf("eKin part: %f\n", kineticEnergy);
printf("lowKinThreshold %f\n", lowKinEnergy);
printf("cut %f\n",cut);
printf("emax %f\n", emax);
*/
if (cut >= emax) return;
// CUDA_GEANT4Bremsstrahlung_SetupForMaterial(couple,kin_energy);
Float density_factor = couple->electron_density * kMigdalConstant;
// Calculate threshold for density effect
Float kinetic_energy = kin_energy;
Float total_energy = kinetic_energy + particle->mass;
Float density_correction = density_factor * total_energy * total_energy;
// in VEmModel.cc get element based on cross section
//const Element* elm = SelectRandomAtom(couple,particle,kineticEnergy,cut,emax);
Float Z = couple->atomic_number;
int iz = int(Z);
// z13 = nist->GetZ13(iz);
Float z13 = pow(iz, 0.33333333333333);
// Float z23 = z13*z13;
Float lnZ = logf(iz);
// lnZ = nist->GetLOGZ(iz);
Float fel = fac_fel_d - lnZ/3.0;
Float finel = fac_finel_d - 2.0*lnZ/3.0;
Float coulomb_correction = couple->coulomb_correction;
/*
printf("------------------------\n");
printf("fCoulomb :%f\n", fCoulomb);
printf("------------------------\n");
*/
Float f_max = fel-coulomb_correction + finel/Z + (1.0 + 1.0/Z)/12.0;
Float xmin = logf(cut*cut + density_correction);
Float xmax = logf(emax*emax + density_correction);
Float gamma_energy, f, x;
do {
x = exp(xmin + curand_uniform(rng_state)*(xmax - xmin)) - density_correction;
if (x < 0.0) x = 0.0;
gamma_energy = sqrt(x);
// f = CUDA_GEANT4Bremsstrahlung_ComputeDXSectionPerAtom(gamma_energy,total_energy,Z);
if (gamma_energy < 0.0) {
f = 0;
} else {
f = ComputeParametrizedDXSectionPerAtom(kinetic_energy,gamma_energy,Z);
}
} while (f < f_max * curand_uniform(rng_state));
// Angles of the emitted gamma. ( Z - axis along the parent particle)
// Use general interface
GPUThreeVector gamma_momentum;
CUDA_ModifiedTsai_SampleDirection(gamma_momentum,track,particle->mass,rng_state);
GPUTrack *gamma = &tracks[child_index];
gamma->particle_id = 22;
gamma->particle_index = photon_index;
gamma->charge = 0;
FourVector_Set(gamma->momentum,gamma_momentum,gamma_energy);
Float total_momentum = sqrt(kinetic_energy*(total_energy + kElectronMass));
GPUThreeVector direction;
ThreeVector_Normalized(direction,track->momentum);
ThreeVector_Extend(direction,total_momentum);
ThreeVector_Subtract(direction,gamma_momentum,direction);
ThreeVector_Normalize(direction);
// Energy and momentum of primary
Float final_energy = total_energy - gamma_energy;
ThreeVector_Extend(direction,sqrt(final_energy*final_energy - kElectronMass*kElectronMass));
GPUFourVector momentum;
FourVector_Set(momentum,direction,final_energy);
if (gamma_energy > secondary_threshold) {
// Stop tracking and create new secondary instead of primary
/*
fParticleChange->ProposeTrackStatus(fStopAndKill);
fParticleChange->SetProposedKineticEnergy(0.0);
*/
GPUTrack *electron = &tracks[child_index-1];
electron->particle_id = 11;
electron->particle_index = electron_index;
electron->charge = -1;
FourVector_Copy(electron->momentum,momentum);
SpawnChild(*track,child_index-1,kElectronMass);
// Force track to kill itself...
CUDA_SetEnergy(track->momentum,particle->mass,0.0,index);
} else {
// Just update momentum and energy...
CUDA_SetMomentum(track->momentum,particle->mass,momentum,index);
}
SpawnChild(*track,child_index);
} // End SampleSecondaries()
} // End unnamed namespace
__device__
void CUDA_GEANT4Bremsstrahlung(
GPUTrack* track,
const ParticlePars* particle,
const MaterialPars* material,
const Float dl,
curandState *rng_state,
const int index) {
if (track->particle_id != 11) return;
int child_index = CanHaveTwoChildren(index);
// Must be able to spawn two children
if (child_index == -1) {
UpdateState(index,WAITING);
return;
}
// Use radiation length for probability
Float chance_to_interact = 1 - exp(-dl/material->radiation_length);
if (curand_uniform(rng_state) > chance_to_interact) return;
SampleSecondaries(track,particle,material,index,child_index,rng_state);
}
} // End namespace na63 |
85de5ba936185a2a4c47c108dba1d6d97dcabae5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_CUDNN
#include <vector>
#include "caffe/layers/cudnn_conv_layer.hpp"
namespace caffe {
__global__ void sync_conv_groups() { }
template <typename Dtype, typename Mtype>
void CuDNNConvolutionLayer<Dtype,Mtype>::Forward_gpu(
const vector<Blob<Dtype,Mtype>*>& bottom, const vector<Blob<Dtype,Mtype>*>& top) {
const Dtype* weight = this->blobs_[0]->gpu_data();
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
// Forward through cuDNN in parallel over groups.
for (int g = 0; g < this->group_; g++) {
// Filters.
CUDNN_CHECK(cudnnConvolutionForward(handle_[g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i],
bottom_data + bottom_offset_ * g,
filter_desc_,
weight + this->weight_offset_ * g,
conv_descs_[i],
fwd_algo_[i], workspace[g],
workspace_fwd_sizes_[i],
cudnn::dataType<Dtype>::zero,
top_descs_[i],
top_data + top_offset_ * g));
// Bias.
if (this->bias_term_) {
const Dtype* bias_data = this->blobs_[1]->gpu_data();
#if CUDNN_VERSION_MIN(4, 0, 0)
CUDNN_CHECK(cudnnAddTensor(handle_[g],
cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i], top_data + top_offset_ * g));
#else
CUDNN_CHECK(cudnnAddTensor(handle_[g], CUDNN_ADD_SAME_C,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i], top_data + top_offset_ * g));
#endif
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, );
}
}
template <typename Dtype, typename Mtype>
void CuDNNConvolutionLayer<Dtype,Mtype>::Backward_gpu(const vector<Blob<Dtype,Mtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype,Mtype>*>& bottom) {
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
if (this->param_propagate_down_[0]) {
weight = this->blobs_[0]->gpu_data();
weight_diff = this->blobs_[0]->mutable_gpu_diff();
}
Dtype* bias_diff = NULL;
if (this->bias_term_ && this->param_propagate_down_[1]) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
}
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Backward through cuDNN in parallel over groups and gradients.
for (int g = 0; g < this->group_; g++) {
// Gradient w.r.t. bias.
if (this->bias_term_ && this->param_propagate_down_[1]) {
CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g],
cudnn::dataType<Dtype>::one,
top_descs_[i], top_diff + top_offset_ * g,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_diff + bias_offset_ * g));
}
// Gradient w.r.t. weights.
if (this->param_propagate_down_[0]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
CUDNN_CHECK(cudnnConvolutionBackwardFilter_v3(
handle_[1*this->group_ + g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_filter_algo_[i], workspace[1*this->group_ + g],
workspace_bwd_filter_sizes_[i],
cudnn::dataType<Dtype>::one,
filter_desc_, weight_diff + this->weight_offset_ * g));
}
// Gradient w.r.t. bottom data.
if (propagate_down[i]) {
if (weight == NULL) {
weight = this->blobs_[0]->gpu_data();
}
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
CUDNN_CHECK(cudnnConvolutionBackwardData_v3(
handle_[2*this->group_ + g],
cudnn::dataType<Dtype>::one,
filter_desc_, weight + this->weight_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_data_algo_[i], workspace[2*this->group_ + g],
workspace_bwd_data_sizes_[i],
cudnn::dataType<Dtype>::zero,
bottom_descs_[i], bottom_diff + bottom_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, );
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer);
} // namespace caffe
#endif
| 85de5ba936185a2a4c47c108dba1d6d97dcabae5.cu | #ifdef USE_CUDNN
#include <vector>
#include "caffe/layers/cudnn_conv_layer.hpp"
namespace caffe {
__global__ void sync_conv_groups() { }
template <typename Dtype, typename Mtype>
void CuDNNConvolutionLayer<Dtype,Mtype>::Forward_gpu(
const vector<Blob<Dtype,Mtype>*>& bottom, const vector<Blob<Dtype,Mtype>*>& top) {
const Dtype* weight = this->blobs_[0]->gpu_data();
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
// Forward through cuDNN in parallel over groups.
for (int g = 0; g < this->group_; g++) {
// Filters.
CUDNN_CHECK(cudnnConvolutionForward(handle_[g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i],
bottom_data + bottom_offset_ * g,
filter_desc_,
weight + this->weight_offset_ * g,
conv_descs_[i],
fwd_algo_[i], workspace[g],
workspace_fwd_sizes_[i],
cudnn::dataType<Dtype>::zero,
top_descs_[i],
top_data + top_offset_ * g));
// Bias.
if (this->bias_term_) {
const Dtype* bias_data = this->blobs_[1]->gpu_data();
#if CUDNN_VERSION_MIN(4, 0, 0)
CUDNN_CHECK(cudnnAddTensor(handle_[g],
cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i], top_data + top_offset_ * g));
#else
CUDNN_CHECK(cudnnAddTensor(handle_[g], CUDNN_ADD_SAME_C,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i], top_data + top_offset_ * g));
#endif
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
sync_conv_groups<<<1, 1>>>();
}
}
template <typename Dtype, typename Mtype>
void CuDNNConvolutionLayer<Dtype,Mtype>::Backward_gpu(const vector<Blob<Dtype,Mtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype,Mtype>*>& bottom) {
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
if (this->param_propagate_down_[0]) {
weight = this->blobs_[0]->gpu_data();
weight_diff = this->blobs_[0]->mutable_gpu_diff();
}
Dtype* bias_diff = NULL;
if (this->bias_term_ && this->param_propagate_down_[1]) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
}
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Backward through cuDNN in parallel over groups and gradients.
for (int g = 0; g < this->group_; g++) {
// Gradient w.r.t. bias.
if (this->bias_term_ && this->param_propagate_down_[1]) {
CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g],
cudnn::dataType<Dtype>::one,
top_descs_[i], top_diff + top_offset_ * g,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_diff + bias_offset_ * g));
}
// Gradient w.r.t. weights.
if (this->param_propagate_down_[0]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
CUDNN_CHECK(cudnnConvolutionBackwardFilter_v3(
handle_[1*this->group_ + g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_filter_algo_[i], workspace[1*this->group_ + g],
workspace_bwd_filter_sizes_[i],
cudnn::dataType<Dtype>::one,
filter_desc_, weight_diff + this->weight_offset_ * g));
}
// Gradient w.r.t. bottom data.
if (propagate_down[i]) {
if (weight == NULL) {
weight = this->blobs_[0]->gpu_data();
}
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
CUDNN_CHECK(cudnnConvolutionBackwardData_v3(
handle_[2*this->group_ + g],
cudnn::dataType<Dtype>::one,
filter_desc_, weight + this->weight_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_data_algo_[i], workspace[2*this->group_ + g],
workspace_bwd_data_sizes_[i],
cudnn::dataType<Dtype>::zero,
bottom_descs_[i], bottom_diff + bottom_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
sync_conv_groups<<<1, 1>>>();
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer);
} // namespace caffe
#endif
|
580493d3650e3e14f630a8b5c5af3933ac0838e7.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "Float16.cuh"
#include "nvidia/fp16_emu.cuh"
#include <thrust/execution_policy.h>
#include <thrust/transform.h>
#ifdef FAISS_USE_FLOAT16
namespace faiss { namespace gpu {
bool getDeviceSupportsFloat16Math(int device) {
const auto& prop = getDeviceProperties(device);
return (prop.major >= 6 ||
(prop.major == 5 && prop.minor >= 3));
}
struct FloatToHalf {
__device__ half operator()(float v) const { return __float2half(v); }
};
struct HalfToFloat {
__device__ float operator()(half v) const { return __half2float(v); }
};
void runConvertToFloat16(half* out,
const float* in,
size_t num,
hipStream_t stream) {
thrust::transform(thrust::hip::par.on(stream),
in, in + num, out, FloatToHalf());
}
void runConvertToFloat32(float* out,
const half* in,
size_t num,
hipStream_t stream) {
thrust::transform(thrust::hip::par.on(stream),
in, in + num, out, HalfToFloat());
}
__half hostFloat2Half(float a) {
#if TORCH_HIP_VERSION >= 9000
__half_raw raw;
raw.x = cpu_float2half_rn(a).x;
return __half(raw);
#else
__half h;
h.x = cpu_float2half_rn(a).x;
return h;
#endif
}
} } // namespace
#endif // FAISS_USE_FLOAT16
| 580493d3650e3e14f630a8b5c5af3933ac0838e7.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "Float16.cuh"
#include "nvidia/fp16_emu.cuh"
#include <thrust/execution_policy.h>
#include <thrust/transform.h>
#ifdef FAISS_USE_FLOAT16
namespace faiss { namespace gpu {
bool getDeviceSupportsFloat16Math(int device) {
const auto& prop = getDeviceProperties(device);
return (prop.major >= 6 ||
(prop.major == 5 && prop.minor >= 3));
}
struct FloatToHalf {
__device__ half operator()(float v) const { return __float2half(v); }
};
struct HalfToFloat {
__device__ float operator()(half v) const { return __half2float(v); }
};
void runConvertToFloat16(half* out,
const float* in,
size_t num,
cudaStream_t stream) {
thrust::transform(thrust::cuda::par.on(stream),
in, in + num, out, FloatToHalf());
}
void runConvertToFloat32(float* out,
const half* in,
size_t num,
cudaStream_t stream) {
thrust::transform(thrust::cuda::par.on(stream),
in, in + num, out, HalfToFloat());
}
__half hostFloat2Half(float a) {
#if CUDA_VERSION >= 9000
__half_raw raw;
raw.x = cpu_float2half_rn(a).x;
return __half(raw);
#else
__half h;
h.x = cpu_float2half_rn(a).x;
return h;
#endif
}
} } // namespace
#endif // FAISS_USE_FLOAT16
|
cd695c9e48df0173ee0ec7eb774c7b0a6da565ff.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdint>
#include <cstdlib>
#include "lab1.h"
#ifdef Debug
#include "cv.h"
#include "highgui.h"
#endif
using namespace std;
#define CHECK {\
auto e = hipDeviceSynchronize();\
if (e != hipSuccess) {\
printf("At " __FILE__ ":%d, %s\n", __LINE__, hipGetErrorString(e));\
abort();\
}\
}
void YUV4202RGB(int W, int H, const uint8_t *yuv, uint8_t *rgb)
{
int size = W * H;
for (int i = 0; i < H; i++)
{
for (int j = 0; j < W; j++)
{
float Y = yuv[i * W + j];
float U = yuv[(int)(size + (i / 2)*(W / 2) + j / 2)];
float V = yuv[(int)(size * 1.25 + (i / 2)*(W / 2) + j / 2)];
float R = Y + 1.402 * (V - 128);
float G = Y - 0.344 * (U - 128) - 0.714 * (V - 128);
float B = Y + 1.772 * (U - 128);
rgb[(i * W + j) * 3] = R;
rgb[(i * W + j) * 3 + 1] = G;
rgb[(i * W + j) * 3 + 2] = B;
}
}
}
void RGB2YUV420(int W, int H, const uint8_t *rgb, uint8_t *yuv)
{
int size = W * H;
for (int i = 0; i < H; i++)
{
for (int j = 0; j < W; j++)
{
float R = rgb[(i * W + j) * 3];
float G = rgb[(i * W + j) * 3 + 1];
float B = rgb[(i * W + j) * 3 + 2];
float Y = 0.299 * R + 0.587 * G + 0.114 * B;
float U = -0.169 * R + -0.331 * G + 0.5 * B + 128;
float V = 0.5 * R + -0.419 * G + -0.081 * B + 128;
yuv[i * W + j] = Y;
yuv[(int)(size + (i / 2)*(W / 2) + j / 2)] = U;
yuv[(int)(size * 1.25 + (i / 2)*(W / 2) + j / 2)] = V;
}
}
}
int main(int argc, char **argv)
{
Lab1VideoGenerator g;
Lab1VideoInfo i;
g.get_info(i);
if (i.w == 0 || i.h == 0 || i.n_frame == 0 || i.fps_n == 0 || i.fps_d == 0) {
puts("Cannot be zero");
abort();
} else if (i.w%2 != 0 || i.h%2 != 0) {
puts("Only even frame size is supported");
abort();
}
unsigned FRAME_SIZE = i.w*i.h;
//unsigned FRAME_SIZE = i.w*i.h * 3/2;
MemoryBuffer<uint8_t> frameb(FRAME_SIZE * 3/2);
auto frames = frameb.CreateSync(FRAME_SIZE * 3/2);
FILE *fp = fopen("result.y4m", "wb");
fprintf(fp, "YUV4MPEG2 W%d H%d F%d:%d Ip A1:1 C420\n", i.w, i.h, i.fps_n, i.fps_d);
//uint8_t *YUV420Data = (uint8_t*)malloc(sizeof(uint8_t) * i.w * i.h * 3 / 2);
//uint8_t *rgbData = (uint8_t*)malloc(sizeof(uint8_t) * i.w * i.h * 3);
for (unsigned j = 0; j < i.n_frame; ++j) {
fputs("FRAME\n", fp);
g.Generate(frames.get_gpu_wo());
#ifdef Debug
uint8_t *rgbData = (uint8_t*)malloc(sizeof(uint8_t) * i.w * i.h * 3);
YUV4202RGB(i.w, i.h, frames.get_cpu_ro(), rgbData);
//uint8_t *YUV420Data = (uint8_t*)malloc(sizeof(uint8_t) * i.w * i.h * 3 / 2);
//RGB2YUV420(i.w, i.h, frames.get_cpu_ro(), YUV420Data);
//cv::Mat img(i.h, i.w, CV_8UC3, rgbData);
//RGB2YUV420(i.w, i.h, frames.get_cpu_ro(), YUV420Data);
cv::Mat img(i.h, i.w, CV_8UC3, rgbData);
cv::cvtColor(img, img, CV_RGB2BGR);
cv::imshow("TEST", img);
cv::waitKey(1.0/24.0 * 100);
#else
//RGB2YUV420(i.w, i.h, frames.get_cpu_ro(), YUV420Data);
fwrite(frames.get_cpu_ro(), sizeof(uint8_t), FRAME_SIZE * 3 / 2, fp);
#endif
}
//free(rgbData);
//free(YUV420Data);
fclose(fp);
system("pause");
return 0;
}
| cd695c9e48df0173ee0ec7eb774c7b0a6da565ff.cu | #include <cstdio>
#include <cstdint>
#include <cstdlib>
#include "lab1.h"
#ifdef Debug
#include "cv.h"
#include "highgui.h"
#endif
using namespace std;
#define CHECK {\
auto e = cudaDeviceSynchronize();\
if (e != cudaSuccess) {\
printf("At " __FILE__ ":%d, %s\n", __LINE__, cudaGetErrorString(e));\
abort();\
}\
}
void YUV4202RGB(int W, int H, const uint8_t *yuv, uint8_t *rgb)
{
int size = W * H;
for (int i = 0; i < H; i++)
{
for (int j = 0; j < W; j++)
{
float Y = yuv[i * W + j];
float U = yuv[(int)(size + (i / 2)*(W / 2) + j / 2)];
float V = yuv[(int)(size * 1.25 + (i / 2)*(W / 2) + j / 2)];
float R = Y + 1.402 * (V - 128);
float G = Y - 0.344 * (U - 128) - 0.714 * (V - 128);
float B = Y + 1.772 * (U - 128);
rgb[(i * W + j) * 3] = R;
rgb[(i * W + j) * 3 + 1] = G;
rgb[(i * W + j) * 3 + 2] = B;
}
}
}
void RGB2YUV420(int W, int H, const uint8_t *rgb, uint8_t *yuv)
{
int size = W * H;
for (int i = 0; i < H; i++)
{
for (int j = 0; j < W; j++)
{
float R = rgb[(i * W + j) * 3];
float G = rgb[(i * W + j) * 3 + 1];
float B = rgb[(i * W + j) * 3 + 2];
float Y = 0.299 * R + 0.587 * G + 0.114 * B;
float U = -0.169 * R + -0.331 * G + 0.5 * B + 128;
float V = 0.5 * R + -0.419 * G + -0.081 * B + 128;
yuv[i * W + j] = Y;
yuv[(int)(size + (i / 2)*(W / 2) + j / 2)] = U;
yuv[(int)(size * 1.25 + (i / 2)*(W / 2) + j / 2)] = V;
}
}
}
int main(int argc, char **argv)
{
Lab1VideoGenerator g;
Lab1VideoInfo i;
g.get_info(i);
if (i.w == 0 || i.h == 0 || i.n_frame == 0 || i.fps_n == 0 || i.fps_d == 0) {
puts("Cannot be zero");
abort();
} else if (i.w%2 != 0 || i.h%2 != 0) {
puts("Only even frame size is supported");
abort();
}
unsigned FRAME_SIZE = i.w*i.h;
//unsigned FRAME_SIZE = i.w*i.h * 3/2;
MemoryBuffer<uint8_t> frameb(FRAME_SIZE * 3/2);
auto frames = frameb.CreateSync(FRAME_SIZE * 3/2);
FILE *fp = fopen("result.y4m", "wb");
fprintf(fp, "YUV4MPEG2 W%d H%d F%d:%d Ip A1:1 C420\n", i.w, i.h, i.fps_n, i.fps_d);
//uint8_t *YUV420Data = (uint8_t*)malloc(sizeof(uint8_t) * i.w * i.h * 3 / 2);
//uint8_t *rgbData = (uint8_t*)malloc(sizeof(uint8_t) * i.w * i.h * 3);
for (unsigned j = 0; j < i.n_frame; ++j) {
fputs("FRAME\n", fp);
g.Generate(frames.get_gpu_wo());
#ifdef Debug
uint8_t *rgbData = (uint8_t*)malloc(sizeof(uint8_t) * i.w * i.h * 3);
YUV4202RGB(i.w, i.h, frames.get_cpu_ro(), rgbData);
//uint8_t *YUV420Data = (uint8_t*)malloc(sizeof(uint8_t) * i.w * i.h * 3 / 2);
//RGB2YUV420(i.w, i.h, frames.get_cpu_ro(), YUV420Data);
//cv::Mat img(i.h, i.w, CV_8UC3, rgbData);
//RGB2YUV420(i.w, i.h, frames.get_cpu_ro(), YUV420Data);
cv::Mat img(i.h, i.w, CV_8UC3, rgbData);
cv::cvtColor(img, img, CV_RGB2BGR);
cv::imshow("TEST", img);
cv::waitKey(1.0/24.0 * 100);
#else
//RGB2YUV420(i.w, i.h, frames.get_cpu_ro(), YUV420Data);
fwrite(frames.get_cpu_ro(), sizeof(uint8_t), FRAME_SIZE * 3 / 2, fp);
#endif
}
//free(rgbData);
//free(YUV420Data);
fclose(fp);
system("pause");
return 0;
}
|
341d16fba6500d5a5745b639514835d02382a5aa.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <sparse.hpp>
#include <arith.hpp>
#include <cast.hpp>
#include <common/err_common.hpp>
#include <complex.hpp>
#include <copy.hpp>
#include <cusparse.hpp>
#include <kernel/sparse.hpp>
#include <lookup.hpp>
#include <math.hpp>
#include <platform.hpp>
#include <where.hpp>
#include <stdexcept>
#include <string>
namespace cuda {
using namespace common;
// hipsparseStatus_t hipsparseZcsr2csc(hipsparseHandle_t handle,
// int m, int n, int nnz,
// const hipDoubleComplex *csrSortedVal,
// const int *csrSortedRowPtr, const int
// *csrSortedColInd, hipDoubleComplex
// *cscSortedVal, int *cscSortedRowInd, int
// *cscSortedColPtr, hipsparseAction_t
// copyValues, hipsparseIndexBase_t idxBase);
template<typename T>
struct csr2csc_func_def_t {
typedef hipsparseStatus_t (*csr2csc_func_def)(hipsparseHandle_t, int, int,
int, const T *, const int *,
const int *, T *, int *, int *,
hipsparseAction_t,
hipsparseIndexBase_t);
};
// hipsparseStatus_t hipsparseZdense2csr(hipsparseHandle_t handle,
// int m, int n,
// const hipsparseMatDescr_t descrA,
// const hipDoubleComplex *A, int lda,
// const int *nnzPerRow,
// hipDoubleComplex *csrValA,
// int *csrRowPtrA, int *csrColIndA)
template<typename T>
struct dense2csr_func_def_t {
typedef hipsparseStatus_t (*dense2csr_func_def)(hipsparseHandle_t, int, int,
const hipsparseMatDescr_t,
const T *, int, const int *,
T *, int *, int *);
};
// hipsparseStatus_t hipsparseZdense2csc(hipsparseHandle_t handle,
// int m, int n,
// const hipsparseMatDescr_t descrA,
// const hipDoubleComplex *A, int lda,
// const int *nnzPerCol,
// hipDoubleComplex *cscValA,
// int *cscRowIndA, int *cscColPtrA)
template<typename T>
struct dense2csc_func_def_t {
typedef hipsparseStatus_t (*dense2csc_func_def)(hipsparseHandle_t, int, int,
const hipsparseMatDescr_t,
const T *, int, const int *,
T *, int *, int *);
};
// hipsparseStatus_t hipsparseZcsr2dense(hipsparseHandle_t handle,
// int m, int n,
// const hipsparseMatDescr_t descrA,
// const hipDoubleComplex *csrValA,
// const int *csrRowPtrA,
// const int *csrColIndA,
// hipDoubleComplex *A, int lda)
template<typename T>
struct csr2dense_func_def_t {
typedef hipsparseStatus_t (*csr2dense_func_def)(hipsparseHandle_t, int, int,
const hipsparseMatDescr_t,
const T *, const int *,
const int *, T *, int);
};
// hipsparseStatus_t hipsparseZcsc2dense(hipsparseHandle_t handle,
// int m, int n,
// const hipsparseMatDescr_t descrA,
// const hipDoubleComplex *cscValA,
// const int *cscRowIndA,
// const int *cscColPtrA,
// hipDoubleComplex *A, int lda)
template<typename T>
struct csc2dense_func_def_t {
typedef hipsparseStatus_t (*csc2dense_func_def)(hipsparseHandle_t, int, int,
const hipsparseMatDescr_t,
const T *, const int *,
const int *, T *, int);
};
// hipsparseStatus_t hipsparseZnnz(hipsparseHandle_t handle,
// hipsparseDirection_t dirA,
// int m, int n,
// const hipsparseMatDescr_t descrA,
// const hipDoubleComplex *A, int lda,
// int *nnzPerRowColumn,
// int *nnzTotalDevHostPtr)
template<typename T>
struct nnz_func_def_t {
typedef hipsparseStatus_t (*nnz_func_def)(hipsparseHandle_t,
hipsparseDirection_t, int, int,
const hipsparseMatDescr_t,
const T *, int, int *, int *);
};
// hipsparseStatus_t hipsparseZgthr(hipsparseHandle_t handle,
// int nnz,
// const hipDoubleComplex *y,
// hipDoubleComplex *xVal, const int *xInd,
// hipsparseIndexBase_t idxBase)
template<typename T>
struct gthr_func_def_t {
typedef hipsparseStatus_t (*gthr_func_def)(hipsparseHandle_t, int, const T *,
T *, const int *,
hipsparseIndexBase_t);
};
#define SPARSE_FUNC_DEF(FUNC) \
template<typename T> \
typename FUNC##_func_def_t<T>::FUNC##_func_def FUNC##_func();
#define SPARSE_FUNC(FUNC, TYPE, PREFIX) \
template<> \
typename FUNC##_func_def_t<TYPE>::FUNC##_func_def FUNC##_func<TYPE>() { \
return (FUNC##_func_def_t<TYPE>::FUNC##_func_def) & \
cusparse##PREFIX##FUNC; \
}
SPARSE_FUNC_DEF(csr2csc)
SPARSE_FUNC(csr2csc, float, S)
SPARSE_FUNC(csr2csc, double, D)
SPARSE_FUNC(csr2csc, cfloat, C)
SPARSE_FUNC(csr2csc, cdouble, Z)
SPARSE_FUNC_DEF(dense2csr)
SPARSE_FUNC(dense2csr, float, S)
SPARSE_FUNC(dense2csr, double, D)
SPARSE_FUNC(dense2csr, cfloat, C)
SPARSE_FUNC(dense2csr, cdouble, Z)
SPARSE_FUNC_DEF(dense2csc)
SPARSE_FUNC(dense2csc, float, S)
SPARSE_FUNC(dense2csc, double, D)
SPARSE_FUNC(dense2csc, cfloat, C)
SPARSE_FUNC(dense2csc, cdouble, Z)
SPARSE_FUNC_DEF(csr2dense)
SPARSE_FUNC(csr2dense, float, S)
SPARSE_FUNC(csr2dense, double, D)
SPARSE_FUNC(csr2dense, cfloat, C)
SPARSE_FUNC(csr2dense, cdouble, Z)
SPARSE_FUNC_DEF(csc2dense)
SPARSE_FUNC(csc2dense, float, S)
SPARSE_FUNC(csc2dense, double, D)
SPARSE_FUNC(csc2dense, cfloat, C)
SPARSE_FUNC(csc2dense, cdouble, Z)
SPARSE_FUNC_DEF(nnz)
SPARSE_FUNC(nnz, float, S)
SPARSE_FUNC(nnz, double, D)
SPARSE_FUNC(nnz, cfloat, C)
SPARSE_FUNC(nnz, cdouble, Z)
SPARSE_FUNC_DEF(gthr)
SPARSE_FUNC(gthr, float, S)
SPARSE_FUNC(gthr, double, D)
SPARSE_FUNC(gthr, cfloat, C)
SPARSE_FUNC(gthr, cdouble, Z)
#undef SPARSE_FUNC
#undef SPARSE_FUNC_DEF
// Partial template specialization of sparseConvertDenseToStorage for COO
// However, template specialization is not allowed
template<typename T>
SparseArray<T> sparseConvertDenseToCOO(const Array<T> &in) {
Array<uint> nonZeroIdx_ = where<T>(in);
Array<int> nonZeroIdx = cast<int, uint>(nonZeroIdx_);
dim_t nNZ = nonZeroIdx.elements();
Array<int> constDim = createValueArray<int>(dim4(nNZ), in.dims()[0]);
Array<int> rowIdx =
arithOp<int, af_mod_t>(nonZeroIdx, constDim, nonZeroIdx.dims());
Array<int> colIdx =
arithOp<int, af_div_t>(nonZeroIdx, constDim, nonZeroIdx.dims());
Array<T> values = copyArray<T>(in);
values.modDims(dim4(values.elements()));
values = lookup<T, int>(values, nonZeroIdx, 0);
return createArrayDataSparseArray<T>(in.dims(), values, rowIdx, colIdx,
AF_STORAGE_COO);
}
template<typename T, af_storage stype>
SparseArray<T> sparseConvertDenseToStorage(const Array<T> &in) {
const int M = in.dims()[0];
const int N = in.dims()[1];
// Create Sparse Matrix Descriptor
hipsparseMatDescr_t descr = 0;
CUSPARSE_CHECK(hipsparseCreateMatDescr(&descr));
hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO);
int d = -1;
hipsparseDirection_t dir = HIPSPARSE_DIRECTION_ROW;
if (stype == AF_STORAGE_CSR) {
d = M;
dir = HIPSPARSE_DIRECTION_ROW;
} else {
d = N;
dir = HIPSPARSE_DIRECTION_COLUMN;
}
Array<int> nnzPerDir = createEmptyArray<int>(dim4(d));
int nNZ = -1;
CUSPARSE_CHECK(nnz_func<T>()(sparseHandle(), dir, M, N, descr, in.get(),
in.strides()[1], nnzPerDir.get(), &nNZ));
Array<int> rowIdx = createEmptyArray<int>(dim4());
Array<int> colIdx = createEmptyArray<int>(dim4());
if (stype == AF_STORAGE_CSR) {
rowIdx = createEmptyArray<int>(dim4(M + 1));
colIdx = createEmptyArray<int>(dim4(nNZ));
} else {
rowIdx = createEmptyArray<int>(dim4(nNZ));
colIdx = createEmptyArray<int>(dim4(N + 1));
}
Array<T> values = createEmptyArray<T>(dim4(nNZ));
if (stype == AF_STORAGE_CSR)
CUSPARSE_CHECK(dense2csr_func<T>()(
sparseHandle(), M, N, descr, in.get(), in.strides()[1],
nnzPerDir.get(), values.get(), rowIdx.get(), colIdx.get()));
else
CUSPARSE_CHECK(dense2csc_func<T>()(
sparseHandle(), M, N, descr, in.get(), in.strides()[1],
nnzPerDir.get(), values.get(), rowIdx.get(), colIdx.get()));
    // Destroy Sparse Matrix Descriptor
CUSPARSE_CHECK(hipsparseDestroyMatDescr(descr));
return createArrayDataSparseArray<T>(in.dims(), values, rowIdx, colIdx,
stype);
}
// Partial template specialization of sparseConvertStorageToDense for COO
// However, template specialization is not allowed
template<typename T>
Array<T> sparseConvertCOOToDense(const SparseArray<T> &in) {
Array<T> dense = createValueArray<T>(in.dims(), scalar<T>(0));
const Array<T> values = in.getValues();
const Array<int> rowIdx = in.getRowIdx();
const Array<int> colIdx = in.getColIdx();
kernel::coo2dense<T>(dense, values, rowIdx, colIdx);
return dense;
}
template<typename T, af_storage stype>
Array<T> sparseConvertStorageToDense(const SparseArray<T> &in) {
// Create Sparse Matrix Descriptor
hipsparseMatDescr_t descr = 0;
CUSPARSE_CHECK(hipsparseCreateMatDescr(&descr));
hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO);
int M = in.dims()[0];
int N = in.dims()[1];
Array<T> dense = createValueArray<T>(in.dims(), scalar<T>(0));
int d_strides1 = dense.strides()[1];
if (stype == AF_STORAGE_CSR)
CUSPARSE_CHECK(
csr2dense_func<T>()(sparseHandle(), M, N, descr,
in.getValues().get(), in.getRowIdx().get(),
in.getColIdx().get(), dense.get(), d_strides1));
else
CUSPARSE_CHECK(
csc2dense_func<T>()(sparseHandle(), M, N, descr,
in.getValues().get(), in.getRowIdx().get(),
in.getColIdx().get(), dense.get(), d_strides1));
    // Destroy Sparse Matrix Descriptor
CUSPARSE_CHECK(hipsparseDestroyMatDescr(descr));
return dense;
}
template<typename T, af_storage dest, af_storage src>
SparseArray<T> sparseConvertStorageToStorage(const SparseArray<T> &in) {
using std::shared_ptr;
in.eval();
int nNZ = in.getNNZ();
SparseArray<T> converted = createEmptySparseArray<T>(in.dims(), nNZ, dest);
if (src == AF_STORAGE_CSR && dest == AF_STORAGE_COO) {
// Copy colIdx as is
CUDA_CHECK(
hipMemcpyAsync(converted.getColIdx().get(), in.getColIdx().get(),
in.getColIdx().elements() * sizeof(int),
hipMemcpyDeviceToDevice, cuda::getActiveStream()));
// cusparse function to expand compressed row into coordinate
CUSPARSE_CHECK(hipsparseXcsr2coo(
sparseHandle(), in.getRowIdx().get(), nNZ, in.dims()[0],
converted.getRowIdx().get(), HIPSPARSE_INDEX_BASE_ZERO));
// Call sort
size_t pBufferSizeInBytes = 0;
CUSPARSE_CHECK(hipsparseXcoosort_bufferSizeExt(
sparseHandle(), in.dims()[0], in.dims()[1], nNZ,
converted.getRowIdx().get(), converted.getColIdx().get(),
&pBufferSizeInBytes));
shared_ptr<char> pBuffer(memAlloc<char>(pBufferSizeInBytes).release(),
memFree<char>);
shared_ptr<int> P(memAlloc<int>(nNZ).release(), memFree<int>);
CUSPARSE_CHECK(
hipsparseCreateIdentityPermutation(sparseHandle(), nNZ, P.get()));
CUSPARSE_CHECK(hipsparseXcoosortByColumn(
sparseHandle(), in.dims()[0], in.dims()[1], nNZ,
converted.getRowIdx().get(), converted.getColIdx().get(), P.get(),
(void *)pBuffer.get()));
CUSPARSE_CHECK(gthr_func<T>()(sparseHandle(), nNZ, in.getValues().get(),
converted.getValues().get(), P.get(),
HIPSPARSE_INDEX_BASE_ZERO));
} else if (src == AF_STORAGE_COO && dest == AF_STORAGE_CSR) {
// The cusparse csr sort function is not behaving correctly.
        // So the workaround is to convert the COO into row major and then
// convert it to CSR
// Deep copy input into temporary COO Row Major
SparseArray<T> cooT = createArrayDataSparseArray<T>(
in.dims(), in.getValues(), in.getRowIdx(), in.getColIdx(),
in.getStorage(), true);
// Call sort to convert column major to row major
{
size_t pBufferSizeInBytes = 0;
CUSPARSE_CHECK(hipsparseXcoosort_bufferSizeExt(
sparseHandle(), cooT.dims()[0], cooT.dims()[1], nNZ,
cooT.getRowIdx().get(), cooT.getColIdx().get(),
&pBufferSizeInBytes));
shared_ptr<char> pBuffer(
memAlloc<char>(pBufferSizeInBytes).release(), memFree<char>);
shared_ptr<int> P(memAlloc<int>(nNZ).release(), memFree<int>);
CUSPARSE_CHECK(hipsparseCreateIdentityPermutation(sparseHandle(),
nNZ, P.get()));
CUSPARSE_CHECK(hipsparseXcoosortByRow(
sparseHandle(), cooT.dims()[0], cooT.dims()[1], nNZ,
cooT.getRowIdx().get(), cooT.getColIdx().get(), P.get(),
(void *)pBuffer.get()));
CUSPARSE_CHECK(gthr_func<T>()(
sparseHandle(), nNZ, in.getValues().get(),
cooT.getValues().get(), P.get(), HIPSPARSE_INDEX_BASE_ZERO));
}
// Copy values and colIdx as is
CUDA_CHECK(
hipMemcpyAsync(converted.getValues().get(), cooT.getValues().get(),
cooT.getValues().elements() * sizeof(T),
hipMemcpyDeviceToDevice, cuda::getActiveStream()));
CUDA_CHECK(
hipMemcpyAsync(converted.getColIdx().get(), cooT.getColIdx().get(),
cooT.getColIdx().elements() * sizeof(int),
hipMemcpyDeviceToDevice, cuda::getActiveStream()));
// cusparse function to compress row from coordinate
CUSPARSE_CHECK(hipsparseXcoo2csr(
sparseHandle(), cooT.getRowIdx().get(), nNZ, cooT.dims()[0],
converted.getRowIdx().get(), HIPSPARSE_INDEX_BASE_ZERO));
// No need to call CSRSORT
} else {
// Should never come here
AF_ERROR("CUDA Backend invalid conversion combination",
AF_ERR_NOT_SUPPORTED);
}
return converted;
}
#define INSTANTIATE_TO_STORAGE(T, S) \
template SparseArray<T> \
sparseConvertStorageToStorage<T, S, AF_STORAGE_CSR>( \
const SparseArray<T> &in); \
template SparseArray<T> \
sparseConvertStorageToStorage<T, S, AF_STORAGE_CSC>( \
const SparseArray<T> &in); \
template SparseArray<T> \
sparseConvertStorageToStorage<T, S, AF_STORAGE_COO>( \
const SparseArray<T> &in);
#define INSTANTIATE_COO_SPECIAL(T) \
template<> \
SparseArray<T> sparseConvertDenseToStorage<T, AF_STORAGE_COO>( \
const Array<T> &in) { \
return sparseConvertDenseToCOO<T>(in); \
} \
template<> \
Array<T> sparseConvertStorageToDense<T, AF_STORAGE_COO>( \
const SparseArray<T> &in) { \
return sparseConvertCOOToDense<T>(in); \
}
#define INSTANTIATE_SPARSE(T) \
template SparseArray<T> sparseConvertDenseToStorage<T, AF_STORAGE_CSR>( \
const Array<T> &in); \
template SparseArray<T> sparseConvertDenseToStorage<T, AF_STORAGE_CSC>( \
const Array<T> &in); \
\
template Array<T> sparseConvertStorageToDense<T, AF_STORAGE_CSR>( \
const SparseArray<T> &in); \
template Array<T> sparseConvertStorageToDense<T, AF_STORAGE_CSC>( \
const SparseArray<T> &in); \
\
INSTANTIATE_COO_SPECIAL(T) \
\
INSTANTIATE_TO_STORAGE(T, AF_STORAGE_CSR) \
INSTANTIATE_TO_STORAGE(T, AF_STORAGE_CSC) \
INSTANTIATE_TO_STORAGE(T, AF_STORAGE_COO)
INSTANTIATE_SPARSE(float)
INSTANTIATE_SPARSE(double)
INSTANTIATE_SPARSE(cfloat)
INSTANTIATE_SPARSE(cdouble)
#undef INSTANTIATE_TO_STORAGE
#undef INSTANTIATE_COO_SPECIAL
#undef INSTANTIATE_SPARSE
} // namespace cuda
| 341d16fba6500d5a5745b639514835d02382a5aa.cu | /*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <sparse.hpp>
#include <arith.hpp>
#include <cast.hpp>
#include <common/err_common.hpp>
#include <complex.hpp>
#include <copy.hpp>
#include <cusparse.hpp>
#include <kernel/sparse.hpp>
#include <lookup.hpp>
#include <math.hpp>
#include <platform.hpp>
#include <where.hpp>
#include <stdexcept>
#include <string>
namespace cuda {
using namespace common;
// cusparseStatus_t cusparseZcsr2csc(cusparseHandle_t handle,
// int m, int n, int nnz,
// const cuDoubleComplex *csrSortedVal,
// const int *csrSortedRowPtr, const int
// *csrSortedColInd, cuDoubleComplex
// *cscSortedVal, int *cscSortedRowInd, int
// *cscSortedColPtr, cusparseAction_t
// copyValues, cusparseIndexBase_t idxBase);
template<typename T>
struct csr2csc_func_def_t {
typedef cusparseStatus_t (*csr2csc_func_def)(cusparseHandle_t, int, int,
int, const T *, const int *,
const int *, T *, int *, int *,
cusparseAction_t,
cusparseIndexBase_t);
};
// cusparseStatus_t cusparseZdense2csr(cusparseHandle_t handle,
// int m, int n,
// const cusparseMatDescr_t descrA,
// const cuDoubleComplex *A, int lda,
// const int *nnzPerRow,
// cuDoubleComplex *csrValA,
// int *csrRowPtrA, int *csrColIndA)
template<typename T>
struct dense2csr_func_def_t {
typedef cusparseStatus_t (*dense2csr_func_def)(cusparseHandle_t, int, int,
const cusparseMatDescr_t,
const T *, int, const int *,
T *, int *, int *);
};
// cusparseStatus_t cusparseZdense2csc(cusparseHandle_t handle,
// int m, int n,
// const cusparseMatDescr_t descrA,
// const cuDoubleComplex *A, int lda,
// const int *nnzPerCol,
// cuDoubleComplex *cscValA,
// int *cscRowIndA, int *cscColPtrA)
template<typename T>
struct dense2csc_func_def_t {
typedef cusparseStatus_t (*dense2csc_func_def)(cusparseHandle_t, int, int,
const cusparseMatDescr_t,
const T *, int, const int *,
T *, int *, int *);
};
// cusparseStatus_t cusparseZcsr2dense(cusparseHandle_t handle,
// int m, int n,
// const cusparseMatDescr_t descrA,
// const cuDoubleComplex *csrValA,
// const int *csrRowPtrA,
// const int *csrColIndA,
// cuDoubleComplex *A, int lda)
template<typename T>
struct csr2dense_func_def_t {
typedef cusparseStatus_t (*csr2dense_func_def)(cusparseHandle_t, int, int,
const cusparseMatDescr_t,
const T *, const int *,
const int *, T *, int);
};
// cusparseStatus_t cusparseZcsc2dense(cusparseHandle_t handle,
// int m, int n,
// const cusparseMatDescr_t descrA,
// const cuDoubleComplex *cscValA,
// const int *cscRowIndA,
// const int *cscColPtrA,
// cuDoubleComplex *A, int lda)
template<typename T>
struct csc2dense_func_def_t {
typedef cusparseStatus_t (*csc2dense_func_def)(cusparseHandle_t, int, int,
const cusparseMatDescr_t,
const T *, const int *,
const int *, T *, int);
};
// cusparseStatus_t cusparseZnnz(cusparseHandle_t handle,
// cusparseDirection_t dirA,
// int m, int n,
// const cusparseMatDescr_t descrA,
// const cuDoubleComplex *A, int lda,
// int *nnzPerRowColumn,
// int *nnzTotalDevHostPtr)
template<typename T>
struct nnz_func_def_t {
typedef cusparseStatus_t (*nnz_func_def)(cusparseHandle_t,
cusparseDirection_t, int, int,
const cusparseMatDescr_t,
const T *, int, int *, int *);
};
// cusparseStatus_t cusparseZgthr(cusparseHandle_t handle,
// int nnz,
// const cuDoubleComplex *y,
// cuDoubleComplex *xVal, const int *xInd,
// cusparseIndexBase_t idxBase)
template<typename T>
struct gthr_func_def_t {
typedef cusparseStatus_t (*gthr_func_def)(cusparseHandle_t, int, const T *,
T *, const int *,
cusparseIndexBase_t);
};
#define SPARSE_FUNC_DEF(FUNC) \
template<typename T> \
typename FUNC##_func_def_t<T>::FUNC##_func_def FUNC##_func();
#define SPARSE_FUNC(FUNC, TYPE, PREFIX) \
template<> \
typename FUNC##_func_def_t<TYPE>::FUNC##_func_def FUNC##_func<TYPE>() { \
return (FUNC##_func_def_t<TYPE>::FUNC##_func_def) & \
cusparse##PREFIX##FUNC; \
}
SPARSE_FUNC_DEF(csr2csc)
SPARSE_FUNC(csr2csc, float, S)
SPARSE_FUNC(csr2csc, double, D)
SPARSE_FUNC(csr2csc, cfloat, C)
SPARSE_FUNC(csr2csc, cdouble, Z)
SPARSE_FUNC_DEF(dense2csr)
SPARSE_FUNC(dense2csr, float, S)
SPARSE_FUNC(dense2csr, double, D)
SPARSE_FUNC(dense2csr, cfloat, C)
SPARSE_FUNC(dense2csr, cdouble, Z)
SPARSE_FUNC_DEF(dense2csc)
SPARSE_FUNC(dense2csc, float, S)
SPARSE_FUNC(dense2csc, double, D)
SPARSE_FUNC(dense2csc, cfloat, C)
SPARSE_FUNC(dense2csc, cdouble, Z)
SPARSE_FUNC_DEF(csr2dense)
SPARSE_FUNC(csr2dense, float, S)
SPARSE_FUNC(csr2dense, double, D)
SPARSE_FUNC(csr2dense, cfloat, C)
SPARSE_FUNC(csr2dense, cdouble, Z)
SPARSE_FUNC_DEF(csc2dense)
SPARSE_FUNC(csc2dense, float, S)
SPARSE_FUNC(csc2dense, double, D)
SPARSE_FUNC(csc2dense, cfloat, C)
SPARSE_FUNC(csc2dense, cdouble, Z)
SPARSE_FUNC_DEF(nnz)
SPARSE_FUNC(nnz, float, S)
SPARSE_FUNC(nnz, double, D)
SPARSE_FUNC(nnz, cfloat, C)
SPARSE_FUNC(nnz, cdouble, Z)
SPARSE_FUNC_DEF(gthr)
SPARSE_FUNC(gthr, float, S)
SPARSE_FUNC(gthr, double, D)
SPARSE_FUNC(gthr, cfloat, C)
SPARSE_FUNC(gthr, cdouble, Z)
#undef SPARSE_FUNC
#undef SPARSE_FUNC_DEF
// Partial template specialization of sparseConvertDenseToStorage for COO
// However, template specialization is not allowed
template<typename T>
SparseArray<T> sparseConvertDenseToCOO(const Array<T> &in) {
Array<uint> nonZeroIdx_ = where<T>(in);
Array<int> nonZeroIdx = cast<int, uint>(nonZeroIdx_);
dim_t nNZ = nonZeroIdx.elements();
Array<int> constDim = createValueArray<int>(dim4(nNZ), in.dims()[0]);
Array<int> rowIdx =
arithOp<int, af_mod_t>(nonZeroIdx, constDim, nonZeroIdx.dims());
Array<int> colIdx =
arithOp<int, af_div_t>(nonZeroIdx, constDim, nonZeroIdx.dims());
Array<T> values = copyArray<T>(in);
values.modDims(dim4(values.elements()));
values = lookup<T, int>(values, nonZeroIdx, 0);
return createArrayDataSparseArray<T>(in.dims(), values, rowIdx, colIdx,
AF_STORAGE_COO);
}
template<typename T, af_storage stype>
SparseArray<T> sparseConvertDenseToStorage(const Array<T> &in) {
const int M = in.dims()[0];
const int N = in.dims()[1];
// Create Sparse Matrix Descriptor
cusparseMatDescr_t descr = 0;
CUSPARSE_CHECK(cusparseCreateMatDescr(&descr));
cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
int d = -1;
cusparseDirection_t dir = CUSPARSE_DIRECTION_ROW;
if (stype == AF_STORAGE_CSR) {
d = M;
dir = CUSPARSE_DIRECTION_ROW;
} else {
d = N;
dir = CUSPARSE_DIRECTION_COLUMN;
}
Array<int> nnzPerDir = createEmptyArray<int>(dim4(d));
int nNZ = -1;
CUSPARSE_CHECK(nnz_func<T>()(sparseHandle(), dir, M, N, descr, in.get(),
in.strides()[1], nnzPerDir.get(), &nNZ));
Array<int> rowIdx = createEmptyArray<int>(dim4());
Array<int> colIdx = createEmptyArray<int>(dim4());
if (stype == AF_STORAGE_CSR) {
rowIdx = createEmptyArray<int>(dim4(M + 1));
colIdx = createEmptyArray<int>(dim4(nNZ));
} else {
rowIdx = createEmptyArray<int>(dim4(nNZ));
colIdx = createEmptyArray<int>(dim4(N + 1));
}
Array<T> values = createEmptyArray<T>(dim4(nNZ));
if (stype == AF_STORAGE_CSR)
CUSPARSE_CHECK(dense2csr_func<T>()(
sparseHandle(), M, N, descr, in.get(), in.strides()[1],
nnzPerDir.get(), values.get(), rowIdx.get(), colIdx.get()));
else
CUSPARSE_CHECK(dense2csc_func<T>()(
sparseHandle(), M, N, descr, in.get(), in.strides()[1],
nnzPerDir.get(), values.get(), rowIdx.get(), colIdx.get()));
    // Destroy Sparse Matrix Descriptor
CUSPARSE_CHECK(cusparseDestroyMatDescr(descr));
return createArrayDataSparseArray<T>(in.dims(), values, rowIdx, colIdx,
stype);
}
// Partial template specialization of sparseConvertStorageToDense for COO
// However, template specialization is not allowed
template<typename T>
Array<T> sparseConvertCOOToDense(const SparseArray<T> &in) {
Array<T> dense = createValueArray<T>(in.dims(), scalar<T>(0));
const Array<T> values = in.getValues();
const Array<int> rowIdx = in.getRowIdx();
const Array<int> colIdx = in.getColIdx();
kernel::coo2dense<T>(dense, values, rowIdx, colIdx);
return dense;
}
template<typename T, af_storage stype>
Array<T> sparseConvertStorageToDense(const SparseArray<T> &in) {
// Create Sparse Matrix Descriptor
cusparseMatDescr_t descr = 0;
CUSPARSE_CHECK(cusparseCreateMatDescr(&descr));
cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
int M = in.dims()[0];
int N = in.dims()[1];
Array<T> dense = createValueArray<T>(in.dims(), scalar<T>(0));
int d_strides1 = dense.strides()[1];
if (stype == AF_STORAGE_CSR)
CUSPARSE_CHECK(
csr2dense_func<T>()(sparseHandle(), M, N, descr,
in.getValues().get(), in.getRowIdx().get(),
in.getColIdx().get(), dense.get(), d_strides1));
else
CUSPARSE_CHECK(
csc2dense_func<T>()(sparseHandle(), M, N, descr,
in.getValues().get(), in.getRowIdx().get(),
in.getColIdx().get(), dense.get(), d_strides1));
// Destory Sparse Matrix Descriptor
CUSPARSE_CHECK(cusparseDestroyMatDescr(descr));
return dense;
}
template<typename T, af_storage dest, af_storage src>
SparseArray<T> sparseConvertStorageToStorage(const SparseArray<T> &in) {
using std::shared_ptr;
in.eval();
int nNZ = in.getNNZ();
SparseArray<T> converted = createEmptySparseArray<T>(in.dims(), nNZ, dest);
if (src == AF_STORAGE_CSR && dest == AF_STORAGE_COO) {
// Copy colIdx as is
CUDA_CHECK(
cudaMemcpyAsync(converted.getColIdx().get(), in.getColIdx().get(),
in.getColIdx().elements() * sizeof(int),
cudaMemcpyDeviceToDevice, cuda::getActiveStream()));
// cusparse function to expand compressed row into coordinate
CUSPARSE_CHECK(cusparseXcsr2coo(
sparseHandle(), in.getRowIdx().get(), nNZ, in.dims()[0],
converted.getRowIdx().get(), CUSPARSE_INDEX_BASE_ZERO));
// Call sort
size_t pBufferSizeInBytes = 0;
CUSPARSE_CHECK(cusparseXcoosort_bufferSizeExt(
sparseHandle(), in.dims()[0], in.dims()[1], nNZ,
converted.getRowIdx().get(), converted.getColIdx().get(),
&pBufferSizeInBytes));
shared_ptr<char> pBuffer(memAlloc<char>(pBufferSizeInBytes).release(),
memFree<char>);
shared_ptr<int> P(memAlloc<int>(nNZ).release(), memFree<int>);
CUSPARSE_CHECK(
cusparseCreateIdentityPermutation(sparseHandle(), nNZ, P.get()));
CUSPARSE_CHECK(cusparseXcoosortByColumn(
sparseHandle(), in.dims()[0], in.dims()[1], nNZ,
converted.getRowIdx().get(), converted.getColIdx().get(), P.get(),
(void *)pBuffer.get()));
CUSPARSE_CHECK(gthr_func<T>()(sparseHandle(), nNZ, in.getValues().get(),
converted.getValues().get(), P.get(),
CUSPARSE_INDEX_BASE_ZERO));
} else if (src == AF_STORAGE_COO && dest == AF_STORAGE_CSR) {
// The cusparse csr sort function is not behaving correctly.
        // So the workaround is to convert the COO into row major and then
// convert it to CSR
// Deep copy input into temporary COO Row Major
SparseArray<T> cooT = createArrayDataSparseArray<T>(
in.dims(), in.getValues(), in.getRowIdx(), in.getColIdx(),
in.getStorage(), true);
// Call sort to convert column major to row major
{
size_t pBufferSizeInBytes = 0;
CUSPARSE_CHECK(cusparseXcoosort_bufferSizeExt(
sparseHandle(), cooT.dims()[0], cooT.dims()[1], nNZ,
cooT.getRowIdx().get(), cooT.getColIdx().get(),
&pBufferSizeInBytes));
shared_ptr<char> pBuffer(
memAlloc<char>(pBufferSizeInBytes).release(), memFree<char>);
shared_ptr<int> P(memAlloc<int>(nNZ).release(), memFree<int>);
CUSPARSE_CHECK(cusparseCreateIdentityPermutation(sparseHandle(),
nNZ, P.get()));
CUSPARSE_CHECK(cusparseXcoosortByRow(
sparseHandle(), cooT.dims()[0], cooT.dims()[1], nNZ,
cooT.getRowIdx().get(), cooT.getColIdx().get(), P.get(),
(void *)pBuffer.get()));
CUSPARSE_CHECK(gthr_func<T>()(
sparseHandle(), nNZ, in.getValues().get(),
cooT.getValues().get(), P.get(), CUSPARSE_INDEX_BASE_ZERO));
}
// Copy values and colIdx as is
CUDA_CHECK(
cudaMemcpyAsync(converted.getValues().get(), cooT.getValues().get(),
cooT.getValues().elements() * sizeof(T),
cudaMemcpyDeviceToDevice, cuda::getActiveStream()));
CUDA_CHECK(
cudaMemcpyAsync(converted.getColIdx().get(), cooT.getColIdx().get(),
cooT.getColIdx().elements() * sizeof(int),
cudaMemcpyDeviceToDevice, cuda::getActiveStream()));
// cusparse function to compress row from coordinate
CUSPARSE_CHECK(cusparseXcoo2csr(
sparseHandle(), cooT.getRowIdx().get(), nNZ, cooT.dims()[0],
converted.getRowIdx().get(), CUSPARSE_INDEX_BASE_ZERO));
// No need to call CSRSORT
} else {
// Should never come here
AF_ERROR("CUDA Backend invalid conversion combination",
AF_ERR_NOT_SUPPORTED);
}
return converted;
}
#define INSTANTIATE_TO_STORAGE(T, S) \
template SparseArray<T> \
sparseConvertStorageToStorage<T, S, AF_STORAGE_CSR>( \
const SparseArray<T> &in); \
template SparseArray<T> \
sparseConvertStorageToStorage<T, S, AF_STORAGE_CSC>( \
const SparseArray<T> &in); \
template SparseArray<T> \
sparseConvertStorageToStorage<T, S, AF_STORAGE_COO>( \
const SparseArray<T> &in);
#define INSTANTIATE_COO_SPECIAL(T) \
template<> \
SparseArray<T> sparseConvertDenseToStorage<T, AF_STORAGE_COO>( \
const Array<T> &in) { \
return sparseConvertDenseToCOO<T>(in); \
} \
template<> \
Array<T> sparseConvertStorageToDense<T, AF_STORAGE_COO>( \
const SparseArray<T> &in) { \
return sparseConvertCOOToDense<T>(in); \
}
#define INSTANTIATE_SPARSE(T) \
template SparseArray<T> sparseConvertDenseToStorage<T, AF_STORAGE_CSR>( \
const Array<T> &in); \
template SparseArray<T> sparseConvertDenseToStorage<T, AF_STORAGE_CSC>( \
const Array<T> &in); \
\
template Array<T> sparseConvertStorageToDense<T, AF_STORAGE_CSR>( \
const SparseArray<T> &in); \
template Array<T> sparseConvertStorageToDense<T, AF_STORAGE_CSC>( \
const SparseArray<T> &in); \
\
INSTANTIATE_COO_SPECIAL(T) \
\
INSTANTIATE_TO_STORAGE(T, AF_STORAGE_CSR) \
INSTANTIATE_TO_STORAGE(T, AF_STORAGE_CSC) \
INSTANTIATE_TO_STORAGE(T, AF_STORAGE_COO)
INSTANTIATE_SPARSE(float)
INSTANTIATE_SPARSE(double)
INSTANTIATE_SPARSE(cfloat)
INSTANTIATE_SPARSE(cdouble)
#undef INSTANTIATE_TO_STORAGE
#undef INSTANTIATE_COO_SPECIAL
#undef INSTANTIATE_SPARSE
} // namespace cuda
|
ac9235a9aa5e6864f42099e0a5dea063d39db297.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// User: [email protected]
// ExecutionRequest[P:'erCho.cu',P:1,T:1,args:'',q:'cudalb']
// May 16 2019 18:03:51
#include "cputils.h" // Added by tablon
/*30 30 100 2 9 18 2 29 26 3 2 3 6 4 800 25 20 2 900
* Simplified simulation of fire extinguishing
*
* Computacion Paralela, Grado en Informatica (Universidad de Valladolid)
* 2018/2019
*
* v1.4
*
* (c) 2019 Arturo Gonzalez Escribano
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <cputils.h>
#define RADIUS_TYPE_1 3
#define RADIUS_TYPE_2_3 9
#define THRESHOLD 0.1f
/* Structure to store data of an extinguishing team */
typedef struct {
int x,y;
int type;
int target;
} Team;
/* Structure to store data of a fire focal point */
typedef struct {
int x,y;
int start;
int heat;
int active; // States: 0 Not yet activated; 1 Active; 2 Deactivated by a team
} FocalPoint;
/* Macro function to simplify accessing with two coordinates to a flattened array */
#define accessMat( arr, exp1, exp2 ) arr[ (exp1) * columns + (exp2) ]
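/* Kernel: set every cell of the float surface to 0 (one thread per cell) */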
__global__ void init(float *surface,int rows,int columns){
int idX=threadIdx.y+blockDim.y*blockIdx.y;
int idY=threadIdx.x+blockDim.x*blockIdx.x;
if (idX >= rows || idY>= columns) return;
surface[idX*columns+idY]=0;
}
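/* Kernel: set every cell of an integer grid to 0 (one thread per cell) */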
__global__ void initInt(int *surface, int rows, int columns){
int idX=threadIdx.y+blockDim.y*blockIdx.y;
int idY=threadIdx.x+blockDim.x*blockIdx.x;
if (idX >= rows || idY>= columns) return;
surface[idX*columns+idY]=0;
}
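/* Kernel: reduce the focal start iterations with atomicMin and write the earliest one to salida[0] */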
__global__ void get_first_activation(FocalPoint *focal, int num_focal,int *salida){
__shared__ int first_activation;
int id=threadIdx.x+blockDim.x*blockIdx.x;
if(threadIdx.x==0) first_activation=INT_MAX; // initialize the shared minimum once, not from every thread
__syncthreads();
if(id<num_focal) atomicMin(&first_activation,focal[id].start);
__syncthreads();
if(id==0)
salida[0]=first_activation;
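/* Kernel: activate focal points whose start iteration equals iter and count the ones already deactivated (result in salida[0]) */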
__global__ void activate_focal(FocalPoint *focal,int num_focal,int *salida,int iter){
__shared__ int num_deactivated;
int id=threadIdx.x+blockDim.x*blockIdx.x;
if(threadIdx.x==0) num_deactivated=0; // initialize the shared counter once, before any atomicAdd
__syncthreads();
//printf("iter hilo %d num_ %d\n",iter,num_deactivated );
if(id<num_focal){
// Count focal points already deactivated by a team
if ( focal[id].active == 2 ) {
atomicAdd(&num_deactivated,1);
}
// Activate focal points that start at this iteration
if ( focal[id].start == iter ) {
focal[id].active = 1;
}
}
__syncthreads();
if(id==0)
salida[0]=num_deactivated;
}
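/* Kernel: write the heat of each active focal point into its cell of the surface */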
__global__ void update_heat(float *surface,FocalPoint *focal, int columns , int num_focal){
int id=threadIdx.x+blockDim.x*blockIdx.x;
if(id>=num_focal || focal[id].active!=1) return;
surface[focal[id].x*columns+focal[id].y]=focal[id].heat;
}
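/* Kernel: copy the interior cells of surface into surfaceCopy (borders excluded) */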
__global__ void copy_surface(float *surface, float *surfaceCopy,int rows,int columns){
int idX=threadIdx.y+blockDim.y*blockIdx.y;
int idY=threadIdx.x+blockDim.x*blockIdx.x;
if (idX >= rows-1 || idX==0 || idY>= columns-1 || idY==0) return;
surfaceCopy[idX*columns+idY]=surface[idX*columns+idY];
}
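/* Kernel: update each interior cell with the average of its four neighbours in surfaceCopy */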
__global__ void update_surface(float *surface, float *surfaceCopy,int rows, int columns){
int idX=threadIdx.y+blockDim.y*blockIdx.y;
int idY=threadIdx.x+blockDim.x*blockIdx.x;
//printf("hola\n" );
if (idX >= rows-1 || idX==0 || idY>= columns-1 || idY==0) return;
surface[idX*columns+idY]=(
surfaceCopy[(idX-1)*columns+idY]+
surfaceCopy[(idX+1)*columns+idY]+
surfaceCopy[idX*columns+idY-1]+
surfaceCopy[idX*columns+idY+1])/4;
//printf("%f",surface[idX*columns+idY]);
/*int i, j;
for( i=1; i<rows-1; i++ )
for( j=1; j<columns-1; j++ )
accessMat( surface, i, j ) = (
accessMat( surfaceCopy, i-1, j ) +
accessMat( surfaceCopy, i+1, j ) +
accessMat( surfaceCopy, i, j-1 ) +
accessMat( surfaceCopy, i, j+1 ) ) / 4;*/
}
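/* Kernel: per-cell difference between surface and surfaceCopy, i.e. the residual of the last update */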
__global__ void compute_residual(float *surface, float *surfaceCopy,int rows,int columns,float *residuals){
int idX=threadIdx.y+blockDim.y*blockIdx.y;
int idY=threadIdx.x+blockDim.x*blockIdx.x;
//printf("hola\n" );
//if (idX >= rows-1 || idX==0 || idY>= columns-1 || idY==0) return;
if(idX>=rows || idY>=columns) return;
residuals[idX*columns+idY]=surface[idX*columns+idY]-surfaceCopy[idX*columns+idY];
}
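/* Kernel: one thread per team; pick the nearest active focal point, move one step towards it and deactivate it on arrival */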
__global__ void move_teams(Team *teams,FocalPoint *focal, int num_teams,int num_focal){
int id=threadIdx.x+blockDim.x*blockIdx.x;
if(id>=num_teams) return;
unsigned int j;
int distance = INT_MAX;
int target = -1;
int teamX = teams[id].x;
int teamY = teams[id].y;
#pragma unroll
for( j=0; j<num_focal; j++ ) {
if ( focal[j].active != 1 ) continue; // Skip non-active focal points
int local_distance = (focal[j].x - teamX)*(focal[j].x - teamX) + (focal[j].y - teamY)*(focal[j].y - teamY) ;
if ( local_distance < distance ) {
distance = local_distance;
target = j;
}
}
/* 4.3.2. Annotate target for the next stage */
teams[id].target = target;
/* 4.3.3. No active focal point to choose, no movement */
if ( target == -1 ) return;
//__syncthreads();
/* 4.3.4. Move in the focal point direction */
int focalX = focal[target].x;
int focalY = focal[target].y;
if ( teams[id].type == 1 ) {
// Type 1: Can move in diagonal
if ( focalX < teams[id].x ) teams[id].x--;
if ( focalX > teams[id].x ) teams[id].x++;
if ( focalY < teams[id].y ) teams[id].y--;
if ( focalY > teams[id].y) teams[id].y++;
}
else if ( teams[id].type == 2 ) {
// Type 2: First in horizontal direction, then in vertical direction
if ( focalY < teamY ) teams[id].y--;
else if ( focalY > teamY ) teams[id].y++;
else if ( focalX < teamX ) teams[id].x--;
else if ( focalX > teamX ) teams[id].x++;
}
else {
// Type 3: First in vertical direction, then in horizontal direction
if ( focalX < teamX ) teams[id].x--;
else if ( focalX > teamX ) teams[id].x++;
else if ( focalY < teamY ) teams[id].y--;
else if ( focalY > teamY ) teams[id].y++;
}
//printf("x %d y %d id %d\n", teams[id].x,teams[id].y,id);
if ( target != -1 && focalX == teams[id].x && focalY == teams[id].y
&& focal[target].active == 1 ){
focal[target].active = 2;
//printf("id %d\n",id);
}
}
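/* Kernel: one thread per team; add 1 with atomicAdd to every gpuAux cell inside the team's action radius (the radius-9 neighbourhood is fully unrolled below) */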
__global__ void compute_heat_reduction(Team *teams,int *gpuAux,int num_teams,int rows,int columns){
int id=threadIdx.x+blockDim.x*blockIdx.x;
if(id>=num_teams) return;
//int radius;
// Influence area of fixed radius depending on type
//if ( teams[id].type == 1 ) radius = 3;
//else radius = 9;
int teamX=teams[id].x;
int teamY=teams[id].y;
//#pragma unroll
//for( i=teams[id].x-radius; i<=teams[id].x+radius; i++ ) {
//#pragma unroll
//for( j=teams[id].y-radius; j<=teams[id].y+radius; j++ ) {
if (teams[id].type!=1){
if ( (teamX-9)>0 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX-9)*columns+teamY],1);
if ( (teamX-8)>0 && (teamY-4)>0 )
atomicAdd(&gpuAux[(teamX-8)*columns+teamY-4],1);
if ( (teamX-8)>0 && (teamY-3)>0 )
atomicAdd(&gpuAux[(teamX-8)*columns+teamY-3],1);
if ( (teamX-8)>0 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX-8)*columns+teamY-2],1);
if ( (teamX-8)>0 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX-8)*columns+teamY-1],1);
if ( (teamX-8)>0 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX-8)*columns+teamY],1);
if ( (teamX-8)>0 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX-8)*columns+teamY+1],1);
if ( (teamX-8)>0 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX-8)*columns+teamY+2],1);
if ( (teamX-8)>0 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[(teamX-8)*columns+teamY+3],1);
if ( (teamX-8)>0 && (teamY+4)<columns-1 )
atomicAdd(&gpuAux[(teamX-8)*columns+teamY+4],1);
if ( (teamX-7)>0 && (teamY-5)>0 )
atomicAdd(&gpuAux[(teamX-7)*columns+teamY-5],1);
if ( (teamX-7)>0 && (teamY-4)>0 )
atomicAdd(&gpuAux[(teamX-7)*columns+teamY-4],1);
if ( (teamX-7)>0 && (teamY-3)>0 )
atomicAdd(&gpuAux[(teamX-7)*columns+teamY-3],1);
if ( (teamX-7)>0 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX-7)*columns+teamY-2],1);
if ( (teamX-7)>0 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX-7)*columns+teamY-1],1);
if ( (teamX-7)>0 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX-7)*columns+teamY],1);
if ( (teamX-7)>0 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX-7)*columns+teamY+1],1);
if ( (teamX-7)>0 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX-7)*columns+teamY+2],1);
if ( (teamX-7)>0 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[(teamX-7)*columns+teamY+3],1);
if ( (teamX-7)>0 && (teamY+4)<columns-1 )
atomicAdd(&gpuAux[(teamX-7)*columns+teamY+4],1);
if ( (teamX-7)>0 && (teamY+5)<columns-1 )
atomicAdd(&gpuAux[(teamX-7)*columns+teamY+5],1);
if ( (teamX-6)>0 && (teamY-6)>0 )
atomicAdd(&gpuAux[(teamX-6)*columns+teamY-6],1);
if ( (teamX-6)>0 && (teamY-5)>0 )
atomicAdd(&gpuAux[(teamX-6)*columns+teamY-5],1);
if ( (teamX-6)>0 && (teamY-4)>0 )
atomicAdd(&gpuAux[(teamX-6)*columns+teamY-4],1);
if ( (teamX-6)>0 && (teamY-3)>0 )
atomicAdd(&gpuAux[(teamX-6)*columns+teamY-3],1);
if ( (teamX-6)>0 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX-6)*columns+teamY-2],1);
if ( (teamX-6)>0 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX-6)*columns+teamY-1],1);
if ( (teamX-6)>0 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX-6)*columns+teamY],1);
if ( (teamX-6)>0 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX-6)*columns+teamY+1],1);
if ( (teamX-6)>0 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX-6)*columns+teamY+2],1);
if ( (teamX-6)>0 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[(teamX-6)*columns+teamY+3],1);
if ( (teamX-6)>0 && (teamY+4)<columns-1 )
atomicAdd(&gpuAux[(teamX-6)*columns+teamY+4],1);
if ( (teamX-6)>0 && (teamY+5)<columns-1 )
atomicAdd(&gpuAux[(teamX-6)*columns+teamY+5],1);
if ( (teamX-6)>0 && (teamY+6)<columns-1 )
atomicAdd(&gpuAux[(teamX-6)*columns+teamY+6],1);
if ( (teamX-5)>0 && (teamY-7)>0 )
atomicAdd(&gpuAux[(teamX-5)*columns+teamY-7],1);
if ( (teamX-5)>0 && (teamY-6)>0 )
atomicAdd(&gpuAux[(teamX-5)*columns+teamY-6],1);
if ( (teamX-5)>0 && (teamY-5)>0 )
atomicAdd(&gpuAux[(teamX-5)*columns+teamY-5],1);
if ( (teamX-5)>0 && (teamY-4)>0 )
atomicAdd(&gpuAux[(teamX-5)*columns+teamY-4],1);
if ( (teamX-5)>0 && (teamY-3)>0 )
atomicAdd(&gpuAux[(teamX-5)*columns+teamY-3],1);
if ( (teamX-5)>0 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX-5)*columns+teamY-2],1);
if ( (teamX-5)>0 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX-5)*columns+teamY-1],1);
if ( (teamX-5)>0 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX-5)*columns+teamY],1);
if ( (teamX-5)>0 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX-5)*columns+teamY+1],1);
if ( (teamX-5)>0 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX-5)*columns+teamY+2],1);
if ( (teamX-5)>0 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[(teamX-5)*columns+teamY+3],1);
if ( (teamX-5)>0 && (teamY+4)<columns-1 )
atomicAdd(&gpuAux[(teamX-5)*columns+teamY+4],1);
if ( (teamX-5)>0 && (teamY+5)<columns-1 )
atomicAdd(&gpuAux[(teamX-5)*columns+teamY+5],1);
if ( (teamX-5)>0 && (teamY+6)<columns-1 )
atomicAdd(&gpuAux[(teamX-5)*columns+teamY+6],1);
if ( (teamX-5)>0 && (teamY+7)<columns-1 )
atomicAdd(&gpuAux[(teamX-5)*columns+teamY+7],1);
if ( (teamX-4)>0 && (teamY-8)>0 )
atomicAdd(&gpuAux[(teamX-4)*columns+teamY-8],1);
if ( (teamX-4)>0 && (teamY-7)>0 )
atomicAdd(&gpuAux[(teamX-4)*columns+teamY-7],1);
if ( (teamX-4)>0 && (teamY-6)>0 )
atomicAdd(&gpuAux[(teamX-4)*columns+teamY-6],1);
if ( (teamX-4)>0 && (teamY-5)>0 )
atomicAdd(&gpuAux[(teamX-4)*columns+teamY-5],1);
if ( (teamX-4)>0 && (teamY-4)>0 )
atomicAdd(&gpuAux[(teamX-4)*columns+teamY-4],1);
if ( (teamX-4)>0 && (teamY-3)>0 )
atomicAdd(&gpuAux[(teamX-4)*columns+teamY-3],1);
if ( (teamX-4)>0 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX-4)*columns+teamY-2],1);
if ( (teamX-4)>0 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX-4)*columns+teamY-1],1);
if ( (teamX-4)>0 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX-4)*columns+teamY],1);
if ( (teamX-4)>0 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX-4)*columns+teamY+1],1);
if ( (teamX-4)>0 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX-4)*columns+teamY+2],1);
if ( (teamX-4)>0 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[(teamX-4)*columns+teamY+3],1);
if ( (teamX-4)>0 && (teamY+4)<columns-1 )
atomicAdd(&gpuAux[(teamX-4)*columns+teamY+4],1);
if ( (teamX-4)>0 && (teamY+5)<columns-1 )
atomicAdd(&gpuAux[(teamX-4)*columns+teamY+5],1);
if ( (teamX-4)>0 && (teamY+6)<columns-1 )
atomicAdd(&gpuAux[(teamX-4)*columns+teamY+6],1);
if ( (teamX-4)>0 && (teamY+7)<columns-1 )
atomicAdd(&gpuAux[(teamX-4)*columns+teamY+7],1);
if ( (teamX-4)>0 && (teamY+8)<columns-1 )
atomicAdd(&gpuAux[(teamX-4)*columns+teamY+8],1);
if ( (teamX-3)>0 && (teamY-8)>0 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY-8],1);
if ( (teamX-3)>0 && (teamY-7)>0 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY-7],1);
if ( (teamX-3)>0 && (teamY-6)>0 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY-6],1);
if ( (teamX-3)>0 && (teamY-5)>0 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY-5],1);
if ( (teamX-3)>0 && (teamY-4)>0 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY-4],1);
if ( (teamX-3)>0 && (teamY-3)>0 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY-3],1);
if ( (teamX-3)>0 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY-2],1);
if ( (teamX-3)>0 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY-1],1);
if ( (teamX-3)>0 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY],1);
if ( (teamX-3)>0 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY+1],1);
if ( (teamX-3)>0 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY+2],1);
if ( (teamX-3)>0 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY+3],1);
if ( (teamX-3)>0 && (teamY+4)<columns-1 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY+4],1);
if ( (teamX-3)>0 && (teamY+5)<columns-1 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY+5],1);
if ( (teamX-3)>0 && (teamY+6)<columns-1 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY+6],1);
if ( (teamX-3)>0 && (teamY+7)<columns-1 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY+7],1);
if ( (teamX-3)>0 && (teamY+8)<columns-1 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY+8],1);
if ( (teamX-2)>0 && (teamY-8)>0 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY-8],1);
if ( (teamX-2)>0 && (teamY-7)>0 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY-7],1);
if ( (teamX-2)>0 && (teamY-6)>0 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY-6],1);
if ( (teamX-2)>0 && (teamY-5)>0 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY-5],1);
if ( (teamX-2)>0 && (teamY-4)>0 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY-4],1);
if ( (teamX-2)>0 && (teamY-3)>0 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY-3],1);
if ( (teamX-2)>0 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY-2],1);
if ( (teamX-2)>0 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY-1],1);
if ( (teamX-2)>0 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY],1);
if ( (teamX-2)>0 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY+1],1);
if ( (teamX-2)>0 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY+2],1);
if ( (teamX-2)>0 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY+3],1);
if ( (teamX-2)>0 && (teamY+4)<columns-1 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY+4],1);
if ( (teamX-2)>0 && (teamY+5)<columns-1 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY+5],1);
if ( (teamX-2)>0 && (teamY+6)<columns-1 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY+6],1);
if ( (teamX-2)>0 && (teamY+7)<columns-1 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY+7],1);
if ( (teamX-2)>0 && (teamY+8)<columns-1 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY+8],1);
if ( (teamX-1)>0 && (teamY-8)>0 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY-8],1);
if ( (teamX-1)>0 && (teamY-7)>0 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY-7],1);
if ( (teamX-1)>0 && (teamY-6)>0 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY-6],1);
if ( (teamX-1)>0 && (teamY-5)>0 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY-5],1);
if ( (teamX-1)>0 && (teamY-4)>0 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY-4],1);
if ( (teamX-1)>0 && (teamY-3)>0 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY-3],1);
if ( (teamX-1)>0 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY-2],1);
if ( (teamX-1)>0 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY-1],1);
if ( (teamX-1)>0 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY],1);
if ( (teamX-1)>0 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY+1],1);
if ( (teamX-1)>0 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY+2],1);
if ( (teamX-1)>0 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY+3],1);
if ( (teamX-1)>0 && (teamY+4)<columns-1 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY+4],1);
if ( (teamX-1)>0 && (teamY+5)<columns-1 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY+5],1);
if ( (teamX-1)>0 && (teamY+6)<columns-1 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY+6],1);
if ( (teamX-1)>0 && (teamY+7)<columns-1 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY+7],1);
if ( (teamX-1)>0 && (teamY+8)<columns-1 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY+8],1);
if ( (teamX)>0 && (teamY-9)>0 )
atomicAdd(&gpuAux[teamX*columns+teamY-9],1);
if ( (teamX)>0 && (teamY-8)>0 )
atomicAdd(&gpuAux[teamX*columns+teamY-8],1);
if ( (teamX)>0 && (teamY-7)>0 )
atomicAdd(&gpuAux[teamX*columns+teamY-7],1);
if ( (teamX)>0 && (teamY-6)>0 )
atomicAdd(&gpuAux[teamX*columns+teamY-6],1);
if ( (teamX)>0 && (teamY-5)>0 )
atomicAdd(&gpuAux[teamX*columns+teamY-5],1);
if ( (teamX)>0 && (teamY-4)>0 )
atomicAdd(&gpuAux[teamX*columns+teamY-4],1);
if ( (teamX)>0 && (teamY-3)>0 )
atomicAdd(&gpuAux[teamX*columns+teamY-3],1);
if ( (teamX)>0 && (teamY-2)>0 )
atomicAdd(&gpuAux[teamX*columns+teamY-2],1);
if ( (teamX)>0 && (teamY-1)>0 )
atomicAdd(&gpuAux[teamX*columns+teamY-1],1);
if ( (teamX)>0 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[teamX*columns+teamY],1);
if ( (teamX)>0 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[teamX*columns+teamY+1],1);
if ( (teamX)>0 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[teamX*columns+teamY+2],1);
if ( (teamX)>0 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[teamX*columns+teamY+3],1);
if ( (teamX)>0 && (teamY+4)<columns-1 )
atomicAdd(&gpuAux[teamX*columns+teamY+4],1);
if ( (teamX)>0 && (teamY+5)<columns-1 )
atomicAdd(&gpuAux[teamX*columns+teamY+5],1);
if ( (teamX)>0 && (teamY+6)<columns-1 )
atomicAdd(&gpuAux[teamX*columns+teamY+6],1);
if ( (teamX)>0 && (teamY+7)<columns-1 )
atomicAdd(&gpuAux[teamX*columns+teamY+7],1);
if ( (teamX)>0 && (teamY+8)<columns-1 )
atomicAdd(&gpuAux[teamX*columns+teamY+8],1);
if ( (teamX)>0 && (teamY+9)<columns-1 )
atomicAdd(&gpuAux[teamX*columns+teamY+9],1);
if ( (teamX+1)<rows-1 && (teamY-8)>0 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY-8],1);
if ( (teamX+1)<rows-1 && (teamY-7)>0 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY-7],1);
if ( (teamX+1)<rows-1 && (teamY-6)>0 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY-6],1);
if ( (teamX+1)<rows-1 && (teamY-5)>0 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY-5],1);
if ( (teamX+1)<rows-1 && (teamY-4)>0 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY-4],1);
if ( (teamX+1)<rows-1 && (teamY-3)>0 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY-3],1);
if ( (teamX+1)<rows-1 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY-2],1);
if ( (teamX+1)<rows-1 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY-1],1);
if ( (teamX+1)<rows-1 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY],1);
if ( (teamX+1)<rows-1 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY+1],1);
if ( (teamX+1)<rows-1 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY+2],1);
if ( (teamX+1)<rows-1 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY+3],1);
if ( (teamX+1)<rows-1 && (teamY+4)<columns-1 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY+4],1);
if ( (teamX+1)<rows-1 && (teamY+5)<columns-1 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY+5],1);
if ( (teamX+1)<rows-1 && (teamY+6)<columns-1 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY+6],1);
if ( (teamX+1)<rows-1 && (teamY+7)<columns-1 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY+7],1);
if ( (teamX+1)<rows-1 && (teamY+8)<columns-1 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY+8],1);
if ( (teamX+2)<rows-1 && (teamY-8)>0 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY-8],1);
if ( (teamX+2)<rows-1 && (teamY-7)>0 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY-7],1);
if ( (teamX+2)<rows-1 && (teamY-6)>0 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY-6],1);
if ( (teamX+2)<rows-1 && (teamY-5)>0 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY-5],1);
if ( (teamX+2)<rows-1 && (teamY-4)>0 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY-4],1);
if ( (teamX+2)<rows-1 && (teamY-3)>0 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY-3],1);
if ( (teamX+2)<rows-1 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY-2],1);
if ( (teamX+2)<rows-1 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY-1],1);
if ( (teamX+2)<rows-1 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY],1);
if ( (teamX+2)<rows-1 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY+1],1);
if ( (teamX+2)<rows-1 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY+2],1);
if ( (teamX+2)<rows-1 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY+3],1);
if ( (teamX+2)<rows-1 && (teamY+4)<columns-1 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY+4],1);
if ( (teamX+2)<rows-1 && (teamY+5)<columns-1 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY+5],1);
if ( (teamX+2)<rows-1 && (teamY+6)<columns-1 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY+6],1);
if ( (teamX+2)<rows-1 && (teamY+7)<columns-1 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY+7],1);
if ( (teamX+2)<rows-1 && (teamY+8)<columns-1 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY+8],1);
if ( (teamX+3)<rows-1 && (teamY-8)>0 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY-8],1);
if ( (teamX+3)<rows-1 && (teamY-7)>0 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY-7],1);
if ( (teamX+3)<rows-1 && (teamY-6)>0 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY-6],1);
if ( (teamX+3)<rows-1 && (teamY-5)>0 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY-5],1);
if ( (teamX+3)<rows-1 && (teamY-4)>0 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY-4],1);
if ( (teamX+3)<rows-1 && (teamY-3)>0 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY-3],1);
if ( (teamX+3)<rows-1 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY-2],1);
if ( (teamX+3)<rows-1 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY-1],1);
if ( (teamX+3)<rows-1 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY],1);
if ( (teamX+3)<rows-1 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY+1],1);
if ( (teamX+3)<rows-1 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY+2],1);
if ( (teamX+3)<rows-1 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY+3],1);
if ( (teamX+3)<rows-1 && (teamY+4)<columns-1 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY+4],1);
if ( (teamX+3)<rows-1 && (teamY+5)<columns-1 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY+5],1);
if ( (teamX+3)<rows-1 && (teamY+6)<columns-1 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY+6],1);
if ( (teamX+3)<rows-1 && (teamY+7)<columns-1 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY+7],1);
if ( (teamX+3)<rows-1 && (teamY+8)<columns-1 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY+8],1);
if ( (teamX+4)<rows-1 && (teamY-8)>0 )
atomicAdd(&gpuAux[(teamX+4)*columns+teamY-8],1);
if ( (teamX+4)<rows-1 && (teamY-7)>0 )
atomicAdd(&gpuAux[(teamX+4)*columns+teamY-7],1);
if ( (teamX+4)<rows-1 && (teamY-6)>0 )
atomicAdd(&gpuAux[(teamX+4)*columns+teamY-6],1);
if ( (teamX+4)<rows-1 && (teamY-5)>0 )
atomicAdd(&gpuAux[(teamX+4)*columns+teamY-5],1);
if ( (teamX+4)<rows-1 && (teamY-4)>0 )
atomicAdd(&gpuAux[(teamX+4)*columns+teamY-4],1);
if ( (teamX+4)<rows-1 && (teamY-3)>0 )
atomicAdd(&gpuAux[(teamX+4)*columns+teamY-3],1);
if ( (teamX+4)<rows-1 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX+4)*columns+teamY-2],1);
if ( (teamX+4)<rows-1 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX+4)*columns+teamY-1],1);
if ( (teamX+4)<rows-1 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX+4)*columns+teamY],1);
if ( (teamX+4)<rows-1 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX+4)*columns+teamY+1],1);
if ( (teamX+4)<rows-1 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX+4)*columns+teamY+2],1);
if ( (teamX+4)<rows-1 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[(teamX+4)*columns+teamY+3],1);
if ( (teamX+4)<rows-1 && (teamY+4)<columns-1 )
atomicAdd(&gpuAux[(teamX+4)*columns+teamY+4],1);
if ( (teamX+4)<rows-1 && (teamY+5)<columns-1 )
atomicAdd(&gpuAux[(teamX+4)*columns+teamY+5],1);
if ( (teamX+4)<rows-1 && (teamY+6)<columns-1 )
atomicAdd(&gpuAux[(teamX+4)*columns+teamY+6],1);
if ( (teamX+4)<rows-1 && (teamY+7)<columns-1 )
atomicAdd(&gpuAux[(teamX+4)*columns+teamY+7],1);
if ( (teamX+4)<rows-1 && (teamY+8)<columns-1 )
atomicAdd(&gpuAux[(teamX+4)*columns+teamY+8],1);
if ( (teamX+5)<rows-1 && (teamY-7)>0 )
atomicAdd(&gpuAux[(teamX+5)*columns+teamY-7],1);
if ( (teamX+5)<rows-1 && (teamY-6)>0 )
atomicAdd(&gpuAux[(teamX+5)*columns+teamY-6],1);
if ( (teamX+5)<rows-1 && (teamY-5)>0 )
atomicAdd(&gpuAux[(teamX+5)*columns+teamY-5],1);
if ( (teamX+5)<rows-1 && (teamY-4)>0 )
atomicAdd(&gpuAux[(teamX+5)*columns+teamY-4],1);
if ( (teamX+5)<rows-1 && (teamY-3)>0 )
atomicAdd(&gpuAux[(teamX+5)*columns+teamY-3],1);
if ( (teamX+5)<rows-1 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX+5)*columns+teamY-2],1);
if ( (teamX+5)<rows-1 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX+5)*columns+teamY-1],1);
if ( (teamX+5)<rows-1 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX+5)*columns+teamY],1);
if ( (teamX+5)<rows-1 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX+5)*columns+teamY+1],1);
if ( (teamX+5)<rows-1 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX+5)*columns+teamY+2],1);
if ( (teamX+5)<rows-1 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[(teamX+5)*columns+teamY+3],1);
if ( (teamX+5)<rows-1 && (teamY+4)<columns-1 )
atomicAdd(&gpuAux[(teamX+5)*columns+teamY+4],1);
if ( (teamX+5)<rows-1 && (teamY+5)<columns-1 )
atomicAdd(&gpuAux[(teamX+5)*columns+teamY+5],1);
if ( (teamX+5)<rows-1 && (teamY+6)<columns-1 )
atomicAdd(&gpuAux[(teamX+5)*columns+teamY+6],1);
if ( (teamX+5)<rows-1 && (teamY+7)<columns-1 )
atomicAdd(&gpuAux[(teamX+5)*columns+teamY+7],1);
if ( (teamX+6)<rows-1 && (teamY-6)>0 )
atomicAdd(&gpuAux[(teamX+6)*columns+teamY-6],1);
if ( (teamX+6)<rows-1 && (teamY-5)>0 )
atomicAdd(&gpuAux[(teamX+6)*columns+teamY-5],1);
if ( (teamX+6)<rows-1 && (teamY-4)>0 )
atomicAdd(&gpuAux[(teamX+6)*columns+teamY-4],1);
if ( (teamX+6)<rows-1 && (teamY-3)>0 )
atomicAdd(&gpuAux[(teamX+6)*columns+teamY-3],1);
if ( (teamX+6)<rows-1 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX+6)*columns+teamY-2],1);
if ( (teamX+6)<rows-1 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX+6)*columns+teamY-1],1);
if ( (teamX+6)<rows-1 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX+6)*columns+teamY],1);
if ( (teamX+6)<rows-1 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX+6)*columns+teamY+1],1);
if ( (teamX+6)<rows-1 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX+6)*columns+teamY+2],1);
if ( (teamX+6)<rows-1 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[(teamX+6)*columns+teamY+3],1);
if ( (teamX+6)<rows-1 && (teamY+4)<columns-1 )
atomicAdd(&gpuAux[(teamX+6)*columns+teamY+4],1);
if ( (teamX+6)<rows-1 && (teamY+5)<columns-1 )
atomicAdd(&gpuAux[(teamX+6)*columns+teamY+5],1);
if ( (teamX+6)<rows-1 && (teamY+6)<columns-1 )
atomicAdd(&gpuAux[(teamX+6)*columns+teamY+6],1);
if ( (teamX+7)<rows-1 && (teamY-5)>0 )
atomicAdd(&gpuAux[(teamX+7)*columns+teamY-5],1);
if ( (teamX+7)<rows-1 && (teamY-4)>0 )
atomicAdd(&gpuAux[(teamX+7)*columns+teamY-4],1);
if ( (teamX+7)<rows-1 && (teamY-3)>0 )
atomicAdd(&gpuAux[(teamX+7)*columns+teamY-3],1);
if ( (teamX+7)<rows-1 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX+7)*columns+teamY-2],1);
if ( (teamX+7)<rows-1 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX+7)*columns+teamY-1],1);
if ( (teamX+7)<rows-1 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX+7)*columns+teamY],1);
if ( (teamX+7)<rows-1 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX+7)*columns+teamY+1],1);
if ( (teamX+7)<rows-1 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX+7)*columns+teamY+2],1);
if ( (teamX+7)<rows-1 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[(teamX+7)*columns+teamY+3],1);
if ( (teamX+7)<rows-1 && (teamY+4)<columns-1 )
atomicAdd(&gpuAux[(teamX+7)*columns+teamY+4],1);
if ( (teamX+7)<rows-1 && (teamY+5)<columns-1 )
atomicAdd(&gpuAux[(teamX+7)*columns+teamY+5],1);
if ( (teamX+8)<rows-1 && (teamY-4)>0 )
atomicAdd(&gpuAux[(teamX+8)*columns+teamY-4],1);
if ( (teamX+8)<rows-1 && (teamY-3)>0 )
atomicAdd(&gpuAux[(teamX+8)*columns+teamY-3],1);
if ( (teamX+8)<rows-1 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX+8)*columns+teamY-2],1);
if ( (teamX+8)<rows-1 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX+8)*columns+teamY-1],1);
if ( (teamX+8)<rows-1 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX+8)*columns+teamY],1);
if ( (teamX+8)<rows-1 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX+8)*columns+teamY+1],1);
if ( (teamX+8)<rows-1 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX+8)*columns+teamY+2],1);
if ( (teamX+8)<rows-1 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[(teamX+8)*columns+teamY+3],1);
if ( (teamX+8)<rows-1 && (teamY+4)<columns-1 )
atomicAdd(&gpuAux[(teamX+8)*columns+teamY+4],1);
if ( (teamX+9)<rows-1 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX+9)*columns+teamY],1);
}
else{
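		// Type 1 teams (RADIUS_TYPE_1 == 3): the conditionals below enumerate every offset (dx,dy)
		// with dx*dx + dy*dy <= 9 around the team, each guarded so that border cells are skipped.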
if ( (teamX-3)>0 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY],1);
if ( (teamX-2)>0 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY-2],1);
if ( (teamX-2)>0 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY-1],1);
if ( (teamX-2)>0 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY],1);
if ( (teamX-2)>0 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY+1],1);
if ( (teamX-2)>0 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY+2],1);
if ( (teamX-1)>0 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY-2],1);
if ( (teamX-1)>0 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY-1],1);
if ( (teamX-1)>0 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY],1);
if ( (teamX-1)>0 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY+1],1);
if ( (teamX-1)>0 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY+2],1);
if ( (teamX)>0 && (teamY-3)>0 )
atomicAdd(&gpuAux[teamX*columns+teamY-3],1);
if ( (teamX)>0 && (teamY-2)>0 )
atomicAdd(&gpuAux[teamX*columns+teamY-2],1);
if ( (teamX)>0 && (teamY-1)>0 )
atomicAdd(&gpuAux[teamX*columns+teamY-1],1);
if ( (teamX)>0 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[teamX*columns+teamY],1);
if ( (teamX)>0 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[teamX*columns+teamY+1],1);
if ( (teamX)>0 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[teamX*columns+teamY+2],1);
if ( (teamX)>0 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[teamX*columns+teamY+3],1);
if ( (teamX+1)<rows-1 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY-2],1);
if ( (teamX+1)<rows-1 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY-1],1);
if ( (teamX+1)<rows-1 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY],1);
if ( (teamX+1)<rows-1 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY+1],1);
if ( (teamX+1)<rows-1 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY+2],1);
if ( (teamX+2)<rows-1 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY-2],1);
if ( (teamX+2)<rows-1 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY-1],1);
if ( (teamX+2)<rows-1 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY],1);
if ( (teamX+2)<rows-1 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY+1],1);
if ( (teamX+2)<rows-1 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY+2],1);
if ( (teamX+3)<rows-1 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY],1);
}
}
__global__ void reduce_heat3(float *surface, int *aux,int rows,int columns){
int idX=threadIdx.y+blockDim.y*blockIdx.y;
int idY=threadIdx.x+blockDim.x*blockIdx.x;
//printf("hola\n" );
if (idX >= rows-1 || idX==0 || idY>= columns-1 || idY==0) return;
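	// Each hit accumulated in aux[] by the teams cools this cell by a factor of 0.75;
	// the counter is then cleared for the next iteration.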
#pragma unroll
for(unsigned int i=aux[idX*columns+idY];i>0;i--)
surface[idX*columns+idY]*=0.75;
aux[idX*columns+idY]=0;
}
__global__ void reduce_kernel(const float* g_idata, float* g_odata, int size)
{
	// Shared memory for the per-block reduction
	extern __shared__ float tmp[];
	// Deactivate threads beyond the bounds of the input array
	int gid = threadIdx.x+blockDim.x*blockIdx.x;
	if ( gid >= size ) return;
	// Load one element into shared memory
	int tid = threadIdx.x;
	tmp[ tid ] = g_idata[ gid ];
	//printf("entrada %f glob red %f\n",g_idata[gid],tmp[tid]);
	// Make sure all warps of the block have loaded their data
	__syncthreads();
	// Generalization: the single block of the last level may have fewer elements to reduce
	int mysize = blockDim.x;
	if ( gridDim.x==1 )
		mysize = size;
	// Perform the reduction in shared memory (it keeps the maximum, not the sum)
	#pragma unroll
	for(unsigned int s = mysize/2; s >0; s /= 2) {
		// Check whether the current thread is active in this iteration
		if (tid<s) {
			// Keep the larger of the two elements assigned to this thread
			if(tmp[tid+s]>tmp[tid])
				tmp[tid] =tmp[tid+s];
		}
		__syncthreads();
	}
	// Thread 0 of each block writes the block's result to the
	// device global memory passed as a parameter (g_odata[])
	if (tid == 0){
		g_odata[blockIdx.x] = tmp[tid];
	}
}
/*
* Function: Print usage line in stderr
*/
void show_usage( char *program_name ) {
fprintf(stderr,"Usage: %s <config_file> | <command_line_args>\n", program_name );
fprintf(stderr,"\t<config_file> ::= -f <file_name>\n");
fprintf(stderr,"\t<command_line_args> ::= <rows> <columns> <maxIter> <numTeams> [ <teamX> <teamY> <teamType> ... ] <numFocalPoints> [ <focalX> <focalY> <focalStart> <focalTemperature> ... ]\n");
fprintf(stderr,"\n");
}
#ifdef DEBUG
/*
* Function: Print the current state of the simulation
*/
void print_status( int iteration, int rows, int columns, float *surface, int num_teams, Team *teams, int num_focal, FocalPoint *focal, float global_residual ) {
/*
* You don't need to optimize this function, it is only for pretty printing and debugging purposes.
* It is not compiled in the production versions of the program.
* Thus, it is never used when measuring times in the leaderboard
*/
int i,j;
printf("Iteration: %d\n", iteration );
printf("+");
for( j=0; j<columns; j++ ) printf("---");
printf("+\n");
for( i=0; i<rows; i++ ) {
printf("|");
for( j=0; j<columns; j++ ) {
char symbol;
if ( accessMat( surface, i, j ) >= 1000 ) symbol = '*';
else if ( accessMat( surface, i, j ) >= 100 ) symbol = '0' + (int)(accessMat( surface, i, j )/100);
else if ( accessMat( surface, i, j ) >= 50 ) symbol = '+';
else if ( accessMat( surface, i, j ) >= 25 ) symbol = '.';
else symbol = '0';
int t;
int flag_team = 0;
for( t=0; t<num_teams; t++ )
if ( teams[t].x == i && teams[t].y == j ) { flag_team = 1; break; }
if ( flag_team ) printf("[%c]", symbol );
else {
int f;
int flag_focal = 0;
for( f=0; f<num_focal; f++ )
if ( focal[f].x == i && focal[f].y == j && focal[f].active == 1 ) { flag_focal = 1; break; }
if ( flag_focal ) printf("(%c)", symbol );
else printf(" %c ", symbol );
}
}
printf("|\n");
}
printf("+");
for( j=0; j<columns; j++ ) printf("---");
printf("+\n");
printf("Global residual: %f\n\n", global_residual);
}
#endif
/*
* MAIN PROGRAM
*/
int main(int argc, char *argv[]) {
int i,j,t;
// Simulation data
int rows, columns, max_iter;
float *surface, *surfaceCopy;
int num_teams, num_focal;
Team *teams;
FocalPoint *focal;
/* 1. Read simulation arguments */
/* 1.1. Check minimum number of arguments */
if (argc<2) {
fprintf(stderr,"-- Error in arguments: No arguments\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
int read_from_file = ! strcmp( argv[1], "-f" );
/* 1.2. Read configuration from file */
if ( read_from_file ) {
/* 1.2.1. Open file */
if (argc<3) {
fprintf(stderr,"-- Error in arguments: file-name argument missing\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
FILE *args = cp_abrir_fichero( argv[2] );
if ( args == NULL ) {
fprintf(stderr,"-- Error in file: not found: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
/* 1.2.2. Read surface and maximum number of iterations */
int ok;
ok = fscanf(args, "%d %d %d", &rows, &columns, &max_iter);
if ( ok != 3 ) {
fprintf(stderr,"-- Error in file: reading rows, columns, max_iter from file: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
surface = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
surfaceCopy = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
if ( surface == NULL || surfaceCopy == NULL ) {
fprintf(stderr,"-- Error allocating: surface structures\n");
exit( EXIT_FAILURE );
}
/* 1.2.3. Teams information */
ok = fscanf(args, "%d", &num_teams );
if ( ok != 1 ) {
fprintf(stderr,"-- Error file, reading num_teams from file: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
teams = (Team *)malloc( sizeof(Team) * (size_t)num_teams );
if ( teams == NULL ) {
fprintf(stderr,"-- Error allocating: %d teams\n", num_teams );
exit( EXIT_FAILURE );
}
for( i=0; i<num_teams; i++ ) {
ok = fscanf(args, "%d %d %d", &teams[i].x, &teams[i].y, &teams[i].type);
if ( ok != 3 ) {
fprintf(stderr,"-- Error in file: reading team %d from file: %s\n", i, argv[1]);
exit( EXIT_FAILURE );
}
}
/* 1.2.4. Focal points information */
ok = fscanf(args, "%d", &num_focal );
if ( ok != 1 ) {
fprintf(stderr,"-- Error in file: reading num_focal from file: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
focal = (FocalPoint *)malloc( sizeof(FocalPoint) * (size_t)num_focal );
if ( focal == NULL ) {
fprintf(stderr,"-- Error allocating: %d focal points\n", num_focal );
exit( EXIT_FAILURE );
}
for( i=0; i<num_focal; i++ ) {
ok = fscanf(args, "%d %d %d %d", &focal[i].x, &focal[i].y, &focal[i].start, &focal[i].heat);
if ( ok != 4 ) {
fprintf(stderr,"-- Error in file: reading focal point %d from file: %s\n", i, argv[1]);
exit( EXIT_FAILURE );
}
focal[i].active = 0;
}
}
/* 1.3. Read configuration from arguments */
else {
/* 1.3.1. Check minimum number of arguments */
if (argc<6) {
fprintf(stderr, "-- Error in arguments: not enough arguments when reading configuration from the command line\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
/* 1.3.2. Surface and maximum number of iterations */
rows = atoi( argv[1] );
columns = atoi( argv[2] );
max_iter = atoi( argv[3] );
surface = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
surfaceCopy = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
/* 1.3.3. Teams information */
num_teams = atoi( argv[4] );
teams = (Team *)malloc( sizeof(Team) * (size_t)num_teams );
if ( teams == NULL ) {
fprintf(stderr,"-- Error allocating: %d teams\n", num_teams );
exit( EXIT_FAILURE );
}
if ( argc < num_teams*3 + 5 ) {
fprintf(stderr,"-- Error in arguments: not enough arguments for %d teams\n", num_teams );
exit( EXIT_FAILURE );
}
for( i=0; i<num_teams; i++ ) {
teams[i].x = atoi( argv[5+i*3] );
teams[i].y = atoi( argv[6+i*3] );
teams[i].type = atoi( argv[7+i*3] );
}
/* 1.3.4. Focal points information */
int focal_args = 5 + i*3;
if ( argc < focal_args+1 ) {
fprintf(stderr,"-- Error in arguments: not enough arguments for the number of focal points\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
num_focal = atoi( argv[focal_args] );
focal = (FocalPoint *)malloc( sizeof(FocalPoint) * (size_t)num_focal );
		if ( focal == NULL ) {
fprintf(stderr,"-- Error allocating: %d focal points\n", num_focal );
exit( EXIT_FAILURE );
}
if ( argc < focal_args + 1 + num_focal*4 ) {
fprintf(stderr,"-- Error in arguments: not enough arguments for %d focal points\n", num_focal );
exit( EXIT_FAILURE );
}
for( i=0; i<num_focal; i++ ) {
focal[i].x = atoi( argv[focal_args+i*4+1] );
focal[i].y = atoi( argv[focal_args+i*4+2] );
focal[i].start = atoi( argv[focal_args+i*4+3] );
focal[i].heat = atoi( argv[focal_args+i*4+4] );
focal[i].active = 0;
}
/* 1.3.5. Sanity check: No extra arguments at the end of line */
if ( argc > focal_args+i*4+1 ) {
fprintf(stderr,"-- Error in arguments: extra arguments at the end of the command line\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
}
#ifdef DEBUG
/* 1.4. Print arguments */
printf("Arguments, Rows: %d, Columns: %d, max_iter: %d\n", rows, columns, max_iter);
printf("Arguments, Teams: %d, Focal points: %d\n", num_teams, num_focal );
for( i=0; i<num_teams; i++ ) {
printf("\tTeam %d, position (%d,%d), type: %d\n", i, teams[i].x, teams[i].y, teams[i].type );
}
for( i=0; i<num_focal; i++ ) {
printf("\tFocal_point %d, position (%d,%d), start time: %d, temperature: %d\n", i,
focal[i].x,
focal[i].y,
focal[i].start,
focal[i].heat );
}
#endif // DEBUG
/* 2. Select GPU and start global timer */
hipSetDevice(0);
hipDeviceSynchronize();
double ttotal = cp_Wtime();
/*
*
* START HERE: DO NOT CHANGE THE CODE ABOVE THIS POINT
*
*/
float *gpuSurface, *gpuSurfaceCopy, *gpuResiduals;
int *gpuAux;
FocalPoint *gpuFocal;
Team *gpuTeam;
// double time1,time2;
int nearestUpperPow2 = pow(2,ceil(log2((double) rows*columns)));
hipMalloc((void **)&gpuSurface,sizeof(float)*rows*columns);
hipMalloc((void **)&gpuAux,sizeof(int)*rows*columns);
hipMalloc((void **) &gpuSurfaceCopy,sizeof(float)*rows*columns);
hipMalloc((void **) &gpuResiduals,sizeof(float)*nearestUpperPow2);
hipMalloc((void **) &gpuTeam,sizeof(Team)*num_teams);
hipMemcpy(gpuTeam,teams,sizeof(Team)*num_teams,hipMemcpyHostToDevice);
hipMalloc((void **) &gpuFocal,sizeof(FocalPoint)*num_focal);
hipMemcpy(gpuFocal,focal,sizeof(FocalPoint)*num_focal,hipMemcpyHostToDevice);
int tamBlockX= 128;
int tamBlockY= 1;
int tamGridX, tamGridY;
int tamBlockTeams=224;
int tamGridTeams;
int tamBlockFocal=224;
int tamGridFocal;
tamGridTeams= num_teams/tamBlockTeams;
if (num_teams%tamBlockTeams!=0) tamGridTeams++;
tamGridFocal= num_focal/tamBlockFocal;
if (num_focal%tamBlockFocal!=0) tamGridFocal++;
tamGridX= columns/tamBlockX;
if (columns%tamBlockX!=0) tamGridX++;
tamGridY= rows/tamBlockY;
if (rows%tamBlockY!=0) tamGridY++;
dim3 blockSize(tamBlockX,tamBlockY);
dim3 gridSize(tamGridX,tamGridY);
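	// blockSize is 128x1 threads (x runs along columns, y along rows); gridSize is
	// ceil(columns/128) x rows blocks, matching the (x = column, y = row) indexing in the surface kernels.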
#ifdef DEBUG
printf("tamGx %d tamGy %d\n",tamGridX,tamGridY);
#endif
	hipLaunchKernelGGL(( init), dim3(gridSize),dim3(blockSize), 0, 0, gpuSurface,rows,columns);
	//CUDA_CHECK();
	hipLaunchKernelGGL(( init), dim3(gridSize),dim3(blockSize), 0, 0, gpuSurfaceCopy,rows,columns);
//CUDA_CHECK();
/* 3. Initialize surface */
	/*for( i=0; i<rows; i++ )
		for( j=0; j<columns; j++ )
			accessMat( surface, i, j ) = 0.0; */
	/* 4. Simulation */
	int *gpuNum_deactivated;
	hipHostMalloc((void**) &gpuNum_deactivated,sizeof(int));
	gpuNum_deactivated[0]=0; // pinned host counter must start at zero before the first iteration reads it
int iter;
int flag_stability = 0;
//int first_activation = 0;
//int *gpuFirstActivation;
//hipHostMalloc((void**) &gpuFirstActivation,sizeof(int));
	//check_first_activation<<<tamGridFocal,tamBlockFocal>>>(gpuFocal,num_focal); a reduction across blocks is needed
//get_first_activation<<<tamGridFocal,tamBlockFocal>>>(gpuFocal,num_focal,gpuFirstActivation);
#pragma unroll
for( iter=0; iter<max_iter && ! flag_stability; iter++ ) {
//printf("iter %d\n",iter);
/* 4.1. Activate focal points */
//printf("num %d\n",gpuNum_deactivated[0] );
//hipMemcpy(gpuNum_deactivated,&num_deactivated,sizeof(int),hipMemcpyHostToDevice);
//printf("num %d\n",num_deactivated);
if(gpuNum_deactivated[0]<num_focal){
hipLaunchKernelGGL(( activate_focal), dim3(tamGridFocal),dim3(tamBlockFocal), 0, 0, gpuFocal,num_focal,gpuNum_deactivated,iter);
hipDeviceSynchronize();
//hipMemcpyAsync(&num_deactivated,gpuNum_deactivated,sizeof(int),hipMemcpyDeviceToHost,0);
}
//printf("num %d",num_deactivated);
//if(!first_activation) continue;
/* 4.2. Propagate heat (10 steps per each team movement) */
float global_residual;
int step;
//hipMemcpy(surfaceCopy,gpuSurfaceCopy,sizeof(float)*rows*columns,hipMemcpyDeviceToHost);
#pragma unroll
for( step=0; step<10; step++ ) {
/* 4.2.1. Update heat on active focal points */
//if(gpuNum_deactivated[0]<num_focal)
hipLaunchKernelGGL(( update_heat), dim3(tamGridFocal),dim3(tamBlockFocal), 0, 0, gpuSurface,gpuFocal,columns,num_focal);
//CUDA_CHECK();
//accessMat( surface, x, y ) = focal[i].heat;
/* 4.2.2. Copy values of the surface in ancillary structure (Skip borders) */
//copy_surface<<<gridSize,blockSize>>>(gpuSurface,gpuSurfaceCopy,rows,columns);
// error=hipGetLastError();
// if(error!= hipSuccess)
// printf("%s\n",hipGetErrorString(error));
float *aux=gpuSurface;
gpuSurface=gpuSurfaceCopy;
gpuSurfaceCopy=aux;
//CUDA_CHECK();
			/*for( i=1; i<rows-1; i++ )
				for( j=1; j<columns-1; j++ )
					accessMat( surfaceCopy, i, j ) = accessMat( surface, i, j ); */
			/* 4.2.3. Update surface values (skip borders) */
hipLaunchKernelGGL(( update_surface), dim3(gridSize),dim3(blockSize), 0, 0, gpuSurface,gpuSurfaceCopy,rows,columns);
//CUDA_CHECK();
			/*for( i=1; i<rows-1; i++ )
				for( j=1; j<columns-1; j++ )
					accessMat( surface, i, j ) = (
						accessMat( surfaceCopy, i-1, j ) +
						accessMat( surfaceCopy, i+1, j ) +
						accessMat( surfaceCopy, i, j-1 ) +
						accessMat( surfaceCopy, i, j+1 ) ) / 4; */
			/* 4.2.4. Compute the maximum residual difference (absolute value) */
if(step==0 && gpuNum_deactivated[0]==num_focal){
//time1=cp_Wtime();
//init<<<blockSize,gridSize>>>(gpuResiduals,rows,columns);
hipLaunchKernelGGL(( compute_residual), dim3(gridSize),dim3(blockSize), 0, 0, gpuSurface,gpuSurfaceCopy,rows,columns,gpuResiduals);
//int numValues = nearestUpperPow2;
int redSize = nearestUpperPow2;
int blockSizeR = (1024);
int sharedMemorySize = blockSizeR * sizeof(float);
while ( redSize > 1 )
{
int baseNumBlocks = redSize/blockSizeR;
int additionalBlock;
if(redSize%blockSizeR==0)
additionalBlock = 0;
else
additionalBlock = 1;
int numBlocks = baseNumBlocks + additionalBlock;
//printf("numB %d size %d\n",numBlocks,redSize);
//if(numBlocks==1) exit(0);
hipLaunchKernelGGL(( reduce_kernel), dim3(numBlocks), dim3(blockSizeR), sharedMemorySize , 0, gpuResiduals, gpuResiduals, redSize);
redSize = numBlocks;
}
hipMemcpyAsync(&global_residual, gpuResiduals, sizeof(float), hipMemcpyDeviceToHost,0);
//printf("glob %f\n",global_residual);
// printf("reesiduo %f\n",global_residual);
//time2+=cp_Wtime()-time1;
}
}
/* If the global residual is lower than THRESHOLD, we have reached enough stability, stop simulation at the end of this iteration */
/* 4.3. Move teams */
if(gpuNum_deactivated[0]<num_focal){
hipLaunchKernelGGL(( move_teams), dim3(tamGridTeams),dim3(tamBlockTeams), 0, 0, gpuTeam,gpuFocal,num_teams,num_focal);
}
/* 4.4. Team actions */
//hipMemcpy(surface,gpuSurface,sizeof(float)*rows*columns,hipMemcpyDeviceToHost);
//initInt<<<gridSize,blockSize>>>()
hipLaunchKernelGGL(( compute_heat_reduction), dim3(tamGridTeams),dim3(tamBlockTeams), 0, 0, gpuTeam,gpuAux,num_teams,rows,columns);
#ifdef UNROLL
int *aux;
aux = (int *)malloc( sizeof(int) * (size_t)rows * (size_t)columns );
hipMemcpy(aux,gpuAux,sizeof(int)*rows*columns,hipMemcpyDeviceToHost);
for( i=0;i<rows;i++){
for( j=0;j<columns;j++)
printf("%d ",aux[i*columns+j]);
printf("\n" );
}
exit(0);
#endif
hipLaunchKernelGGL(( reduce_heat3), dim3(gridSize),dim3(blockSize), 0, 0, gpuSurface,gpuAux,rows,columns);
#ifdef DEBUG
/* 4.5. DEBUG: Print the current state of the simulation at the end of each iteration */
hipMemcpy(teams,gpuTeam,sizeof(Team)*num_teams,hipMemcpyDeviceToHost);
hipMemcpy(surface,gpuSurface,sizeof(float)*rows*columns,hipMemcpyDeviceToHost);
print_status( iter, rows, columns, surface, num_teams, teams, num_focal, focal, global_residual );
#endif // DEBUG
if( gpuNum_deactivated[0] == num_focal && global_residual < THRESHOLD ) flag_stability = 1;
}
hipMemcpy(surface,gpuSurface,sizeof(float)*rows*columns,hipMemcpyDeviceToHost);
//hipMemcpy(focal,gpuFocal,sizeof(FocalPoint)*num_focal,hipMemcpyDeviceToHost);
//hipFree(gpuSurface);
//hipFree(gpuSurfaceCopy);
//hipFree(gpuTeam);
//hipFree(gpuFocal);
//printf("time1 %f\n",time2);
/*
*
* STOP HERE: DO NOT CHANGE THE CODE BELOW THIS POINT
*
*/
/* 5. Stop global time */
hipDeviceSynchronize();
ttotal = cp_Wtime() - ttotal;
/* 6. Output for leaderboard */
printf("\n");
/* 6.1. Total computation time */
printf("Time: %lf\n", ttotal );
/* 6.2. Results: Number of iterations, position of teams, residual heat on the focal points */
printf("Result: %d", iter);
/*
for (i=0; i<num_teams; i++)
printf(" %d %d", teams[i].x, teams[i].y );
*/
for (i=0; i<num_focal; i++)
printf(" %.6f", accessMat( surface, focal[i].x, focal[i].y ) );
printf("\n");
/* 7. Free resources */
free( teams );
free( focal );
free( surface );
free( surfaceCopy );
/* 8. End */
return 0;
}
| ac9235a9aa5e6864f42099e0a5dea063d39db297.cu | // User: [email protected]
// ExecutionRequest[P:'erCho.cu',P:1,T:1,args:'',q:'cudalb']
// May 16 2019 18:03:51
#include "cputils.h" // Added by tablon
/* Example command-line arguments: 30 30 100 2 9 18 2 29 26 3 2 3 6 4 800 25 20 2 900 */
/*
* Simplified simulation of fire extinguishing
*
* Computacion Paralela, Grado en Informatica (Universidad de Valladolid)
* 2018/2019
*
* v1.4
*
* (c) 2019 Arturo Gonzalez Escribano
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <cputils.h>
#define RADIUS_TYPE_1 3
#define RADIUS_TYPE_2_3 9
#define THRESHOLD 0.1f
/* Structure to store data of an extinguishing team */
typedef struct {
int x,y;
int type;
int target;
} Team;
/* Structure to store data of a fire focal point */
typedef struct {
int x,y;
int start;
int heat;
int active; // States: 0 Not yet activated; 1 Active; 2 Deactivated by a team
} FocalPoint;
/* Macro function to simplify accessing with two coordinates to a flattened array */
#define accessMat( arr, exp1, exp2 ) arr[ (exp1) * columns + (exp2) ]
__global__ void init(float *surface,int rows,int columns){
int idX=threadIdx.y+blockDim.y*blockIdx.y;
int idY=threadIdx.x+blockDim.x*blockIdx.x;
if (idX >= rows || idY>= columns) return;
surface[idX*columns+idY]=0;
}
__global__ void initInt(int *surface, int rows, int columns){
int idX=threadIdx.y+blockDim.y*blockIdx.y;
int idY=threadIdx.x+blockDim.x*blockIdx.x;
if (idX >= rows || idY>= columns) return;
surface[idX*columns+idY]=0;
}
__global__ void get_first_activation(FocalPoint *focal, int num_focal,int *salida){
__shared__ int first_activation;
int id=threadIdx.x+blockDim.x*blockIdx.x;
if(id>=num_focal) return;
	first_activation=INT_MAX; // seed high so atomicMin can record the earliest start time (seeding with 0 would always win)
	__syncthreads();
	atomicMin(&first_activation,focal[id].start);
	__syncthreads();
if(id==0)
salida[0]=first_activation;
}
__global__ void activate_focal(FocalPoint *focal,int num_focal,int *salida,int iter){
__shared__ int num_deactivated;
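	// Note: the counter lives in per-block shared memory and only global thread 0 writes salida[0],
	// so the count is only complete while all focal points fit in a single block (up to 224 threads as launched from main).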
int id=threadIdx.x+blockDim.x*blockIdx.x;
if(id>=num_focal) return;
num_deactivated=0;
//printf("iter hilo %d num_ %d\n",iter,num_deactivated );
if ( focal[id].active == 2 ) {
atomicAdd(&num_deactivated,1);
}
if ( focal[id].start == iter ) {
focal[id].active = 1;
}
__syncthreads();
if(id==0)
salida[0]=num_deactivated;
// Count focal points already deactivated by a team
}
__global__ void update_heat(float *surface,FocalPoint *focal, int columns , int num_focal){
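	// One thread per focal point: active focal points stamp their heat value onto their surface cell.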
int id=threadIdx.x+blockDim.x*blockIdx.x;
if(id>=num_focal || focal[id].active!=1) return;
surface[focal[id].x*columns+focal[id].y]=focal[id].heat;
}
__global__ void copy_surface(float *surface, float *surfaceCopy,int rows,int columns){
int idX=threadIdx.y+blockDim.y*blockIdx.y;
int idY=threadIdx.x+blockDim.x*blockIdx.x;
if (idX >= rows-1 || idX==0 || idY>= columns-1 || idY==0) return;
surfaceCopy[idX*columns+idY]=surface[idX*columns+idY];
}
__global__ void update_surface(float *surface, float *surfaceCopy,int rows, int columns){
int idX=threadIdx.y+blockDim.y*blockIdx.y;
int idY=threadIdx.x+blockDim.x*blockIdx.x;
//printf("hola\n" );
if (idX >= rows-1 || idX==0 || idY>= columns-1 || idY==0) return;
surface[idX*columns+idY]=(
surfaceCopy[(idX-1)*columns+idY]+
surfaceCopy[(idX+1)*columns+idY]+
surfaceCopy[idX*columns+idY-1]+
surfaceCopy[idX*columns+idY+1])/4;
//printf("%f",surface[idX*columns+idY]);
/*int i, j;
for( i=1; i<rows-1; i++ )
for( j=1; j<columns-1; j++ )
accessMat( surface, i, j ) = (
accessMat( surfaceCopy, i-1, j ) +
accessMat( surfaceCopy, i+1, j ) +
accessMat( surfaceCopy, i, j-1 ) +
accessMat( surfaceCopy, i, j+1 ) ) / 4;*/
}
__global__ void compute_residual(float *surface, float *surfaceCopy,int rows,int columns,float *residuals){
int idX=threadIdx.y+blockDim.y*blockIdx.y;
int idY=threadIdx.x+blockDim.x*blockIdx.x;
//printf("hola\n" );
//if (idX >= rows-1 || idX==0 || idY>= columns-1 || idY==0) return;
if(idX>=rows || idY>=columns) return;
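	// Store the signed difference surface - surfaceCopy for every cell; reduce_kernel later
	// keeps the maximum of these values as the global residual.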
residuals[idX*columns+idY]=surface[idX*columns+idY]-surfaceCopy[idX*columns+idY];
}
__global__ void move_teams(Team *teams,FocalPoint *focal, int num_teams,int num_focal){
int id=threadIdx.x+blockDim.x*blockIdx.x;
if(id>=num_teams) return;
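	// One thread per team: pick the nearest active focal point (squared Euclidean distance),
	// advance one cell towards it according to the team type, and deactivate it on arrival.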
unsigned int j;
int distance = INT_MAX;
int target = -1;
int teamX = teams[id].x;
int teamY = teams[id].y;
#pragma unroll
for( j=0; j<num_focal; j++ ) {
if ( focal[j].active != 1 ) continue; // Skip non-active focal points
int local_distance = (focal[j].x - teamX)*(focal[j].x - teamX) + (focal[j].y - teamY)*(focal[j].y - teamY) ;
if ( local_distance < distance ) {
distance = local_distance;
target = j;
}
}
/* 4.3.2. Annotate target for the next stage */
teams[id].target = target;
/* 4.3.3. No active focal point to choose, no movement */
if ( target == -1 ) return;
//__syncthreads();
/* 4.3.4. Move in the focal point direction */
int focalX = focal[target].x;
int focalY = focal[target].y;
if ( teams[id].type == 1 ) {
// Type 1: Can move in diagonal
if ( focalX < teams[id].x ) teams[id].x--;
if ( focalX > teams[id].x ) teams[id].x++;
if ( focalY < teams[id].y ) teams[id].y--;
if ( focalY > teams[id].y) teams[id].y++;
}
else if ( teams[id].type == 2 ) {
// Type 2: First in horizontal direction, then in vertical direction
if ( focalY < teamY ) teams[id].y--;
else if ( focalY > teamY ) teams[id].y++;
else if ( focalX < teamX ) teams[id].x--;
else if ( focalX > teamX ) teams[id].x++;
}
else {
// Type 3: First in vertical direction, then in horizontal direction
if ( focalX < teamX ) teams[id].x--;
else if ( focalX > teamX ) teams[id].x++;
else if ( focalY < teamY ) teams[id].y--;
else if ( focalY > teamY ) teams[id].y++;
}
//printf("x %d y %d id %d\n", teams[id].x,teams[id].y,id);
if ( target != -1 && focalX == teams[id].x && focalY == teams[id].y
&& focal[target].active == 1 ){
focal[target].active = 2;
//printf("id %d\n",id);
}
}
__global__ void compute_heat_reduction(Team *teams,int *gpuAux,int num_teams,int rows,int columns){
int id=threadIdx.x+blockDim.x*blockIdx.x;
if(id>=num_teams) return;
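	// One thread per team: accumulate this team's influence area into the gpuAux hit counters
	// with atomicAdd; reduce_heat3 later turns each accumulated hit into a 0.75x cooling of the cell.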
//int radius;
// Influence area of fixed radius depending on type
//if ( teams[id].type == 1 ) radius = 3;
//else radius = 9;
int teamX=teams[id].x;
int teamY=teams[id].y;
//#pragma unroll
//for( i=teams[id].x-radius; i<=teams[id].x+radius; i++ ) {
//#pragma unroll
//for( j=teams[id].y-radius; j<=teams[id].y+radius; j++ ) {
if (teams[id].type!=1){
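	// Types 2 and 3 (RADIUS_TYPE_2_3 == 9): the hand-unrolled conditionals below visit every offset
	// (dx,dy) with dx*dx + dy*dy <= 81 around the team, guarded against the fixed border.
	// A roughly equivalent, non-unrolled sketch of the same pattern (for reading purposes only;
	// the unrolled guards below are the author's version):
	//   for (int dx = -9; dx <= 9; dx++)
	//     for (int dy = -9; dy <= 9; dy++)
	//       if (dx*dx + dy*dy <= 81
	//           && teamX+dx > 0 && teamX+dx < rows-1
	//           && teamY+dy > 0 && teamY+dy < columns-1)
	//         atomicAdd(&gpuAux[(teamX+dx)*columns + (teamY+dy)], 1);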
if ( (teamX-9)>0 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX-9)*columns+teamY],1);
if ( (teamX-8)>0 && (teamY-4)>0 )
atomicAdd(&gpuAux[(teamX-8)*columns+teamY-4],1);
if ( (teamX-8)>0 && (teamY-3)>0 )
atomicAdd(&gpuAux[(teamX-8)*columns+teamY-3],1);
if ( (teamX-8)>0 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX-8)*columns+teamY-2],1);
if ( (teamX-8)>0 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX-8)*columns+teamY-1],1);
if ( (teamX-8)>0 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX-8)*columns+teamY],1);
if ( (teamX-8)>0 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX-8)*columns+teamY+1],1);
if ( (teamX-8)>0 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX-8)*columns+teamY+2],1);
if ( (teamX-8)>0 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[(teamX-8)*columns+teamY+3],1);
if ( (teamX-8)>0 && (teamY+4)<columns-1 )
atomicAdd(&gpuAux[(teamX-8)*columns+teamY+4],1);
if ( (teamX-7)>0 && (teamY-5)>0 )
atomicAdd(&gpuAux[(teamX-7)*columns+teamY-5],1);
if ( (teamX-7)>0 && (teamY-4)>0 )
atomicAdd(&gpuAux[(teamX-7)*columns+teamY-4],1);
if ( (teamX-7)>0 && (teamY-3)>0 )
atomicAdd(&gpuAux[(teamX-7)*columns+teamY-3],1);
if ( (teamX-7)>0 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX-7)*columns+teamY-2],1);
if ( (teamX-7)>0 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX-7)*columns+teamY-1],1);
if ( (teamX-7)>0 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX-7)*columns+teamY],1);
if ( (teamX-7)>0 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX-7)*columns+teamY+1],1);
if ( (teamX-7)>0 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX-7)*columns+teamY+2],1);
if ( (teamX-7)>0 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[(teamX-7)*columns+teamY+3],1);
if ( (teamX-7)>0 && (teamY+4)<columns-1 )
atomicAdd(&gpuAux[(teamX-7)*columns+teamY+4],1);
if ( (teamX-7)>0 && (teamY+5)<columns-1 )
atomicAdd(&gpuAux[(teamX-7)*columns+teamY+5],1);
if ( (teamX-6)>0 && (teamY-6)>0 )
atomicAdd(&gpuAux[(teamX-6)*columns+teamY-6],1);
if ( (teamX-6)>0 && (teamY-5)>0 )
atomicAdd(&gpuAux[(teamX-6)*columns+teamY-5],1);
if ( (teamX-6)>0 && (teamY-4)>0 )
atomicAdd(&gpuAux[(teamX-6)*columns+teamY-4],1);
if ( (teamX-6)>0 && (teamY-3)>0 )
atomicAdd(&gpuAux[(teamX-6)*columns+teamY-3],1);
if ( (teamX-6)>0 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX-6)*columns+teamY-2],1);
if ( (teamX-6)>0 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX-6)*columns+teamY-1],1);
if ( (teamX-6)>0 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX-6)*columns+teamY],1);
if ( (teamX-6)>0 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX-6)*columns+teamY+1],1);
if ( (teamX-6)>0 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX-6)*columns+teamY+2],1);
if ( (teamX-6)>0 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[(teamX-6)*columns+teamY+3],1);
if ( (teamX-6)>0 && (teamY+4)<columns-1 )
atomicAdd(&gpuAux[(teamX-6)*columns+teamY+4],1);
if ( (teamX-6)>0 && (teamY+5)<columns-1 )
atomicAdd(&gpuAux[(teamX-6)*columns+teamY+5],1);
if ( (teamX-6)>0 && (teamY+6)<columns-1 )
atomicAdd(&gpuAux[(teamX-6)*columns+teamY+6],1);
if ( (teamX-5)>0 && (teamY-7)>0 )
atomicAdd(&gpuAux[(teamX-5)*columns+teamY-7],1);
if ( (teamX-5)>0 && (teamY-6)>0 )
atomicAdd(&gpuAux[(teamX-5)*columns+teamY-6],1);
if ( (teamX-5)>0 && (teamY-5)>0 )
atomicAdd(&gpuAux[(teamX-5)*columns+teamY-5],1);
if ( (teamX-5)>0 && (teamY-4)>0 )
atomicAdd(&gpuAux[(teamX-5)*columns+teamY-4],1);
if ( (teamX-5)>0 && (teamY-3)>0 )
atomicAdd(&gpuAux[(teamX-5)*columns+teamY-3],1);
if ( (teamX-5)>0 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX-5)*columns+teamY-2],1);
if ( (teamX-5)>0 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX-5)*columns+teamY-1],1);
if ( (teamX-5)>0 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX-5)*columns+teamY],1);
if ( (teamX-5)>0 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX-5)*columns+teamY+1],1);
if ( (teamX-5)>0 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX-5)*columns+teamY+2],1);
if ( (teamX-5)>0 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[(teamX-5)*columns+teamY+3],1);
if ( (teamX-5)>0 && (teamY+4)<columns-1 )
atomicAdd(&gpuAux[(teamX-5)*columns+teamY+4],1);
if ( (teamX-5)>0 && (teamY+5)<columns-1 )
atomicAdd(&gpuAux[(teamX-5)*columns+teamY+5],1);
if ( (teamX-5)>0 && (teamY+6)<columns-1 )
atomicAdd(&gpuAux[(teamX-5)*columns+teamY+6],1);
if ( (teamX-5)>0 && (teamY+7)<columns-1 )
atomicAdd(&gpuAux[(teamX-5)*columns+teamY+7],1);
if ( (teamX-4)>0 && (teamY-8)>0 )
atomicAdd(&gpuAux[(teamX-4)*columns+teamY-8],1);
if ( (teamX-4)>0 && (teamY-7)>0 )
atomicAdd(&gpuAux[(teamX-4)*columns+teamY-7],1);
if ( (teamX-4)>0 && (teamY-6)>0 )
atomicAdd(&gpuAux[(teamX-4)*columns+teamY-6],1);
if ( (teamX-4)>0 && (teamY-5)>0 )
atomicAdd(&gpuAux[(teamX-4)*columns+teamY-5],1);
if ( (teamX-4)>0 && (teamY-4)>0 )
atomicAdd(&gpuAux[(teamX-4)*columns+teamY-4],1);
if ( (teamX-4)>0 && (teamY-3)>0 )
atomicAdd(&gpuAux[(teamX-4)*columns+teamY-3],1);
if ( (teamX-4)>0 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX-4)*columns+teamY-2],1);
if ( (teamX-4)>0 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX-4)*columns+teamY-1],1);
if ( (teamX-4)>0 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX-4)*columns+teamY],1);
if ( (teamX-4)>0 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX-4)*columns+teamY+1],1);
if ( (teamX-4)>0 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX-4)*columns+teamY+2],1);
if ( (teamX-4)>0 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[(teamX-4)*columns+teamY+3],1);
if ( (teamX-4)>0 && (teamY+4)<columns-1 )
atomicAdd(&gpuAux[(teamX-4)*columns+teamY+4],1);
if ( (teamX-4)>0 && (teamY+5)<columns-1 )
atomicAdd(&gpuAux[(teamX-4)*columns+teamY+5],1);
if ( (teamX-4)>0 && (teamY+6)<columns-1 )
atomicAdd(&gpuAux[(teamX-4)*columns+teamY+6],1);
if ( (teamX-4)>0 && (teamY+7)<columns-1 )
atomicAdd(&gpuAux[(teamX-4)*columns+teamY+7],1);
if ( (teamX-4)>0 && (teamY+8)<columns-1 )
atomicAdd(&gpuAux[(teamX-4)*columns+teamY+8],1);
if ( (teamX-3)>0 && (teamY-8)>0 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY-8],1);
if ( (teamX-3)>0 && (teamY-7)>0 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY-7],1);
if ( (teamX-3)>0 && (teamY-6)>0 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY-6],1);
if ( (teamX-3)>0 && (teamY-5)>0 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY-5],1);
if ( (teamX-3)>0 && (teamY-4)>0 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY-4],1);
if ( (teamX-3)>0 && (teamY-3)>0 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY-3],1);
if ( (teamX-3)>0 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY-2],1);
if ( (teamX-3)>0 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY-1],1);
if ( (teamX-3)>0 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY],1);
if ( (teamX-3)>0 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY+1],1);
if ( (teamX-3)>0 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY+2],1);
if ( (teamX-3)>0 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY+3],1);
if ( (teamX-3)>0 && (teamY+4)<columns-1 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY+4],1);
if ( (teamX-3)>0 && (teamY+5)<columns-1 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY+5],1);
if ( (teamX-3)>0 && (teamY+6)<columns-1 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY+6],1);
if ( (teamX-3)>0 && (teamY+7)<columns-1 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY+7],1);
if ( (teamX-3)>0 && (teamY+8)<columns-1 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY+8],1);
if ( (teamX-2)>0 && (teamY-8)>0 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY-8],1);
if ( (teamX-2)>0 && (teamY-7)>0 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY-7],1);
if ( (teamX-2)>0 && (teamY-6)>0 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY-6],1);
if ( (teamX-2)>0 && (teamY-5)>0 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY-5],1);
if ( (teamX-2)>0 && (teamY-4)>0 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY-4],1);
if ( (teamX-2)>0 && (teamY-3)>0 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY-3],1);
if ( (teamX-2)>0 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY-2],1);
if ( (teamX-2)>0 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY-1],1);
if ( (teamX-2)>0 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY],1);
if ( (teamX-2)>0 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY+1],1);
if ( (teamX-2)>0 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY+2],1);
if ( (teamX-2)>0 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY+3],1);
if ( (teamX-2)>0 && (teamY+4)<columns-1 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY+4],1);
if ( (teamX-2)>0 && (teamY+5)<columns-1 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY+5],1);
if ( (teamX-2)>0 && (teamY+6)<columns-1 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY+6],1);
if ( (teamX-2)>0 && (teamY+7)<columns-1 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY+7],1);
if ( (teamX-2)>0 && (teamY+8)<columns-1 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY+8],1);
if ( (teamX-1)>0 && (teamY-8)>0 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY-8],1);
if ( (teamX-1)>0 && (teamY-7)>0 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY-7],1);
if ( (teamX-1)>0 && (teamY-6)>0 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY-6],1);
if ( (teamX-1)>0 && (teamY-5)>0 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY-5],1);
if ( (teamX-1)>0 && (teamY-4)>0 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY-4],1);
if ( (teamX-1)>0 && (teamY-3)>0 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY-3],1);
if ( (teamX-1)>0 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY-2],1);
if ( (teamX-1)>0 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY-1],1);
if ( (teamX-1)>0 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY],1);
if ( (teamX-1)>0 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY+1],1);
if ( (teamX-1)>0 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY+2],1);
if ( (teamX-1)>0 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY+3],1);
if ( (teamX-1)>0 && (teamY+4)<columns-1 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY+4],1);
if ( (teamX-1)>0 && (teamY+5)<columns-1 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY+5],1);
if ( (teamX-1)>0 && (teamY+6)<columns-1 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY+6],1);
if ( (teamX-1)>0 && (teamY+7)<columns-1 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY+7],1);
if ( (teamX-1)>0 && (teamY+8)<columns-1 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY+8],1);
if ( (teamX)>0 && (teamY-9)>0 )
atomicAdd(&gpuAux[teamX*columns+teamY-9],1);
if ( (teamX)>0 && (teamY-8)>0 )
atomicAdd(&gpuAux[teamX*columns+teamY-8],1);
if ( (teamX)>0 && (teamY-7)>0 )
atomicAdd(&gpuAux[teamX*columns+teamY-7],1);
if ( (teamX)>0 && (teamY-6)>0 )
atomicAdd(&gpuAux[teamX*columns+teamY-6],1);
if ( (teamX)>0 && (teamY-5)>0 )
atomicAdd(&gpuAux[teamX*columns+teamY-5],1);
if ( (teamX)>0 && (teamY-4)>0 )
atomicAdd(&gpuAux[teamX*columns+teamY-4],1);
if ( (teamX)>0 && (teamY-3)>0 )
atomicAdd(&gpuAux[teamX*columns+teamY-3],1);
if ( (teamX)>0 && (teamY-2)>0 )
atomicAdd(&gpuAux[teamX*columns+teamY-2],1);
if ( (teamX)>0 && (teamY-1)>0 )
atomicAdd(&gpuAux[teamX*columns+teamY-1],1);
if ( (teamX)>0 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[teamX*columns+teamY],1);
if ( (teamX)>0 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[teamX*columns+teamY+1],1);
if ( (teamX)>0 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[teamX*columns+teamY+2],1);
if ( (teamX)>0 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[teamX*columns+teamY+3],1);
if ( (teamX)>0 && (teamY+4)<columns-1 )
atomicAdd(&gpuAux[teamX*columns+teamY+4],1);
if ( (teamX)>0 && (teamY+5)<columns-1 )
atomicAdd(&gpuAux[teamX*columns+teamY+5],1);
if ( (teamX)>0 && (teamY+6)<columns-1 )
atomicAdd(&gpuAux[teamX*columns+teamY+6],1);
if ( (teamX)>0 && (teamY+7)<columns-1 )
atomicAdd(&gpuAux[teamX*columns+teamY+7],1);
if ( (teamX)>0 && (teamY+8)<columns-1 )
atomicAdd(&gpuAux[teamX*columns+teamY+8],1);
if ( (teamX)>0 && (teamY+9)<columns-1 )
atomicAdd(&gpuAux[teamX*columns+teamY+9],1);
if ( (teamX+1)<rows-1 && (teamY-8)>0 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY-8],1);
if ( (teamX+1)<rows-1 && (teamY-7)>0 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY-7],1);
if ( (teamX+1)<rows-1 && (teamY-6)>0 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY-6],1);
if ( (teamX+1)<rows-1 && (teamY-5)>0 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY-5],1);
if ( (teamX+1)<rows-1 && (teamY-4)>0 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY-4],1);
if ( (teamX+1)<rows-1 && (teamY-3)>0 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY-3],1);
if ( (teamX+1)<rows-1 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY-2],1);
if ( (teamX+1)<rows-1 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY-1],1);
if ( (teamX+1)<rows-1 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY],1);
if ( (teamX+1)<rows-1 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY+1],1);
if ( (teamX+1)<rows-1 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY+2],1);
if ( (teamX+1)<rows-1 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY+3],1);
if ( (teamX+1)<rows-1 && (teamY+4)<columns-1 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY+4],1);
if ( (teamX+1)<rows-1 && (teamY+5)<columns-1 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY+5],1);
if ( (teamX+1)<rows-1 && (teamY+6)<columns-1 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY+6],1);
if ( (teamX+1)<rows-1 && (teamY+7)<columns-1 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY+7],1);
if ( (teamX+1)<rows-1 && (teamY+8)<columns-1 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY+8],1);
if ( (teamX+2)<rows-1 && (teamY-8)>0 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY-8],1);
if ( (teamX+2)<rows-1 && (teamY-7)>0 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY-7],1);
if ( (teamX+2)<rows-1 && (teamY-6)>0 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY-6],1);
if ( (teamX+2)<rows-1 && (teamY-5)>0 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY-5],1);
if ( (teamX+2)<rows-1 && (teamY-4)>0 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY-4],1);
if ( (teamX+2)<rows-1 && (teamY-3)>0 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY-3],1);
if ( (teamX+2)<rows-1 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY-2],1);
if ( (teamX+2)<rows-1 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY-1],1);
if ( (teamX+2)<rows-1 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY],1);
if ( (teamX+2)<rows-1 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY+1],1);
if ( (teamX+2)<rows-1 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY+2],1);
if ( (teamX+2)<rows-1 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY+3],1);
if ( (teamX+2)<rows-1 && (teamY+4)<columns-1 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY+4],1);
if ( (teamX+2)<rows-1 && (teamY+5)<columns-1 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY+5],1);
if ( (teamX+2)<rows-1 && (teamY+6)<columns-1 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY+6],1);
if ( (teamX+2)<rows-1 && (teamY+7)<columns-1 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY+7],1);
if ( (teamX+2)<rows-1 && (teamY+8)<columns-1 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY+8],1);
if ( (teamX+3)<rows-1 && (teamY-8)>0 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY-8],1);
if ( (teamX+3)<rows-1 && (teamY-7)>0 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY-7],1);
if ( (teamX+3)<rows-1 && (teamY-6)>0 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY-6],1);
if ( (teamX+3)<rows-1 && (teamY-5)>0 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY-5],1);
if ( (teamX+3)<rows-1 && (teamY-4)>0 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY-4],1);
if ( (teamX+3)<rows-1 && (teamY-3)>0 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY-3],1);
if ( (teamX+3)<rows-1 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY-2],1);
if ( (teamX+3)<rows-1 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY-1],1);
if ( (teamX+3)<rows-1 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY],1);
if ( (teamX+3)<rows-1 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY+1],1);
if ( (teamX+3)<rows-1 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY+2],1);
if ( (teamX+3)<rows-1 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY+3],1);
if ( (teamX+3)<rows-1 && (teamY+4)<columns-1 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY+4],1);
if ( (teamX+3)<rows-1 && (teamY+5)<columns-1 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY+5],1);
if ( (teamX+3)<rows-1 && (teamY+6)<columns-1 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY+6],1);
if ( (teamX+3)<rows-1 && (teamY+7)<columns-1 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY+7],1);
if ( (teamX+3)<rows-1 && (teamY+8)<columns-1 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY+8],1);
if ( (teamX+4)<rows-1 && (teamY-8)>0 )
atomicAdd(&gpuAux[(teamX+4)*columns+teamY-8],1);
if ( (teamX+4)<rows-1 && (teamY-7)>0 )
atomicAdd(&gpuAux[(teamX+4)*columns+teamY-7],1);
if ( (teamX+4)<rows-1 && (teamY-6)>0 )
atomicAdd(&gpuAux[(teamX+4)*columns+teamY-6],1);
if ( (teamX+4)<rows-1 && (teamY-5)>0 )
atomicAdd(&gpuAux[(teamX+4)*columns+teamY-5],1);
if ( (teamX+4)<rows-1 && (teamY-4)>0 )
atomicAdd(&gpuAux[(teamX+4)*columns+teamY-4],1);
if ( (teamX+4)<rows-1 && (teamY-3)>0 )
atomicAdd(&gpuAux[(teamX+4)*columns+teamY-3],1);
if ( (teamX+4)<rows-1 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX+4)*columns+teamY-2],1);
if ( (teamX+4)<rows-1 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX+4)*columns+teamY-1],1);
if ( (teamX+4)<rows-1 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX+4)*columns+teamY],1);
if ( (teamX+4)<rows-1 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX+4)*columns+teamY+1],1);
if ( (teamX+4)<rows-1 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX+4)*columns+teamY+2],1);
if ( (teamX+4)<rows-1 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[(teamX+4)*columns+teamY+3],1);
if ( (teamX+4)<rows-1 && (teamY+4)<columns-1 )
atomicAdd(&gpuAux[(teamX+4)*columns+teamY+4],1);
if ( (teamX+4)<rows-1 && (teamY+5)<columns-1 )
atomicAdd(&gpuAux[(teamX+4)*columns+teamY+5],1);
if ( (teamX+4)<rows-1 && (teamY+6)<columns-1 )
atomicAdd(&gpuAux[(teamX+4)*columns+teamY+6],1);
if ( (teamX+4)<rows-1 && (teamY+7)<columns-1 )
atomicAdd(&gpuAux[(teamX+4)*columns+teamY+7],1);
if ( (teamX+4)<rows-1 && (teamY+8)<columns-1 )
atomicAdd(&gpuAux[(teamX+4)*columns+teamY+8],1);
if ( (teamX+5)<rows-1 && (teamY-7)>0 )
atomicAdd(&gpuAux[(teamX+5)*columns+teamY-7],1);
if ( (teamX+5)<rows-1 && (teamY-6)>0 )
atomicAdd(&gpuAux[(teamX+5)*columns+teamY-6],1);
if ( (teamX+5)<rows-1 && (teamY-5)>0 )
atomicAdd(&gpuAux[(teamX+5)*columns+teamY-5],1);
if ( (teamX+5)<rows-1 && (teamY-4)>0 )
atomicAdd(&gpuAux[(teamX+5)*columns+teamY-4],1);
if ( (teamX+5)<rows-1 && (teamY-3)>0 )
atomicAdd(&gpuAux[(teamX+5)*columns+teamY-3],1);
if ( (teamX+5)<rows-1 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX+5)*columns+teamY-2],1);
if ( (teamX+5)<rows-1 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX+5)*columns+teamY-1],1);
if ( (teamX+5)<rows-1 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX+5)*columns+teamY],1);
if ( (teamX+5)<rows-1 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX+5)*columns+teamY+1],1);
if ( (teamX+5)<rows-1 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX+5)*columns+teamY+2],1);
if ( (teamX+5)<rows-1 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[(teamX+5)*columns+teamY+3],1);
if ( (teamX+5)<rows-1 && (teamY+4)<columns-1 )
atomicAdd(&gpuAux[(teamX+5)*columns+teamY+4],1);
if ( (teamX+5)<rows-1 && (teamY+5)<columns-1 )
atomicAdd(&gpuAux[(teamX+5)*columns+teamY+5],1);
if ( (teamX+5)<rows-1 && (teamY+6)<columns-1 )
atomicAdd(&gpuAux[(teamX+5)*columns+teamY+6],1);
if ( (teamX+5)<rows-1 && (teamY+7)<columns-1 )
atomicAdd(&gpuAux[(teamX+5)*columns+teamY+7],1);
if ( (teamX+6)<rows-1 && (teamY-6)>0 )
atomicAdd(&gpuAux[(teamX+6)*columns+teamY-6],1);
if ( (teamX+6)<rows-1 && (teamY-5)>0 )
atomicAdd(&gpuAux[(teamX+6)*columns+teamY-5],1);
if ( (teamX+6)<rows-1 && (teamY-4)>0 )
atomicAdd(&gpuAux[(teamX+6)*columns+teamY-4],1);
if ( (teamX+6)<rows-1 && (teamY-3)>0 )
atomicAdd(&gpuAux[(teamX+6)*columns+teamY-3],1);
if ( (teamX+6)<rows-1 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX+6)*columns+teamY-2],1);
if ( (teamX+6)<rows-1 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX+6)*columns+teamY-1],1);
if ( (teamX+6)<rows-1 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX+6)*columns+teamY],1);
if ( (teamX+6)<rows-1 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX+6)*columns+teamY+1],1);
if ( (teamX+6)<rows-1 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX+6)*columns+teamY+2],1);
if ( (teamX+6)<rows-1 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[(teamX+6)*columns+teamY+3],1);
if ( (teamX+6)<rows-1 && (teamY+4)<columns-1 )
atomicAdd(&gpuAux[(teamX+6)*columns+teamY+4],1);
if ( (teamX+6)<rows-1 && (teamY+5)<columns-1 )
atomicAdd(&gpuAux[(teamX+6)*columns+teamY+5],1);
if ( (teamX+6)<rows-1 && (teamY+6)<columns-1 )
atomicAdd(&gpuAux[(teamX+6)*columns+teamY+6],1);
if ( (teamX+7)<rows-1 && (teamY-5)>0 )
atomicAdd(&gpuAux[(teamX+7)*columns+teamY-5],1);
if ( (teamX+7)<rows-1 && (teamY-4)>0 )
atomicAdd(&gpuAux[(teamX+7)*columns+teamY-4],1);
if ( (teamX+7)<rows-1 && (teamY-3)>0 )
atomicAdd(&gpuAux[(teamX+7)*columns+teamY-3],1);
if ( (teamX+7)<rows-1 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX+7)*columns+teamY-2],1);
if ( (teamX+7)<rows-1 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX+7)*columns+teamY-1],1);
if ( (teamX+7)<rows-1 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX+7)*columns+teamY],1);
if ( (teamX+7)<rows-1 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX+7)*columns+teamY+1],1);
if ( (teamX+7)<rows-1 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX+7)*columns+teamY+2],1);
if ( (teamX+7)<rows-1 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[(teamX+7)*columns+teamY+3],1);
if ( (teamX+7)<rows-1 && (teamY+4)<columns-1 )
atomicAdd(&gpuAux[(teamX+7)*columns+teamY+4],1);
if ( (teamX+7)<rows-1 && (teamY+5)<columns-1 )
atomicAdd(&gpuAux[(teamX+7)*columns+teamY+5],1);
if ( (teamX+8)<rows-1 && (teamY-4)>0 )
atomicAdd(&gpuAux[(teamX+8)*columns+teamY-4],1);
if ( (teamX+8)<rows-1 && (teamY-3)>0 )
atomicAdd(&gpuAux[(teamX+8)*columns+teamY-3],1);
if ( (teamX+8)<rows-1 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX+8)*columns+teamY-2],1);
if ( (teamX+8)<rows-1 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX+8)*columns+teamY-1],1);
if ( (teamX+8)<rows-1 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX+8)*columns+teamY],1);
if ( (teamX+8)<rows-1 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX+8)*columns+teamY+1],1);
if ( (teamX+8)<rows-1 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX+8)*columns+teamY+2],1);
if ( (teamX+8)<rows-1 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[(teamX+8)*columns+teamY+3],1);
if ( (teamX+8)<rows-1 && (teamY+4)<columns-1 )
atomicAdd(&gpuAux[(teamX+8)*columns+teamY+4],1);
if ( (teamX+9)<rows-1 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX+9)*columns+teamY],1);
}
else{
if ( (teamX-3)>0 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX-3)*columns+teamY],1);
if ( (teamX-2)>0 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY-2],1);
if ( (teamX-2)>0 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY-1],1);
if ( (teamX-2)>0 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY],1);
if ( (teamX-2)>0 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY+1],1);
if ( (teamX-2)>0 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX-2)*columns+teamY+2],1);
if ( (teamX-1)>0 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY-2],1);
if ( (teamX-1)>0 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY-1],1);
if ( (teamX-1)>0 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY],1);
if ( (teamX-1)>0 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY+1],1);
if ( (teamX-1)>0 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX-1)*columns+teamY+2],1);
if ( (teamX)>0 && (teamY-3)>0 )
atomicAdd(&gpuAux[teamX*columns+teamY-3],1);
if ( (teamX)>0 && (teamY-2)>0 )
atomicAdd(&gpuAux[teamX*columns+teamY-2],1);
if ( (teamX)>0 && (teamY-1)>0 )
atomicAdd(&gpuAux[teamX*columns+teamY-1],1);
if ( (teamX)>0 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[teamX*columns+teamY],1);
if ( (teamX)>0 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[teamX*columns+teamY+1],1);
if ( (teamX)>0 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[teamX*columns+teamY+2],1);
if ( (teamX)>0 && (teamY+3)<columns-1 )
atomicAdd(&gpuAux[teamX*columns+teamY+3],1);
if ( (teamX+1)<rows-1 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY-2],1);
if ( (teamX+1)<rows-1 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY-1],1);
if ( (teamX+1)<rows-1 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY],1);
if ( (teamX+1)<rows-1 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY+1],1);
if ( (teamX+1)<rows-1 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX+1)*columns+teamY+2],1);
if ( (teamX+2)<rows-1 && (teamY-2)>0 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY-2],1);
if ( (teamX+2)<rows-1 && (teamY-1)>0 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY-1],1);
if ( (teamX+2)<rows-1 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY],1);
if ( (teamX+2)<rows-1 && (teamY+1)<columns-1 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY+1],1);
if ( (teamX+2)<rows-1 && (teamY+2)<columns-1 )
atomicAdd(&gpuAux[(teamX+2)*columns+teamY+2],1);
if ( (teamX+3)<rows-1 && (teamY)>0 && teamY<columns-1 )
atomicAdd(&gpuAux[(teamX+3)*columns+teamY],1);
}
}
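/*
 * The heavily unrolled kernel above adds 1 to the gpuAux counter of every cell that
 * falls inside a team's action radius (offsets out to 9 cells in the first branch,
 * out to 3 in the else branch), clipping at the surface borders. reduce_heat3 below
 * then applies the 0.75 cooling factor once per accumulated hit and resets the counter.
 */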
__global__ void reduce_heat3(float *surface, int *aux,int rows,int columns){
int idX=threadIdx.y+blockDim.y*blockIdx.y;
int idY=threadIdx.x+blockDim.x*blockIdx.x;
//printf("hola\n" );
if (idX >= rows-1 || idX==0 || idY>= columns-1 || idY==0) return;
#pragma unroll
for(unsigned int i=aux[idX*columns+idY];i>0;i--)
surface[idX*columns+idY]*=0.75;
aux[idX*columns+idY]=0;
}
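/*
 * reduce_kernel performs one pass of a block-level max reduction: each block loads its
 * chunk of g_idata into shared memory, halves the number of active threads each step
 * while keeping the larger value of each pair, and thread 0 writes one value per block
 * to g_odata. The host invokes it repeatedly (see the while loop in main) until a
 * single value, the global residual, remains.
 */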
__global__ void reduce_kernel(const float* g_idata, float* g_odata, int size)
{
// Shared memory
extern __shared__ float tmp[];
// Deactivate threads that fall outside the bounds of the input array
int gid = threadIdx.x+blockDim.x*blockIdx.x;
if ( gid >= size ) return;
// Load this thread's element into shared memory
int tid = threadIdx.x;
tmp[ tid ] = g_idata[ gid ];
//printf("entrada %f glob red %f\n",g_idata[gid],tmp[tid]);
// Make sure all warps in the block have loaded their data
__syncthreads();
// Generalization: the single block of the last level may have fewer elements to reduce
int mysize = blockDim.x;
if ( gridDim.x==1 )
mysize = size;
// Perform the reduction in shared memory
#pragma unroll
for(unsigned int s = mysize/2; s >0; s /= 2) {
// Check whether the current thread is active in this iteration
if (tid<s) {
// Reduce by keeping the larger of the two elements assigned to this thread
if(tmp[tid+s]>tmp[tid])
tmp[tid] =tmp[tid+s];
}
__syncthreads();
}
// Thread 0 of each block writes the final result of the reduction
// to the device global memory passed as a parameter (g_odata[])
if (tid == 0){
g_odata[blockIdx.x] = tmp[tid];
}
}
/*
* Function: Print usage line in stderr
*/
void show_usage( char *program_name ) {
fprintf(stderr,"Usage: %s <config_file> | <command_line_args>\n", program_name );
fprintf(stderr,"\t<config_file> ::= -f <file_name>\n");
fprintf(stderr,"\t<command_line_args> ::= <rows> <columns> <maxIter> <numTeams> [ <teamX> <teamY> <teamType> ... ] <numFocalPoints> [ <focalX> <focalY> <focalStart> <focalTemperature> ... ]\n");
fprintf(stderr,"\n");
}
#ifdef DEBUG
/*
* Function: Print the current state of the simulation
*/
void print_status( int iteration, int rows, int columns, float *surface, int num_teams, Team *teams, int num_focal, FocalPoint *focal, float global_residual ) {
/*
* You don't need to optimize this function, it is only for pretty printing and debugging purposes.
* It is not compiled in the production versions of the program.
* Thus, it is never used when measuring times in the leaderboard
*/
int i,j;
printf("Iteration: %d\n", iteration );
printf("+");
for( j=0; j<columns; j++ ) printf("---");
printf("+\n");
for( i=0; i<rows; i++ ) {
printf("|");
for( j=0; j<columns; j++ ) {
char symbol;
if ( accessMat( surface, i, j ) >= 1000 ) symbol = '*';
else if ( accessMat( surface, i, j ) >= 100 ) symbol = '0' + (int)(accessMat( surface, i, j )/100);
else if ( accessMat( surface, i, j ) >= 50 ) symbol = '+';
else if ( accessMat( surface, i, j ) >= 25 ) symbol = '.';
else symbol = '0';
int t;
int flag_team = 0;
for( t=0; t<num_teams; t++ )
if ( teams[t].x == i && teams[t].y == j ) { flag_team = 1; break; }
if ( flag_team ) printf("[%c]", symbol );
else {
int f;
int flag_focal = 0;
for( f=0; f<num_focal; f++ )
if ( focal[f].x == i && focal[f].y == j && focal[f].active == 1 ) { flag_focal = 1; break; }
if ( flag_focal ) printf("(%c)", symbol );
else printf(" %c ", symbol );
}
}
printf("|\n");
}
printf("+");
for( j=0; j<columns; j++ ) printf("---");
printf("+\n");
printf("Global residual: %f\n\n", global_residual);
}
#endif
/*
* MAIN PROGRAM
*/
int main(int argc, char *argv[]) {
int i,j,t;
// Simulation data
int rows, columns, max_iter;
float *surface, *surfaceCopy;
int num_teams, num_focal;
Team *teams;
FocalPoint *focal;
/* 1. Read simulation arguments */
/* 1.1. Check minimum number of arguments */
if (argc<2) {
fprintf(stderr,"-- Error in arguments: No arguments\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
int read_from_file = ! strcmp( argv[1], "-f" );
/* 1.2. Read configuration from file */
if ( read_from_file ) {
/* 1.2.1. Open file */
if (argc<3) {
fprintf(stderr,"-- Error in arguments: file-name argument missing\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
FILE *args = cp_abrir_fichero( argv[2] );
if ( args == NULL ) {
fprintf(stderr,"-- Error in file: not found: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
/* 1.2.2. Read surface and maximum number of iterations */
int ok;
ok = fscanf(args, "%d %d %d", &rows, &columns, &max_iter);
if ( ok != 3 ) {
fprintf(stderr,"-- Error in file: reading rows, columns, max_iter from file: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
surface = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
surfaceCopy = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
if ( surface == NULL || surfaceCopy == NULL ) {
fprintf(stderr,"-- Error allocating: surface structures\n");
exit( EXIT_FAILURE );
}
/* 1.2.3. Teams information */
ok = fscanf(args, "%d", &num_teams );
if ( ok != 1 ) {
fprintf(stderr,"-- Error file, reading num_teams from file: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
teams = (Team *)malloc( sizeof(Team) * (size_t)num_teams );
if ( teams == NULL ) {
fprintf(stderr,"-- Error allocating: %d teams\n", num_teams );
exit( EXIT_FAILURE );
}
for( i=0; i<num_teams; i++ ) {
ok = fscanf(args, "%d %d %d", &teams[i].x, &teams[i].y, &teams[i].type);
if ( ok != 3 ) {
fprintf(stderr,"-- Error in file: reading team %d from file: %s\n", i, argv[1]);
exit( EXIT_FAILURE );
}
}
/* 1.2.4. Focal points information */
ok = fscanf(args, "%d", &num_focal );
if ( ok != 1 ) {
fprintf(stderr,"-- Error in file: reading num_focal from file: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
focal = (FocalPoint *)malloc( sizeof(FocalPoint) * (size_t)num_focal );
if ( focal == NULL ) {
fprintf(stderr,"-- Error allocating: %d focal points\n", num_focal );
exit( EXIT_FAILURE );
}
for( i=0; i<num_focal; i++ ) {
ok = fscanf(args, "%d %d %d %d", &focal[i].x, &focal[i].y, &focal[i].start, &focal[i].heat);
if ( ok != 4 ) {
fprintf(stderr,"-- Error in file: reading focal point %d from file: %s\n", i, argv[1]);
exit( EXIT_FAILURE );
}
focal[i].active = 0;
}
}
/* 1.3. Read configuration from arguments */
else {
/* 1.3.1. Check minimum number of arguments */
if (argc<6) {
fprintf(stderr, "-- Error in arguments: not enough arguments when reading configuration from the command line\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
/* 1.3.2. Surface and maximum number of iterations */
rows = atoi( argv[1] );
columns = atoi( argv[2] );
max_iter = atoi( argv[3] );
surface = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
surfaceCopy = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
/* 1.3.3. Teams information */
num_teams = atoi( argv[4] );
teams = (Team *)malloc( sizeof(Team) * (size_t)num_teams );
if ( teams == NULL ) {
fprintf(stderr,"-- Error allocating: %d teams\n", num_teams );
exit( EXIT_FAILURE );
}
if ( argc < num_teams*3 + 5 ) {
fprintf(stderr,"-- Error in arguments: not enough arguments for %d teams\n", num_teams );
exit( EXIT_FAILURE );
}
for( i=0; i<num_teams; i++ ) {
teams[i].x = atoi( argv[5+i*3] );
teams[i].y = atoi( argv[6+i*3] );
teams[i].type = atoi( argv[7+i*3] );
}
/* 1.3.4. Focal points information */
int focal_args = 5 + i*3;
if ( argc < focal_args+1 ) {
fprintf(stderr,"-- Error in arguments: not enough arguments for the number of focal points\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
num_focal = atoi( argv[focal_args] );
focal = (FocalPoint *)malloc( sizeof(FocalPoint) * (size_t)num_focal );
if ( focal == NULL ) {
fprintf(stderr,"-- Error allocating: %d focal points\n", num_focal );
exit( EXIT_FAILURE );
}
if ( argc < focal_args + 1 + num_focal*4 ) {
fprintf(stderr,"-- Error in arguments: not enough arguments for %d focal points\n", num_focal );
exit( EXIT_FAILURE );
}
for( i=0; i<num_focal; i++ ) {
focal[i].x = atoi( argv[focal_args+i*4+1] );
focal[i].y = atoi( argv[focal_args+i*4+2] );
focal[i].start = atoi( argv[focal_args+i*4+3] );
focal[i].heat = atoi( argv[focal_args+i*4+4] );
focal[i].active = 0;
}
/* 1.3.5. Sanity check: No extra arguments at the end of line */
if ( argc > focal_args+i*4+1 ) {
fprintf(stderr,"-- Error in arguments: extra arguments at the end of the command line\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
}
#ifdef DEBUG
/* 1.4. Print arguments */
printf("Arguments, Rows: %d, Columns: %d, max_iter: %d\n", rows, columns, max_iter);
printf("Arguments, Teams: %d, Focal points: %d\n", num_teams, num_focal );
for( i=0; i<num_teams; i++ ) {
printf("\tTeam %d, position (%d,%d), type: %d\n", i, teams[i].x, teams[i].y, teams[i].type );
}
for( i=0; i<num_focal; i++ ) {
printf("\tFocal_point %d, position (%d,%d), start time: %d, temperature: %d\n", i,
focal[i].x,
focal[i].y,
focal[i].start,
focal[i].heat );
}
#endif // DEBUG
/* 2. Select GPU and start global timer */
cudaSetDevice(0);
cudaDeviceSynchronize();
double ttotal = cp_Wtime();
/*
*
* START HERE: DO NOT CHANGE THE CODE ABOVE THIS POINT
*
*/
float *gpuSurface, *gpuSurfaceCopy, *gpuResiduals;
int *gpuAux;
FocalPoint *gpuFocal;
Team *gpuTeam;
// double time1,time2;
int nearestUpperPow2 = pow(2,ceil(log2((double) rows*columns)));
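// The residual buffer is padded to the next power of two so the halving reduction
// in reduce_kernel always pairs elements cleanly.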
cudaMalloc((void **)&gpuSurface,sizeof(float)*rows*columns);
cudaMalloc((void **)&gpuAux,sizeof(int)*rows*columns);
cudaMalloc((void **) &gpuSurfaceCopy,sizeof(float)*rows*columns);
cudaMalloc((void **) &gpuResiduals,sizeof(float)*nearestUpperPow2);
cudaMalloc((void **) &gpuTeam,sizeof(Team)*num_teams);
cudaMemcpy(gpuTeam,teams,sizeof(Team)*num_teams,cudaMemcpyHostToDevice);
cudaMalloc((void **) &gpuFocal,sizeof(FocalPoint)*num_focal);
cudaMemcpy(gpuFocal,focal,sizeof(FocalPoint)*num_focal,cudaMemcpyHostToDevice);
int tamBlockX= 128;
int tamBlockY= 1;
int tamGridX, tamGridY;
int tamBlockTeams=224;
int tamGridTeams;
int tamBlockFocal=224;
int tamGridFocal;
tamGridTeams= num_teams/tamBlockTeams;
if (num_teams%tamBlockTeams!=0) tamGridTeams++;
tamGridFocal= num_focal/tamBlockFocal;
if (num_focal%tamBlockFocal!=0) tamGridFocal++;
tamGridX= columns/tamBlockX;
if (columns%tamBlockX!=0) tamGridX++;
tamGridY= rows/tamBlockY;
if (rows%tamBlockY!=0) tamGridY++;
dim3 blockSize(tamBlockX,tamBlockY);
dim3 gridSize(tamGridX,tamGridY);
#ifdef DEBUG
printf("tamGx %d tamGy %d\n",tamGridX,tamGridY);
#endif
init<<<gridSize,blockSize>>>(gpuSurface,rows,columns);
//CUDA_CHECK();
init<<<gridSize,blockSize>>>(gpuSurfaceCopy,rows,columns);
//CUDA_CHECK();
/* 3. Initialize surface */
/* for( i=0; i<rows; i++ )
	for( j=0; j<columns; j++ )
		accessMat( surface, i, j ) = 0.0; */
/* 4. Simulation */
int *gpuNum_deactivated;
//gpuNum_deactivated[0]=0;
cudaMallocHost((void**) &gpuNum_deactivated,sizeof(int));
int iter;
int flag_stability = 0;
//int first_activation = 0;
//int *gpuFirstActivation;
//cudaMallocHost((void**) &gpuFirstActivation,sizeof(int));
//check_first_activation<<<tamGridFocal,tamBlockFocal>>>(gpuFocal,num_focal); // a reduction would be needed here
//get_first_activation<<<tamGridFocal,tamBlockFocal>>>(gpuFocal,num_focal,gpuFirstActivation);
#pragma unroll
for( iter=0; iter<max_iter && ! flag_stability; iter++ ) {
//printf("iter %d\n",iter);
/* 4.1. Activate focal points */
//printf("num %d\n",gpuNum_deactivated[0] );
//cudaMemcpy(gpuNum_deactivated,&num_deactivated,sizeof(int),cudaMemcpyHostToDevice);
//printf("num %d\n",num_deactivated);
if(gpuNum_deactivated[0]<num_focal){
activate_focal<<<tamGridFocal,tamBlockFocal>>>(gpuFocal,num_focal,gpuNum_deactivated,iter);
cudaDeviceSynchronize();
//cudaMemcpyAsync(&num_deactivated,gpuNum_deactivated,sizeof(int),cudaMemcpyDeviceToHost,0);
}
//printf("num %d",num_deactivated);
//if(!first_activation) continue;
/* 4.2. Propagate heat (10 steps per each team movement) */
float global_residual;
int step;
//cudaMemcpy(surfaceCopy,gpuSurfaceCopy,sizeof(float)*rows*columns,cudaMemcpyDeviceToHost);
#pragma unroll
for( step=0; step<10; step++ ) {
/* 4.2.1. Update heat on active focal points */
//if(gpuNum_deactivated[0]<num_focal)
update_heat<<<tamGridFocal,tamBlockFocal>>>(gpuSurface,gpuFocal,columns,num_focal);
//CUDA_CHECK();
//accessMat( surface, x, y ) = focal[i].heat;
/* 4.2.2. Copy values of the surface in ancillary structure (Skip borders) */
//copy_surface<<<gridSize,blockSize>>>(gpuSurface,gpuSurfaceCopy,rows,columns);
// error=cudaGetLastError();
// if(error!= cudaSuccess)
// printf("%s\n",cudaGetErrorString(error));
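// Swap the surface pointers instead of copying: the values computed in the previous
// step become the read-only copy (gpuSurfaceCopy) for this step.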
float *aux=gpuSurface;
gpuSurface=gpuSurfaceCopy;
gpuSurfaceCopy=aux;
//CUDA_CHECK();
/* for( i=1; i<rows-1; i++ )
	for( j=1; j<columns-1; j++ )
		accessMat( surfaceCopy, i, j ) = accessMat( surface, i, j ); */
/* 4.2.3. Update surface values (skip borders) */
update_surface<<<gridSize,blockSize>>>(gpuSurface,gpuSurfaceCopy,rows,columns);
//CUDA_CHECK();
/* for( i=1; i<rows-1; i++ )
	for( j=1; j<columns-1; j++ )
		accessMat( surface, i, j ) = (
			accessMat( surfaceCopy, i-1, j ) +
			accessMat( surfaceCopy, i+1, j ) +
			accessMat( surfaceCopy, i, j-1 ) +
			accessMat( surfaceCopy, i, j+1 ) ) / 4; */
/* 4.2.4. Compute the maximum residual difference (absolute value) */
if(step==0 && gpuNum_deactivated[0]==num_focal){
//time1=cp_Wtime();
//init<<<blockSize,gridSize>>>(gpuResiduals,rows,columns);
compute_residual<<<gridSize,blockSize>>>(gpuSurface,gpuSurfaceCopy,rows,columns,gpuResiduals);
//int numValues = nearestUpperPow2;
int redSize = nearestUpperPow2;
int blockSizeR = (1024);
int sharedMemorySize = blockSizeR * sizeof(float);
while ( redSize > 1 )
{
int baseNumBlocks = redSize/blockSizeR;
int additionalBlock;
if(redSize%blockSizeR==0)
additionalBlock = 0;
else
additionalBlock = 1;
int numBlocks = baseNumBlocks + additionalBlock;
//printf("numB %d size %d\n",numBlocks,redSize);
//if(numBlocks==1) exit(0);
reduce_kernel<<< numBlocks, blockSizeR, sharedMemorySize >>>(gpuResiduals, gpuResiduals, redSize);
redSize = numBlocks;
}
cudaMemcpyAsync(&global_residual, gpuResiduals, sizeof(float), cudaMemcpyDeviceToHost,0);
//printf("glob %f\n",global_residual);
// printf("reesiduo %f\n",global_residual);
//time2+=cp_Wtime()-time1;
}
}
/* If the global residual is lower than THRESHOLD, we have reached enough stability, stop simulation at the end of this iteration */
/* 4.3. Move teams */
if(gpuNum_deactivated[0]<num_focal){
move_teams<<<tamGridTeams,tamBlockTeams>>>(gpuTeam,gpuFocal,num_teams,num_focal);
}
/* 4.4. Team actions */
//cudaMemcpy(surface,gpuSurface,sizeof(float)*rows*columns,cudaMemcpyDeviceToHost);
//initInt<<<gridSize,blockSize>>>()
compute_heat_reduction<<<tamGridTeams,tamBlockTeams>>>(gpuTeam,gpuAux,num_teams,rows,columns);
#ifdef UNROLL
int *aux;
aux = (int *)malloc( sizeof(int) * (size_t)rows * (size_t)columns );
cudaMemcpy(aux,gpuAux,sizeof(int)*rows*columns,cudaMemcpyDeviceToHost);
for( i=0;i<rows;i++){
for( j=0;j<columns;j++)
printf("%d ",aux[i*columns+j]);
printf("\n" );
}
exit(0);
#endif
reduce_heat3<<<gridSize,blockSize>>>(gpuSurface,gpuAux,rows,columns);
#ifdef DEBUG
/* 4.5. DEBUG: Print the current state of the simulation at the end of each iteration */
cudaMemcpy(teams,gpuTeam,sizeof(Team)*num_teams,cudaMemcpyDeviceToHost);
cudaMemcpy(surface,gpuSurface,sizeof(float)*rows*columns,cudaMemcpyDeviceToHost);
print_status( iter, rows, columns, surface, num_teams, teams, num_focal, focal, global_residual );
#endif // DEBUG
if( gpuNum_deactivated[0] == num_focal && global_residual < THRESHOLD ) flag_stability = 1;
}
cudaMemcpy(surface,gpuSurface,sizeof(float)*rows*columns,cudaMemcpyDeviceToHost);
//cudaMemcpy(focal,gpuFocal,sizeof(FocalPoint)*num_focal,cudaMemcpyDeviceToHost);
//cudaFree(gpuSurface);
//cudaFree(gpuSurfaceCopy);
//cudaFree(gpuTeam);
//cudaFree(gpuFocal);
//printf("time1 %f\n",time2);
/*
*
* STOP HERE: DO NOT CHANGE THE CODE BELOW THIS POINT
*
*/
/* 5. Stop global time */
cudaDeviceSynchronize();
ttotal = cp_Wtime() - ttotal;
/* 6. Output for leaderboard */
printf("\n");
/* 6.1. Total computation time */
printf("Time: %lf\n", ttotal );
/* 6.2. Results: Number of iterations, position of teams, residual heat on the focal points */
printf("Result: %d", iter);
/*
for (i=0; i<num_teams; i++)
printf(" %d %d", teams[i].x, teams[i].y );
*/
for (i=0; i<num_focal; i++)
printf(" %.6f", accessMat( surface, focal[i].x, focal[i].y ) );
printf("\n");
/* 7. Free resources */
free( teams );
free( focal );
free( surface );
free( surfaceCopy );
/* 8. End */
return 0;
}
|
18ea8517478e41a7ecb1e563dbafc62bf88bba67.hip | // !!! This is a file automatically generated by hipify!!!
#include "cudf.h"
#include "rmm/rmm.h"
#include "utilities/cudf_utils.h"
#include "utilities/error_utils.hpp"
#include "utilities/type_dispatcher.hpp"
#include "utilities/wrapper_types.hpp"
#include <cub/device/device_segmented_radix_sort.cuh>
struct SegmentedRadixSortPlan{
const gdf_size_type num_items;
// temporary storage
void *storage;
size_t storage_bytes;
void *back_key, *back_val;
size_t back_key_size, back_val_size;
hipStream_t stream;
int descending;
unsigned begin_bit, end_bit;
SegmentedRadixSortPlan(size_t num_items, int descending,
unsigned begin_bit, unsigned end_bit)
: num_items(num_items),
storage(nullptr), storage_bytes(0),
back_key(nullptr), back_val(nullptr),
back_key_size(0), back_val_size(0),
stream(0), descending(descending),
begin_bit(begin_bit), end_bit(end_bit)
{}
gdf_error setup(size_t sizeof_key, size_t sizeof_val) {
back_key_size = num_items * sizeof_key;
back_val_size = num_items * sizeof_val;
RMM_TRY( RMM_ALLOC(&back_key, back_key_size, stream) ); // TODO: non-default stream
RMM_TRY( RMM_ALLOC(&back_val, back_val_size, stream) );
return GDF_SUCCESS;
}
gdf_error teardown() {
RMM_TRY(RMM_FREE(back_key, stream));
RMM_TRY(RMM_FREE(back_val, stream));
RMM_TRY(RMM_FREE(storage, stream));
return GDF_SUCCESS;
}
};
template <typename Tk, typename Tv>
struct SegmentedRadixSort {
static
gdf_error sort( SegmentedRadixSortPlan *plan,
Tk *d_key_buf, Tv *d_value_buf,
unsigned num_segments,
unsigned *d_begin_offsets,
unsigned *d_end_offsets) {
unsigned num_items = plan->num_items;
Tk *d_key_alt_buf = (Tk*)plan->back_key;
Tv *d_value_alt_buf = (Tv*)plan->back_val;
hipStream_t stream = plan->stream;
int descending = plan->descending;
unsigned begin_bit = plan->begin_bit;
unsigned end_bit = plan->end_bit;
cub::DoubleBuffer<Tk> d_keys(d_key_buf, d_key_alt_buf);
typedef hipcub::DeviceSegmentedRadixSort Sorter;
if (d_value_buf) {
// Sort KeyValue pairs
cub::DoubleBuffer<Tv> d_values(d_value_buf, d_value_alt_buf);
if (descending) {
Sorter::SortPairsDescending(plan->storage,
plan->storage_bytes,
d_keys,
d_values,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
stream);
} else {
Sorter::SortPairs( plan->storage,
plan->storage_bytes,
d_keys,
d_values,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
stream );
}
CUDA_CHECK_LAST();
if (plan->storage && d_value_buf != d_values.Current()){
hipMemcpyAsync(d_value_buf, d_value_alt_buf,
num_items * sizeof(Tv),
hipMemcpyDeviceToDevice,
stream);
CUDA_CHECK_LAST();
}
} else {
// Sort Keys only
if (descending) {
Sorter::SortKeysDescending( plan->storage,
plan->storage_bytes,
d_keys,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
stream );
CUDA_CHECK_LAST();
} else {
Sorter::SortKeys( plan->storage,
plan->storage_bytes,
d_keys,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
stream );
}
CUDA_CHECK_LAST();
}
if ( plan->storage ) {
// We have operated and the result is not in front buffer
if (d_key_buf != d_keys.Current()){
hipMemcpyAsync(d_key_buf, d_key_alt_buf, num_items * sizeof(Tk),
hipMemcpyDeviceToDevice, stream);
CUDA_CHECK_LAST();
}
} else {
// We have not operated.
// Just checking for temporary storage requirement
RMM_TRY( RMM_ALLOC(&plan->storage, plan->storage_bytes, plan->stream) ); // TODO: non-default stream
CUDA_CHECK_LAST();
// Now that we have allocated, do real work.
return sort(plan, d_key_buf, d_value_buf, num_segments,
d_begin_offsets, d_end_offsets);
}
return GDF_SUCCESS;
}
};
gdf_segmented_radixsort_plan_type* cffi_wrap(SegmentedRadixSortPlan* obj){
return reinterpret_cast<gdf_segmented_radixsort_plan_type*>(obj);
}
SegmentedRadixSortPlan* cffi_unwrap(gdf_segmented_radixsort_plan_type* hdl){
return reinterpret_cast<SegmentedRadixSortPlan*>(hdl);
}
gdf_segmented_radixsort_plan_type* gdf_segmented_radixsort_plan(
size_t num_items, int descending,
unsigned begin_bit, unsigned end_bit)
{
return cffi_wrap(new SegmentedRadixSortPlan(num_items, descending,
begin_bit, end_bit));
}
gdf_error gdf_segmented_radixsort_plan_setup(
gdf_segmented_radixsort_plan_type *hdl,
size_t sizeof_key, size_t sizeof_val)
{
return cffi_unwrap(hdl)->setup(sizeof_key, sizeof_val);
}
gdf_error gdf_segmented_radixsort_plan_free(gdf_segmented_radixsort_plan_type *hdl)
{
auto plan = cffi_unwrap(hdl);
gdf_error status = plan->teardown();
delete plan;
return status;
}
template <typename Tv>
struct gdf_segmented_radixsort_functor
{
template <typename Tk>
gdf_error
operator()( gdf_segmented_radixsort_plan_type *hdl,
gdf_column *keycol,
gdf_column *valcol,
unsigned num_segments,
unsigned *d_begin_offsets,
unsigned *d_end_offsets)
{
/* validity mask must be empty */
GDF_REQUIRE(!keycol->valid || !keycol->null_count, GDF_VALIDITY_UNSUPPORTED);
GDF_REQUIRE(!valcol->valid || !valcol->null_count, GDF_VALIDITY_UNSUPPORTED);
/* size of columns must match */
GDF_REQUIRE(keycol->size == valcol->size, GDF_COLUMN_SIZE_MISMATCH);
SegmentedRadixSortPlan *plan = cffi_unwrap(hdl);
/* num_items must match */
GDF_REQUIRE(plan->num_items == keycol->size, GDF_COLUMN_SIZE_MISMATCH);
/* back buffer size must match */
GDF_REQUIRE(sizeof(Tk) * plan->num_items == plan->back_key_size,
GDF_COLUMN_SIZE_MISMATCH);
GDF_REQUIRE(sizeof(Tv) * plan->num_items == plan->back_val_size,
GDF_COLUMN_SIZE_MISMATCH);
/* Do sort */
return SegmentedRadixSort<Tk, Tv>::sort(plan,
(Tk*)keycol->data, (Tv*)valcol->data,
num_segments, d_begin_offsets, d_end_offsets);
}
};
gdf_error gdf_segmented_radixsort(gdf_segmented_radixsort_plan_type *hdl,
gdf_column *keycol,
gdf_column *valcol,
unsigned num_segments,
unsigned *d_begin_offsets,
unsigned *d_end_offsets)
{
GDF_REQUIRE(valcol->dtype == GDF_INT64, GDF_UNSUPPORTED_DTYPE);
return cudf::type_dispatcher(keycol->dtype,
gdf_segmented_radixsort_functor<int64_t>{},
hdl, keycol, valcol,
num_segments, d_begin_offsets,
d_end_offsets);
}
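/*
 * Illustrative call sequence (a sketch only; keycol/valcol and the device offset
 * arrays d_begin/d_end are assumed to have been built elsewhere). The value column
 * must be GDF_INT64, per the check in gdf_segmented_radixsort above.
 *
 *   gdf_segmented_radixsort_plan_type *plan =
 *       gdf_segmented_radixsort_plan(num_items, 0, 0, 32);
 *   gdf_segmented_radixsort_plan_setup(plan, sizeof(int32_t), sizeof(int64_t));
 *   gdf_segmented_radixsort(plan, &keycol, &valcol, num_segments, d_begin, d_end);
 *   gdf_segmented_radixsort_plan_free(plan);
 */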
| 18ea8517478e41a7ecb1e563dbafc62bf88bba67.cu | #include "cudf.h"
#include "rmm/rmm.h"
#include "utilities/cudf_utils.h"
#include "utilities/error_utils.hpp"
#include "utilities/type_dispatcher.hpp"
#include "utilities/wrapper_types.hpp"
#include <cub/device/device_segmented_radix_sort.cuh>
struct SegmentedRadixSortPlan{
const gdf_size_type num_items;
// temporary storage
void *storage;
size_t storage_bytes;
void *back_key, *back_val;
size_t back_key_size, back_val_size;
cudaStream_t stream;
int descending;
unsigned begin_bit, end_bit;
SegmentedRadixSortPlan(size_t num_items, int descending,
unsigned begin_bit, unsigned end_bit)
: num_items(num_items),
storage(nullptr), storage_bytes(0),
back_key(nullptr), back_val(nullptr),
back_key_size(0), back_val_size(0),
stream(0), descending(descending),
begin_bit(begin_bit), end_bit(end_bit)
{}
gdf_error setup(size_t sizeof_key, size_t sizeof_val) {
back_key_size = num_items * sizeof_key;
back_val_size = num_items * sizeof_val;
RMM_TRY( RMM_ALLOC(&back_key, back_key_size, stream) ); // TODO: non-default stream
RMM_TRY( RMM_ALLOC(&back_val, back_val_size, stream) );
return GDF_SUCCESS;
}
gdf_error teardown() {
RMM_TRY(RMM_FREE(back_key, stream));
RMM_TRY(RMM_FREE(back_val, stream));
RMM_TRY(RMM_FREE(storage, stream));
return GDF_SUCCESS;
}
};
template <typename Tk, typename Tv>
struct SegmentedRadixSort {
static
gdf_error sort( SegmentedRadixSortPlan *plan,
Tk *d_key_buf, Tv *d_value_buf,
unsigned num_segments,
unsigned *d_begin_offsets,
unsigned *d_end_offsets) {
unsigned num_items = plan->num_items;
Tk *d_key_alt_buf = (Tk*)plan->back_key;
Tv *d_value_alt_buf = (Tv*)plan->back_val;
cudaStream_t stream = plan->stream;
int descending = plan->descending;
unsigned begin_bit = plan->begin_bit;
unsigned end_bit = plan->end_bit;
cub::DoubleBuffer<Tk> d_keys(d_key_buf, d_key_alt_buf);
typedef cub::DeviceSegmentedRadixSort Sorter;
if (d_value_buf) {
// Sort KeyValue pairs
cub::DoubleBuffer<Tv> d_values(d_value_buf, d_value_alt_buf);
if (descending) {
Sorter::SortPairsDescending(plan->storage,
plan->storage_bytes,
d_keys,
d_values,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
stream);
} else {
Sorter::SortPairs( plan->storage,
plan->storage_bytes,
d_keys,
d_values,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
stream );
}
CUDA_CHECK_LAST();
if (plan->storage && d_value_buf != d_values.Current()){
cudaMemcpyAsync(d_value_buf, d_value_alt_buf,
num_items * sizeof(Tv),
cudaMemcpyDeviceToDevice,
stream);
CUDA_CHECK_LAST();
}
} else {
// Sort Keys only
if (descending) {
Sorter::SortKeysDescending( plan->storage,
plan->storage_bytes,
d_keys,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
stream );
CUDA_CHECK_LAST();
} else {
Sorter::SortKeys( plan->storage,
plan->storage_bytes,
d_keys,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
stream );
}
CUDA_CHECK_LAST();
}
if ( plan->storage ) {
// We have operated and the result is not in front buffer
if (d_key_buf != d_keys.Current()){
cudaMemcpyAsync(d_key_buf, d_key_alt_buf, num_items * sizeof(Tk),
cudaMemcpyDeviceToDevice, stream);
CUDA_CHECK_LAST();
}
} else {
// We have not operated.
// Just checking for temporary storage requirement
RMM_TRY( RMM_ALLOC(&plan->storage, plan->storage_bytes, plan->stream) ); // TODO: non-default stream
CUDA_CHECK_LAST();
// Now that we have allocated, do real work.
return sort(plan, d_key_buf, d_value_buf, num_segments,
d_begin_offsets, d_end_offsets);
}
return GDF_SUCCESS;
}
};
gdf_segmented_radixsort_plan_type* cffi_wrap(SegmentedRadixSortPlan* obj){
return reinterpret_cast<gdf_segmented_radixsort_plan_type*>(obj);
}
SegmentedRadixSortPlan* cffi_unwrap(gdf_segmented_radixsort_plan_type* hdl){
return reinterpret_cast<SegmentedRadixSortPlan*>(hdl);
}
gdf_segmented_radixsort_plan_type* gdf_segmented_radixsort_plan(
size_t num_items, int descending,
unsigned begin_bit, unsigned end_bit)
{
return cffi_wrap(new SegmentedRadixSortPlan(num_items, descending,
begin_bit, end_bit));
}
gdf_error gdf_segmented_radixsort_plan_setup(
gdf_segmented_radixsort_plan_type *hdl,
size_t sizeof_key, size_t sizeof_val)
{
return cffi_unwrap(hdl)->setup(sizeof_key, sizeof_val);
}
gdf_error gdf_segmented_radixsort_plan_free(gdf_segmented_radixsort_plan_type *hdl)
{
auto plan = cffi_unwrap(hdl);
gdf_error status = plan->teardown();
delete plan;
return status;
}
template <typename Tv>
struct gdf_segmented_radixsort_functor
{
template <typename Tk>
gdf_error
operator()( gdf_segmented_radixsort_plan_type *hdl,
gdf_column *keycol,
gdf_column *valcol,
unsigned num_segments,
unsigned *d_begin_offsets,
unsigned *d_end_offsets)
{
/* validity mask must be empty */
GDF_REQUIRE(!keycol->valid || !keycol->null_count, GDF_VALIDITY_UNSUPPORTED);
GDF_REQUIRE(!valcol->valid || !valcol->null_count, GDF_VALIDITY_UNSUPPORTED);
/* size of columns must match */
GDF_REQUIRE(keycol->size == valcol->size, GDF_COLUMN_SIZE_MISMATCH);
SegmentedRadixSortPlan *plan = cffi_unwrap(hdl);
/* num_items must match */
GDF_REQUIRE(plan->num_items == keycol->size, GDF_COLUMN_SIZE_MISMATCH);
/* back buffer size must match */
GDF_REQUIRE(sizeof(Tk) * plan->num_items == plan->back_key_size,
GDF_COLUMN_SIZE_MISMATCH);
GDF_REQUIRE(sizeof(Tv) * plan->num_items == plan->back_val_size,
GDF_COLUMN_SIZE_MISMATCH);
/* Do sort */
return SegmentedRadixSort<Tk, Tv>::sort(plan,
(Tk*)keycol->data, (Tv*)valcol->data,
num_segments, d_begin_offsets, d_end_offsets);
}
};
gdf_error gdf_segmented_radixsort(gdf_segmented_radixsort_plan_type *hdl,
gdf_column *keycol,
gdf_column *valcol,
unsigned num_segments,
unsigned *d_begin_offsets,
unsigned *d_end_offsets)
{
GDF_REQUIRE(valcol->dtype == GDF_INT64, GDF_UNSUPPORTED_DTYPE);
return cudf::type_dispatcher(keycol->dtype,
gdf_segmented_radixsort_functor<int64_t>{},
hdl, keycol, valcol,
num_segments, d_begin_offsets,
d_end_offsets);
}
|
integrator_ao.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
renderbox2 - a physically based gpu renderer for research purposes
Copyright (C) - 2014 - Srinath Ravichandran
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
// Application specific headers.
#include <accelerators/sbvh/cudatracerkernels.h>
#include <accelerators/sbvh/cudatracer.h>
#include <core/buffer.h>
#include <core/intersection.h>
#include <core/montecarlo.h>
#include <core/util.h>
#include <integrators/integrator_ao.h>
#include <util/cudatimer.h>
// Cuda specific headers.
#include <thrust/random.h>
// Standard c++ headers.
namespace renderbox2
{
//
// Ambient Occlusion Integrator kernels
//
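// For every camera sample that hit geometry, draw num_ao_samples cosine-weighted
// directions around the shading normal and count the rays that remain unoccluded
// within ao_radius; the contribution is that count divided by num_ao_samples.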
__global__ void kernel_ambient_occlusion(
IntersectionBuffer isects,
CameraSampleBuffer csamples,
BvhStruct bvh,
SceneBuffer scene_data,
float ao_radius,
uint32_t num_ao_samples
)
{
for (GIndexType tidx = threadIdx.x + blockIdx.x * blockDim.x; tidx < csamples.m_size; tidx += gridDim.x * blockDim.x)
{
// Get the current intersection point and normal.
float3 position = make_float3(isects.m_position[0][tidx], isects.m_position[1][tidx], isects.m_position[2][tidx]);
float3 normal = make_float3(isects.m_shading_normal[0][tidx], isects.m_shading_normal[1][tidx], isects.m_shading_normal[2][tidx]);
float epsilon = isects.m_epsilon[tidx];
float4 m_contribution = make_float4(0.0f);
bool intersected = (isects.m_intersected[tidx] == 1);
if (intersected)
{
int32_t seed = simplehash(tidx);
thrust::uniform_real_distribution<float> u01(0.0f, 1.0f);
thrust::default_random_engine rng(seed);
for (uint32_t i = 0; i < num_ao_samples; i++)
{
// Sample a ray in the hemisphere.
float3 direction = cosine_sample_hemisphere_normal(u01(rng), u01(rng), normal);
Ray ao_ray(position, direction, epsilon, ao_radius);
// Trace the ray with max radius and if so accumulate.
bool occluded = trace_ray(ao_ray, true, nullptr, bvh.m_nodes, bvh.m_tris, bvh.m_tri_idx, scene_data);
if (!occluded)
{
m_contribution += make_float4(1.0f, 1.0f, 1.0f, 0.0f);
}
}
}
// Finally divide to give value.
csamples.m_contribution[tidx] = m_contribution / num_ao_samples;
}
}
//
// Ambient Occlusion Integrators compute method shoots a bunch of rays and computes occlusion for each primary ray.
//
void IntegratorAO::compute(CameraSampleBuffer* csb, RayBuffer* rb)
{
IntersectionBufferClass ibc(m_allocator);
ibc.allocate(csb->m_size);
IntersectionBuffer ib = ibc.get_buffer();
SceneBuffer scene_data = m_scene->gpu_get_buffer();
BvhStruct bvh = m_tracer->get_bvh();
m_tracer->trace(*rb, ib, scene_data, nullptr, false);
dim3 grid_size(256, 1, 1);
dim3 block_size(256, 1, 1);
CudaTimer t1("ao timer");
t1.start();
hipLaunchKernelGGL(( kernel_ambient_occlusion), dim3(grid_size), dim3(block_size), 0, 0, ib, *csb, bvh, scene_data, m_params.m_radius, m_params.m_samples);
t1.stop();
}
}
| integrator_ao.cu |
/**
renderbox2 - a physically based gpu renderer for research purposes
Copyright (C) - 2014 - Srinath Ravichandran
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
// Application specific headers.
#include <accelerators/sbvh/cudatracerkernels.h>
#include <accelerators/sbvh/cudatracer.h>
#include <core/buffer.h>
#include <core/intersection.h>
#include <core/montecarlo.h>
#include <core/util.h>
#include <integrators/integrator_ao.h>
#include <util/cudatimer.h>
// Cuda specific headers.
#include <thrust/random.h>
// Standard c++ headers.
namespace renderbox2
{
//
// Ambient Occlusion Integrator kernels
//
__global__ void kernel_ambient_occlusion(
IntersectionBuffer isects,
CameraSampleBuffer csamples,
BvhStruct bvh,
SceneBuffer scene_data,
float ao_radius,
uint32_t num_ao_samples
)
{
for (GIndexType tidx = threadIdx.x + blockIdx.x * blockDim.x; tidx < csamples.m_size; tidx += gridDim.x * blockDim.x)
{
// Get the current intersection point and normal.
float3 position = make_float3(isects.m_position[0][tidx], isects.m_position[1][tidx], isects.m_position[2][tidx]);
float3 normal = make_float3(isects.m_shading_normal[0][tidx], isects.m_shading_normal[1][tidx], isects.m_shading_normal[2][tidx]);
float epsilon = isects.m_epsilon[tidx];
float4 m_contribution = make_float4(0.0f);
bool intersected = (isects.m_intersected[tidx] == 1);
if (intersected)
{
int32_t seed = simplehash(tidx);
thrust::uniform_real_distribution<float> u01(0.0f, 1.0f);
thrust::default_random_engine rng(seed);
for (uint32_t i = 0; i < num_ao_samples; i++)
{
// Sample a ray in the hemisphere.
float3 direction = cosine_sample_hemisphere_normal(u01(rng), u01(rng), normal);
Ray ao_ray(position, direction, epsilon, ao_radius);
// Trace the ray with max radius and if so accumulate.
bool occluded = trace_ray(ao_ray, true, nullptr, bvh.m_nodes, bvh.m_tris, bvh.m_tri_idx, scene_data);
if (!occluded)
{
m_contribution += make_float4(1.0f, 1.0f, 1.0f, 0.0f);
}
}
}
// Finally divide to give value.
csamples.m_contribution[tidx] = m_contribution / num_ao_samples;
}
}
//
// Ambient Occlusion Integrators compute method shoots a bunch of rays and computes occlusion for each primary ray.
//
void IntegratorAO::compute(CameraSampleBuffer* csb, RayBuffer* rb)
{
IntersectionBufferClass ibc(m_allocator);
ibc.allocate(csb->m_size);
IntersectionBuffer ib = ibc.get_buffer();
SceneBuffer scene_data = m_scene->gpu_get_buffer();
BvhStruct bvh = m_tracer->get_bvh();
m_tracer->trace(*rb, ib, scene_data, nullptr, false);
dim3 grid_size(256, 1, 1);
dim3 block_size(256, 1, 1);
CudaTimer t1("ao timer");
t1.start();
kernel_ambient_occlusion<<<grid_size, block_size>>>(ib, *csb, bvh, scene_data, m_params.m_radius, m_params.m_samples);
t1.stop();
}
}
|
c2e94be14fa024e5dd4d2e1eb5653a820101afb0.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by cao on 19-12-20.
//
#include "mish.h"
#include <iostream>
#include <hip/hip_fp16.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#define CHECK_CUDA(call) do { \
hipError_t status = call; \
if( status != hipSuccess ) { \
return status; \
} \
} while(0)
__device__ float softplus_kernel(float x, const float threshold = 20) {
if (x > threshold) return x; // too large
else if (x < -threshold) return expf(x); // too small
return logf(expf(x) + 1.);
}
__device__ half softplus_kernel(half x, const half threshold) {
if (x > threshold) return x; // too large
else if (x < -threshold) return hexp(x); // too small
return hlog(hexp(x) + half(1.));
}
__device__ half tanh_activate_kernel(half x){return (half(2.)/(half(1.) + hexp(half(-2.)*x)) - half(1.));}
__device__ float tanh_activate_kernel(float x){return (2./(1. + expf(-2.*x)) - 1.);}
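// mishKernel below evaluates Mish(x) = x * tanh(softplus(x)), with softplus(x) = ln(1 + e^x);
// the threshold clamps softplus to x for large inputs and to e^x for very negative ones
// to keep the exponentials from overflowing or underflowing.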
template <typename T>
__global__ void mishKernel( int n, const T* input, T* output, const T MISH_THRESHOLD)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n)
{
T x_val = input[idx];
output[idx] = x_val * tanh_activate_kernel( softplus_kernel(x_val, MISH_THRESHOLD) );
}
}
inline int computeMish(hipStream_t stream, int n, const float* input, float* output)
{
constexpr int blockSize = 1024;
const int gridSize = (n + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( mishKernel<float>), dim3(gridSize), dim3(blockSize), 0, stream, n, input, output,20.);
CHECK_CUDA(hipPeekAtLastError());
return 0;
}
inline int computeMish(hipStream_t stream, int n, const half* input, half* output)
{
const int blockSize = 1024;
const int gridSize = (n + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( mishKernel<half>), dim3(gridSize), dim3(blockSize), 0, stream, n, input, output,20.);
CHECK_CUDA(hipPeekAtLastError());
return 0;
}
MishPlugin::MishPlugin():_initialized(false){
}
int MishPlugin::initialize() {
if(_initialized) return 0;
_initialized = true;
return 0;
}
void MishPlugin::terminate() {
if (!_initialized) {
return;
}
_initialized = false;
}
MishPlugin::~MishPlugin() {
terminate();
}
nvinfer1::Dims MishPlugin::getOutputDimensions(int index, const nvinfer1::Dims *inputDims, int nbInputs) {
assert(index == 0);
assert(inputDims);
assert(nbInputs == 1);
return inputDims[0];
}
size_t MishPlugin::getWorkspaceSize(int maxBatchSize) const {
return 0;
}
int MishPlugin::enqueue(int batchSize, const void *const *inputs, void **outputs, void *workspace,
hipStream_t stream) {
nvinfer1::Dims input_dims = this->getInputDims(0);
nvinfer1::DataType type = this->getDataType();
const int C = input_dims.d[0];
const int H = input_dims.d[1];
const int W = input_dims.d[2];
const int num = batchSize*C*H*W;
switch (type)
{
case nvinfer1::DataType::kFLOAT:
{
const float* input_data = static_cast<const float*>(inputs[0]);
float* out_data= static_cast<float*>(outputs[0]);
computeMish(stream,num,input_data,out_data);
break;
}
case nvinfer1::DataType::kHALF:
{
const half* input_data = static_cast<const half*>(inputs[0]);
half* out_data= static_cast<half*>(outputs[0]);
computeMish(stream,num,input_data,out_data);
break;
}
default: std::cerr << "error data type" << std::endl;
}
return 0;
}
| c2e94be14fa024e5dd4d2e1eb5653a820101afb0.cu | //
// Created by cao on 19-12-20.
//
#include "mish.h"
#include <iostream>
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#define CHECK_CUDA(call) do { \
cudaError_t status = call; \
if( status != cudaSuccess ) { \
return status; \
} \
} while(0)
__device__ float softplus_kernel(float x, const float threshold = 20) {
if (x > threshold) return x; // too large
else if (x < -threshold) return expf(x); // too small
return logf(expf(x) + 1.);
}
__device__ half softplus_kernel(half x, const half threshold) {
if (x > threshold) return x; // too large
else if (x < -threshold) return hexp(x); // too small
return hlog(hexp(x) + half(1.));
}
__device__ half tanh_activate_kernel(half x){return (half(2.)/(half(1.) + hexp(half(-2.)*x)) - half(1.));}
__device__ float tanh_activate_kernel(float x){return (2./(1. + expf(-2.*x)) - 1.);}
template <typename T>
__global__ void mishKernel( int n, const T* input, T* output, const T MISH_THRESHOLD)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n)
{
T x_val = input[idx];
output[idx] = x_val * tanh_activate_kernel( softplus_kernel(x_val, MISH_THRESHOLD) );
}
}
inline int computeMish(cudaStream_t stream, int n, const float* input, float* output)
{
constexpr int blockSize = 1024;
const int gridSize = (n + blockSize - 1) / blockSize;
mishKernel<float><<<gridSize, blockSize, 0, stream>>>(n, input, output,20.);
CHECK_CUDA(cudaPeekAtLastError());
return 0;
}
inline int computeMish(cudaStream_t stream, int n, const half* input, half* output)
{
const int blockSize = 1024;
const int gridSize = (n + blockSize - 1) / blockSize;
mishKernel<half><<<gridSize, blockSize, 0, stream>>>(n, input, output,20.);
CHECK_CUDA(cudaPeekAtLastError());
return 0;
}
MishPlugin::MishPlugin():_initialized(false){
}
int MishPlugin::initialize() {
if(_initialized) return 0;
_initialized = true;
return 0;
}
void MishPlugin::terminate() {
if (!_initialized) {
return;
}
_initialized = false;
}
MishPlugin::~MishPlugin() {
terminate();
}
nvinfer1::Dims MishPlugin::getOutputDimensions(int index, const nvinfer1::Dims *inputDims, int nbInputs) {
assert(index == 0);
assert(inputDims);
assert(nbInputs == 1);
return inputDims[0];
}
size_t MishPlugin::getWorkspaceSize(int maxBatchSize) const {
return 0;
}
int MishPlugin::enqueue(int batchSize, const void *const *inputs, void **outputs, void *workspace,
cudaStream_t stream) {
nvinfer1::Dims input_dims = this->getInputDims(0);
nvinfer1::DataType type = this->getDataType();
const int C = input_dims.d[0];
const int H = input_dims.d[1];
const int W = input_dims.d[2];
const int num = batchSize*C*H*W;
switch (type)
{
case nvinfer1::DataType::kFLOAT:
{
const float* input_data = static_cast<const float*>(inputs[0]);
float* out_data= static_cast<float*>(outputs[0]);
computeMish(stream,num,input_data,out_data);
break;
}
case nvinfer1::DataType::kHALF:
{
const half* input_data = static_cast<const half*>(inputs[0]);
half* out_data= static_cast<half*>(outputs[0]);
computeMish(stream,num,input_data,out_data);
break;
}
default: std::cerr << "error data type" << std::endl;
}
return 0;
}
|
460db2b086dca94ff6c01dd47a6be16ffe9b7e20.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <chrono>
#include <cstdlib>
#include <hiprand/hiprand_kernel.h>
#include "Vec3.h"
#include "Color.h"
#include "Ray.h"
#include "Sphere.h"
#include "Hittable_List.h"
#include "Camera.h"
#include "Texture.h"
#include "Render.h"
#include "Moving-Sphere.h"
#include "shader_stb_image.h"
using namespace std::chrono;
#define RND (hiprand_uniform(&local_rand_state))
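// RND draws a uniform float from the thread-local hiprand state declared in each kernel
// (uniform on (0, 1], matching curand_uniform semantics).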
__global__ void glow_balls(Hittable **d_list, Hittable **d_world, Camera **d_camera, int nx, int ny, hiprandState_t *rand_state)
{
if (threadIdx.x == 0 && blockIdx.x == 0) {
hiprandState_t local_rand_state = *rand_state;
int i = 0;
d_list[i++] = new Sphere(Vec3(0, -1000, -1), 1000, new Lambertian(new Solid_Color(0.5, 0.5, 0.5)));
for (int a = -8; a < 8; a++) {
for (int b = -8; b < 8; b++) {
float choose_mat = RND;
Vec3 center(a + 0.9f * RND, 0.2, b + 0.9f * RND);
if (choose_mat < .5f) {
d_list[i++] = new Sphere(center, 0.2,
new Diffuse_Light(new Solid_Color(RND * RND, 0, RND * RND)));
} else {
d_list[i++] = new Moving_Sphere(center, center + Vec3(0, 0, RND), 0.0, 1.0, 0.2,
new Diffuse_Light(new Solid_Color(RND * RND, 0, RND * RND)));
}
}
}
d_list[i++] = new Sphere(Vec3(4, 1, 1.5f), 1.0, new Dielectric(1.5));
// World
*rand_state = local_rand_state;
*d_world = new Hittable_List(d_list, 16 * 16 + 2);
// Camera
Vec3 lookfrom = Vec3(13, 2, 5);
Vec3 lookat = Vec3(0, 0, 0);
float dist_to_focus = 10.0;
float aperture = .1f;
*d_camera = new Camera(lookfrom, lookat, Vec3(0,1,0), 25.0, float(nx)/float(ny), aperture, dist_to_focus, 0 ,1);
}
}
__global__ void free_glow_balls(Hittable **d_list, Hittable **d_world, Camera **d_camera)
{
for(int i=0; i < (16 * 16 + 2); i++) {
delete ((Hittable *)d_list[i])->mat_ptr;
delete d_list[i];
}
delete *d_world;
delete *d_camera;
}
__global__ void create_world(Hittable **d_list, Hittable **d_world, Camera **d_camera, int nx, int ny, hiprandState_t *rand_state)
{
if (threadIdx.x == 0 && blockIdx.x == 0) {
hiprandState_t local_rand_state = *rand_state;
d_list[0] = new Sphere(Vec3(0,-1000.0,-1), 1000, new Lambertian(new Solid_Color(Vec3(0.5,0.5,0.5))));
int i = 1;
for(int a = -11; a < 11; a++) {
for(int b = -11; b < 11; b++) {
float choose_mat = RND;
Vec3 center(a+RND,0.2,b+RND);
if(choose_mat < 0.8f) {
d_list[i++] = new Sphere(center, 0.2, new Lambertian(new Solid_Color(Vec3(RND*RND, RND*RND, RND*RND))));
}
else if(choose_mat < 0.95f) {
d_list[i++] = new Sphere(center, 0.2, new Metal(Vec3(0.5f*(1.0f+RND), 0.5f*(1.0f+RND), 0.5f*(1.0f+RND)), 0.5f*RND));
}
else {
d_list[i++] = new Sphere(center, 0.2, new Dielectric(1.5));
}
}
}
d_list[i++] = new Sphere(Vec3(0, 1,0), 1.0, new Dielectric(1.5));
d_list[i++] = new Sphere(Vec3(-4, 1, 0), 1.0 , new Lambertian(new Solid_Color(Vec3(0.4,0.3,0.1))));
//d_list[i++] = new Sphere(Vec3(0, 4, 5), 1.0, new Diffuse_Light( new Solid_Color(Vec3(7, 7, 7))));
d_list[i++] = new Sphere(Vec3(4, 1, 0), 1.0, new Metal(Vec3(0.7, 0.6, 0.5), 0.0));
*rand_state = local_rand_state;
*d_world = new Hittable_List(d_list, 22*22+1+3);
Vec3 lookfrom = Vec3(13,2,3);
Vec3 lookat = Vec3(0,0,0);
float dist_to_focus = 10.0;
float aperture = 0.1;
*d_camera = new Camera(lookfrom, lookat, Vec3(0,1,0), 25.0, float(nx)/float(ny), aperture, dist_to_focus, 0 ,1);
}
}
__global__ void free_world(Hittable **d_list, Hittable **d_world, Camera **d_camera)
{
for(int i=0; i < 22*22+1+3; i++) {
delete ((Sphere *)d_list[i])->mat_ptr;
delete d_list[i];
}
delete *d_world;
delete *d_camera;
}
__global__ void solar_system(Hittable **d_list, Hittable **d_world, Camera **d_camera, int nx, int ny, hiprandState_t *rand_state, int tex_nx, int tex_ny, int texHQ_nx, int texHQ_ny, unsigned char *sun,
unsigned char *mercury, unsigned char *venus, unsigned char *earth, unsigned char *mars, unsigned char *jupiter, unsigned char *saturn, unsigned char *uranus, unsigned char *neptune, unsigned char* pluto)
{
if (threadIdx.x == 0 && blockIdx.x == 0) {
hiprandState_t local_rand_state = *rand_state;
int i = 0;
Texture *sunText = new Image_Text(sun, texHQ_nx, texHQ_ny);
Texture *star1Text = new Solid_Color(Vec3(1, 1, 1)); // White
Texture *star2Text = new Solid_Color(Vec3(0.75, 0.6, 0.5)); // Yellow
Texture *star3Text = new Solid_Color(Vec3(0.93, 0.41, 0.24)); // Red
Texture *star4Text = new Solid_Color(Vec3(0.4, .82, 0.95)); // Blue
// Create sun and slightly bigger light source
d_list[i++] = new Sphere(Vec3(0, 0, -320), 300.0 , new Diffuse_Light(sunText));
d_list[i++] = new Sphere(Vec3(0, 0, -1300), 600.0 , new Diffuse_Light(new Solid_Color(Vec3(0.25, 0.2, 0.12))));
// Create each planet in a line
d_list[i++] = new Sphere(Vec3(0, 0, -10), 2, new Lambertian(new Image_Text(mercury, tex_nx, tex_ny)));
d_list[i++] = new Sphere(Vec3(0, 0, 0), 3.6, new Lambertian(new Image_Text(venus, tex_nx, tex_ny)));
d_list[i++] = new Sphere(Vec3(0, 0, 13), 4.4, new Lambertian(new Image_Text(earth, tex_nx, tex_ny)));
d_list[i++] = new Sphere(Vec3(0, 0, 27), 2.4, new Lambertian(new Image_Text(mars, tex_nx, tex_ny)));
d_list[i++] = new Sphere(Vec3(0, 0, 80), 34.0, new Lambertian(new Image_Text(jupiter, texHQ_nx, texHQ_ny)));
d_list[i++] = new Sphere(Vec3(0, 0, 190), 28.0, new Lambertian(new Image_Text(saturn, texHQ_nx, texHQ_ny)));
d_list[i++] = new Sphere(Vec3(0, 0, 310), 16.4 , new Lambertian(new Image_Text(uranus, tex_nx, tex_ny)));
d_list[i++] = new Sphere(Vec3(0, 0, 450), 16.0, new Lambertian(new Image_Text(neptune, tex_nx, tex_ny)));
d_list[i++] = new Sphere(Vec3(0, 0, 575), 2.75, new Lambertian(new Image_Text(pluto, tex_nx, tex_ny)));
// Generates random stars in the background
// DEPENDS GREATLY on lookfrom, lookat, and fov
for(int a = -450; a < 450; a+=20) {
for(int c = -20; c < 1100; c+=20) {
float starColor = RND;
float rand1 = RND;
rand1 *= (20.f+0.999999f);
rand1 = truncf(rand1);
float rand2 = RND;
rand2 *= (20.f+0.999999f);
rand2 = truncf(rand2);
float rand3 = RND;
rand3 *= (20.f+0.999999f);
rand3 = truncf(rand3);
Vec3 center(250 + rand1 + (800 - c), a+rand2, c+rand3);
if (starColor < 0.7f) {
d_list[i++] = new Sphere(center, RND, new Diffuse_Light(star1Text));
} else if (starColor < 0.9f) {
d_list[i++] = new Sphere(center, RND, new Diffuse_Light(star2Text));
} else if (starColor < 0.95f) {
d_list[i++] = new Sphere(center, RND, new Diffuse_Light(star3Text));
} else {
d_list[i++] = new Sphere(center, RND, new Diffuse_Light(star4Text));
}
}
}
*rand_state = local_rand_state;
*d_world = new Hittable_List(d_list, 11+45*56);
Vec3 lookfrom = Vec3(-145,0, -25);
Vec3 lookat = Vec3(-110,0, 5);
float dist_to_focus = 100.0;
float aperture = 0.1;
float fov = 52.0;
*d_camera = new Camera(lookfrom, lookat, Vec3(0,1,0), fov, float(nx)/float(ny), aperture, dist_to_focus, 0 ,1);
}
}
__global__ void free_solar_system(Hittable **d_list, Hittable **d_world, Camera **d_camera)
{
for(int i=0; i < 11+45*56; i++) {
delete ((Hittable *)d_list[i])->mat_ptr;
delete d_list[i];
}
delete *d_world;
delete *d_camera;
}
__global__ void pool_table(Hittable **d_list, Hittable **d_world, Camera **d_camera, int nx, int ny, hiprandState_t *rand_state)
{
if (threadIdx.x == 0 && blockIdx.x == 0) {
hiprandState_t local_rand_state = *rand_state;
int i = 0;
// Pool balls
d_list[i++] = new Sphere(Vec3(-.05, 0, 2), .5, new Metal(Vec3(4.f*0.8314f, 4.f*0.83f, 4.f*0.09f), 1));
d_list[i++] = new Sphere(Vec3(-.1, .5, 2.85), .5, new Metal(Vec3(4.f*0.059f, 4.f*0.333f, 4.f*0.694f), 1));
d_list[i++] = new Sphere(Vec3(-.1, -.5, 2.85), .5, new Metal(Vec3(4.f*0.73f, 4.f*0.102f, 4.f*0.102f), 1));
d_list[i++] = new Sphere(Vec3(-.15, -1, 3.7), .5, new Metal(Vec3(4.f*0.431f, 4.f*0.102f, 4.f*0.53f), 1));
d_list[i++] = new Sphere(Vec3(-.15, 0, 3.7), .5, new Metal(Vec3(0, 0, 0), 1));
d_list[i++] = new Sphere(Vec3(-.15, 1, 3.7), .5, new Metal(Vec3(4.f*0.059f, 4.f*0.302f, 4.f*0.059f), 1));
d_list[i++] = new Sphere(Vec3(-.185, 1.5, 4.55), .5, new Metal(Vec3(4.f*0.37f, 4.f*0.02f, 4.f*0.01568f), 1));
d_list[i++] = new Sphere(Vec3(-.185, .5, 4.55), .5, new Metal(Vec3(4.f*0.80f, 4.f*0.333f, 4.f*0.063f), 1));
d_list[i++] = new Sphere(Vec3(-.185, -.5, 4.55), .5, new Metal(Vec3(4.f*0.8314f, 4.f*0.83f, 4.f*0.09f), 1));
d_list[i++] = new Sphere(Vec3(-.185, -1.5, 4.55), .5, new Metal(Vec3(4.f*0.059f, 4.f*0.333f, 4.f*0.694f), 1));
d_list[i++] = new Sphere(Vec3(-.25, -2.0, 5.40), .5, new Metal(Vec3(4.f*0.37f, 4.f*0.02f, 4.f*0.01568f), 1));
d_list[i++] = new Sphere(Vec3(-.25, -1.0, 5.40), .5, new Metal(Vec3(4.f*0.059f, 4.f*0.302f, 4.f*0.059f), 1));
d_list[i++] = new Sphere(Vec3(-.25, 0.0, 5.40), .5, new Metal(Vec3(4.f*0.80f, 4.f*0.333f, 4.f*0.063f), 1));
d_list[i++] = new Sphere(Vec3(-.25, 1.0, 5.40), .5, new Metal(Vec3(4.f*0.431f, 4.f*0.102f, 4.f*0.53f), 1));
d_list[i++] = new Sphere(Vec3(-.25, 2.0, 5.40), .5, new Metal(Vec3(4.f*0.73f, 4.f*0.102f, 4.f*0.102f), 1));
// Cue ball
d_list[i++] = new Sphere(Vec3(0, 0, -5), .5, new Metal(Vec3(1, 1, 1), 1));
// The ground of the table
d_list[i++] = new Sphere(Vec3(-100.5, 0.0, -1.0), 100, new Lambertian(Vec3(.212, .4706, .294)));
// Lighting above the table
d_list[i++] = new Sphere(Vec3(100, 5, 0), 10, new Diffuse_Light(Vec3(20, 20, 20)));
// Table
*rand_state = local_rand_state;
*d_world = new Hittable_List(d_list, 18);
// Camera
Vec3 lookfrom = Vec3(3, 0, -15);
Vec3 lookat = Vec3(-2, 0, 10);
float dist_to_focus = 25.0;
float aperture = 0;
*d_camera = new Camera(lookfrom, lookat, Vec3(0,0,1), 10.0, float(nx)/float(ny), aperture, dist_to_focus, 0 ,1);
}
}
__global__ void free_pool_table(Hittable **d_list, Hittable **d_world, Camera **d_camera)
{
for(int i=0; i < 18; i++) {
delete ((Hittable *)d_list[i])->mat_ptr;
delete d_list[i];
}
delete *d_world;
delete *d_camera;
}
int main(int argc, char **argv)
{
auto program_start = high_resolution_clock::now();
/****** Set up image size, block size, and frame buffer ******/
int depth = 50;
int tx = 8;
int ty = 8;
int image, nx, ny, ns;
if (argc < 5) {
nx = 400;
ny = 225;
ns = 10000;
image = 2;
} else {
image = atoi(argv[1]);
nx = atoi(argv[2]);
ny = atoi(argv[3]);
ns = atoi(argv[4]);
}
/****** Allocate and copy memory for any image textures ******/
int tex_nx, tex_ny, tex_nn;
int texHQ_nx, texHQ_ny, texHQ_nn;
unsigned char *dev_mercury;
unsigned char *dev_venus;
unsigned char *dev_earth;
unsigned char *dev_mars;
unsigned char *dev_jupiter;
unsigned char *dev_saturn;
unsigned char *dev_uranus;
unsigned char *dev_neptune;
unsigned char *dev_sun;
unsigned char *dev_pluto;
auto texture_time_start = high_resolution_clock::now();
if (image == 1) {
/****** Standard quality textures ******/
unsigned char *mercury = stbi_load("../Common/textures/mercury.jpg", &tex_nx, &tex_ny, &tex_nn, 0);
unsigned char *venus = stbi_load("../Common/textures/venus.jpg", &tex_nx, &tex_ny, &tex_nn, 0);
unsigned char *earth = stbi_load("../Common/textures/earth.jpg", &tex_nx, &tex_ny, &tex_nn, 0);
unsigned char *mars = stbi_load("../Common/textures/mars.jpg", &tex_nx, &tex_ny, &tex_nn, 0);
unsigned char *uranus = stbi_load("../Common/textures/uranus.jpg", &tex_nx, &tex_ny, &tex_nn, 0);
unsigned char *neptune = stbi_load("../Common/textures/neptune.jpg", &tex_nx, &tex_ny, &tex_nn, 0);
unsigned char *pluto = stbi_load("../Common/textures/pluto.jpg", &tex_nx, &tex_ny, &tex_nn, 0);
/****** High quality textures for larger bodies ******/
unsigned char *sun = stbi_load("../Common/textures/sunHQ.jpg", &texHQ_nx, &texHQ_ny, &texHQ_nn, 0);
unsigned char *jupiter = stbi_load("../Common/textures/jupiterHQ.jpg", &texHQ_nx, &texHQ_ny, &texHQ_nn, 0);
unsigned char *saturn = stbi_load("../Common/textures/saturnHQ.jpg", &texHQ_nx, &texHQ_ny, &texHQ_nn, 0);
/****** Allocate memory and copy each texture to the GPU ******/
size_t texSize = tex_nx*tex_ny*tex_nn*sizeof(unsigned char);
size_t texHQSize = texHQ_nx*texHQ_ny*texHQ_nn*sizeof(unsigned char);
checkCudaErrors(hipMalloc((void **)&dev_mercury, texSize));
checkCudaErrors(hipMemcpy(dev_mercury, mercury, texSize, hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc((void **)&dev_venus, texSize));
checkCudaErrors(hipMemcpy(dev_venus, venus, texSize, hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc((void **)&dev_earth, texSize));
checkCudaErrors(hipMemcpy(dev_earth, earth, texSize, hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc((void **)&dev_mars, texSize));
checkCudaErrors(hipMemcpy(dev_mars, mars, texSize, hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc((void **)&dev_uranus, texSize));
checkCudaErrors(hipMemcpy(dev_uranus, uranus, texSize, hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc((void **)&dev_neptune, texSize));
checkCudaErrors(hipMemcpy(dev_neptune, neptune, texSize, hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc((void **)&dev_pluto, texSize));
checkCudaErrors(hipMemcpy(dev_pluto, pluto, texSize, hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc((void **)&dev_sun, texHQSize));
checkCudaErrors(hipMemcpy(dev_sun, sun, texHQSize, hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc((void **)&dev_jupiter, texHQSize));
checkCudaErrors(hipMemcpy(dev_jupiter, jupiter, texHQSize, hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc((void **)&dev_saturn, texHQSize));
checkCudaErrors(hipMemcpy(dev_saturn, saturn, texHQSize, hipMemcpyHostToDevice));
}
/****** Allocate and copy memory for adjustable background color ******/
Color background;
if (image == 0) background = Color(0, 0, 0);
else if (image == 1) background = Color(0, 0, 0);
else if (image == 2) background = Color(0, 0, 0);
else background = Color(0.70, 0.80, 1.00);
Color *dev_background;
checkCudaErrors(hipMallocManaged((void **)&dev_background, sizeof(Color)));
checkCudaErrors(hipMemcpy(dev_background, &background, sizeof(Color), hipMemcpyHostToDevice));
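// dev_background is in managed (unified) memory, so it is visible to both host and device;
// the explicit copy just initializes it before the render kernel reads it.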
auto texture_time_end = high_resolution_clock::now();
std::cerr << "Rendering a " << nx << "x" << ny << " image with " << ns << " samples per pixel ";
std::cerr << "in " << tx << "x" << ty << " blocks.\n";
auto create_time_start = high_resolution_clock::now();
int num_pixels = nx*ny;
size_t fb_size = num_pixels*sizeof(Vec3);
// allocate frame buffer (unified memory)
Vec3 *fb;
checkCudaErrors(hipMallocManaged((void **)&fb, fb_size));
// allocate random state
hiprandState_t *d_rand_state;
checkCudaErrors(hipMalloc((void **)&d_rand_state, num_pixels*sizeof(hiprandState_t)));
hiprandState_t *d_rand_state2;
checkCudaErrors(hipMalloc((void **)&d_rand_state2, 1*sizeof(hiprandState_t)));
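// d_rand_state holds one RNG state per pixel for the render kernel;
// d_rand_state2 is a single state used only while building the scene on the device.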
/****** Render and time frame buffer ******/
// we need that 2nd random state to be initialized for the world creation
hipLaunchKernelGGL(( rand_init), dim3(1),dim3(1), 0, 0, d_rand_state2);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
// make our world of hittables
Hittable **d_list;
int numHittables;
if (image == 0) {
numHittables = 16 * 16 + 2;
checkCudaErrors(hipMalloc((void **)&d_list, numHittables*sizeof(Hittable *)));
} else if (image == 1) {
numHittables = 11+45*56;
checkCudaErrors(hipMalloc((void **)&d_list, numHittables*sizeof(Hittable *)));
} else if (image == 2) {
numHittables = 18;
checkCudaErrors(hipMalloc((void **)&d_list, numHittables*sizeof(Hittable *)));
} else {
numHittables = 22*22+1+4;
checkCudaErrors(hipMalloc((void **)&d_list, numHittables*sizeof(Hittable *)));
}
Hittable **d_world;
checkCudaErrors(hipMalloc((void **)&d_world, sizeof(Hittable *)));
Camera **d_camera;
checkCudaErrors(hipMalloc((void **)&d_camera, sizeof(Camera *)));
if (image == 0) {
hipLaunchKernelGGL(( glow_balls), dim3(1),dim3(1), 0, 0, d_list, d_world, d_camera, nx, ny, d_rand_state2);
} else if (image == 1) {
hipLaunchKernelGGL(( solar_system), dim3(1),dim3(1), 0, 0, d_list,d_world,d_camera, nx, ny, d_rand_state2, tex_nx, tex_ny, texHQ_nx, texHQ_ny, dev_sun,
dev_mercury, dev_venus, dev_earth, dev_mars, dev_jupiter, dev_saturn, dev_uranus, dev_neptune, dev_pluto);
} else if (image == 2) {
hipLaunchKernelGGL(( pool_table), dim3(1),dim3(1), 0, 0, d_list, d_world, d_camera, nx, ny, d_rand_state2);
} else {
hipLaunchKernelGGL(( create_world), dim3(1),dim3(1), 0, 0, d_list,d_world,d_camera, nx, ny, d_rand_state2);
}
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
// Render our buffer
dim3 blocks(nx/tx+1,ny/ty+1);
dim3 threads(tx,ty);
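// The +1 in the grid dimensions rounds up so the whole image is covered when nx or ny is not
// a multiple of the block size; threads past the edge are expected to exit early inside
// render_init/render.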
hipLaunchKernelGGL(( render_init), dim3(blocks), dim3(threads), 0, 0, nx, ny, d_rand_state);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
auto create_time_end = high_resolution_clock::now();
std::cerr << "Starting Render.\n";
auto render_time_start = high_resolution_clock::now();
hipLaunchKernelGGL(( render), dim3(blocks), dim3(threads), 0, 0, fb, nx, ny, ns, d_camera, d_world, d_rand_state, depth, dev_background);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
std::cerr << "Render Finished.\n";
auto render_time_end = high_resolution_clock::now();
// Output File
std::fstream file;
file.open("out.ppm", std::ios::out);
// std::streambuf *ppm_out = file.rdbuf();
// Redirect Cout
// std::cout.rdbuf(ppm_out);
auto save_time_start = high_resolution_clock::now();
// Output FB as Image
file << "P3\n" << nx << " " << ny << "\n255\n";
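// "P3" is the ASCII PPM header (RGB triplets, max channel value 255); rows are written from
// j = ny-1 downward, which puts the top of the image first assuming the usual
// "Ray Tracing in One Weekend" camera convention.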
for (int j = ny-1; j >= 0; j--) {
for (int i = 0; i < nx; i++) {
size_t pixel_index = j*nx + i;
writeColor(file,fb[pixel_index]);
}
}
auto save_time_end = high_resolution_clock::now();
// clean up
checkCudaErrors(hipDeviceSynchronize());
if (image == 0) {
hipLaunchKernelGGL(( free_glow_balls), dim3(1),dim3(1), 0, 0, d_list, d_world, d_camera);
} else if (image == 1) {
hipLaunchKernelGGL(( free_solar_system), dim3(1),dim3(1), 0, 0, d_list, d_world, d_camera);
checkCudaErrors(hipFree(dev_mercury));
checkCudaErrors(hipFree(dev_venus));
checkCudaErrors(hipFree(dev_earth));
checkCudaErrors(hipFree(dev_mars));
checkCudaErrors(hipFree(dev_jupiter));
checkCudaErrors(hipFree(dev_saturn));
checkCudaErrors(hipFree(dev_uranus));
checkCudaErrors(hipFree(dev_neptune));
checkCudaErrors(hipFree(dev_pluto));
checkCudaErrors(hipFree(dev_sun));
} else if (image == 2) {
hipLaunchKernelGGL(( free_pool_table), dim3(1),dim3(1), 0, 0, d_list, d_world, d_camera);
}else {
hipLaunchKernelGGL(( free_world), dim3(1),dim3(1), 0, 0, d_list, d_world, d_camera);
}
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipFree(d_camera));
checkCudaErrors(hipFree(d_world));
checkCudaErrors(hipFree(d_list));
checkCudaErrors(hipFree(d_rand_state));
checkCudaErrors(hipFree(d_rand_state2));
checkCudaErrors(hipFree(fb));
checkCudaErrors(hipFree(dev_background));
std::cerr << "Image Successfully Saved." << std::endl;
file.close();
// useful for cuda-memcheck --leak-check full
hipDeviceReset();
auto program_end = high_resolution_clock::now();
// Texture Time
auto texture_time = duration_cast<milliseconds>(texture_time_end - texture_time_start);
std::cout << "Texture Transfer Time: " << texture_time.count() << "ms" << std::endl;
// Create Time
auto create_time = duration_cast<milliseconds>(create_time_end - create_time_start);
std::cout << "World Creation Time: " << create_time.count() << "ms" << std::endl;
// Render Time
auto render_time = duration_cast<milliseconds>(render_time_end - render_time_start);
std::cout << "Render Time: " << render_time.count() << "ms" << std::endl;
// Save image time
auto save_time = duration_cast<milliseconds>(save_time_end - save_time_start);
std::cout << "Image Save Time: " << save_time.count() << "ms" << std::endl;
// Total Time
auto time = duration_cast<milliseconds>(program_end - program_start);
std::cout << "Total Time: " << time.count() << "ms" << std::endl;
return 0;
} | 460db2b086dca94ff6c01dd47a6be16ffe9b7e20.cu | #include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <chrono>
#include <cstdlib>
#include <curand_kernel.h>
#include "Vec3.h"
#include "Color.h"
#include "Ray.h"
#include "Sphere.h"
#include "Hittable_List.h"
#include "Camera.h"
#include "Texture.h"
#include "Render.h"
#include "Moving-Sphere.h"
#include "shader_stb_image.h"
using namespace std::chrono;
#define RND (curand_uniform(&local_rand_state))
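// RND draws a uniform float in (0, 1] from the caller's kernel-local cuRAND state.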
__global__ void glow_balls(Hittable **d_list, Hittable **d_world, Camera **d_camera, int nx, int ny, curandState *rand_state)
{
if (threadIdx.x == 0 && blockIdx.x == 0) {
curandState local_rand_state = *rand_state;
int i = 0;
d_list[i++] = new Sphere(Vec3(0, -1000, -1), 1000, new Lambertian(new Solid_Color(0.5, 0.5, 0.5)));
for (int a = -8; a < 8; a++) {
for (int b = -8; b < 8; b++) {
float choose_mat = RND;
Vec3 center(a + 0.9f * RND, 0.2, b + 0.9f * RND);
if (choose_mat < .5f) {
d_list[i++] = new Sphere(center, 0.2,
new Diffuse_Light(new Solid_Color(RND * RND, 0, RND * RND)));
} else {
d_list[i++] = new Moving_Sphere(center, center + Vec3(0, 0, RND), 0.0, 1.0, 0.2,
new Diffuse_Light(new Solid_Color(RND * RND, 0, RND * RND)));
}
}
}
d_list[i++] = new Sphere(Vec3(4, 1, 1.5f), 1.0, new Dielectric(1.5));
// World
*rand_state = local_rand_state;
*d_world = new Hittable_List(d_list, 16 * 16 + 2);
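// 16*16 small spheres from the grid above, plus the ground sphere and the glass sphere.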
// Camera
Vec3 lookfrom = Vec3(13, 2, 5);
Vec3 lookat = Vec3(0, 0, 0);
float dist_to_focus = 10.0;
float aperture = .1f;
*d_camera = new Camera(lookfrom, lookat, Vec3(0,1,0), 25.0, float(nx)/float(ny), aperture, dist_to_focus, 0 ,1);
}
}
__global__ void free_glow_balls(Hittable **d_list, Hittable **d_world, Camera **d_camera)
{
for(int i=0; i < (16 * 16 + 2); i++) {
delete ((Hittable *)d_list[i])->mat_ptr;
delete d_list[i];
}
delete *d_world;
delete *d_camera;
}
__global__ void create_world(Hittable **d_list, Hittable **d_world, Camera **d_camera, int nx, int ny, curandState *rand_state)
{
if (threadIdx.x == 0 && blockIdx.x == 0) {
curandState local_rand_state = *rand_state;
d_list[0] = new Sphere(Vec3(0,-1000.0,-1), 1000, new Lambertian(new Solid_Color(Vec3(0.5,0.5,0.5))));
int i = 1;
for(int a = -11; a < 11; a++) {
for(int b = -11; b < 11; b++) {
float choose_mat = RND;
Vec3 center(a+RND,0.2,b+RND);
if(choose_mat < 0.8f) {
d_list[i++] = new Sphere(center, 0.2, new Lambertian(new Solid_Color(Vec3(RND*RND, RND*RND, RND*RND))));
}
else if(choose_mat < 0.95f) {
d_list[i++] = new Sphere(center, 0.2, new Metal(Vec3(0.5f*(1.0f+RND), 0.5f*(1.0f+RND), 0.5f*(1.0f+RND)), 0.5f*RND));
}
else {
d_list[i++] = new Sphere(center, 0.2, new Dielectric(1.5));
}
}
}
d_list[i++] = new Sphere(Vec3(0, 1,0), 1.0, new Dielectric(1.5));
d_list[i++] = new Sphere(Vec3(-4, 1, 0), 1.0 , new Lambertian(new Solid_Color(Vec3(0.4,0.3,0.1))));
//d_list[i++] = new Sphere(Vec3(0, 4, 5), 1.0, new Diffuse_Light( new Solid_Color(Vec3(7, 7, 7))));
d_list[i++] = new Sphere(Vec3(4, 1, 0), 1.0, new Metal(Vec3(0.7, 0.6, 0.5), 0.0));
*rand_state = local_rand_state;
*d_world = new Hittable_List(d_list, 22*22+1+3);
Vec3 lookfrom = Vec3(13,2,3);
Vec3 lookat = Vec3(0,0,0);
float dist_to_focus = 10.0;
float aperture = 0.1;
*d_camera = new Camera(lookfrom, lookat, Vec3(0,1,0), 25.0, float(nx)/float(ny), aperture, dist_to_focus, 0 ,1);
}
}
__global__ void free_world(Hittable **d_list, Hittable **d_world, Camera **d_camera)
{
for(int i=0; i < 22*22+1+3; i++) {
delete ((Sphere *)d_list[i])->mat_ptr;
delete d_list[i];
}
delete *d_world;
delete *d_camera;
}
__global__ void solar_system(Hittable **d_list, Hittable **d_world, Camera **d_camera, int nx, int ny, curandState *rand_state, int tex_nx, int tex_ny, int texHQ_nx, int texHQ_ny, unsigned char *sun,
unsigned char *mercury, unsigned char *venus, unsigned char *earth, unsigned char *mars, unsigned char *jupiter, unsigned char *saturn, unsigned char *uranus, unsigned char *neptune, unsigned char* pluto)
{
if (threadIdx.x == 0 && blockIdx.x == 0) {
curandState local_rand_state = *rand_state;
int i = 0;
Texture *sunText = new Image_Text(sun, texHQ_nx, texHQ_ny);
Texture *star1Text = new Solid_Color(Vec3(1, 1, 1)); // White
Texture *star2Text = new Solid_Color(Vec3(0.75, 0.6, 0.5)); // Yellow
Texture *star3Text = new Solid_Color(Vec3(0.93, 0.41, 0.24)); // Red
Texture *star4Text = new Solid_Color(Vec3(0.4, .82, 0.95)); // Blue
// Create sun and slightly bigger light source
d_list[i++] = new Sphere(Vec3(0, 0, -320), 300.0 , new Diffuse_Light(sunText));
d_list[i++] = new Sphere(Vec3(0, 0, -1300), 600.0 , new Diffuse_Light(new Solid_Color(Vec3(0.25, 0.2, 0.12))));
// Create each planet in a line
d_list[i++] = new Sphere(Vec3(0, 0, -10), 2, new Lambertian(new Image_Text(mercury, tex_nx, tex_ny)));
d_list[i++] = new Sphere(Vec3(0, 0, 0), 3.6, new Lambertian(new Image_Text(venus, tex_nx, tex_ny)));
d_list[i++] = new Sphere(Vec3(0, 0, 13), 4.4, new Lambertian(new Image_Text(earth, tex_nx, tex_ny)));
d_list[i++] = new Sphere(Vec3(0, 0, 27), 2.4, new Lambertian(new Image_Text(mars, tex_nx, tex_ny)));
d_list[i++] = new Sphere(Vec3(0, 0, 80), 34.0, new Lambertian(new Image_Text(jupiter, texHQ_nx, texHQ_ny)));
d_list[i++] = new Sphere(Vec3(0, 0, 190), 28.0, new Lambertian(new Image_Text(saturn, texHQ_nx, texHQ_ny)));
d_list[i++] = new Sphere(Vec3(0, 0, 310), 16.4 , new Lambertian(new Image_Text(uranus, tex_nx, tex_ny)));
d_list[i++] = new Sphere(Vec3(0, 0, 450), 16.0, new Lambertian(new Image_Text(neptune, tex_nx, tex_ny)));
d_list[i++] = new Sphere(Vec3(0, 0, 575), 2.75, new Lambertian(new Image_Text(pluto, tex_nx, tex_ny)));
// Generates random stars in the background
// DEPENDS GREATLY on lookfrom, lookat, and fov
for(int a = -450; a < 450; a+=20) {
for(int c = -20; c < 1100; c+=20) {
float starColor = RND;
float rand1 = RND;
rand1 *= (20.f+0.999999f);
rand1 = truncf(rand1);
float rand2 = RND;
rand2 *= (20.f+0.999999f);
rand2 = truncf(rand2);
float rand3 = RND;
rand3 *= (20.f+0.999999f);
rand3 = truncf(rand3);
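// Each rand value is scaled and truncated to an integer in [0, 20], so star centers are
// jittered on a coarse integer grid rather than placed continuously.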
Vec3 center(250 + rand1 + (800 - c), a+rand2, c+rand3);
if (starColor < 0.7f) {
d_list[i++] = new Sphere(center, RND, new Diffuse_Light(star1Text));
} else if (starColor < 0.9f) {
d_list[i++] = new Sphere(center, RND, new Diffuse_Light(star2Text));
} else if (starColor < 0.95f) {
d_list[i++] = new Sphere(center, RND, new Diffuse_Light(star3Text));
} else {
d_list[i++] = new Sphere(center, RND, new Diffuse_Light(star4Text));
}
}
}
*rand_state = local_rand_state;
*d_world = new Hittable_List(d_list, 11+45*56);
Vec3 lookfrom = Vec3(-145,0, -25);
Vec3 lookat = Vec3(-110,0, 5);
float dist_to_focus = 100.0;
float aperture = 0.1;
float fov = 52.0;
*d_camera = new Camera(lookfrom, lookat, Vec3(0,1,0), fov, float(nx)/float(ny), aperture, dist_to_focus, 0 ,1);
}
}
__global__ void free_solar_system(Hittable **d_list, Hittable **d_world, Camera **d_camera)
{
for(int i=0; i < 11+45*56; i++) {
delete ((Hittable *)d_list[i])->mat_ptr;
delete d_list[i];
}
delete *d_world;
delete *d_camera;
}
__global__ void pool_table(Hittable **d_list, Hittable **d_world, Camera **d_camera, int nx, int ny, curandState *rand_state)
{
if (threadIdx.x == 0 && blockIdx.x == 0) {
curandState local_rand_state = *rand_state;
int i = 0;
// Pool balls
d_list[i++] = new Sphere(Vec3(-.05, 0, 2), .5, new Metal(Vec3(4.f*0.8314f, 4.f*0.83f, 4.f*0.09f), 1));
d_list[i++] = new Sphere(Vec3(-.1, .5, 2.85), .5, new Metal(Vec3(4.f*0.059f, 4.f*0.333f, 4.f*0.694f), 1));
d_list[i++] = new Sphere(Vec3(-.1, -.5, 2.85), .5, new Metal(Vec3(4.f*0.73f, 4.f*0.102f, 4.f*0.102f), 1));
d_list[i++] = new Sphere(Vec3(-.15, -1, 3.7), .5, new Metal(Vec3(4.f*0.431f, 4.f*0.102f, 4.f*0.53f), 1));
d_list[i++] = new Sphere(Vec3(-.15, 0, 3.7), .5, new Metal(Vec3(0, 0, 0), 1));
d_list[i++] = new Sphere(Vec3(-.15, 1, 3.7), .5, new Metal(Vec3(4.f*0.059f, 4.f*0.302f, 4.f*0.059f), 1));
d_list[i++] = new Sphere(Vec3(-.185, 1.5, 4.55), .5, new Metal(Vec3(4.f*0.37f, 4.f*0.02f, 4.f*0.01568f), 1));
d_list[i++] = new Sphere(Vec3(-.185, .5, 4.55), .5, new Metal(Vec3(4.f*0.80f, 4.f*0.333f, 4.f*0.063f), 1));
d_list[i++] = new Sphere(Vec3(-.185, -.5, 4.55), .5, new Metal(Vec3(4.f*0.8314f, 4.f*0.83f, 4.f*0.09f), 1));
d_list[i++] = new Sphere(Vec3(-.185, -1.5, 4.55), .5, new Metal(Vec3(4.f*0.059f, 4.f*0.333f, 4.f*0.694f), 1));
d_list[i++] = new Sphere(Vec3(-.25, -2.0, 5.40), .5, new Metal(Vec3(4.f*0.37f, 4.f*0.02f, 4.f*0.01568f), 1));
d_list[i++] = new Sphere(Vec3(-.25, -1.0, 5.40), .5, new Metal(Vec3(4.f*0.059f, 4.f*0.302f, 4.f*0.059f), 1));
d_list[i++] = new Sphere(Vec3(-.25, 0.0, 5.40), .5, new Metal(Vec3(4.f*0.80f, 4.f*0.333f, 4.f*0.063f), 1));
d_list[i++] = new Sphere(Vec3(-.25, 1.0, 5.40), .5, new Metal(Vec3(4.f*0.431f, 4.f*0.102f, 4.f*0.53f), 1));
d_list[i++] = new Sphere(Vec3(-.25, 2.0, 5.40), .5, new Metal(Vec3(4.f*0.73f, 4.f*0.102f, 4.f*0.102f), 1));
// Cue ball
d_list[i++] = new Sphere(Vec3(0, 0, -5), .5, new Metal(Vec3(1, 1, 1), 1));
// The ground of the table
d_list[i++] = new Sphere(Vec3(-100.5, 0.0, -1.0), 100, new Lambertian(Vec3(.212, .4706, .294)));
// Lighting above the table
d_list[i++] = new Sphere(Vec3(100, 5, 0), 10, new Diffuse_Light(Vec3(20, 20, 20)));
// Table
*rand_state = local_rand_state;
*d_world = new Hittable_List(d_list, 18);
// Camera
Vec3 lookfrom = Vec3(3, 0, -15);
Vec3 lookat = Vec3(-2, 0, 10);
float dist_to_focus = 25.0;
float aperture = 0;
*d_camera = new Camera(lookfrom, lookat, Vec3(0,0,1), 10.0, float(nx)/float(ny), aperture, dist_to_focus, 0 ,1);
}
}
__global__ void free_pool_table(Hittable **d_list, Hittable **d_world, Camera **d_camera)
{
for(int i=0; i < 18; i++) { // free all 18 objects handed to Hittable_List above
delete ((Hittable *)d_list[i])->mat_ptr;
delete d_list[i];
}
delete *d_world;
delete *d_camera;
}
int main(int argc, char **argv)
{
auto program_start = high_resolution_clock::now();
/****** Set up image size, block size, and frame buffer ******/
int depth = 50;
int tx = 8;
int ty = 8;
int image, nx, ny, ns;
if (argc < 5) {
nx = 400;
ny = 225;
ns = 10000;
image = 2;
} else {
image = atoi(argv[1]);
nx = atoi(argv[2]);
ny = atoi(argv[3]);
ns = atoi(argv[4]);
}
/****** Allocate and copy memory for any image textures ******/
int tex_nx, tex_ny, tex_nn;
int texHQ_nx, texHQ_ny, texHQ_nn;
unsigned char *dev_mercury;
unsigned char *dev_venus;
unsigned char *dev_earth;
unsigned char *dev_mars;
unsigned char *dev_jupiter;
unsigned char *dev_saturn;
unsigned char *dev_uranus;
unsigned char *dev_neptune;
unsigned char *dev_sun;
unsigned char *dev_pluto;
auto texture_time_start = high_resolution_clock::now();
if (image == 1) {
/****** Standard quality textures ******/
unsigned char *mercury = stbi_load("../Common/textures/mercury.jpg", &tex_nx, &tex_ny, &tex_nn, 0);
unsigned char *venus = stbi_load("../Common/textures/venus.jpg", &tex_nx, &tex_ny, &tex_nn, 0);
unsigned char *earth = stbi_load("../Common/textures/earth.jpg", &tex_nx, &tex_ny, &tex_nn, 0);
unsigned char *mars = stbi_load("../Common/textures/mars.jpg", &tex_nx, &tex_ny, &tex_nn, 0);
unsigned char *uranus = stbi_load("../Common/textures/uranus.jpg", &tex_nx, &tex_ny, &tex_nn, 0);
unsigned char *neptune = stbi_load("../Common/textures/neptune.jpg", &tex_nx, &tex_ny, &tex_nn, 0);
unsigned char *pluto = stbi_load("../Common/textures/pluto.jpg", &tex_nx, &tex_ny, &tex_nn, 0);
/****** High quality textures for larger bodies ******/
unsigned char *sun = stbi_load("../Common/textures/sunHQ.jpg", &texHQ_nx, &texHQ_ny, &texHQ_nn, 0);
unsigned char *jupiter = stbi_load("../Common/textures/jupiterHQ.jpg", &texHQ_nx, &texHQ_ny, &texHQ_nn, 0);
unsigned char *saturn = stbi_load("../Common/textures/saturnHQ.jpg", &texHQ_nx, &texHQ_ny, &texHQ_nn, 0);
/****** Allocate memory and copy each texture to the GPU ******/
size_t texSize = tex_nx*tex_ny*tex_nn*sizeof(unsigned char);
size_t texHQSize = texHQ_nx*texHQ_ny*texHQ_nn*sizeof(unsigned char);
checkCudaErrors(cudaMalloc((void **)&dev_mercury, texSize));
checkCudaErrors(cudaMemcpy(dev_mercury, mercury, texSize, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc((void **)&dev_venus, texSize));
checkCudaErrors(cudaMemcpy(dev_venus, venus, texSize, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc((void **)&dev_earth, texSize));
checkCudaErrors(cudaMemcpy(dev_earth, earth, texSize, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc((void **)&dev_mars, texSize));
checkCudaErrors(cudaMemcpy(dev_mars, mars, texSize, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc((void **)&dev_uranus, texSize));
checkCudaErrors(cudaMemcpy(dev_uranus, uranus, texSize, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc((void **)&dev_neptune, texSize));
checkCudaErrors(cudaMemcpy(dev_neptune, neptune, texSize, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc((void **)&dev_pluto, texSize));
checkCudaErrors(cudaMemcpy(dev_pluto, pluto, texSize, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc((void **)&dev_sun, texHQSize));
checkCudaErrors(cudaMemcpy(dev_sun, sun, texHQSize, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc((void **)&dev_jupiter, texHQSize));
checkCudaErrors(cudaMemcpy(dev_jupiter, jupiter, texHQSize, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc((void **)&dev_saturn, texHQSize));
checkCudaErrors(cudaMemcpy(dev_saturn, saturn, texHQSize, cudaMemcpyHostToDevice));
}
/****** Allocate and copy memory for adjustable background color ******/
Color background;
if (image == 0) background = Color(0, 0, 0);
else if (image == 1) background = Color(0, 0, 0);
else if (image == 2) background = Color(0, 0, 0);
else background = Color(0.70, 0.80, 1.00);
Color *dev_background;
checkCudaErrors(cudaMallocManaged((void **)&dev_background, sizeof(Color)));
checkCudaErrors(cudaMemcpy(dev_background, &background, sizeof(Color), cudaMemcpyHostToDevice));
auto texture_time_end = high_resolution_clock::now();
std::cerr << "Rendering a " << nx << "x" << ny << " image with " << ns << " samples per pixel ";
std::cerr << "in " << tx << "x" << ty << " blocks.\n";
auto create_time_start = high_resolution_clock::now();
int num_pixels = nx*ny;
size_t fb_size = num_pixels*sizeof(Vec3);
// allocate frame buffer (unified memory)
Vec3 *fb;
checkCudaErrors(cudaMallocManaged((void **)&fb, fb_size));
// allocate random state
curandState *d_rand_state;
checkCudaErrors(cudaMalloc((void **)&d_rand_state, num_pixels*sizeof(curandState)));
curandState *d_rand_state2;
checkCudaErrors(cudaMalloc((void **)&d_rand_state2, 1*sizeof(curandState)));
/****** Render and time frame buffer ******/
// we need that 2nd random state to be initialized for the world creation
rand_init<<<1,1>>>(d_rand_state2);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
// make our world of hittables
Hittable **d_list;
int numHittables;
if (image == 0) {
numHittables = 16 * 16 + 2;
checkCudaErrors(cudaMalloc((void **)&d_list, numHittables*sizeof(Hittable *)));
} else if (image == 1) {
numHittables = 11+45*56;
checkCudaErrors(cudaMalloc((void **)&d_list, numHittables*sizeof(Hittable *)));
} else if (image == 2) {
numHittables = 18;
checkCudaErrors(cudaMalloc((void **)&d_list, numHittables*sizeof(Hittable *)));
} else {
numHittables = 22*22+1+4;
checkCudaErrors(cudaMalloc((void **)&d_list, numHittables*sizeof(Hittable *)));
}
Hittable **d_world;
checkCudaErrors(cudaMalloc((void **)&d_world, sizeof(Hittable *)));
Camera **d_camera;
checkCudaErrors(cudaMalloc((void **)&d_camera, sizeof(Camera *)));
if (image == 0) {
glow_balls<<<1,1>>>(d_list, d_world, d_camera, nx, ny, d_rand_state2);
} else if (image == 1) {
solar_system<<<1,1>>>(d_list,d_world,d_camera, nx, ny, d_rand_state2, tex_nx, tex_ny, texHQ_nx, texHQ_ny, dev_sun,
dev_mercury, dev_venus, dev_earth, dev_mars, dev_jupiter, dev_saturn, dev_uranus, dev_neptune, dev_pluto);
} else if (image == 2) {
pool_table<<<1,1>>>(d_list, d_world, d_camera, nx, ny, d_rand_state2);
} else {
create_world<<<1,1>>>(d_list,d_world,d_camera, nx, ny, d_rand_state2);
}
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
// Render our buffer
dim3 blocks(nx/tx+1,ny/ty+1);
dim3 threads(tx,ty);
render_init<<<blocks, threads>>>(nx, ny, d_rand_state);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
auto create_time_end = high_resolution_clock::now();
std::cerr << "Starting Render.\n";
auto render_time_start = high_resolution_clock::now();
render<<<blocks, threads>>>(fb, nx, ny, ns, d_camera, d_world, d_rand_state, depth, dev_background);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
std::cerr << "Render Finished.\n";
auto render_time_end = high_resolution_clock::now();
// Output File
std::fstream file;
file.open("out.ppm", std::ios::out);
// std::streambuf *ppm_out = file.rdbuf();
// Redirect Cout
// std::cout.rdbuf(ppm_out);
auto save_time_start = high_resolution_clock::now();
// Output FB as Image
file << "P3\n" << nx << " " << ny << "\n255\n";
for (int j = ny-1; j >= 0; j--) {
for (int i = 0; i < nx; i++) {
size_t pixel_index = j*nx + i;
writeColor(file,fb[pixel_index]);
}
}
auto save_time_end = high_resolution_clock::now();
// clean up
checkCudaErrors(cudaDeviceSynchronize());
if (image == 0) {
free_glow_balls<<<1,1>>>(d_list, d_world, d_camera);
} else if (image == 1) {
free_solar_system<<<1,1>>>(d_list, d_world, d_camera);
checkCudaErrors(cudaFree(dev_mercury));
checkCudaErrors(cudaFree(dev_venus));
checkCudaErrors(cudaFree(dev_earth));
checkCudaErrors(cudaFree(dev_mars));
checkCudaErrors(cudaFree(dev_jupiter));
checkCudaErrors(cudaFree(dev_saturn));
checkCudaErrors(cudaFree(dev_uranus));
checkCudaErrors(cudaFree(dev_neptune));
checkCudaErrors(cudaFree(dev_pluto));
checkCudaErrors(cudaFree(dev_sun));
} else if (image == 2) {
free_pool_table<<<1,1>>>(d_list, d_world, d_camera);
}else {
free_world<<<1,1>>>(d_list, d_world, d_camera);
}
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaFree(d_camera));
checkCudaErrors(cudaFree(d_world));
checkCudaErrors(cudaFree(d_list));
checkCudaErrors(cudaFree(d_rand_state));
checkCudaErrors(cudaFree(d_rand_state2));
checkCudaErrors(cudaFree(fb));
checkCudaErrors(cudaFree(dev_background));
std::cerr << "Image Successfully Saved." << std::endl;
file.close();
// useful for cuda-memcheck --leak-check full
cudaDeviceReset();
auto program_end = high_resolution_clock::now();
// Texture Time
auto texture_time = duration_cast<milliseconds>(texture_time_end - texture_time_start);
std::cout << "Texture Transfer Time: " << texture_time.count() << "ms" << std::endl;
// Create Time
auto create_time = duration_cast<milliseconds>(create_time_end - create_time_start);
std::cout << "World Creation Time: " << create_time.count() << "ms" << std::endl;
// Render Time
auto render_time = duration_cast<milliseconds>(render_time_end - render_time_start);
std::cout << "Render Time: " << render_time.count() << "ms" << std::endl;
// Save image time
auto save_time = duration_cast<milliseconds>(save_time_end - save_time_start);
std::cout << "Image Save Time: " << save_time.count() << "ms" << std::endl;
// Total Time
auto time = duration_cast<milliseconds>(program_end - program_start);
std::cout << "Total Time: " << time.count() << "ms" << std::endl;
return 0;
} |
fb76ea12c9ed1d177c458ae6b8ded5a53c4b3d90.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdlib>
#include <cstdio>
#include <string>
#include <color_spinor_field.h>
#include <color_spinor_field_order.h>
#include <tune_quda.h>
#define PRESERVE_SPINOR_NORM
#ifdef PRESERVE_SPINOR_NORM // Preserve the norm regardless of basis
#define kP (1.0/sqrt(2.0))
#define kU (1.0/sqrt(2.0))
#else // More numerically accurate not to preserve the norm between basis
#define kP (0.5)
#define kU (1.0)
#endif
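// Either way kP*kU = 1/2, so NonRelBasis followed by RelBasis (or vice versa) reproduces the
// original spinor; PRESERVE_SPINOR_NORM additionally makes each rotation unitary on its own.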
namespace quda {
using namespace colorspinor;
void exchangeExtendedGhost(cudaColorSpinorField* spinor, int R[], int parity, hipStream_t *stream_p)
{
#ifdef MULTI_GPU
int nFace = 0;
for(int i=0; i<4; i++){
if(R[i] > nFace) nFace = R[i];
}
int dagger = 0;
int gatherCompleted[2] = {0,0};
int commsCompleted[2] = {0,0};
hipEvent_t gatherEnd[2];
for(int dir=0; dir<2; dir++) hipEventCreateWithFlags(&gatherEnd[dir], hipEventDisableTiming);
for(int dim=3; dim>=0; dim--){
if(!commDim(dim)) continue;
spinor->packExtended(nFace, R, parity, dagger, dim, stream_p); // packing in the dim dimension complete
qudaDeviceSynchronize(); // Need this since packing is performed in stream[Nstream-1]
for(int dir=1; dir>=0; dir--){
spinor->gather(nFace, dagger, 2*dim + dir);
qudaEventRecord(gatherEnd[dir], streams[2*dim+dir]); // gatherEnd[1], gatherEnd[0]
}
int completeSum = 0;
int dir = 1;
while(completeSum < 2){
if(!gatherCompleted[dir]){
if(hipSuccess == hipEventQuery(gatherEnd[dir])){
spinor->commsStart(nFace, 2*dim+dir, dagger);
completeSum++;
gatherCompleted[dir--] = 1;
}
}
}
gatherCompleted[0] = gatherCompleted[1] = 0;
// Query if comms has completed
dir = 1;
while(completeSum < 4){
if(!commsCompleted[dir]){
if(spinor->commsQuery(nFace, 2*dim+dir, dagger)){
spinor->scatterExtended(nFace, parity, dagger, 2*dim+dir);
completeSum++;
commsCompleted[dir--] = 1;
}
}
}
commsCompleted[0] = commsCompleted[1] = 0;
qudaDeviceSynchronize(); // Wait for scatters to complete before next iteration
} // loop over dim
for(int dir=0; dir<2; dir++) hipEventDestroy(gatherEnd[dir]);
#endif
return;
}
/** Straight copy with no basis change */
template <typename FloatOut, typename FloatIn, int Ns, int Nc>
class PreserveBasis {
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
public:
__device__ __host__ inline void operator()(ColorSpinor<RegTypeOut,Nc,Ns> &out, const ColorSpinor<RegTypeIn,Nc,Ns> &in) {
for (int s=0; s<Ns; s++) {
for (int c=0; c<Nc; c++) {
out(s,c) = in(s,c);
}
}
}
};
/** Transform from relativistic into non-relativistic basis */
template <typename FloatOut, typename FloatIn, int Ns, int Nc>
struct NonRelBasis {
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
__device__ __host__ inline void operator()(ColorSpinor<RegTypeOut,Nc,Ns> &out, const ColorSpinor<RegTypeIn,Nc,Ns> &in) {
int s1[4] = {1, 2, 3, 0};
int s2[4] = {3, 0, 1, 2};
RegTypeOut K1[4] = {static_cast<RegTypeOut>(kP), static_cast<RegTypeOut>(-kP),
static_cast<RegTypeOut>(-kP), static_cast<RegTypeOut>(-kP)};
RegTypeOut K2[4] = {static_cast<RegTypeOut>(kP), static_cast<RegTypeOut>(-kP),
static_cast<RegTypeOut>(kP), static_cast<RegTypeOut>(kP)};
for (int s=0; s<Ns; s++) {
for (int c=0; c<Nc; c++) {
out(s,c).real(K1[s]*in(s1[s],c).real() + K2[s]*in(s2[s],c).real());
out(s,c).imag(K1[s]*in(s1[s],c).imag() + K2[s]*in(s2[s],c).imag());
}
}
}
};
/** Transform from non-relativistic into relativistic basis */
template <typename FloatOut, typename FloatIn, int Ns, int Nc>
struct RelBasis {
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
__device__ __host__ inline void operator()(ColorSpinor<RegTypeOut,Nc,Ns> &out, const ColorSpinor<RegTypeIn,Nc,Ns> &in) {
int s1[4] = {1, 2, 3, 0};
int s2[4] = {3, 0, 1, 2};
RegTypeOut K1[4] = {static_cast<RegTypeOut>(-kU), static_cast<RegTypeOut>(kU),
static_cast<RegTypeOut>(kU), static_cast<RegTypeOut>(kU)};
RegTypeOut K2[4] = {static_cast<RegTypeOut>(-kU), static_cast<RegTypeOut>(kU),
static_cast<RegTypeOut>(-kU), static_cast<RegTypeOut>(-kU)};
for (int s=0; s<Ns; s++) {
for (int c=0; c<Nc; c++) {
out(s,c).real(K1[s]*in(s1[s],c).real() + K2[s]*in(s2[s],c).real());
out(s,c).imag(K1[s]*in(s1[s],c).imag() + K2[s]*in(s2[s],c).imag());
}
}
}
};
template<typename OutOrder, typename InOrder, typename Basis>
struct CopySpinorExArg{
OutOrder out;
const InOrder in;
Basis basis;
int E[QUDA_MAX_DIM];
int X[QUDA_MAX_DIM];
int length;
int parity;
CopySpinorExArg(const OutOrder &out, const InOrder &in, const Basis& basis, const int *E, const int *X, const int parity)
: out(out), in(in), basis(basis), parity(parity)
{
this->length = 1;
for(int d=0; d<4; d++){
this->E[d] = E[d];
this->X[d] = X[d];
this->length *= X[d]; // smaller volume
}
}
};
template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder, typename Basis, bool extend>
__device__ __host__ void copyInterior(CopySpinorExArg<OutOrder,InOrder,Basis>& arg, int X)
{
int x[4];
int R[4];
for(int d=0; d<4; d++) R[d] = (arg.E[d] - arg.X[d]) >> 1;
int za = X/(arg.X[0]/2);
int x0h = X - za*(arg.X[0]/2);
int zb = za/arg.X[1];
x[1] = za - zb*arg.X[1];
x[3] = zb / arg.X[2];
x[2] = zb - x[3]*arg.X[2];
x[0] = 2*x0h + ((x[1] + x[2] + x[3] + arg.parity) & 1);
// Y is the cb spatial index into the extended gauge field
int Y = ((((x[3]+R[3])*arg.E[2] + (x[2]+R[2]))*arg.E[1] + (x[1]+R[1]))*arg.E[0]+(x[0]+R[0])) >> 1;
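// Example of the index map (hypothetical sizes): with X = {4,4,4,4} and R = {2,2,2,2} the
// extended lattice is E = {8,8,8,8}, and interior site x = (0,0,0,0) lands at extended
// coordinates (2,2,2,2), i.e. checkerboard index Y = 1170/2 = 585.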
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
ColorSpinor<RegTypeIn,Nc,Ns> in;
ColorSpinor<RegTypeOut,Nc,Ns> out;
int parity = 0;
if(extend){
in = arg.in(X, parity);
arg.basis(out, in);
arg.out(Y, parity) = out;
}else{
in = arg.in(Y, parity);
arg.basis(out, in);
arg.out(Y, parity) = out;
}
}
template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder, typename Basis, bool extend>
__global__ void copyInteriorKernel(CopySpinorExArg<OutOrder,InOrder,Basis> arg)
{
int cb_idx = blockIdx.x*blockDim.x + threadIdx.x;
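// Grid-stride loop: threads step through sites by the total launch size, so any grid size
// covers all arg.length sites.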
while(cb_idx < arg.length){
copyInterior<FloatOut,FloatIn,Ns,Nc,OutOrder,InOrder,Basis,extend>(arg,cb_idx);
cb_idx += gridDim.x*blockDim.x;
}
}
/*
Host function
*/
template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder, typename Basis, bool extend>
void copyInterior(CopySpinorExArg<OutOrder,InOrder,Basis>& arg)
{
for(int cb_idx=0; cb_idx<arg.length; cb_idx++){
copyInterior<FloatOut,FloatIn,Ns,Nc,OutOrder,InOrder,Basis,extend>(arg, cb_idx);
}
}
template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder, typename Basis, bool extend>
class CopySpinorEx : Tunable {
CopySpinorExArg<OutOrder,InOrder,Basis> arg;
const ColorSpinorField &meta;
QudaFieldLocation location;
private:
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
bool advanceSharedBytes(TuneParam ¶m) const { return false; } // Don't tune shared mem
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return arg.length; }
public:
CopySpinorEx(CopySpinorExArg<OutOrder,InOrder,Basis> &arg, const ColorSpinorField &meta, QudaFieldLocation location)
: arg(arg), meta(meta), location(location) {
writeAuxString("out_stride=%d,in_stride=%d",arg.out.stride,arg.in.stride);
}
virtual ~CopySpinorEx() {}
void apply(const hipStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
if(location == QUDA_CPU_FIELD_LOCATION){
copyInterior<FloatOut,FloatIn,Ns,Nc,OutOrder,InOrder,Basis,extend>(arg);
}else if(location == QUDA_CUDA_FIELD_LOCATION){
hipLaunchKernelGGL(( copyInteriorKernel<FloatOut,FloatIn,Ns,Nc,OutOrder,InOrder,Basis,extend>)
, dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg);
}
}
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
long long flops() const { return 0; }
long long bytes() const {
return arg.length*2*Nc*Ns*(sizeof(FloatIn) + sizeof(FloatOut));
}
}; // CopySpinorEx
template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder, typename Basis>
void copySpinorEx(OutOrder outOrder, const InOrder inOrder, const Basis basis, const int *E,
const int *X, const int parity, const bool extend, const ColorSpinorField &meta, QudaFieldLocation location)
{
CopySpinorExArg<OutOrder,InOrder,Basis> arg(outOrder, inOrder, basis, E, X, parity);
if(extend){
CopySpinorEx<FloatOut, FloatIn, Ns, Nc, OutOrder, InOrder, Basis, true> copier(arg, meta, location);
copier.apply(0);
}else{
CopySpinorEx<FloatOut, FloatIn, Ns, Nc, OutOrder, InOrder, Basis, false> copier(arg, meta, location);
copier.apply(0);
}
if(location == QUDA_CUDA_FIELD_LOCATION) checkCudaError();
}
template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder>
void copySpinorEx(OutOrder outOrder, InOrder inOrder, const QudaGammaBasis outBasis, const QudaGammaBasis inBasis,
const int* E, const int* X, const int parity, const bool extend,
const ColorSpinorField &meta, QudaFieldLocation location)
{
if(inBasis == outBasis){
PreserveBasis<FloatOut,FloatIn,Ns,Nc> basis;
copySpinorEx<FloatOut, FloatIn, Ns, Nc, OutOrder, InOrder, PreserveBasis<FloatOut,FloatIn,Ns,Nc> >
(outOrder, inOrder, basis, E, X, parity, extend, meta, location);
}else if(outBasis == QUDA_UKQCD_GAMMA_BASIS && inBasis == QUDA_DEGRAND_ROSSI_GAMMA_BASIS){
if(Ns != 4) errorQuda("Can only change basis with Nspin = 4, not Nspin = %d", Ns);
NonRelBasis<FloatOut,FloatIn,4,Nc> basis;
copySpinorEx<FloatOut, FloatIn, 4, Nc, OutOrder, InOrder, NonRelBasis<FloatOut,FloatIn,4,Nc> >
(outOrder, inOrder, basis, E, X, parity, extend, meta, location);
}else if(inBasis == QUDA_UKQCD_GAMMA_BASIS && outBasis == QUDA_DEGRAND_ROSSI_GAMMA_BASIS){
if(Ns != 4) errorQuda("Can only change basis with Nspin = 4, not Nspin = %d", Ns);
RelBasis<FloatOut,FloatIn,4,Nc> basis;
copySpinorEx<FloatOut, FloatIn, 4, Nc, OutOrder, InOrder, RelBasis<FloatOut,FloatIn,4,Nc> >
(outOrder, inOrder, basis, E, X, parity, extend, meta, location);
}else{
errorQuda("Basis change not supported");
}
}
// Need to rewrite the following two functions...
// Decide on the output order
template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename InOrder>
void extendedCopyColorSpinor(InOrder &inOrder, ColorSpinorField &out,
QudaGammaBasis inBasis, const int *E, const int *X, const int parity, const bool extend,
QudaFieldLocation location, FloatOut *Out, float *outNorm){
if (out.isNative()) {
typedef typename colorspinor_mapper<FloatOut,Ns,Nc>::type ColorSpinor;
ColorSpinor outOrder(out, 1, Out, outNorm);
copySpinorEx<FloatOut,FloatIn,Ns,Nc>
(outOrder, inOrder, out.GammaBasis(), inBasis, E, X, parity, extend, out, location);
} else {
errorQuda("Order not defined");
}
}
template<typename FloatOut, typename FloatIn, int Ns, int Nc>
void extendedCopyColorSpinor(ColorSpinorField &out, const ColorSpinorField &in,
const int parity, const QudaFieldLocation location, FloatOut *Out, FloatIn *In,
float* outNorm, float *inNorm){
int E[4];
int X[4];
const bool extend = (out.Volume() >= in.Volume());
if (extend) {
for (int d=0; d<4; d++) {
E[d] = out.X()[d];
X[d] = in.X()[d];
}
} else {
for (int d=0; d<4; d++) {
E[d] = in.X()[d];
X[d] = out.X()[d];
}
}
X[0] *= 2; E[0] *= 2; // Since we consider only a single parity at a time
if (in.isNative()) {
typedef typename colorspinor_mapper<FloatIn,Ns,Nc>::type ColorSpinor;
ColorSpinor inOrder(in, 1, In, inNorm);
extendedCopyColorSpinor<FloatOut,FloatIn,Ns,Nc>(inOrder, out, in.GammaBasis(), E, X, parity, extend, location, Out, outNorm);
} else {
errorQuda("Order not defined");
}
}
template<int Ns, typename dstFloat, typename srcFloat>
void copyExtendedColorSpinor(ColorSpinorField &dst, const ColorSpinorField &src,
const int parity, const QudaFieldLocation location, dstFloat *Dst, srcFloat *Src,
float *dstNorm, float *srcNorm) {
if(dst.Ndim() != src.Ndim())
errorQuda("Number of dimensions %d %d don't match", dst.Ndim(), src.Ndim());
if(!(dst.SiteOrder() == src.SiteOrder() ||
(dst.SiteOrder() == QUDA_EVEN_ODD_SITE_ORDER &&
src.SiteOrder() == QUDA_ODD_EVEN_SITE_ORDER) ||
(dst.SiteOrder() == QUDA_ODD_EVEN_SITE_ORDER &&
src.SiteOrder() == QUDA_EVEN_ODD_SITE_ORDER) ) ){
errorQuda("Subset orders %d %d don't match", dst.SiteOrder(), src.SiteOrder());
}
if(dst.SiteSubset() != src.SiteSubset())
errorQuda("Subset types do not match %d %d", dst.SiteSubset(), src.SiteSubset());
if(dst.Ncolor() != 3 || src.Ncolor() != 3) errorQuda("Nc != 3 not yet supported");
const int Nc = 3;
// We currently only support parity-ordered fields; even-odd or odd-even
if(dst.SiteOrder() == QUDA_LEXICOGRAPHIC_SITE_ORDER){
errorQuda("Copying to full fields with lexicographical ordering is not currently supported");
}
if(dst.SiteSubset() == QUDA_FULL_SITE_SUBSET){
if(src.FieldOrder() == QUDA_QDPJIT_FIELD_ORDER ||
dst.FieldOrder() == QUDA_QDPJIT_FIELD_ORDER){
errorQuda("QDPJIT field ordering not supported for full site fields");
}
// set for the source subset ordering
srcFloat *srcEven = Src ? Src : (srcFloat*)src.V();
srcFloat* srcOdd = (srcFloat*)((char*)srcEven + src.Bytes()/2);
float *srcNormEven = srcNorm ? srcNorm : (float*)src.Norm();
float *srcNormOdd = (float*)((char*)srcNormEven + src.NormBytes()/2);
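// The two parity halves are stored back to back, so the second-parity pointers are simple
// byte offsets of Bytes()/2 (NormBytes()/2 for the norm); the swap below handles
// odd-even ordered fields.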
if(src.SiteOrder() == QUDA_ODD_EVEN_SITE_ORDER){
std::swap<srcFloat*>(srcEven, srcOdd);
std::swap<float*>(srcNormEven, srcNormOdd);
}
// set for the destination subset ordering
dstFloat *dstEven = Dst ? Dst : (dstFloat*)dst.V();
dstFloat *dstOdd = (dstFloat*)((char*)dstEven + dst.Bytes()/2);
float *dstNormEven = dstNorm ? dstNorm : (float*)dst.Norm();
float *dstNormOdd = (float*)((char*)dstNormEven + dst.NormBytes()/2);
if(dst.SiteOrder() == QUDA_ODD_EVEN_SITE_ORDER){
std::swap<dstFloat*>(dstEven, dstOdd);
std::swap<float*>(dstNormEven, dstNormOdd);
}
// should be able to select either even or odd parity at this point as well.
extendedCopyColorSpinor<dstFloat, srcFloat, Ns, Nc>
(dst, src, 0, location, dstEven, srcEven, dstNormEven, srcNormEven);
extendedCopyColorSpinor<dstFloat, srcFloat, Ns, Nc>
(dst, src, 1, location, dstOdd, srcOdd, dstNormOdd, srcNormOdd);
}else{
extendedCopyColorSpinor<dstFloat, srcFloat, Ns, Nc>
(dst, src, parity, location, Dst, Src, dstNorm, srcNorm);
} // N.B. Need to update this to account for differences in parity
}
template<typename dstFloat, typename srcFloat>
void CopyExtendedColorSpinor(ColorSpinorField &dst, const ColorSpinorField &src,
const int parity, const QudaFieldLocation location, dstFloat *Dst, srcFloat *Src,
float *dstNorm=0, float *srcNorm=0)
{
if(dst.Nspin() != src.Nspin())
errorQuda("source and destination spins must match");
if(dst.Nspin() == 4){
#if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC)
copyExtendedColorSpinor<4>(dst, src, parity, location, Dst, Src, dstNorm, srcNorm);
#else
errorQuda("Extended copy has not been built for Nspin=%d fields",dst.Nspin());
#endif
}else if(dst.Nspin() == 1){
#ifdef GPU_STAGGERED_DIRAC
copyExtendedColorSpinor<1>(dst, src, parity, location, Dst, Src, dstNorm, srcNorm);
#else
errorQuda("Extended copy has not been built for Nspin=%d fields", dst.Nspin());
#endif
}else{
errorQuda("Nspin=%d unsupported", dst.Nspin());
}
}
// There's probably no need to have the additional Dst and Src arguments here!
void copyExtendedColorSpinor(ColorSpinorField &dst, const ColorSpinorField &src,
QudaFieldLocation location, const int parity, void *Dst, void *Src,
void *dstNorm, void *srcNorm){
if(dst.Precision() == QUDA_DOUBLE_PRECISION){
if(src.Precision() == QUDA_DOUBLE_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<double*>(Dst), static_cast<double*>(Src));
}else if(src.Precision() == QUDA_SINGLE_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<double*>(Dst), static_cast<float*>(Src));
}else if(src.Precision() == QUDA_HALF_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<double*>(Dst), static_cast<short*>(Src), 0, static_cast<float*>(srcNorm));
} else {
errorQuda("Unsupported Precision %d", src.Precision());
}
} else if (dst.Precision() == QUDA_SINGLE_PRECISION){
if(src.Precision() == QUDA_DOUBLE_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<float*>(Dst), static_cast<double*>(Src));
}else if(src.Precision() == QUDA_SINGLE_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<float*>(Dst), static_cast<float*>(Src));
}else if(src.Precision() == QUDA_HALF_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<float*>(Dst), static_cast<short*>(Src), 0, static_cast<float*>(srcNorm));
}else{
errorQuda("Unsupported Precision %d", src.Precision());
}
} else if (dst.Precision() == QUDA_HALF_PRECISION){
if(src.Precision() == QUDA_DOUBLE_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<short*>(Dst), static_cast<double*>(Src), static_cast<float*>(dstNorm), 0);
}else if(src.Precision() == QUDA_SINGLE_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<short*>(Dst), static_cast<float*>(Src), static_cast<float*>(dstNorm), 0);
}else if(src.Precision() == QUDA_HALF_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<short*>(Dst), static_cast<short*>(Src), static_cast<float*>(dstNorm), static_cast<float*>(srcNorm));
}else{
errorQuda("Unsupported Precision %d", src.Precision());
}
}else{
errorQuda("Unsupported Precision %d", dst.Precision());
}
}
} // quda
| fb76ea12c9ed1d177c458ae6b8ded5a53c4b3d90.cu | #include <cstdlib>
#include <cstdio>
#include <string>
#include <color_spinor_field.h>
#include <color_spinor_field_order.h>
#include <tune_quda.h>
#define PRESERVE_SPINOR_NORM
#ifdef PRESERVE_SPINOR_NORM // Preserve the norm regardless of basis
#define kP (1.0/sqrt(2.0))
#define kU (1.0/sqrt(2.0))
#else // More numerically accurate not to preserve the norm between basis
#define kP (0.5)
#define kU (1.0)
#endif
namespace quda {
using namespace colorspinor;
void exchangeExtendedGhost(cudaColorSpinorField* spinor, int R[], int parity, cudaStream_t *stream_p)
{
#ifdef MULTI_GPU
int nFace = 0;
for(int i=0; i<4; i++){
if(R[i] > nFace) nFace = R[i];
}
int dagger = 0;
int gatherCompleted[2] = {0,0};
int commsCompleted[2] = {0,0};
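// Per dimension the exchange below proceeds as: pack the halo, gather it into the
// communication buffers (one event per direction), start comms once the matching gather
// event has fired, then scatter the received halo back into the extended field.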
cudaEvent_t gatherEnd[2];
for(int dir=0; dir<2; dir++) cudaEventCreate(&gatherEnd[dir], cudaEventDisableTiming);
for(int dim=3; dim>=0; dim--){
if(!commDim(dim)) continue;
spinor->packExtended(nFace, R, parity, dagger, dim, stream_p); // packing in the dim dimension complete
qudaDeviceSynchronize(); // Need this since packing is performed in stream[Nstream-1]
for(int dir=1; dir>=0; dir--){
spinor->gather(nFace, dagger, 2*dim + dir);
qudaEventRecord(gatherEnd[dir], streams[2*dim+dir]); // gatherEnd[1], gatherEnd[0]
}
int completeSum = 0;
int dir = 1;
while(completeSum < 2){
if(!gatherCompleted[dir]){
if(cudaSuccess == cudaEventQuery(gatherEnd[dir])){
spinor->commsStart(nFace, 2*dim+dir, dagger);
completeSum++;
gatherCompleted[dir--] = 1;
}
}
}
gatherCompleted[0] = gatherCompleted[1] = 0;
// Query if comms has completed
dir = 1;
while(completeSum < 4){
if(!commsCompleted[dir]){
if(spinor->commsQuery(nFace, 2*dim+dir, dagger)){
spinor->scatterExtended(nFace, parity, dagger, 2*dim+dir);
completeSum++;
commsCompleted[dir--] = 1;
}
}
}
commsCompleted[0] = commsCompleted[1] = 0;
qudaDeviceSynchronize(); // Wait for scatters to complete before next iteration
} // loop over dim
for(int dir=0; dir<2; dir++) cudaEventDestroy(gatherEnd[dir]);
#endif
return;
}
/** Straight copy with no basis change */
template <typename FloatOut, typename FloatIn, int Ns, int Nc>
class PreserveBasis {
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
public:
__device__ __host__ inline void operator()(ColorSpinor<RegTypeOut,Nc,Ns> &out, const ColorSpinor<RegTypeIn,Nc,Ns> &in) {
for (int s=0; s<Ns; s++) {
for (int c=0; c<Nc; c++) {
out(s,c) = in(s,c);
}
}
}
};
/** Transform from relativistic into non-relativistic basis */
template <typename FloatOut, typename FloatIn, int Ns, int Nc>
struct NonRelBasis {
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
__device__ __host__ inline void operator()(ColorSpinor<RegTypeOut,Nc,Ns> &out, const ColorSpinor<RegTypeIn,Nc,Ns> &in) {
int s1[4] = {1, 2, 3, 0};
int s2[4] = {3, 0, 1, 2};
RegTypeOut K1[4] = {static_cast<RegTypeOut>(kP), static_cast<RegTypeOut>(-kP),
static_cast<RegTypeOut>(-kP), static_cast<RegTypeOut>(-kP)};
RegTypeOut K2[4] = {static_cast<RegTypeOut>(kP), static_cast<RegTypeOut>(-kP),
static_cast<RegTypeOut>(kP), static_cast<RegTypeOut>(kP)};
for (int s=0; s<Ns; s++) {
for (int c=0; c<Nc; c++) {
out(s,c).real(K1[s]*in(s1[s],c).real() + K2[s]*in(s2[s],c).real());
out(s,c).imag(K1[s]*in(s1[s],c).imag() + K2[s]*in(s2[s],c).imag());
}
}
}
};
/** Transform from non-relativistic into relavisitic basis */
template <typename FloatOut, typename FloatIn, int Ns, int Nc>
struct RelBasis {
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
__device__ __host__ inline void operator()(ColorSpinor<RegTypeOut,Nc,Ns> &out, const ColorSpinor<RegTypeIn,Nc,Ns> &in) {
int s1[4] = {1, 2, 3, 0};
int s2[4] = {3, 0, 1, 2};
RegTypeOut K1[4] = {static_cast<RegTypeOut>(-kU), static_cast<RegTypeOut>(kU),
static_cast<RegTypeOut>(kU), static_cast<RegTypeOut>(kU)};
RegTypeOut K2[4] = {static_cast<RegTypeOut>(-kU), static_cast<RegTypeOut>(kU),
static_cast<RegTypeOut>(-kU), static_cast<RegTypeOut>(-kU)};
for (int s=0; s<Ns; s++) {
for (int c=0; c<Nc; c++) {
out(s,c).real(K1[s]*in(s1[s],c).real() + K2[s]*in(s2[s],c).real());
out(s,c).imag(K1[s]*in(s1[s],c).imag() + K2[s]*in(s2[s],c).imag());
}
}
}
};
template<typename OutOrder, typename InOrder, typename Basis>
struct CopySpinorExArg{
OutOrder out;
const InOrder in;
Basis basis;
int E[QUDA_MAX_DIM];
int X[QUDA_MAX_DIM];
int length;
int parity;
CopySpinorExArg(const OutOrder &out, const InOrder &in, const Basis& basis, const int *E, const int *X, const int parity)
: out(out), in(in), basis(basis), parity(parity)
{
this->length = 1;
for(int d=0; d<4; d++){
this->E[d] = E[d];
this->X[d] = X[d];
this->length *= X[d]; // smaller volume
}
}
};
template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder, typename Basis, bool extend>
__device__ __host__ void copyInterior(CopySpinorExArg<OutOrder,InOrder,Basis>& arg, int X)
{
int x[4];
int R[4];
for(int d=0; d<4; d++) R[d] = (arg.E[d] - arg.X[d]) >> 1;
int za = X/(arg.X[0]/2);
int x0h = X - za*(arg.X[0]/2);
int zb = za/arg.X[1];
x[1] = za - zb*arg.X[1];
x[3] = zb / arg.X[2];
x[2] = zb - x[3]*arg.X[2];
x[0] = 2*x0h + ((x[1] + x[2] + x[3] + arg.parity) & 1);
// Y is the cb spatial index into the extended gauge field
int Y = ((((x[3]+R[3])*arg.E[2] + (x[2]+R[2]))*arg.E[1] + (x[1]+R[1]))*arg.E[0]+(x[0]+R[0])) >> 1;
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
ColorSpinor<RegTypeIn,Nc,Ns> in;
ColorSpinor<RegTypeOut,Nc,Ns> out;
int parity = 0;
if(extend){
in = arg.in(X, parity);
arg.basis(out, in);
arg.out(Y, parity) = out;
}else{
in = arg.in(Y, parity);
arg.basis(out, in);
arg.out(Y, parity) = out;
}
}
template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder, typename Basis, bool extend>
__global__ void copyInteriorKernel(CopySpinorExArg<OutOrder,InOrder,Basis> arg)
{
int cb_idx = blockIdx.x*blockDim.x + threadIdx.x;
while(cb_idx < arg.length){
copyInterior<FloatOut,FloatIn,Ns,Nc,OutOrder,InOrder,Basis,extend>(arg,cb_idx);
cb_idx += gridDim.x*blockDim.x;
}
}
/*
Host function
*/
template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder, typename Basis, bool extend>
void copyInterior(CopySpinorExArg<OutOrder,InOrder,Basis>& arg)
{
for(int cb_idx=0; cb_idx<arg.length; cb_idx++){
copyInterior<FloatOut,FloatIn,Ns,Nc,OutOrder,InOrder,Basis,extend>(arg, cb_idx);
}
}
template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder, typename Basis, bool extend>
class CopySpinorEx : Tunable {
CopySpinorExArg<OutOrder,InOrder,Basis> arg;
const ColorSpinorField &meta;
QudaFieldLocation location;
private:
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
bool advanceSharedBytes(TuneParam ¶m) const { return false; } // Don't tune shared mem
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return arg.length; }
public:
CopySpinorEx(CopySpinorExArg<OutOrder,InOrder,Basis> &arg, const ColorSpinorField &meta, QudaFieldLocation location)
: arg(arg), meta(meta), location(location) {
writeAuxString("out_stride=%d,in_stride=%d",arg.out.stride,arg.in.stride);
}
virtual ~CopySpinorEx() {}
void apply(const cudaStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
if(location == QUDA_CPU_FIELD_LOCATION){
copyInterior<FloatOut,FloatIn,Ns,Nc,OutOrder,InOrder,Basis,extend>(arg);
}else if(location == QUDA_CUDA_FIELD_LOCATION){
copyInteriorKernel<FloatOut,FloatIn,Ns,Nc,OutOrder,InOrder,Basis,extend>
<<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg);
}
}
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
long long flops() const { return 0; }
long long bytes() const {
return arg.length*2*Nc*Ns*(sizeof(FloatIn) + sizeof(FloatOut));
}
}; // CopySpinorEx
template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder, typename Basis>
void copySpinorEx(OutOrder outOrder, const InOrder inOrder, const Basis basis, const int *E,
const int *X, const int parity, const bool extend, const ColorSpinorField &meta, QudaFieldLocation location)
{
CopySpinorExArg<OutOrder,InOrder,Basis> arg(outOrder, inOrder, basis, E, X, parity);
if(extend){
CopySpinorEx<FloatOut, FloatIn, Ns, Nc, OutOrder, InOrder, Basis, true> copier(arg, meta, location);
copier.apply(0);
}else{
CopySpinorEx<FloatOut, FloatIn, Ns, Nc, OutOrder, InOrder, Basis, false> copier(arg, meta, location);
copier.apply(0);
}
if(location == QUDA_CUDA_FIELD_LOCATION) checkCudaError();
}
template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder>
void copySpinorEx(OutOrder outOrder, InOrder inOrder, const QudaGammaBasis outBasis, const QudaGammaBasis inBasis,
const int* E, const int* X, const int parity, const bool extend,
const ColorSpinorField &meta, QudaFieldLocation location)
{
if(inBasis == outBasis){
PreserveBasis<FloatOut,FloatIn,Ns,Nc> basis;
copySpinorEx<FloatOut, FloatIn, Ns, Nc, OutOrder, InOrder, PreserveBasis<FloatOut,FloatIn,Ns,Nc> >
(outOrder, inOrder, basis, E, X, parity, extend, meta, location);
}else if(outBasis == QUDA_UKQCD_GAMMA_BASIS && inBasis == QUDA_DEGRAND_ROSSI_GAMMA_BASIS){
if(Ns != 4) errorQuda("Can only change basis with Nspin = 4, not Nspin = %d", Ns);
NonRelBasis<FloatOut,FloatIn,4,Nc> basis;
copySpinorEx<FloatOut, FloatIn, 4, Nc, OutOrder, InOrder, NonRelBasis<FloatOut,FloatIn,4,Nc> >
(outOrder, inOrder, basis, E, X, parity, extend, meta, location);
}else if(inBasis == QUDA_UKQCD_GAMMA_BASIS && outBasis == QUDA_DEGRAND_ROSSI_GAMMA_BASIS){
if(Ns != 4) errorQuda("Can only change basis with Nspin = 4, not Nspin = %d", Ns);
RelBasis<FloatOut,FloatIn,4,Nc> basis;
copySpinorEx<FloatOut, FloatIn, 4, Nc, OutOrder, InOrder, RelBasis<FloatOut,FloatIn,4,Nc> >
(outOrder, inOrder, basis, E, X, parity, extend, meta, location);
}else{
errorQuda("Basis change not supported");
}
}
// Need to rewrite the following two functions...
// Decide on the output order
template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename InOrder>
void extendedCopyColorSpinor(InOrder &inOrder, ColorSpinorField &out,
QudaGammaBasis inBasis, const int *E, const int *X, const int parity, const bool extend,
QudaFieldLocation location, FloatOut *Out, float *outNorm){
if (out.isNative()) {
typedef typename colorspinor_mapper<FloatOut,Ns,Nc>::type ColorSpinor;
ColorSpinor outOrder(out, 1, Out, outNorm);
copySpinorEx<FloatOut,FloatIn,Ns,Nc>
(outOrder, inOrder, out.GammaBasis(), inBasis, E, X, parity, extend, out, location);
} else {
errorQuda("Order not defined");
}
}
template<typename FloatOut, typename FloatIn, int Ns, int Nc>
void extendedCopyColorSpinor(ColorSpinorField &out, const ColorSpinorField &in,
const int parity, const QudaFieldLocation location, FloatOut *Out, FloatIn *In,
float* outNorm, float *inNorm){
int E[4];
int X[4];
const bool extend = (out.Volume() >= in.Volume());
if (extend) {
for (int d=0; d<4; d++) {
E[d] = out.X()[d];
X[d] = in.X()[d];
}
} else {
for (int d=0; d<4; d++) {
E[d] = in.X()[d];
X[d] = out.X()[d];
}
}
X[0] *= 2; E[0] *= 2; // Since we consider only a single parity at a time
if (in.isNative()) {
typedef typename colorspinor_mapper<FloatIn,Ns,Nc>::type ColorSpinor;
ColorSpinor inOrder(in, 1, In, inNorm);
extendedCopyColorSpinor<FloatOut,FloatIn,Ns,Nc>(inOrder, out, in.GammaBasis(), E, X, parity, extend, location, Out, outNorm);
} else {
errorQuda("Order not defined");
}
}
template<int Ns, typename dstFloat, typename srcFloat>
void copyExtendedColorSpinor(ColorSpinorField &dst, const ColorSpinorField &src,
const int parity, const QudaFieldLocation location, dstFloat *Dst, srcFloat *Src,
float *dstNorm, float *srcNorm) {
if(dst.Ndim() != src.Ndim())
errorQuda("Number of dimensions %d %d don't match", dst.Ndim(), src.Ndim());
if(!(dst.SiteOrder() == src.SiteOrder() ||
(dst.SiteOrder() == QUDA_EVEN_ODD_SITE_ORDER &&
src.SiteOrder() == QUDA_ODD_EVEN_SITE_ORDER) ||
(dst.SiteOrder() == QUDA_ODD_EVEN_SITE_ORDER &&
src.SiteOrder() == QUDA_EVEN_ODD_SITE_ORDER) ) ){
errorQuda("Subset orders %d %d don't match", dst.SiteOrder(), src.SiteOrder());
}
if(dst.SiteSubset() != src.SiteSubset())
errorQuda("Subset types do not match %d %d", dst.SiteSubset(), src.SiteSubset());
if(dst.Ncolor() != 3 || src.Ncolor() != 3) errorQuda("Nc != 3 not yet supported");
const int Nc = 3;
// We currently only support parity-ordered fields; even-odd or odd-even
if(dst.SiteOrder() == QUDA_LEXICOGRAPHIC_SITE_ORDER){
errorQuda("Copying to full fields with lexicographical ordering is not currently supported");
}
if(dst.SiteSubset() == QUDA_FULL_SITE_SUBSET){
if(src.FieldOrder() == QUDA_QDPJIT_FIELD_ORDER ||
dst.FieldOrder() == QUDA_QDPJIT_FIELD_ORDER){
errorQuda("QDPJIT field ordering not supported for full site fields");
}
// set for the source subset ordering
srcFloat *srcEven = Src ? Src : (srcFloat*)src.V();
srcFloat* srcOdd = (srcFloat*)((char*)srcEven + src.Bytes()/2);
float *srcNormEven = srcNorm ? srcNorm : (float*)src.Norm();
float *srcNormOdd = (float*)((char*)srcNormEven + src.NormBytes()/2);
if(src.SiteOrder() == QUDA_ODD_EVEN_SITE_ORDER){
std::swap<srcFloat*>(srcEven, srcOdd);
std::swap<float*>(srcNormEven, srcNormOdd);
}
// set for the destination subset ordering
dstFloat *dstEven = Dst ? Dst : (dstFloat*)dst.V();
dstFloat *dstOdd = (dstFloat*)((char*)dstEven + dst.Bytes()/2);
float *dstNormEven = dstNorm ? dstNorm : (float*)dst.Norm();
float *dstNormOdd = (float*)((char*)dstNormEven + dst.NormBytes()/2);
if(dst.SiteOrder() == QUDA_ODD_EVEN_SITE_ORDER){
std::swap<dstFloat*>(dstEven, dstOdd);
std::swap<float*>(dstNormEven, dstNormOdd);
}
// At this point it should also be possible to select either the even or the odd parity.
extendedCopyColorSpinor<dstFloat, srcFloat, Ns, Nc>
(dst, src, 0, location, dstEven, srcEven, dstNormEven, srcNormEven);
extendedCopyColorSpinor<dstFloat, srcFloat, Ns, Nc>
(dst, src, 1, location, dstOdd, srcOdd, dstNormOdd, srcNormOdd);
}else{
extendedCopyColorSpinor<dstFloat, srcFloat, Ns, Nc>
(dst, src, parity, location, Dst, Src, dstNorm, srcNorm);
} // N.B. Need to update this to account for differences in parity
}
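// A minimal sketch of the even/odd pointer arithmetic used above, assuming a full field
// whose even-parity half occupies the first Bytes()/2 of the buffer; the names below are
// illustrative only and mirror what the function does with Src/Dst:
//
// char *base = (char*)field.V();
// auto *even = (Float*)base; // first half: even sites
// auto *odd = (Float*)(base + field.Bytes()/2); // second half: odd sites
// if (field.SiteOrder() == QUDA_ODD_EVEN_SITE_ORDER) std::swap(even, odd);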
template<typename dstFloat, typename srcFloat>
void CopyExtendedColorSpinor(ColorSpinorField &dst, const ColorSpinorField &src,
const int parity, const QudaFieldLocation location, dstFloat *Dst, srcFloat *Src,
float *dstNorm=0, float *srcNorm=0)
{
if(dst.Nspin() != src.Nspin())
errorQuda("source and destination spins must match");
if(dst.Nspin() == 4){
#if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC)
copyExtendedColorSpinor<4>(dst, src, parity, location, Dst, Src, dstNorm, srcNorm);
#else
errorQuda("Extended copy has not been built for Nspin=%d fields",dst.Nspin());
#endif
}else if(dst.Nspin() == 1){
#ifdef GPU_STAGGERED_DIRAC
copyExtendedColorSpinor<1>(dst, src, parity, location, Dst, Src, dstNorm, srcNorm);
#else
errorQuda("Extended copy has not been built for Nspin=%d fields", dst.Nspin());
#endif
}else{
errorQuda("Nspin=%d unsupported", dst.Nspin());
}
}
// There's probably no need to have the additional Dst and Src arguments here!
void copyExtendedColorSpinor(ColorSpinorField &dst, const ColorSpinorField &src,
QudaFieldLocation location, const int parity, void *Dst, void *Src,
void *dstNorm, void *srcNorm){
if(dst.Precision() == QUDA_DOUBLE_PRECISION){
if(src.Precision() == QUDA_DOUBLE_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<double*>(Dst), static_cast<double*>(Src));
}else if(src.Precision() == QUDA_SINGLE_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<double*>(Dst), static_cast<float*>(Src));
}else if(src.Precision() == QUDA_HALF_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<double*>(Dst), static_cast<short*>(Src), 0, static_cast<float*>(srcNorm));
} else {
errorQuda("Unsupported Precision %d", src.Precision());
}
} else if (dst.Precision() == QUDA_SINGLE_PRECISION){
if(src.Precision() == QUDA_DOUBLE_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<float*>(Dst), static_cast<double*>(Src));
}else if(src.Precision() == QUDA_SINGLE_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<float*>(Dst), static_cast<float*>(Src));
}else if(src.Precision() == QUDA_HALF_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<float*>(Dst), static_cast<short*>(Src), 0, static_cast<float*>(srcNorm));
}else{
errorQuda("Unsupported Precision %d", src.Precision());
}
} else if (dst.Precision() == QUDA_HALF_PRECISION){
if(src.Precision() == QUDA_DOUBLE_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<short*>(Dst), static_cast<double*>(Src), static_cast<float*>(dstNorm), 0);
}else if(src.Precision() == QUDA_SINGLE_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<short*>(Dst), static_cast<float*>(Src), static_cast<float*>(dstNorm), 0);
}else if(src.Precision() == QUDA_HALF_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<short*>(Dst), static_cast<short*>(Src), static_cast<float*>(dstNorm), static_cast<float*>(srcNorm));
}else{
errorQuda("Unsupported Precision %d", src.Precision());
}
}else{
errorQuda("Unsupported Precision %d", dst.Precision());
}
}
} // quda
|
acb6559bf5839f7261d05b3bf95de0bb0ccdee28.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright 2017-2018 by Contributors
*/
#include <dmlc/parameter.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <xgboost/data.h>
#include <xgboost/predictor.h>
#include <xgboost/tree_model.h>
#include <xgboost/tree_updater.h>
#include <memory>
#include "../common/common.h"
#include "../common/device_helpers.cuh"
#include "../common/host_device_vector.h"
namespace xgboost {
namespace predictor {
DMLC_REGISTRY_FILE_TAG(gpu_predictor);
/*! \brief prediction parameters */
struct GPUPredictionParam : public dmlc::Parameter<GPUPredictionParam> {
int gpu_id;
int n_gpus;
// declare parameters
DMLC_DECLARE_PARAMETER(GPUPredictionParam) {
DMLC_DECLARE_FIELD(gpu_id).set_lower_bound(0).set_default(0).describe(
"Device ordinal for GPU prediction.");
DMLC_DECLARE_FIELD(n_gpus).set_lower_bound(-1).set_default(1).describe(
"Number of devices to use for prediction.");
}
};
DMLC_REGISTER_PARAMETER(GPUPredictionParam);
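// A minimal usage sketch for these parameters, assuming the usual dmlc key/value
// configuration path; the literal values are illustrative only:
//
// std::vector<std::pair<std::string, std::string>> cfg = {
// {"gpu_id", "0"}, // first visible device
// {"n_gpus", "1"} // predict on a single GPU
// };
// predictor->Init(cfg, cache); // forwarded to param_.InitAllowUnknown(cfg) below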
template <typename IterT>
void IncrementOffset(IterT begin_itr, IterT end_itr, size_t amount) {
thrust::transform(begin_itr, end_itr, begin_itr,
[=] __device__(size_t elem) { return elem + amount; });
}
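// A small usage sketch for IncrementOffset, assuming a thrust::device_vector of CSR row
// offsets that must be shifted by the number of entries already written by earlier
// shards; the variable names are illustrative only:
//
// thrust::device_vector<size_t> row_offsets; // per-shard row offsets
// size_t already_written = 0; // entries consumed by earlier shards
// IncrementOffset(row_offsets.begin(), row_offsets.end(), already_written);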
/**
* \struct DevicePredictionNode
*
* \brief Packed 16 byte representation of a tree node for use in device
* prediction
*/
struct DevicePredictionNode {
XGBOOST_DEVICE DevicePredictionNode()
: fidx{-1}, left_child_idx{-1}, right_child_idx{-1} {}
union NodeValue {
float leaf_weight;
float fvalue;
};
int fidx;
int left_child_idx;
int right_child_idx;
NodeValue val;
DevicePredictionNode(const RegTree::Node& n) { // NOLINT
static_assert(sizeof(DevicePredictionNode) == 16, "Size is not 16 bytes");
this->left_child_idx = n.LeftChild();
this->right_child_idx = n.RightChild();
this->fidx = n.SplitIndex();
if (n.DefaultLeft()) {
fidx |= (1U << 31);
}
if (n.IsLeaf()) {
this->val.leaf_weight = n.LeafValue();
} else {
this->val.fvalue = n.SplitCond();
}
}
XGBOOST_DEVICE bool IsLeaf() const { return left_child_idx == -1; }
XGBOOST_DEVICE int GetFidx() const { return fidx & ((1U << 31) - 1U); }
XGBOOST_DEVICE bool MissingLeft() const { return (fidx >> 31) != 0; }
XGBOOST_DEVICE int MissingIdx() const {
if (MissingLeft()) {
return this->left_child_idx;
} else {
return this->right_child_idx;
}
}
XGBOOST_DEVICE float GetFvalue() const { return val.fvalue; }
XGBOOST_DEVICE float GetWeight() const { return val.leaf_weight; }
};
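// A worked example of the fidx packing above, assuming a split on feature 5 with
// "missing goes left": fidx = 5 | (1U << 31), so MissingLeft() reads bit 31 (true) and
// GetFidx() masks it off again (5); the struct stays at 3 * sizeof(int) +
// sizeof(NodeValue) = 16 bytes, which is what the static_assert checks.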
struct ElementLoader {
bool use_shared;
common::Span<const size_t> d_row_ptr;
common::Span<const Entry> d_data;
int num_features;
float* smem;
size_t entry_start;
__device__ ElementLoader(bool use_shared, common::Span<const size_t> row_ptr,
common::Span<const Entry> entry, int num_features,
float* smem, int num_rows, size_t entry_start)
: use_shared(use_shared),
d_row_ptr(row_ptr),
d_data(entry),
num_features(num_features),
smem(smem),
entry_start(entry_start) {
// Copy instances
if (use_shared) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
int shared_elements = blockDim.x * num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
bst_uint elem_begin = d_row_ptr[global_idx];
bst_uint elem_end = d_row_ptr[global_idx + 1];
for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) {
Entry elem = d_data[elem_idx - entry_start];
smem[threadIdx.x * num_features + elem.index] = elem.fvalue;
}
}
__syncthreads();
}
}
__device__ float GetFvalue(int ridx, int fidx) {
if (use_shared) {
return smem[threadIdx.x * num_features + fidx];
} else {
// Binary search
auto begin_ptr = d_data.begin() + (d_row_ptr[ridx] - entry_start);
auto end_ptr = d_data.begin() + (d_row_ptr[ridx + 1] - entry_start);
common::Span<const Entry>::iterator previous_middle;
while (end_ptr != begin_ptr) {
auto middle = begin_ptr + (end_ptr - begin_ptr) / 2;
if (middle == previous_middle) {
break;
} else {
previous_middle = middle;
}
if (middle->index == fidx) {
return middle->fvalue;
} else if (middle->index < fidx) {
begin_ptr = middle;
} else {
end_ptr = middle;
}
}
// Value is missing
return nanf("");
}
}
};
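// A rough sizing example for the shared-memory fast path above (hypothetical numbers):
// with BLOCK_THREADS = 128 and num_features = 100 the dense cache needs
// 128 * 100 * sizeof(float) = 51200 bytes, so on devices whose shared-memory limit is
// smaller the launcher below falls back to use_shared = false and GetFvalue() does the
// binary search over the CSR row instead.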
__device__ float GetLeafWeight(bst_uint ridx, const DevicePredictionNode* tree,
ElementLoader* loader) {
DevicePredictionNode n = tree[0];
while (!n.IsLeaf()) {
float fvalue = loader->GetFvalue(ridx, n.GetFidx());
// Missing value
if (isnan(fvalue)) {
n = tree[n.MissingIdx()];
} else {
if (fvalue < n.GetFvalue()) {
n = tree[n.left_child_idx];
} else {
n = tree[n.right_child_idx];
}
}
}
return n.GetWeight();
}
template <int BLOCK_THREADS>
__global__ void PredictKernel(common::Span<const DevicePredictionNode> d_nodes,
common::Span<float> d_out_predictions,
common::Span<size_t> d_tree_segments,
common::Span<int> d_tree_group,
common::Span<const size_t> d_row_ptr,
common::Span<const Entry> d_data, size_t tree_begin,
size_t tree_end, size_t num_features,
size_t num_rows, size_t entry_start,
bool use_shared, int num_group) {
extern __shared__ float smem[];
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
ElementLoader loader(use_shared, d_row_ptr, d_data, num_features, smem,
num_rows, entry_start);
if (global_idx >= num_rows) return;
if (num_group == 1) {
float sum = 0;
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
const DevicePredictionNode* d_tree =
&d_nodes[d_tree_segments[tree_idx - tree_begin]];
sum += GetLeafWeight(global_idx, d_tree, &loader);
}
d_out_predictions[global_idx] += sum;
} else {
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
int tree_group = d_tree_group[tree_idx];
const DevicePredictionNode* d_tree =
&d_nodes[d_tree_segments[tree_idx - tree_begin]];
bst_uint out_prediction_idx = global_idx * num_group + tree_group;
d_out_predictions[out_prediction_idx] +=
GetLeafWeight(global_idx, d_tree, &loader);
}
}
}
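// A small indexing example for the multi-class branch above (hypothetical sizes): with
// num_group = 3, the tree belonging to class 2 evaluated on row global_idx = 10
// accumulates into d_out_predictions[10 * 3 + 2] = d_out_predictions[32], i.e. the
// output is laid out row-major as [row][class].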
class GPUPredictor : public xgboost::Predictor {
protected:
struct DevicePredictionCacheEntry {
std::shared_ptr<DMatrix> data;
HostDeviceVector<bst_float> predictions;
};
private:
void DeviceOffsets(const HostDeviceVector<size_t>& data, std::vector<size_t>* out_offsets) {
auto& offsets = *out_offsets;
offsets.resize(devices_.Size() + 1);
offsets[0] = 0;
#pragma omp parallel for schedule(static, 1) if (devices_.Size() > 1)
for (int shard = 0; shard < devices_.Size(); ++shard) {
int device = devices_.DeviceId(shard);
auto data_span = data.DeviceSpan(device);
dh::safe_cuda(hipSetDevice(device));
// copy the last element from every shard
dh::safe_cuda(hipMemcpy(&offsets.at(shard + 1),
&data_span[data_span.size()-1],
sizeof(size_t), hipMemcpyDeviceToHost));
}
}
struct DeviceShard {
DeviceShard() : device_{-1} {}
void Init(int device) {
this->device_ = device;
max_shared_memory_bytes_ = dh::MaxSharedMemory(this->device_);
}
void PredictInternal
(const SparsePage& batch, const MetaInfo& info,
HostDeviceVector<bst_float>* predictions,
const size_t batch_offset,
const gbm::GBTreeModel& model,
const thrust::host_vector<size_t>& h_tree_segments,
const thrust::host_vector<DevicePredictionNode>& h_nodes,
size_t tree_begin, size_t tree_end) {
dh::safe_cuda(hipSetDevice(device_));
nodes_.resize(h_nodes.size());
dh::safe_cuda(hipMemcpyAsync(dh::Raw(nodes_), h_nodes.data(),
sizeof(DevicePredictionNode) * h_nodes.size(),
hipMemcpyHostToDevice));
tree_segments_.resize(h_tree_segments.size());
dh::safe_cuda(hipMemcpyAsync(dh::Raw(tree_segments_), h_tree_segments.data(),
sizeof(size_t) * h_tree_segments.size(),
hipMemcpyHostToDevice));
tree_group_.resize(model.tree_info.size());
dh::safe_cuda(hipMemcpyAsync(dh::Raw(tree_group_), model.tree_info.data(),
sizeof(int) * model.tree_info.size(),
hipMemcpyHostToDevice));
const int BLOCK_THREADS = 128;
size_t num_rows = batch.offset.DeviceSize(device_) - 1;
if (num_rows < 1) { return; }
const int GRID_SIZE = static_cast<int>(dh::DivRoundUp(num_rows, BLOCK_THREADS));
int shared_memory_bytes = static_cast<int>
(sizeof(float) * info.num_col_ * BLOCK_THREADS);
bool use_shared = true;
if (shared_memory_bytes > max_shared_memory_bytes_) {
shared_memory_bytes = 0;
use_shared = false;
}
const auto& data_distr = batch.data.Distribution();
size_t entry_start = data_distr.ShardStart(batch.data.Size(),
data_distr.Devices().Index(device_));
hipLaunchKernelGGL(( PredictKernel<BLOCK_THREADS>), dim3(GRID_SIZE), dim3(BLOCK_THREADS), shared_memory_bytes, 0,
dh::ToSpan(nodes_), predictions->DeviceSpan(device_).subspan(batch_offset),
dh::ToSpan(tree_segments_), dh::ToSpan(tree_group_), batch.offset.DeviceSpan(device_),
batch.data.DeviceSpan(device_), tree_begin, tree_end, info.num_col_,
num_rows, entry_start, use_shared, model.param.num_output_group);
}
private:
int device_;
thrust::device_vector<DevicePredictionNode> nodes_;
thrust::device_vector<size_t> tree_segments_;
thrust::device_vector<int> tree_group_;
size_t max_shared_memory_bytes_;
};
void DevicePredictInternal(DMatrix* dmat,
HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model, size_t tree_begin,
size_t tree_end) {
if (tree_end - tree_begin == 0) { return; }
monitor_.StartCuda("DevicePredictInternal");
CHECK_EQ(model.param.size_leaf_vector, 0);
// Copy decision trees to device
thrust::host_vector<size_t> h_tree_segments;
h_tree_segments.reserve((tree_end - tree_begin) + 1);
size_t sum = 0;
h_tree_segments.push_back(sum);
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
sum += model.trees.at(tree_idx)->GetNodes().size();
h_tree_segments.push_back(sum);
}
thrust::host_vector<DevicePredictionNode> h_nodes(h_tree_segments.back());
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
auto& src_nodes = model.trees.at(tree_idx)->GetNodes();
std::copy(src_nodes.begin(), src_nodes.end(),
h_nodes.begin() + h_tree_segments[tree_idx - tree_begin]);
}
size_t i_batch = 0;
size_t batch_offset = 0;
for (auto &batch : dmat->GetRowBatches()) {
CHECK(i_batch == 0 || devices_.Size() == 1) << "External memory not supported for multi-GPU";
// out_preds have been sharded and resized in InitOutPredictions()
batch.offset.Shard(GPUDistribution::Overlap(devices_, 1));
std::vector<size_t> device_offsets;
DeviceOffsets(batch.offset, &device_offsets);
batch.data.Reshard(GPUDistribution::Explicit(devices_, device_offsets));
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.PredictInternal(batch, dmat->Info(), out_preds, batch_offset, model,
h_tree_segments, h_nodes, tree_begin, tree_end);
});
batch_offset += batch.Size() * model.param.num_output_group;
i_batch++;
}
monitor_.StopCuda("DevicePredictInternal");
}
public:
GPUPredictor() // NOLINT
: cpu_predictor_(Predictor::Create("cpu_predictor")) {} // NOLINT
void PredictBatch(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model, int tree_begin,
unsigned ntree_limit = 0) override {
GPUSet devices = GPUSet::All(
param_.gpu_id, param_.n_gpus, dmat->Info().num_row_);
ConfigureShards(devices);
if (this->PredictFromCache(dmat, out_preds, model, ntree_limit)) {
return;
}
this->InitOutPredictions(dmat->Info(), out_preds, model);
int tree_end = ntree_limit * model.param.num_output_group;
if (ntree_limit == 0 || ntree_limit > model.trees.size()) {
tree_end = static_cast<unsigned>(model.trees.size());
}
DevicePredictInternal(dmat, out_preds, model, tree_begin, tree_end);
}
protected:
void InitOutPredictions(const MetaInfo& info,
HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model) const {
size_t n_classes = model.param.num_output_group;
size_t n = n_classes * info.num_row_;
const HostDeviceVector<bst_float>& base_margin = info.base_margin_;
out_preds->Shard(GPUDistribution::Granular(devices_, n_classes));
out_preds->Resize(n);
if (base_margin.Size() != 0) {
CHECK_EQ(out_preds->Size(), n);
out_preds->Copy(base_margin);
} else {
out_preds->Fill(model.base_margin);
}
}
bool PredictFromCache(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model, unsigned ntree_limit) {
if (ntree_limit == 0 ||
ntree_limit * model.param.num_output_group >= model.trees.size()) {
auto it = cache_.find(dmat);
if (it != cache_.end()) {
const HostDeviceVector<bst_float>& y = it->second.predictions;
if (y.Size() != 0) {
monitor_.StartCuda("PredictFromCache");
out_preds->Shard(y.Distribution());
out_preds->Resize(y.Size());
out_preds->Copy(y);
monitor_.StopCuda("PredictFromCache");
return true;
}
}
}
return false;
}
void UpdatePredictionCache(
const gbm::GBTreeModel& model,
std::vector<std::unique_ptr<TreeUpdater>>* updaters,
int num_new_trees) override {
auto old_ntree = model.trees.size() - num_new_trees;
// update cache entry
for (auto& kv : cache_) {
PredictionCacheEntry& e = kv.second;
DMatrix* dmat = kv.first;
HostDeviceVector<bst_float>& predictions = e.predictions;
if (predictions.Size() == 0) {
this->InitOutPredictions(dmat->Info(), &predictions, model);
}
if (model.param.num_output_group == 1 && updaters->size() > 0 &&
num_new_trees == 1 &&
updaters->back()->UpdatePredictionCache(e.data.get(), &predictions)) {
// do nothing
} else {
DevicePredictInternal(dmat, &predictions, model, old_ntree, model.trees.size());
}
}
}
void PredictInstance(const SparsePage::Inst& inst,
std::vector<bst_float>* out_preds,
const gbm::GBTreeModel& model, unsigned ntree_limit,
unsigned root_index) override {
cpu_predictor_->PredictInstance(inst, out_preds, model, root_index);
}
void PredictLeaf(DMatrix* p_fmat, std::vector<bst_float>* out_preds,
const gbm::GBTreeModel& model,
unsigned ntree_limit) override {
cpu_predictor_->PredictLeaf(p_fmat, out_preds, model, ntree_limit);
}
void PredictContribution(DMatrix* p_fmat,
std::vector<bst_float>* out_contribs,
const gbm::GBTreeModel& model, unsigned ntree_limit,
bool approximate, int condition,
unsigned condition_feature) override {
cpu_predictor_->PredictContribution(p_fmat, out_contribs, model, ntree_limit,
approximate, condition,
condition_feature);
}
void PredictInteractionContributions(DMatrix* p_fmat,
std::vector<bst_float>* out_contribs,
const gbm::GBTreeModel& model,
unsigned ntree_limit,
bool approximate) override {
cpu_predictor_->PredictInteractionContributions(p_fmat, out_contribs, model,
ntree_limit, approximate);
}
void Init(const std::vector<std::pair<std::string, std::string>>& cfg,
const std::vector<std::shared_ptr<DMatrix>>& cache) override {
Predictor::Init(cfg, cache);
cpu_predictor_->Init(cfg, cache);
param_.InitAllowUnknown(cfg);
GPUSet devices = GPUSet::All(param_.gpu_id, param_.n_gpus);
ConfigureShards(devices);
}
private:
/*! \brief Reconfigure shards when the GPUSet is changed. */
void ConfigureShards(GPUSet devices) {
if (devices_ == devices) return;
devices_ = devices;
shards_.clear();
shards_.resize(devices_.Size());
dh::ExecuteIndexShards(&shards_, [=](size_t i, DeviceShard& shard){
shard.Init(devices_.DeviceId(i));
});
}
GPUPredictionParam param_;
std::unique_ptr<Predictor> cpu_predictor_;
std::vector<DeviceShard> shards_;
GPUSet devices_;
common::Monitor monitor_;
};
XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor")
.describe("Make predictions using GPU.")
.set_body([]() { return new GPUPredictor(); });
} // namespace predictor
} // namespace xgboost
| acb6559bf5839f7261d05b3bf95de0bb0ccdee28.cu | /*!
* Copyright 2017-2018 by Contributors
*/
#include <dmlc/parameter.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <xgboost/data.h>
#include <xgboost/predictor.h>
#include <xgboost/tree_model.h>
#include <xgboost/tree_updater.h>
#include <memory>
#include "../common/common.h"
#include "../common/device_helpers.cuh"
#include "../common/host_device_vector.h"
namespace xgboost {
namespace predictor {
DMLC_REGISTRY_FILE_TAG(gpu_predictor);
/*! \brief prediction parameters */
struct GPUPredictionParam : public dmlc::Parameter<GPUPredictionParam> {
int gpu_id;
int n_gpus;
// declare parameters
DMLC_DECLARE_PARAMETER(GPUPredictionParam) {
DMLC_DECLARE_FIELD(gpu_id).set_lower_bound(0).set_default(0).describe(
"Device ordinal for GPU prediction.");
DMLC_DECLARE_FIELD(n_gpus).set_lower_bound(-1).set_default(1).describe(
"Number of devices to use for prediction.");
}
};
DMLC_REGISTER_PARAMETER(GPUPredictionParam);
template <typename IterT>
void IncrementOffset(IterT begin_itr, IterT end_itr, size_t amount) {
thrust::transform(begin_itr, end_itr, begin_itr,
[=] __device__(size_t elem) { return elem + amount; });
}
/**
* \struct DevicePredictionNode
*
* \brief Packed 16 byte representation of a tree node for use in device
* prediction
*/
struct DevicePredictionNode {
XGBOOST_DEVICE DevicePredictionNode()
: fidx{-1}, left_child_idx{-1}, right_child_idx{-1} {}
union NodeValue {
float leaf_weight;
float fvalue;
};
int fidx;
int left_child_idx;
int right_child_idx;
NodeValue val;
DevicePredictionNode(const RegTree::Node& n) { // NOLINT
static_assert(sizeof(DevicePredictionNode) == 16, "Size is not 16 bytes");
this->left_child_idx = n.LeftChild();
this->right_child_idx = n.RightChild();
this->fidx = n.SplitIndex();
if (n.DefaultLeft()) {
fidx |= (1U << 31);
}
if (n.IsLeaf()) {
this->val.leaf_weight = n.LeafValue();
} else {
this->val.fvalue = n.SplitCond();
}
}
XGBOOST_DEVICE bool IsLeaf() const { return left_child_idx == -1; }
XGBOOST_DEVICE int GetFidx() const { return fidx & ((1U << 31) - 1U); }
XGBOOST_DEVICE bool MissingLeft() const { return (fidx >> 31) != 0; }
XGBOOST_DEVICE int MissingIdx() const {
if (MissingLeft()) {
return this->left_child_idx;
} else {
return this->right_child_idx;
}
}
XGBOOST_DEVICE float GetFvalue() const { return val.fvalue; }
XGBOOST_DEVICE float GetWeight() const { return val.leaf_weight; }
};
struct ElementLoader {
bool use_shared;
common::Span<const size_t> d_row_ptr;
common::Span<const Entry> d_data;
int num_features;
float* smem;
size_t entry_start;
__device__ ElementLoader(bool use_shared, common::Span<const size_t> row_ptr,
common::Span<const Entry> entry, int num_features,
float* smem, int num_rows, size_t entry_start)
: use_shared(use_shared),
d_row_ptr(row_ptr),
d_data(entry),
num_features(num_features),
smem(smem),
entry_start(entry_start) {
// Copy instances
if (use_shared) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
int shared_elements = blockDim.x * num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
bst_uint elem_begin = d_row_ptr[global_idx];
bst_uint elem_end = d_row_ptr[global_idx + 1];
for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) {
Entry elem = d_data[elem_idx - entry_start];
smem[threadIdx.x * num_features + elem.index] = elem.fvalue;
}
}
__syncthreads();
}
}
__device__ float GetFvalue(int ridx, int fidx) {
if (use_shared) {
return smem[threadIdx.x * num_features + fidx];
} else {
// Binary search
auto begin_ptr = d_data.begin() + (d_row_ptr[ridx] - entry_start);
auto end_ptr = d_data.begin() + (d_row_ptr[ridx + 1] - entry_start);
common::Span<const Entry>::iterator previous_middle;
while (end_ptr != begin_ptr) {
auto middle = begin_ptr + (end_ptr - begin_ptr) / 2;
if (middle == previous_middle) {
break;
} else {
previous_middle = middle;
}
if (middle->index == fidx) {
return middle->fvalue;
} else if (middle->index < fidx) {
begin_ptr = middle;
} else {
end_ptr = middle;
}
}
// Value is missing
return nanf("");
}
}
};
__device__ float GetLeafWeight(bst_uint ridx, const DevicePredictionNode* tree,
ElementLoader* loader) {
DevicePredictionNode n = tree[0];
while (!n.IsLeaf()) {
float fvalue = loader->GetFvalue(ridx, n.GetFidx());
// Missing value
if (isnan(fvalue)) {
n = tree[n.MissingIdx()];
} else {
if (fvalue < n.GetFvalue()) {
n = tree[n.left_child_idx];
} else {
n = tree[n.right_child_idx];
}
}
}
return n.GetWeight();
}
template <int BLOCK_THREADS>
__global__ void PredictKernel(common::Span<const DevicePredictionNode> d_nodes,
common::Span<float> d_out_predictions,
common::Span<size_t> d_tree_segments,
common::Span<int> d_tree_group,
common::Span<const size_t> d_row_ptr,
common::Span<const Entry> d_data, size_t tree_begin,
size_t tree_end, size_t num_features,
size_t num_rows, size_t entry_start,
bool use_shared, int num_group) {
extern __shared__ float smem[];
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
ElementLoader loader(use_shared, d_row_ptr, d_data, num_features, smem,
num_rows, entry_start);
if (global_idx >= num_rows) return;
if (num_group == 1) {
float sum = 0;
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
const DevicePredictionNode* d_tree =
&d_nodes[d_tree_segments[tree_idx - tree_begin]];
sum += GetLeafWeight(global_idx, d_tree, &loader);
}
d_out_predictions[global_idx] += sum;
} else {
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
int tree_group = d_tree_group[tree_idx];
const DevicePredictionNode* d_tree =
&d_nodes[d_tree_segments[tree_idx - tree_begin]];
bst_uint out_prediction_idx = global_idx * num_group + tree_group;
d_out_predictions[out_prediction_idx] +=
GetLeafWeight(global_idx, d_tree, &loader);
}
}
}
class GPUPredictor : public xgboost::Predictor {
protected:
struct DevicePredictionCacheEntry {
std::shared_ptr<DMatrix> data;
HostDeviceVector<bst_float> predictions;
};
private:
void DeviceOffsets(const HostDeviceVector<size_t>& data, std::vector<size_t>* out_offsets) {
auto& offsets = *out_offsets;
offsets.resize(devices_.Size() + 1);
offsets[0] = 0;
#pragma omp parallel for schedule(static, 1) if (devices_.Size() > 1)
for (int shard = 0; shard < devices_.Size(); ++shard) {
int device = devices_.DeviceId(shard);
auto data_span = data.DeviceSpan(device);
dh::safe_cuda(cudaSetDevice(device));
// copy the last element from every shard
dh::safe_cuda(cudaMemcpy(&offsets.at(shard + 1),
&data_span[data_span.size()-1],
sizeof(size_t), cudaMemcpyDeviceToHost));
}
}
struct DeviceShard {
DeviceShard() : device_{-1} {}
void Init(int device) {
this->device_ = device;
max_shared_memory_bytes_ = dh::MaxSharedMemory(this->device_);
}
void PredictInternal
(const SparsePage& batch, const MetaInfo& info,
HostDeviceVector<bst_float>* predictions,
const size_t batch_offset,
const gbm::GBTreeModel& model,
const thrust::host_vector<size_t>& h_tree_segments,
const thrust::host_vector<DevicePredictionNode>& h_nodes,
size_t tree_begin, size_t tree_end) {
dh::safe_cuda(cudaSetDevice(device_));
nodes_.resize(h_nodes.size());
dh::safe_cuda(cudaMemcpyAsync(dh::Raw(nodes_), h_nodes.data(),
sizeof(DevicePredictionNode) * h_nodes.size(),
cudaMemcpyHostToDevice));
tree_segments_.resize(h_tree_segments.size());
dh::safe_cuda(cudaMemcpyAsync(dh::Raw(tree_segments_), h_tree_segments.data(),
sizeof(size_t) * h_tree_segments.size(),
cudaMemcpyHostToDevice));
tree_group_.resize(model.tree_info.size());
dh::safe_cuda(cudaMemcpyAsync(dh::Raw(tree_group_), model.tree_info.data(),
sizeof(int) * model.tree_info.size(),
cudaMemcpyHostToDevice));
const int BLOCK_THREADS = 128;
size_t num_rows = batch.offset.DeviceSize(device_) - 1;
if (num_rows < 1) { return; }
const int GRID_SIZE = static_cast<int>(dh::DivRoundUp(num_rows, BLOCK_THREADS));
int shared_memory_bytes = static_cast<int>
(sizeof(float) * info.num_col_ * BLOCK_THREADS);
bool use_shared = true;
if (shared_memory_bytes > max_shared_memory_bytes_) {
shared_memory_bytes = 0;
use_shared = false;
}
const auto& data_distr = batch.data.Distribution();
size_t entry_start = data_distr.ShardStart(batch.data.Size(),
data_distr.Devices().Index(device_));
PredictKernel<BLOCK_THREADS><<<GRID_SIZE, BLOCK_THREADS, shared_memory_bytes>>>
(dh::ToSpan(nodes_), predictions->DeviceSpan(device_).subspan(batch_offset),
dh::ToSpan(tree_segments_), dh::ToSpan(tree_group_), batch.offset.DeviceSpan(device_),
batch.data.DeviceSpan(device_), tree_begin, tree_end, info.num_col_,
num_rows, entry_start, use_shared, model.param.num_output_group);
}
private:
int device_;
thrust::device_vector<DevicePredictionNode> nodes_;
thrust::device_vector<size_t> tree_segments_;
thrust::device_vector<int> tree_group_;
size_t max_shared_memory_bytes_;
};
void DevicePredictInternal(DMatrix* dmat,
HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model, size_t tree_begin,
size_t tree_end) {
if (tree_end - tree_begin == 0) { return; }
monitor_.StartCuda("DevicePredictInternal");
CHECK_EQ(model.param.size_leaf_vector, 0);
// Copy decision trees to device
thrust::host_vector<size_t> h_tree_segments;
h_tree_segments.reserve((tree_end - tree_begin) + 1);
size_t sum = 0;
h_tree_segments.push_back(sum);
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
sum += model.trees.at(tree_idx)->GetNodes().size();
h_tree_segments.push_back(sum);
}
thrust::host_vector<DevicePredictionNode> h_nodes(h_tree_segments.back());
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
auto& src_nodes = model.trees.at(tree_idx)->GetNodes();
std::copy(src_nodes.begin(), src_nodes.end(),
h_nodes.begin() + h_tree_segments[tree_idx - tree_begin]);
}
size_t i_batch = 0;
size_t batch_offset = 0;
for (auto &batch : dmat->GetRowBatches()) {
CHECK(i_batch == 0 || devices_.Size() == 1) << "External memory not supported for multi-GPU";
// out_preds have been sharded and resized in InitOutPredictions()
batch.offset.Shard(GPUDistribution::Overlap(devices_, 1));
std::vector<size_t> device_offsets;
DeviceOffsets(batch.offset, &device_offsets);
batch.data.Reshard(GPUDistribution::Explicit(devices_, device_offsets));
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.PredictInternal(batch, dmat->Info(), out_preds, batch_offset, model,
h_tree_segments, h_nodes, tree_begin, tree_end);
});
batch_offset += batch.Size() * model.param.num_output_group;
i_batch++;
}
monitor_.StopCuda("DevicePredictInternal");
}
public:
GPUPredictor() // NOLINT
: cpu_predictor_(Predictor::Create("cpu_predictor")) {} // NOLINT
void PredictBatch(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model, int tree_begin,
unsigned ntree_limit = 0) override {
GPUSet devices = GPUSet::All(
param_.gpu_id, param_.n_gpus, dmat->Info().num_row_);
ConfigureShards(devices);
if (this->PredictFromCache(dmat, out_preds, model, ntree_limit)) {
return;
}
this->InitOutPredictions(dmat->Info(), out_preds, model);
int tree_end = ntree_limit * model.param.num_output_group;
if (ntree_limit == 0 || ntree_limit > model.trees.size()) {
tree_end = static_cast<unsigned>(model.trees.size());
}
DevicePredictInternal(dmat, out_preds, model, tree_begin, tree_end);
}
protected:
void InitOutPredictions(const MetaInfo& info,
HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model) const {
size_t n_classes = model.param.num_output_group;
size_t n = n_classes * info.num_row_;
const HostDeviceVector<bst_float>& base_margin = info.base_margin_;
out_preds->Shard(GPUDistribution::Granular(devices_, n_classes));
out_preds->Resize(n);
if (base_margin.Size() != 0) {
CHECK_EQ(out_preds->Size(), n);
out_preds->Copy(base_margin);
} else {
out_preds->Fill(model.base_margin);
}
}
bool PredictFromCache(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model, unsigned ntree_limit) {
if (ntree_limit == 0 ||
ntree_limit * model.param.num_output_group >= model.trees.size()) {
auto it = cache_.find(dmat);
if (it != cache_.end()) {
const HostDeviceVector<bst_float>& y = it->second.predictions;
if (y.Size() != 0) {
monitor_.StartCuda("PredictFromCache");
out_preds->Shard(y.Distribution());
out_preds->Resize(y.Size());
out_preds->Copy(y);
monitor_.StopCuda("PredictFromCache");
return true;
}
}
}
return false;
}
void UpdatePredictionCache(
const gbm::GBTreeModel& model,
std::vector<std::unique_ptr<TreeUpdater>>* updaters,
int num_new_trees) override {
auto old_ntree = model.trees.size() - num_new_trees;
// update cache entry
for (auto& kv : cache_) {
PredictionCacheEntry& e = kv.second;
DMatrix* dmat = kv.first;
HostDeviceVector<bst_float>& predictions = e.predictions;
if (predictions.Size() == 0) {
this->InitOutPredictions(dmat->Info(), &predictions, model);
}
if (model.param.num_output_group == 1 && updaters->size() > 0 &&
num_new_trees == 1 &&
updaters->back()->UpdatePredictionCache(e.data.get(), &predictions)) {
// do nothing
} else {
DevicePredictInternal(dmat, &predictions, model, old_ntree, model.trees.size());
}
}
}
void PredictInstance(const SparsePage::Inst& inst,
std::vector<bst_float>* out_preds,
const gbm::GBTreeModel& model, unsigned ntree_limit,
unsigned root_index) override {
cpu_predictor_->PredictInstance(inst, out_preds, model, root_index);
}
void PredictLeaf(DMatrix* p_fmat, std::vector<bst_float>* out_preds,
const gbm::GBTreeModel& model,
unsigned ntree_limit) override {
cpu_predictor_->PredictLeaf(p_fmat, out_preds, model, ntree_limit);
}
void PredictContribution(DMatrix* p_fmat,
std::vector<bst_float>* out_contribs,
const gbm::GBTreeModel& model, unsigned ntree_limit,
bool approximate, int condition,
unsigned condition_feature) override {
cpu_predictor_->PredictContribution(p_fmat, out_contribs, model, ntree_limit,
approximate, condition,
condition_feature);
}
void PredictInteractionContributions(DMatrix* p_fmat,
std::vector<bst_float>* out_contribs,
const gbm::GBTreeModel& model,
unsigned ntree_limit,
bool approximate) override {
cpu_predictor_->PredictInteractionContributions(p_fmat, out_contribs, model,
ntree_limit, approximate);
}
void Init(const std::vector<std::pair<std::string, std::string>>& cfg,
const std::vector<std::shared_ptr<DMatrix>>& cache) override {
Predictor::Init(cfg, cache);
cpu_predictor_->Init(cfg, cache);
param_.InitAllowUnknown(cfg);
GPUSet devices = GPUSet::All(param_.gpu_id, param_.n_gpus);
ConfigureShards(devices);
}
private:
/*! \brief Reconfigure shards when the GPUSet is changed. */
void ConfigureShards(GPUSet devices) {
if (devices_ == devices) return;
devices_ = devices;
shards_.clear();
shards_.resize(devices_.Size());
dh::ExecuteIndexShards(&shards_, [=](size_t i, DeviceShard& shard){
shard.Init(devices_.DeviceId(i));
});
}
GPUPredictionParam param_;
std::unique_ptr<Predictor> cpu_predictor_;
std::vector<DeviceShard> shards_;
GPUSet devices_;
common::Monitor monitor_;
};
XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor")
.describe("Make predictions using GPU.")
.set_body([]() { return new GPUPredictor(); });
} // namespace predictor
} // namespace xgboost
|
8455f07546629cf36e069fe1e25aa2031446b029.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <chrono>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "bbcu/bbcu.h"
#include "bbcu/bbcu_util.h"
//////////////////////////////
// forward
//////////////////////////////
__global__ void kernal_fp32_Col2Im_Forward(
const float* x_buf,
float* y_buf,
int hw_size,
int c_size,
int output_frame_size,
int output_frame_stride,
int input_frame_stride
)
{
int output_frame = blockDim.x * blockIdx.x + threadIdx.x;
int xy = blockDim.y * blockIdx.y + threadIdx.y;
int c = blockDim.z * blockIdx.z + threadIdx.z;
int output_node = c * hw_size + xy;
if (output_frame < output_frame_size && xy < hw_size ) {
int input_frame = output_frame * hw_size + xy;
int input_node = c;
y_buf[output_node * output_frame_stride + output_frame] = x_buf[input_node * input_frame_stride + input_frame];
}
}
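// A small index-mapping example for the kernel above (hypothetical sizes): with
// h_size = w_size = 2 (hw_size = 4), the thread at (output_frame = 3, xy = 1, c = 2)
// reads x_buf[2 * input_frame_stride + 3*4 + 1] and writes
// y_buf[(2*4 + 1) * output_frame_stride + 3], i.e. the column-format frame index
// output_frame * hw_size + xy is folded back into the per-node frame dimension.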
BBCU_DLL_EXPORT int bbcu_fp32_Col2Im_Forward
(
float const *dev_x_buf,
float *dev_y_buf,
int w_size,
int h_size,
int c_size,
int input_frame_stride,
int output_frame_size,
int output_frame_stride,
hipStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
int hw_size = h_size * w_size;
dim3 block(32, 32, 1);
dim3 grid((output_frame_size+31)/32, (hw_size+31)/32, c_size);
hipLaunchKernelGGL(( kernal_fp32_Col2Im_Forward), dim3(grid), dim3(block), 0, streamId,
dev_x_buf,
dev_y_buf,
hw_size,
c_size,
output_frame_size,
output_frame_stride,
input_frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
__global__ void kernal_bit_Col2Im_Forward(
int const *x_buf,
int *y_buf,
int hw_size,
int c_size,
int output_frame_size,
int output_frame_stride,
int input_frame_stride
)
{
int output_frame_unit = blockDim.x * blockIdx.x + threadIdx.x;
int xy = blockDim.y * blockIdx.y + threadIdx.y;
if ( output_frame_unit < output_frame_stride && xy < hw_size ) {
int c = blockDim.z * blockIdx.z + threadIdx.z;
int output_node = c * hw_size + xy;
int y = 0;
for ( int i = 0; i < 32; ++i ) {
int output_frame = output_frame_unit * 32 + i;
if (output_frame < output_frame_size && xy < hw_size ) {
int input_frame = output_frame * hw_size + xy;
int input_node = c;
int const *x_ptr = &x_buf[input_node * input_frame_stride];
int x = ((x_ptr[input_frame / 32] >> (input_frame % 32)) & 1);
y |= (x << i);
}
}
int *y_ptr = &y_buf[output_node * output_frame_stride];
y_ptr[output_frame_unit] = y;
}
}
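// A small packing example for the bit variant above: output frame f of a node is stored
// as bit (f % 32) of word f / 32, so e.g. frame 70 of output_node n lives at bit 6 of
// y_buf[n * output_frame_stride + 2]; the i loop above rebuilds one such 32-frame word
// per thread.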
BBCU_DLL_EXPORT int bbcu_bit_Col2Im_Forward
(
int const *dev_x_buf,
int *dev_y_buf,
int w_size,
int h_size,
int c_size,
int input_frame_stride,
int output_frame_size,
int output_frame_stride,
hipStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
int hw_size = h_size * w_size;
// Process in 32-bit units (32 output frames are packed into each int)
int output_frame_unit = (output_frame_size + 31) / 32 ;
dim3 block(32, 32, 1);
dim3 grid((output_frame_unit+31)/32, (hw_size+31)/32, c_size);
hipLaunchKernelGGL(( kernal_bit_Col2Im_Forward), dim3(grid), dim3(block), 0, streamId,
dev_x_buf,
dev_y_buf,
hw_size,
c_size,
output_frame_size,
output_frame_stride,
input_frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
//////////////////////////////
// backward
//////////////////////////////
__global__ void kernal_fp32_Col2Im_Backward(
const float* dy_buf,
float* dx_buf,
int hw_size,
int c_size,
int output_frame_size,
int output_frame_stride,
int input_frame_stride
)
{
int output_frame = blockDim.x * blockIdx.x + threadIdx.x;
int xy = blockDim.y * blockIdx.y + threadIdx.y;
int c = blockDim.z * blockIdx.z + threadIdx.z;
if (output_frame < output_frame_size && xy < hw_size ) {
int output_node = c * hw_size + xy;
int input_frame = output_frame * hw_size + xy;
int input_node = c;
dx_buf[input_node * input_frame_stride + input_frame] = dy_buf[output_node * output_frame_stride + output_frame];
}
}
BBCU_DLL_EXPORT int bbcu_fp32_Col2Im_Backward
(
float const *dev_dy_buf,
float *dev_dx_buf,
int w_size,
int h_size,
int c_size,
int input_frame_stride,
int output_frame_size,
int output_frame_stride,
hipStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
int hw_size = h_size * w_size;
dim3 block(32, 32, 1);
dim3 grid((output_frame_size+31)/32, (hw_size+31)/32, c_size);
hipLaunchKernelGGL(( kernal_fp32_Col2Im_Backward), dim3(grid), dim3(block), 0, streamId,
dev_dy_buf,
dev_dx_buf,
hw_size,
c_size,
output_frame_size,
output_frame_stride,
input_frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
| 8455f07546629cf36e069fe1e25aa2031446b029.cu | #include <iostream>
#include <chrono>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "bbcu/bbcu.h"
#include "bbcu/bbcu_util.h"
//////////////////////////////
// forward
//////////////////////////////
__global__ void kernal_fp32_Col2Im_Forward(
const float* x_buf,
float* y_buf,
int hw_size,
int c_size,
int output_frame_size,
int output_frame_stride,
int input_frame_stride
)
{
int output_frame = blockDim.x * blockIdx.x + threadIdx.x;
int xy = blockDim.y * blockIdx.y + threadIdx.y;
int c = blockDim.z * blockIdx.z + threadIdx.z;
int output_node = c * hw_size + xy;
if (output_frame < output_frame_size && xy < hw_size ) {
int input_frame = output_frame * hw_size + xy;
int input_node = c;
y_buf[output_node * output_frame_stride + output_frame] = x_buf[input_node * input_frame_stride + input_frame];
}
}
BBCU_DLL_EXPORT int bbcu_fp32_Col2Im_Forward
(
float const *dev_x_buf,
float *dev_y_buf,
int w_size,
int h_size,
int c_size,
int input_frame_stride,
int output_frame_size,
int output_frame_stride,
cudaStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
int hw_size = h_size * w_size;
dim3 block(32, 32, 1);
dim3 grid((output_frame_size+31)/32, (hw_size+31)/32, c_size);
kernal_fp32_Col2Im_Forward<<<grid, block, 0, streamId>>>(
dev_x_buf,
dev_y_buf,
hw_size,
c_size,
output_frame_size,
output_frame_stride,
input_frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
__global__ void kernal_bit_Col2Im_Forward(
int const *x_buf,
int *y_buf,
int hw_size,
int c_size,
int output_frame_size,
int output_frame_stride,
int input_frame_stride
)
{
int output_frame_unit = blockDim.x * blockIdx.x + threadIdx.x;
int xy = blockDim.y * blockIdx.y + threadIdx.y;
if ( output_frame_unit < output_frame_stride && xy < hw_size ) {
int c = blockDim.z * blockIdx.z + threadIdx.z;
int output_node = c * hw_size + xy;
int y = 0;
for ( int i = 0; i < 32; ++i ) {
int output_frame = output_frame_unit * 32 + i;
if (output_frame < output_frame_size && xy < hw_size ) {
int input_frame = output_frame * hw_size + xy;
int input_node = c;
int const *x_ptr = &x_buf[input_node * input_frame_stride];
int x = ((x_ptr[input_frame / 32] >> (input_frame % 32)) & 1);
y |= (x << i);
}
}
int *y_ptr = &y_buf[output_node * output_frame_stride];
y_ptr[output_frame_unit] = y;
}
}
BBCU_DLL_EXPORT int bbcu_bit_Col2Im_Forward
(
int const *dev_x_buf,
int *dev_y_buf,
int w_size,
int h_size,
int c_size,
int input_frame_stride,
int output_frame_size,
int output_frame_stride,
cudaStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
int hw_size = h_size * w_size;
// Process in 32-bit units (32 output frames are packed into each int)
int output_frame_unit = (output_frame_size + 31) / 32 ;
dim3 block(32, 32, 1);
dim3 grid((output_frame_unit+31)/32, (hw_size+31)/32, c_size);
kernal_bit_Col2Im_Forward<<<grid, block, 0, streamId>>>(
dev_x_buf,
dev_y_buf,
hw_size,
c_size,
output_frame_size,
output_frame_stride,
input_frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
//////////////////////////////
// backward
//////////////////////////////
__global__ void kernal_fp32_Col2Im_Backward(
const float* dy_buf,
float* dx_buf,
int hw_size,
int c_size,
int output_frame_size,
int output_frame_stride,
int input_frame_stride
)
{
int output_frame = blockDim.x * blockIdx.x + threadIdx.x;
int xy = blockDim.y * blockIdx.y + threadIdx.y;
int c = blockDim.z * blockIdx.z + threadIdx.z;
if (output_frame < output_frame_size && xy < hw_size ) {
int output_node = c * hw_size + xy;
int input_frame = output_frame * hw_size + xy;
int input_node = c;
dx_buf[input_node * input_frame_stride + input_frame] = dy_buf[output_node * output_frame_stride + output_frame];
}
}
BBCU_DLL_EXPORT int bbcu_fp32_Col2Im_Backward
(
float const *dev_dy_buf,
float *dev_dx_buf,
int w_size,
int h_size,
int c_size,
int input_frame_stride,
int output_frame_size,
int output_frame_stride,
cudaStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
int hw_size = h_size * w_size;
dim3 block(32, 32, 1);
dim3 grid((output_frame_size+31)/32, (hw_size+31)/32, c_size);
kernal_fp32_Col2Im_Backward<<<grid, block, 0, streamId>>>(
dev_dy_buf,
dev_dx_buf,
hw_size,
c_size,
output_frame_size,
output_frame_stride,
input_frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
|
3bca74964793fcd608e8b39903e2fda4dde9ef1e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
* Implementations
*/
__global__ void ca_backward_kernel_t(const float *dw, const float *t, const float *f, float *dt, int num, int chn, int height, int width) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int sp = height * width;
int len = height + width - 1;
int plane = blockIdx.z;
if (x < width && y < height && plane < chn) {
for (int batch = 0; batch < num; ++batch) {
for (int i = 0; i < width; ++i) {
float _dw = dw[(batch * len + i) * sp + y*width + x];
float _f = f[(batch * chn + plane) * sp + y*width + i];
dt[(batch * chn + plane) * sp + y*width + x] += _dw * _f;
}
for (int i = 0; i < height; ++i) {
if (i == y) continue;
int j = i<y ? i : i-1;
float _dw = dw[(batch * len + width + j) * sp + y*width + x];
float _f = f[(batch * chn + plane) * sp + i*width + x];
dt[(batch * chn + plane) * sp + y*width + x] += _dw * _f;
}
}
}
} | 3bca74964793fcd608e8b39903e2fda4dde9ef1e.cu | #include "includes.h"
/*
* Implementations
*/
__global__ void ca_backward_kernel_t(const float *dw, const float *t, const float *f, float *dt, int num, int chn, int height, int width) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int sp = height * width;
int len = height + width - 1;
int plane = blockIdx.z;
if (x < width && y < height && plane < chn) {
for (int batch = 0; batch < num; ++batch) {
for (int i = 0; i < width; ++i) {
float _dw = dw[(batch * len + i) * sp + y*width + x];
float _f = f[(batch * chn + plane) * sp + y*width + i];
dt[(batch * chn + plane) * sp + y*width + x] += _dw * _f;
}
for (int i = 0; i < height; ++i) {
if (i == y) continue;
int j = i<y ? i : i-1;
float _dw = dw[(batch * len + width + j) * sp + y*width + x];
float _f = f[(batch * chn + plane) * sp + i*width + x];
dt[(batch * chn + plane) * sp + y*width + x] += _dw * _f;
}
}
}
} |
d4f8368f8b590bd2f5b348c36e3ec1a104ec941b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <hiprand/hiprand.h>
#include <ctime>
#include <assert.h>
// Define some error checking macros.
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
void cudaErrCheck_(hipError_t stat, const char *file, int line) {
if (stat != hipSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", hipGetErrorString(stat), file, line);
}
}
#define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); }
void curandErrCheck_(hiprandStatus_t stat, const char *file, int line) {
if (stat != HIPRAND_STATUS_SUCCESS) {
fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line);
}
}
#include <mma.h>
using namespace nvcuda;
//enum MatrixLayout{
#define ROW_MAJOR 0
#define COL_MAJOR 1
//};
//ONLY THE PARAMETERS HERE NEED TO BE CHANGED
// Matrix dimensions must be multiples of the WMMA tile sizes (WMMA_M, WMMA_N, WMMA_K) below for the wmma code to work
#define MATRIX_M (8)
#define MATRIX_N (32)
#define MATRIX_K (16)
const int WMMA_M =8;
const int WMMA_N =32;
const int WMMA_K =16;
typedef half atype;
typedef half btype;
typedef half ctype;
typedef half dtype;
typedef float host_type;
#define A_LAYOUT COL_MAJOR
#define B_LAYOUT COL_MAJOR
#define C_LAYOUT COL_MAJOR
#define D_LAYOUT COL_MAJOR
#define NUM_CTA 1
#define WARP_IN_CTA 1
//Don't change anything after here
#define THREAD_IN_WARP 32
#if A_LAYOUT==ROW_MAJOR
#define LAYOUT_A wmma::row_major
#define A_STRIDE MATRIX_K
#else
#define LAYOUT_A wmma::col_major
#define A_STRIDE MATRIX_M
#endif
#if B_LAYOUT==ROW_MAJOR
#define LAYOUT_B wmma::row_major
#define B_STRIDE MATRIX_N
#else
#define LAYOUT_B wmma::col_major
#define B_STRIDE MATRIX_K
#endif
#if C_LAYOUT==ROW_MAJOR
#define LAYOUT_C wmma::mem_row_major
#define C_STRIDE MATRIX_N
#else
#define LAYOUT_C wmma::mem_col_major
#define C_STRIDE MATRIX_M
#endif
#if D_LAYOUT==ROW_MAJOR
#define LAYOUT_D wmma::mem_row_major
#define D_STRIDE MATRIX_N
#else
#define LAYOUT_D wmma::mem_col_major
#define D_STRIDE MATRIX_M
#endif
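// A concrete stride example under the settings above (all four matrices COL_MAJOR):
// A is MATRIX_M x MATRIX_K = 8 x 16 stored column-major, so its leading dimension is
// A_STRIDE = MATRIX_M = 8 and element (row r, col k) sits at a[k*8 + r]; likewise
// B_STRIDE = MATRIX_K = 16 and C_STRIDE = D_STRIDE = MATRIX_M = 8.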
enum MatrixInitializationType{
ZERO,
ONE,
RANDOM,
IDENTITY,
LINEAR
};
int get_value(MatrixInitializationType init_type,int randomRange=3,bool RESET=false){
static int val=0;
switch(init_type){
case ZERO:
break;
case ONE:
val=1;
break;
case RANDOM:
val=rand()%randomRange;
break;
case LINEAR:
val++;
break;
default :
printf("illegal MatrixInitializationType\n");
abort();
break;
}
if(RESET)
val=0;
return val;
}
template <typename T>
void print_matrix(T *matrix,int row_size,int col_size,int/*MatrixLayout*/ layout){
for(int row=0;row<row_size;row++){
for(int col=0;col<col_size;col++){
T val;
if(layout==ROW_MAJOR)
val=matrix[row*col_size+col];
else
val=matrix[col*row_size+row];
printf("%.2f ",static_cast<float>(val));
}
printf(";\n");
}
}
template <typename T>
void initialize_matrix(T *matrix,int row_size,int col_size,int/*MatrixLayout*/ layout,MatrixInitializationType init_type){
for(int row=0;row<row_size;row++){
for(int col=0;col<col_size;col++){
if(init_type==IDENTITY){
assert(row_size==col_size);//identity initialization is only defined for square matrices
matrix[row*row_size+col]=static_cast<T>(row==col ? 1 : 0);//ones on the diagonal, zeros elsewhere
}
else{
if(layout==ROW_MAJOR){
matrix[row*col_size+col]=static_cast<T>(get_value(init_type));
}
else{
matrix[col*row_size+row]=static_cast<T>(get_value(init_type));
}
}
}
}
get_value(init_type,10,true);//resetting the val counter
print_matrix<T>(matrix,row_size,col_size,layout);
}
int get_index(int row,int col,int row_size,int col_size,int/*MatrixLayout*/ layout){
int index=0;
if(layout==ROW_MAJOR){
index=row*col_size+col;
}
else{
index=col*row_size+row;
}
return index;
}
template <typename T>
void matrix_multiply(T *result_matrix, T *matrix_a,T* matrix_b,T *matrix_c,int M,int N,int K,int/*MatrixLayout*/ resultlayout,int/*MatrixLayout*/ alayout,int/*MatrixLayout*/ blayout,int/*MatrixLayout*/ clayout){
for(int row=0;row<M;row++){
for(int col=0;col<N;col++){
int rindex=get_index(row,col,M,N,resultlayout);
int cindex=get_index(row,col,M,N,clayout);
for(int k=0;k<K;k++){
int aindex=get_index(row,k,M,K,alayout);
int bindex=get_index(k,col,K,N,blayout);
result_matrix[rindex]+=matrix_a[aindex]*matrix_b[bindex];
}
result_matrix[rindex]+=matrix_c[cindex];
}
}
print_matrix<T>(result_matrix,M,N,resultlayout);
}
template <typename T>
void compare_matrix(T *matrix_a, T *matrix_b,int row_size,int col_size,int/*MatrixLayout*/ alayout,int/*MatrixLayout*/ blayout){
for(int row=0;row<row_size;row++){
for(int col=0;col<col_size;col++){
int index_a,index_b;
index_a=get_index(row,col,row_size,col_size,alayout);
index_b=get_index(row,col,row_size,col_size,alayout);
if(matrix_a[index_a]!=matrix_b[index_b])
printf("ERROR at index row=%d col=%d\n",row,col);
}
}
}
__global__ void wmma_example(atype *a, btype *b, ctype *c,dtype *d)
{
float t;
// Declare the fragments
wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, atype , LAYOUT_A> a_frag;
wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, btype , LAYOUT_B> b_frag;
wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, ctype> c_frag;
// Load the A, B and C tiles into fragments (the matrices here are exactly one WMMA tile, so no bounds checking is needed)
wmma::load_matrix_sync(a_frag, a, A_STRIDE);
wmma::load_matrix_sync(b_frag, b, B_STRIDE);
wmma::load_matrix_sync(c_frag, c, C_STRIDE,LAYOUT_C);
for(int i=0; i < a_frag.num_elements; i++) {
t=static_cast<float>(a_frag.x[i]);
printf("A_THREAD%d: %.2f \n",threadIdx.x,t);
}
for(int i=0; i < b_frag.num_elements; i++) {
t=static_cast<float>(b_frag.x[i]);
printf("B_THREAD%d: %.2f \n",threadIdx.x,t);
}
for(int i=0; i < c_frag.num_elements; i++) {
t=static_cast<float>(c_frag.x[i]);
printf("C_THREAD%d: %.2f \n",threadIdx.x,t);
}
wmma::mma_sync(c_frag, a_frag, b_frag, c_frag);
wmma::store_matrix_sync(d, c_frag, D_STRIDE, LAYOUT_D);
}
template <typename T1,typename T2>
__global__ void convert(T1 *out, T2 *in, int n) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < n) {
out[idx] = in[idx];
}
}
int main(int argc, char* argv[]) {
//data on device in host type format
host_type *a_htype;
host_type *b_htype;
host_type *c_htype;
host_type *d_htype;
//data on device in gemm format
atype *a_atype;
btype *b_btype;
ctype *c_ctype;
dtype *d_dtype;
srand(time(NULL));
host_type *a_host_wmma;
host_type *b_host_wmma;
host_type *c_host_wmma;
host_type *d_host_wmma;
host_type *d_cal_host_wmma;
hipEvent_t startWMMA;
hipEvent_t stopWMMA;
cudaErrCheck(hipEventCreate(&startWMMA));
cudaErrCheck(hipEventCreate(&stopWMMA));
// Use tensor cores
cudaErrCheck(hipMalloc((void**)&a_htype, MATRIX_M * MATRIX_K * sizeof(host_type)));
cudaErrCheck(hipMalloc((void**)&b_htype, MATRIX_K * MATRIX_N * sizeof(host_type)));
cudaErrCheck(hipMalloc((void**)&c_htype, MATRIX_M * MATRIX_N * sizeof(host_type)));
cudaErrCheck(hipMalloc((void**)&d_htype, MATRIX_M * MATRIX_N * sizeof(host_type)));
cudaErrCheck(hipMalloc((void**)&a_atype, MATRIX_M * MATRIX_K * sizeof(atype)));
cudaErrCheck(hipMalloc((void**)&b_btype, MATRIX_K * MATRIX_N * sizeof(btype)));
cudaErrCheck(hipMalloc((void**)&c_ctype, MATRIX_M * MATRIX_N * sizeof(ctype)));
cudaErrCheck(hipMalloc((void**)&d_dtype, MATRIX_M * MATRIX_N * sizeof(dtype)));
a_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_K * sizeof(host_type));
b_host_wmma = (host_type*)malloc(MATRIX_K * MATRIX_N * sizeof(host_type));
c_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type));
d_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type));
d_cal_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type));
printf("a_host\n");
initialize_matrix<host_type>(a_host_wmma,MATRIX_M,MATRIX_K,A_LAYOUT,LINEAR);
printf("b_host\n");
initialize_matrix<host_type>(b_host_wmma,MATRIX_K,MATRIX_N,B_LAYOUT,LINEAR);
printf("c_host\n");
initialize_matrix<host_type>(c_host_wmma,MATRIX_M,MATRIX_N,C_LAYOUT,LINEAR);
printf("d_cal_host\n");
initialize_matrix<host_type>(d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT,ZERO);
printf("d_cal_host\n");
matrix_multiply<host_type>(d_cal_host_wmma,a_host_wmma,b_host_wmma,c_host_wmma,MATRIX_M,MATRIX_N,MATRIX_K,D_LAYOUT,A_LAYOUT,B_LAYOUT,C_LAYOUT);
cudaErrCheck(hipMemcpy(a_htype,a_host_wmma, MATRIX_M * MATRIX_K * sizeof(host_type), hipMemcpyHostToDevice));
cudaErrCheck(hipMemcpy(b_htype,b_host_wmma, MATRIX_K * MATRIX_N * sizeof(host_type), hipMemcpyHostToDevice));
cudaErrCheck(hipMemcpy(c_htype,c_host_wmma, MATRIX_M * MATRIX_N * sizeof(host_type), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( convert<atype,host_type>) , dim3((MATRIX_M * MATRIX_K + 255) / 256), dim3(256) , 0, 0, a_atype, a_htype, MATRIX_M * MATRIX_K);
hipLaunchKernelGGL(( convert<btype,host_type>) , dim3((MATRIX_K * MATRIX_N + 255) / 256), dim3(256) , 0, 0, b_btype, b_htype, MATRIX_K * MATRIX_N);
hipLaunchKernelGGL(( convert<ctype,host_type>) , dim3((MATRIX_M * MATRIX_N + 255) / 256), dim3(256) , 0, 0, c_ctype, c_htype, MATRIX_M * MATRIX_N);
printf("\nM = %d, N = %d, K = %d. \n", MATRIX_M, MATRIX_N, MATRIX_K);
printf("Running with wmma...\n");
cudaErrCheck(hipEventRecord(startWMMA));
hipLaunchKernelGGL(( wmma_example) , dim3(NUM_CTA),dim3(WARP_IN_CTA*THREAD_IN_WARP), 0, 0, a_atype, b_btype, c_ctype, d_dtype);
cudaErrCheck(hipEventRecord(stopWMMA));
hipLaunchKernelGGL(( convert<host_type,dtype>) , dim3((MATRIX_M * MATRIX_N + 255) / 256), dim3(256) , 0, 0, d_htype, d_dtype, MATRIX_M * MATRIX_N);
cudaErrCheck(hipEventSynchronize(stopWMMA));
// Error checking
printf("\nChecking results...\n");
cudaErrCheck(hipMemcpy(d_host_wmma, d_htype, MATRIX_M * MATRIX_N * sizeof(host_type), hipMemcpyDeviceToHost));
printf("Results verified: cublas and WMMA agree.\n\n");
float wmmaTime;
cudaErrCheck(hipEventElapsedTime(&wmmaTime, startWMMA, stopWMMA));
printf("wmma took %.2fms\n", wmmaTime);
cudaErrCheck(hipEventDestroy(startWMMA));
cudaErrCheck(hipEventDestroy(stopWMMA));
//printf("D_CALCULATED\n");
//print_matrix<host_type>(d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT);
//printf("D_WMMA\n");
//print_matrix<host_type>(d_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT);
//printf("CHECKING\n");
//compare_matrix<host_type>(d_host_wmma,d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT,D_LAYOUT);
cudaErrCheck(hipFree(a_htype));
cudaErrCheck(hipFree(b_htype));
cudaErrCheck(hipFree(c_htype));
cudaErrCheck(hipFree(d_htype));
cudaErrCheck(hipFree(a_atype));
cudaErrCheck(hipFree(b_btype));
cudaErrCheck(hipFree(c_ctype));
cudaErrCheck(hipFree(d_dtype));
free(a_host_wmma);
free(b_host_wmma);
free(c_host_wmma);
free(d_host_wmma);
free(d_cal_host_wmma);
cudaErrCheck(hipDeviceReset());
return 0;
}
| d4f8368f8b590bd2f5b348c36e3ec1a104ec941b.cu | #include <stdio.h>
#include <curand.h>
#include <ctime>
#include <assert.h>
// Define some error checking macros.
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
void cudaErrCheck_(cudaError_t stat, const char *file, int line) {
if (stat != cudaSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat), file, line);
}
}
#define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); }
void curandErrCheck_(curandStatus_t stat, const char *file, int line) {
if (stat != CURAND_STATUS_SUCCESS) {
fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line);
}
}
#include <mma.h>
using namespace nvcuda;
//enum MatrixLayout{
#define ROW_MAJOR 0
#define COL_MAJOR 1
//};
//ONLY THE PARAMETER HERE NEEDS TO BE CHANGED
// M, N, K must match a WMMA-supported tile shape (8 x 32 x 16 here)
#define MATRIX_M (8)
#define MATRIX_N (32)
#define MATRIX_K (16)
const int WMMA_M =8;
const int WMMA_N =32;
const int WMMA_K =16;
typedef half atype;
typedef half btype;
typedef half ctype;
typedef half dtype;
typedef float host_type;
#define A_LAYOUT COL_MAJOR
#define B_LAYOUT COL_MAJOR
#define C_LAYOUT COL_MAJOR
#define D_LAYOUT COL_MAJOR
#define NUM_CTA 1
#define WARP_IN_CTA 1
//Don't change anything after here
#define THREAD_IN_WARP 32
#if A_LAYOUT==ROW_MAJOR
#define LAYOUT_A wmma::row_major
#define A_STRIDE MATRIX_K
#else
#define LAYOUT_A wmma::col_major
#define A_STRIDE MATRIX_M
#endif
#if B_LAYOUT==ROW_MAJOR
#define LAYOUT_B wmma::row_major
#define B_STRIDE MATRIX_N
#else
#define LAYOUT_B wmma::col_major
#define B_STRIDE MATRIX_K
#endif
#if C_LAYOUT==ROW_MAJOR
#define LAYOUT_C wmma::mem_row_major
#define C_STRIDE MATRIX_N
#else
#define LAYOUT_C wmma::mem_col_major
#define C_STRIDE MATRIX_M
#endif
#if D_LAYOUT==ROW_MAJOR
#define LAYOUT_D wmma::mem_row_major
#define D_STRIDE MATRIX_N
#else
#define LAYOUT_D wmma::mem_col_major
#define D_STRIDE MATRIX_M
#endif
enum MatrixInitializationType{
ZERO,
ONE,
RANDOM,
IDENTITY,
LINEAR
};
int get_value(MatrixInitializationType init_type,int randomRange=3,bool RESET=false){
static int val=0;
switch(init_type){
case ZERO:
break;
case ONE:
val=1;
break;
case RANDOM:
val=rand()%randomRange;
break;
case LINEAR:
val++;
break;
default :
printf("illegal MatrixInitializationType\n");
abort();
break;
}
if(RESET)
val=0;
return val;
}
template <typename T>
void print_matrix(T *matrix,int row_size,int col_size,int/*MatrixLayout*/ layout){
for(int row=0;row<row_size;row++){
for(int col=0;col<col_size;col++){
T val;
if(layout==ROW_MAJOR)
val=matrix[row*col_size+col];
else
val=matrix[col*row_size+row];
printf("%.2f ",static_cast<float>(val));
}
printf(";\n");
}
}
template <typename T>
void initialize_matrix(T *matrix,int row_size,int col_size,int/*MatrixLayout*/ layout,MatrixInitializationType init_type){
for(int row=0;row<row_size;row++){
for(int col=0;col<col_size;col++){
if(init_type==IDENTITY){
assert(row_size==col_size);//identity initialization requires a square matrix
matrix[row*row_size+col]=static_cast<T>(row==col?1:0);//1 on the diagonal, 0 elsewhere
}
else{
if(layout==ROW_MAJOR){
matrix[row*col_size+col]=static_cast<T>(get_value(init_type));
}
else{
matrix[col*row_size+row]=static_cast<T>(get_value(init_type));
}
}
}
}
get_value(init_type,10,true);//resetting the val counter
print_matrix<T>(matrix,row_size,col_size,layout);
}
int get_index(int row,int col,int row_size,int col_size,int/*MatrixLayout*/ layout){
int index=0;
if(layout==ROW_MAJOR){
index=row*col_size+col;
}
else{
index=col*row_size+row;
}
return index;
}
template <typename T>
void matrix_multiply(T *result_matrix, T *matrix_a,T* matrix_b,T *matrix_c,int M,int N,int K,int/*MatrixLayout*/ resultlayout,int/*MatrixLayout*/ alayout,int/*MatrixLayout*/ blayout,int/*MatrixLayout*/ clayout){
for(int row=0;row<M;row++){
for(int col=0;col<N;col++){
int rindex=get_index(row,col,M,N,resultlayout);
int cindex=get_index(row,col,M,N,clayout);
for(int k=0;k<K;k++){
int aindex=get_index(row,k,M,K,alayout);
int bindex=get_index(k,col,K,N,blayout);
result_matrix[rindex]+=matrix_a[aindex]*matrix_b[bindex];
}
result_matrix[rindex]+=matrix_c[cindex];
}
}
print_matrix<T>(result_matrix,M,N,resultlayout);
}
template <typename T>
void compare_matrix(T *matrix_a, T *matrix_b,int row_size,int col_size,int/*MatrixLayout*/ alayout,int/*MatrixLayout*/ blayout){
for(int row=0;row<row_size;row++){
for(int col=0;col<col_size;col++){
int index_a,index_b;
index_a=get_index(row,col,row_size,col_size,alayout);
index_b=get_index(row,col,row_size,col_size,blayout);
if(matrix_a[index_a]!=matrix_b[index_b])
printf("ERROR at index row=%d col=%d\n",row,col);
}
}
}
__global__ void wmma_example(atype *a, btype *b, ctype *c,dtype *d)
{
float t;
// Declare the fragments
wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, atype , LAYOUT_A> a_frag;
wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, btype , LAYOUT_B> b_frag;
wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, ctype> c_frag;
// Load the inputs into the WMMA fragments
wmma::load_matrix_sync(a_frag, a, A_STRIDE);
wmma::load_matrix_sync(b_frag, b, B_STRIDE);
wmma::load_matrix_sync(c_frag, c, C_STRIDE,LAYOUT_C);
for(int i=0; i < a_frag.num_elements; i++) {
t=static_cast<float>(a_frag.x[i]);
printf("A_THREAD%d: %.2f \n",threadIdx.x,t);
}
for(int i=0; i < b_frag.num_elements; i++) {
t=static_cast<float>(b_frag.x[i]);
printf("B_THREAD%d: %.2f \n",threadIdx.x,t);
}
for(int i=0; i < c_frag.num_elements; i++) {
t=static_cast<float>(c_frag.x[i]);
printf("C_THREAD%d: %.2f \n",threadIdx.x,t);
}
wmma::mma_sync(c_frag, a_frag, b_frag, c_frag);
wmma::store_matrix_sync(d, c_frag, D_STRIDE, LAYOUT_D);
}
template <typename T1,typename T2>
__global__ void convert(T1 *out, T2 *in, int n) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < n) {
out[idx] = in[idx];
}
}
int main(int argc, char* argv[]) {
//data on device in host type format
host_type *a_htype;
host_type *b_htype;
host_type *c_htype;
host_type *d_htype;
//data on device in gemm format
atype *a_atype;
btype *b_btype;
ctype *c_ctype;
dtype *d_dtype;
srand(time(NULL));
host_type *a_host_wmma;
host_type *b_host_wmma;
host_type *c_host_wmma;
host_type *d_host_wmma;
host_type *d_cal_host_wmma;
cudaEvent_t startWMMA;
cudaEvent_t stopWMMA;
cudaErrCheck(cudaEventCreate(&startWMMA));
cudaErrCheck(cudaEventCreate(&stopWMMA));
// Use tensor cores
cudaErrCheck(cudaMalloc((void**)&a_htype, MATRIX_M * MATRIX_K * sizeof(host_type)));
cudaErrCheck(cudaMalloc((void**)&b_htype, MATRIX_K * MATRIX_N * sizeof(host_type)));
cudaErrCheck(cudaMalloc((void**)&c_htype, MATRIX_M * MATRIX_N * sizeof(host_type)));
cudaErrCheck(cudaMalloc((void**)&d_htype, MATRIX_M * MATRIX_N * sizeof(host_type)));
cudaErrCheck(cudaMalloc((void**)&a_atype, MATRIX_M * MATRIX_K * sizeof(atype)));
cudaErrCheck(cudaMalloc((void**)&b_btype, MATRIX_K * MATRIX_N * sizeof(btype)));
cudaErrCheck(cudaMalloc((void**)&c_ctype, MATRIX_M * MATRIX_N * sizeof(ctype)));
cudaErrCheck(cudaMalloc((void**)&d_dtype, MATRIX_M * MATRIX_N * sizeof(dtype)));
a_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_K * sizeof(host_type));
b_host_wmma = (host_type*)malloc(MATRIX_K * MATRIX_N * sizeof(host_type));
c_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type));
d_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type));
d_cal_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type));
printf("a_host\n");
initialize_matrix<host_type>(a_host_wmma,MATRIX_M,MATRIX_K,A_LAYOUT,LINEAR);
printf("b_host\n");
initialize_matrix<host_type>(b_host_wmma,MATRIX_K,MATRIX_N,B_LAYOUT,LINEAR);
printf("c_host\n");
initialize_matrix<host_type>(c_host_wmma,MATRIX_M,MATRIX_N,C_LAYOUT,LINEAR);
printf("d_cal_host\n");
initialize_matrix<host_type>(d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT,ZERO);
printf("d_cal_host\n");
matrix_multiply<host_type>(d_cal_host_wmma,a_host_wmma,b_host_wmma,c_host_wmma,MATRIX_M,MATRIX_N,MATRIX_K,D_LAYOUT,A_LAYOUT,B_LAYOUT,C_LAYOUT);
cudaErrCheck(cudaMemcpy(a_htype,a_host_wmma, MATRIX_M * MATRIX_K * sizeof(host_type), cudaMemcpyHostToDevice));
cudaErrCheck(cudaMemcpy(b_htype,b_host_wmma, MATRIX_K * MATRIX_N * sizeof(host_type), cudaMemcpyHostToDevice));
cudaErrCheck(cudaMemcpy(c_htype,c_host_wmma, MATRIX_M * MATRIX_N * sizeof(host_type), cudaMemcpyHostToDevice));
convert<atype,host_type> <<< (MATRIX_M * MATRIX_K + 255) / 256, 256 >>> (a_atype, a_htype, MATRIX_M * MATRIX_K);
convert<btype,host_type> <<< (MATRIX_K * MATRIX_N + 255) / 256, 256 >>> (b_btype, b_htype, MATRIX_K * MATRIX_N);
convert<ctype,host_type> <<< (MATRIX_M * MATRIX_N + 255) / 256, 256 >>> (c_ctype, c_htype, MATRIX_M * MATRIX_N);
printf("\nM = %d, N = %d, K = %d. \n", MATRIX_M, MATRIX_N, MATRIX_K);
printf("Running with wmma...\n");
cudaErrCheck(cudaEventRecord(startWMMA));
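// Launch note: with NUM_CTA = 1 and WARP_IN_CTA = 1 a single 32-thread warp is
// launched, and because MATRIX_M/N/K equal WMMA_M/N/K (8 x 32 x 16) that one
// warp computes the whole product with a single fragment operation.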
wmma_example <<< NUM_CTA,WARP_IN_CTA*THREAD_IN_WARP>>> (a_atype, b_btype, c_ctype, d_dtype);
cudaErrCheck(cudaEventRecord(stopWMMA));
convert<host_type,dtype> <<< (MATRIX_M * MATRIX_N + 255) / 256, 256 >>> (d_htype, d_dtype, MATRIX_M * MATRIX_N);
cudaErrCheck(cudaEventSynchronize(stopWMMA));
// Error checking
printf("\nChecking results...\n");
cudaErrCheck(cudaMemcpy(d_host_wmma, d_htype, MATRIX_M * MATRIX_N * sizeof(host_type), cudaMemcpyDeviceToHost));
printf("Results verified: cublas and WMMA agree.\n\n");
float wmmaTime;
cudaErrCheck(cudaEventElapsedTime(&wmmaTime, startWMMA, stopWMMA));
printf("wmma took %.2fms\n", wmmaTime);
cudaErrCheck(cudaEventDestroy(startWMMA));
cudaErrCheck(cudaEventDestroy(stopWMMA));
//printf("D_CALCULATED\n");
//print_matrix<host_type>(d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT);
//printf("D_WMMA\n");
//print_matrix<host_type>(d_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT);
//printf("CHECKING\n");
//compare_matrix<host_type>(d_host_wmma,d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT,D_LAYOUT);
cudaErrCheck(cudaFree(a_htype));
cudaErrCheck(cudaFree(b_htype));
cudaErrCheck(cudaFree(c_htype));
cudaErrCheck(cudaFree(d_htype));
cudaErrCheck(cudaFree(a_atype));
cudaErrCheck(cudaFree(b_btype));
cudaErrCheck(cudaFree(c_ctype));
cudaErrCheck(cudaFree(d_dtype));
free(a_host_wmma);
free(b_host_wmma);
free(c_host_wmma);
free(d_host_wmma);
free(d_cal_host_wmma);
cudaErrCheck(cudaDeviceReset());
return 0;
}
|
50a80938a960a49bb2fe714dd2d426f6be5c09e6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include "string.h"
#define DEFAULT_THRESHOLD 4000
#define DEFAULT_FILENAME "BWstop-sign.ppm"
unsigned int *read_ppm( char *filename, int * xsize, int * ysize, int *maxval ){
if ( !filename || filename[0] == '\0') {
fprintf(stderr, "read_ppm but no file name\n");
return NULL; // fail
}
FILE *fp;
fprintf(stderr, "read_ppm( %s )\n", filename);
fp = fopen( filename, "rb");
if (!fp) {
fprintf(stderr, "read_ppm() ERROR file '%s' cannot be opened for reading\n", filename);
return NULL; // fail
}
char chars[1024];
//int num = read(fd, chars, 1000);
int num = fread(chars, sizeof(char), 1000, fp);
if (chars[0] != 'P' || chars[1] != '6')
{
fprintf(stderr, "Texture::Texture() ERROR file '%s' does not start with \"P6\" I am expecting a binary PPM file\n", filename);
return NULL;
}
unsigned int width, height, maxvalue;
char *ptr = chars+3; // P 6 newline
if (*ptr == '#') // comment line!
{
ptr = 1 + strstr(ptr, "\n");
}
num = sscanf(ptr, "%d\n%d\n%d", &width, &height, &maxvalue);
fprintf(stderr, "read %d things width %d height %d maxval %d\n", num, width, height, maxvalue);
*xsize = width;
*ysize = height;
*maxval = maxvalue;
unsigned int *pic = (unsigned int *)malloc( width * height * sizeof(unsigned int));
if (!pic) {
fprintf(stderr, "read_ppm() unable to allocate %d x %d unsigned ints for the picture\n", width, height);
return NULL; // fail but return
}
// allocate buffer to read the rest of the file into
int bufsize = 3 * width * height * sizeof(unsigned char);
if ((*maxval) > 255) bufsize *= 2;
unsigned char *buf = (unsigned char *)malloc( bufsize );
if (!buf) {
fprintf(stderr, "read_ppm() unable to allocate %d bytes of read buffer\n", bufsize);
return NULL; // fail but return
}
// TODO really read
char duh[80];
char *line = chars;
// find the start of the pixel data. no doubt stupid
sprintf(duh, "%d\0", *xsize);
line = strstr(line, duh);
//fprintf(stderr, "%s found at offset %d\n", duh, line-chars);
line += strlen(duh) + 1;
sprintf(duh, "%d\0", *ysize);
line = strstr(line, duh);
//fprintf(stderr, "%s found at offset %d\n", duh, line-chars);
line += strlen(duh) + 1;
sprintf(duh, "%d\0", *maxval);
line = strstr(line, duh);
fprintf(stderr, "%s found at offset %d\n", duh, line - chars);
line += strlen(duh) + 1;
long offset = line - chars;
//lseek(fd, offset, SEEK_SET); // move to the correct offset
fseek(fp, offset, SEEK_SET); // move to the correct offset
//long numread = read(fd, buf, bufsize);
long numread = fread(buf, sizeof(char), bufsize, fp);
fprintf(stderr, "Texture %s read %ld of %ld bytes\n", filename, numread, bufsize);
fclose(fp);
int pixels = (*xsize) * (*ysize);
for (int i=0; i<pixels; i++) pic[i] = (int) buf[3*i]; // red channel
return pic; // success
}
void write_ppm( char *filename, int xsize, int ysize, int maxval, int *pic)
{
FILE *fp;
fp = fopen(filename, "w");
if (!fp)
{
fprintf(stderr, "FAILED TO OPEN FILE '%s' for writing\n");
exit(-1);
}
fprintf(fp, "P6\n");
fprintf(fp,"%d %d\n%d\n", xsize, ysize, maxval);
int numpix = xsize * ysize;
for (int i=0; i<numpix; i++) {
unsigned char uc = (unsigned char) pic[i];
fprintf(fp, "%c%c%c", uc, uc, uc);
}
fclose(fp);
}
__global__ void sobel_kernel(int xsize, int ysize, unsigned int *pic, int *result, int thresh,int xoffset, int yoffset) {
int i = blockIdx.y * blockDim.y + threadIdx.y + 16*5*yoffset;
int j = blockIdx.x * blockDim.x + threadIdx.x + 16*5*xoffset;
int jx = j - 16*5*xoffset;
int iy = i - 16*5*yoffset;
__shared__ unsigned int pic_d[16*5*5*16];
if( i > -1 && i < ysize && j > -1 && j < xsize ) {
pic_d[80*iy+jx] = pic[i*xsize +j];
__syncthreads();
}
if( i > 0 && i < ysize - 1 && j > 0 && j < xsize - 1) {
int offset = i*xsize + j;
// NOTE: the shared-memory stencil kept commented out below is unsafe here: each
// 16x16 block fills only part of the 80x80 tile, so neighbour reads may hit
// elements never written by this block.  The sums are computed from global memory.
/*int sum1 = pic_d[ 80 * (iy-1) + jx+1 ] - pic_d[80*iy + jx]
+ 2 * pic_d[ 80 * (iy) + jx+1 ] - 2 * pic_d[ 80*(iy) + jx-1 ]
+ pic_d[ 80 * (iy+1) + jx+1 ] - pic_d[ 80*(iy+1) + jx-1 ];
int sum2 = pic_d[80*iy+jx]+ 2 * pic_d[ 80 * (iy-1) + jx ] + pic_d[ 80 * (iy-1) + jx+1 ] - pic_d[80 * (iy+1) + jx-1 ] - 2 * pic_d[ 80 * (iy+1) + jx ] - pic_d[ 80 * (iy+1) + jx+1 ];
*/
int sum1 = pic[ xsize * (i-1) + j+1 ] - pic[xsize*i + j]
+ 2 * pic[ xsize * (i) + j+1 ] - 2 * pic[ xsize*(i) + j-1 ]
+ pic[ xsize * (i+1) + j+1 ] - pic[ xsize*(i+1) + j-1 ];
int sum2 = pic[xsize*i+j]+ 2 * pic[ xsize * (i-1) + j ] + pic[ xsize * (i-1) + j+1 ] - pic[xsize * (i+1) + j-1 ] - 2 * pic[ xsize * (i+1) + j ] - pic[ xsize * (i+1) + j+1 ];
int magnitude = sum1*sum1 + sum2*sum2;
result[offset] = (magnitude > thresh) * 255;
}
}
int main( int argc, char **argv )
{
int thresh = DEFAULT_THRESHOLD;
char *filename;
filename = strdup( DEFAULT_FILENAME);
if (argc > 1) {
if (argc == 3) { // filename AND threshold
filename = strdup( argv[1]);
thresh = atoi( argv[2] );
}
if (argc == 2) { // default file but specified threshold
thresh = atoi( argv[1] );
}
fprintf(stderr, "file %s threshold %d\n", filename, thresh);
}
int xsize, ysize, maxval;
unsigned int *pic = read_ppm( filename, &xsize, &ysize, &maxval );
int numbytes = xsize * ysize * 3 * sizeof( int );
int result_size = numbytes;
// int result_size = xsize * ysize * sizeof(int);
int pic_size = xsize * ysize * sizeof(unsigned int);
int *result = (int *) malloc( numbytes );
if (!result) {
fprintf(stderr, "sobel() unable to malloc %d bytes\n", numbytes);
exit(-1); // fail
}
int *out = result;
for (int col=0; col<ysize; col++) {
for (int row=0; row<xsize; row++) {
*out++ = 0;
}
}
int *result_d;
unsigned int *pic_d;
int blockX = 16;
int blockY = 16;
hipMalloc((void **) &result_d, result_size);
hipMalloc((void **) &pic_d, pic_size);
hipMemcpy(pic_d, pic, pic_size, hipMemcpyHostToDevice);
dim3 block(blockX, blockY);
//dim3 grid((xsize + blockX - 1)/blockX , (ysize + blockY - 1)/blockY);
dim3 grid(5,5);
hipEvent_t start, end;
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start, 0);
for (int x = 0; x <= xsize/(16*5)+5; x++) {
for (int y = 0; y <= ysize/(16*5)+5; y++) {
hipLaunchKernelGGL(( sobel_kernel), dim3(grid), dim3(block), 0, 0, xsize, ysize, pic_d, result_d, thresh,x,y);
}
}
hipError_t error = hipGetLastError();
if (hipSuccess != error)
printf( "Error! %s\n",hipGetErrorString(error) );
hipEventRecord(end, 0);
hipEventSynchronize(end);
float time;
hipEventElapsedTime(&time, start, end);
hipMemcpy(result, result_d, result_size, hipMemcpyDeviceToHost);
hipFree(result_d);
hipFree(pic_d);
printf("time: %fms\n", time);
write_ppm( "result.ppm", xsize, ysize, 255, result);
fprintf(stderr, "sobel done\n");
}
| 50a80938a960a49bb2fe714dd2d426f6be5c09e6.cu | #include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include "string.h"
#define DEFAULT_THRESHOLD 4000
#define DEFAULT_FILENAME "BWstop-sign.ppm"
unsigned int *read_ppm( char *filename, int * xsize, int * ysize, int *maxval ){
if ( !filename || filename[0] == '\0') {
fprintf(stderr, "read_ppm but no file name\n");
return NULL; // fail
}
FILE *fp;
fprintf(stderr, "read_ppm( %s )\n", filename);
fp = fopen( filename, "rb");
if (!fp) {
fprintf(stderr, "read_ppm() ERROR file '%s' cannot be opened for reading\n", filename);
return NULL; // fail
}
char chars[1024];
//int num = read(fd, chars, 1000);
int num = fread(chars, sizeof(char), 1000, fp);
if (chars[0] != 'P' || chars[1] != '6')
{
fprintf(stderr, "Texture::Texture() ERROR file '%s' does not start with \"P6\" I am expecting a binary PPM file\n", filename);
return NULL;
}
unsigned int width, height, maxvalue;
char *ptr = chars+3; // P 6 newline
if (*ptr == '#') // comment line!
{
ptr = 1 + strstr(ptr, "\n");
}
num = sscanf(ptr, "%d\n%d\n%d", &width, &height, &maxvalue);
fprintf(stderr, "read %d things width %d height %d maxval %d\n", num, width, height, maxvalue);
*xsize = width;
*ysize = height;
*maxval = maxvalue;
unsigned int *pic = (unsigned int *)malloc( width * height * sizeof(unsigned int));
if (!pic) {
fprintf(stderr, "read_ppm() unable to allocate %d x %d unsigned ints for the picture\n", width, height);
return NULL; // fail but return
}
// allocate buffer to read the rest of the file into
int bufsize = 3 * width * height * sizeof(unsigned char);
if ((*maxval) > 255) bufsize *= 2;
unsigned char *buf = (unsigned char *)malloc( bufsize );
if (!buf) {
fprintf(stderr, "read_ppm() unable to allocate %d bytes of read buffer\n", bufsize);
return NULL; // fail but return
}
// TODO really read
char duh[80];
char *line = chars;
// find the start of the pixel data. no doubt stupid
sprintf(duh, "%d\0", *xsize);
line = strstr(line, duh);
//fprintf(stderr, "%s found at offset %d\n", duh, line-chars);
line += strlen(duh) + 1;
sprintf(duh, "%d\0", *ysize);
line = strstr(line, duh);
//fprintf(stderr, "%s found at offset %d\n", duh, line-chars);
line += strlen(duh) + 1;
sprintf(duh, "%d\0", *maxval);
line = strstr(line, duh);
fprintf(stderr, "%s found at offset %d\n", duh, line - chars);
line += strlen(duh) + 1;
long offset = line - chars;
//lseek(fd, offset, SEEK_SET); // move to the correct offset
fseek(fp, offset, SEEK_SET); // move to the correct offset
//long numread = read(fd, buf, bufsize);
long numread = fread(buf, sizeof(char), bufsize, fp);
fprintf(stderr, "Texture %s read %ld of %ld bytes\n", filename, numread, bufsize);
fclose(fp);
int pixels = (*xsize) * (*ysize);
for (int i=0; i<pixels; i++) pic[i] = (int) buf[3*i]; // red channel
return pic; // success
}
void write_ppm( char *filename, int xsize, int ysize, int maxval, int *pic)
{
FILE *fp;
fp = fopen(filename, "w");
if (!fp)
{
fprintf(stderr, "FAILED TO OPEN FILE '%s' for writing\n");
exit(-1);
}
fprintf(fp, "P6\n");
fprintf(fp,"%d %d\n%d\n", xsize, ysize, maxval);
int numpix = xsize * ysize;
for (int i=0; i<numpix; i++) {
unsigned char uc = (unsigned char) pic[i];
fprintf(fp, "%c%c%c", uc, uc, uc);
}
fclose(fp);
}
__global__ void sobel_kernel(int xsize, int ysize, unsigned int *pic, int *result, int thresh,int xoffset, int yoffset) {
int i = blockIdx.y * blockDim.y + threadIdx.y + 16*5*yoffset;
int j = blockIdx.x * blockDim.x + threadIdx.x + 16*5*xoffset;
int jx = j - 16*5*xoffset;
int iy = i - 16*5*yoffset;
__shared__ unsigned int pic_d[16*5*5*16];
if( i > -1 && i < ysize && j > -1 && j < xsize ) {
pic_d[80*iy+jx] = pic[i*xsize +j];
__syncthreads();
}
if( i > 0 && i < ysize - 1 && j > 0 && j < xsize - 1) {
int offset = i*xsize + j;
// NOTE: the shared-memory stencil kept commented out below is unsafe here: each
// 16x16 block fills only part of the 80x80 tile, so neighbour reads may hit
// elements never written by this block.  The sums are computed from global memory.
/*int sum1 = pic_d[ 80 * (iy-1) + jx+1 ] - pic_d[80*iy + jx]
+ 2 * pic_d[ 80 * (iy) + jx+1 ] - 2 * pic_d[ 80*(iy) + jx-1 ]
+ pic_d[ 80 * (iy+1) + jx+1 ] - pic_d[ 80*(iy+1) + jx-1 ];
int sum2 = pic_d[80*iy+jx]+ 2 * pic_d[ 80 * (iy-1) + jx ] + pic_d[ 80 * (iy-1) + jx+1 ] - pic_d[80 * (iy+1) + jx-1 ] - 2 * pic_d[ 80 * (iy+1) + jx ] - pic_d[ 80 * (iy+1) + jx+1 ];
*/
int sum1 = pic[ xsize * (i-1) + j+1 ] - pic[xsize*i + j]
+ 2 * pic[ xsize * (i) + j+1 ] - 2 * pic[ xsize*(i) + j-1 ]
+ pic[ xsize * (i+1) + j+1 ] - pic[ xsize*(i+1) + j-1 ];
int sum2 = pic[xsize*i+j]+ 2 * pic[ xsize * (i-1) + j ] + pic[ xsize * (i-1) + j+1 ] - pic[xsize * (i+1) + j-1 ] - 2 * pic[ xsize * (i+1) + j ] - pic[ xsize * (i+1) + j+1 ];
int magnitude = sum1*sum1 + sum2*sum2;
result[offset] = (magnitude > thresh) * 255;
}
}
int main( int argc, char **argv )
{
int thresh = DEFAULT_THRESHOLD;
char *filename;
filename = strdup( DEFAULT_FILENAME);
if (argc > 1) {
if (argc == 3) { // filename AND threshold
filename = strdup( argv[1]);
thresh = atoi( argv[2] );
}
if (argc == 2) { // default file but specified threshold
thresh = atoi( argv[1] );
}
fprintf(stderr, "file %s threshold %d\n", filename, thresh);
}
int xsize, ysize, maxval;
unsigned int *pic = read_ppm( filename, &xsize, &ysize, &maxval );
int numbytes = xsize * ysize * 3 * sizeof( int );
int result_size = numbytes;
// int result_size = xsize * ysize * sizeof(int);
int pic_size = xsize * ysize * sizeof(unsigned int);
int *result = (int *) malloc( numbytes );
if (!result) {
fprintf(stderr, "sobel() unable to malloc %d bytes\n", numbytes);
exit(-1); // fail
}
int *out = result;
for (int col=0; col<ysize; col++) {
for (int row=0; row<xsize; row++) {
*out++ = 0;
}
}
int *result_d;
unsigned int *pic_d;
int blockX = 16;
int blockY = 16;
cudaMalloc((void **) &result_d, result_size);
cudaMalloc((void **) &pic_d, pic_size);
cudaMemcpy(pic_d, pic, pic_size, cudaMemcpyHostToDevice);
dim3 block(blockX, blockY);
//dim3 grid((xsize + blockX - 1)/blockX , (ysize + blockY - 1)/blockY);
dim3 grid(5,5);
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start, 0);
for (int x = 0; x <= xsize/(16*5)+5; x++) {
for (int y = 0; y <= ysize/(16*5)+5; y++) {
sobel_kernel<<<grid, block>>>(xsize, ysize, pic_d, result_d, thresh,x,y);
}
}
cudaError_t error = cudaGetLastError();
if (cudaSuccess != error)
printf( "Error! %s\n",cudaGetErrorString(error) );
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
float time;
cudaEventElapsedTime(&time, start, end);
cudaMemcpy(result, result_d, result_size, cudaMemcpyDeviceToHost);
cudaFree(result_d);
cudaFree(pic_d);
printf("time: %fms\n", time);
write_ppm( "result.ppm", xsize, ysize, 255, result);
fprintf(stderr, "sobel done\n");
}
|
51f686da1f6e7ba97ad9c38683c42f7875197da3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#include <hiprand/hiprand_kernel.h>
#include "time_it.h"
// HANDLE_ERROR is from "CUDA by Exmaple" by Sanders and Kandrot
// I found the actual source code at
// http://stackoverflow.com/questions/13245258/handle-error-not-found-error-in-cuda
// (The URL in the book for getting the source appears to be no longer
// available.)
static void HandleError( hipError_t err,
const char *file,
int line ) {
if (err != hipSuccess) {
printf( "%s in %s at line %d\n", hipGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
// struct for passing arguments through time_it_run to our kernel functions
struct kernel_arg {
uint n;
float *v, *z;
uint *a;
int m, nblks, tpb, warpSize, whichKernel;
hiprandState_t *dev_randState;
};
/*****************************************************
* print_vec: print the first few elements of a vector
******************************************************/
void print_vec(float *x, uint n, const char *fmt, const char *who) {
printf("%s = ", who);
for(int i = 0; i < n; i++) {
if(i > 0) printf(", ");
printf(fmt, x[i]);
}
if(n > 10) printf(", ...");
printf("\n");
}
void print_vec(uint *x, uint n, const char *fmt, const char *who) {
printf("%s = ", who);
for(int i = 0; i < n; i++) {
if(i > 0) printf(", ");
printf(fmt, x[i]);
}
if(n > 10) printf(", ...");
printf("\n");
}
/************************
* Change by Colin Stone, Rest taken from examples.cu by Mark R. Greenstreet
* logistic map
************************/
#define ALPHA 1.0f
__global__ void logmap(float *x, int n, int m) {
uint i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n)
for(int j = 0; j < m; j++)
x[i] = ALPHA * x[i] * (1.0f - x[i]);
}
void do_logmap(void *void_arg) {
struct kernel_arg *argk = (struct kernel_arg *)(void_arg);
// printf("RUNNING LOGMAP m=%d nblks=%d tpb=%d\n", argk->m, argk->nblks, argk->tpb);
hipLaunchKernelGGL(( logmap), dim3(argk->nblks),dim3(argk->tpb), 0, 0, argk->v, argk->n, argk->m);
hipDeviceSynchronize();
}
/************************
* Change by Colin Stone, Rest taken from examples.cu by Mark
* norm
************************/
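// reduce_sum_dev performs an in-place tree reduction of x[0..n-1] within one block:
// each pass folds the upper half (m = n/2 elements) onto the lower half, which also
// works when n is not a power of two, e.g. n = 5 -> 3 -> 2 -> 1.
// Every thread of the block must call it, since it uses __syncthreads().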
__device__ void reduce_sum_dev(uint n, float *x) {
uint myId = threadIdx.x;
for(uint m = n >> 1; m > 0; m = n >> 1) {
n -= m;
__syncthreads();
if(myId < m)
x[myId] += x[myId+n];
}
}
__global__ void norm(float *x, int n, float *z) {
uint i = blockDim.x * blockIdx.x + threadIdx.x;
uint blockBase = blockDim.x * blockIdx.x;
uint m = min(blockDim.x, n - blockBase);
if (i < n)
x[i] = pow(x[i], 2);
__syncthreads();
reduce_sum_dev(m, &(x[blockBase]));
if (i < n && threadIdx.x == 0)
z[blockIdx.x] = sqrt(x[i]);
}
void do_norm(void *void_arg) {
struct kernel_arg *argk = (struct kernel_arg *)(void_arg);
// printf("RUNNING NORM nblks=%d tpb=%d\n", argk->nblks, argk->tpb);
hipLaunchKernelGGL(( norm), dim3(argk->nblks),dim3(argk->tpb), 0, 0, argk->v, argk->n, argk->z);
hipDeviceSynchronize();
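// The extra passes below reduce the per-block partial results stored in z.
// As written they assume nblks <= 1024 so that one block of nblks threads can
// finish the job; the nblks > 1024 branch would exceed the threads-per-block limit.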
if (argk->nblks > 1024)
hipLaunchKernelGGL(( norm), dim3(1024),dim3(argk->nblks), 0, 0, argk->z, 1024*argk->nblks, argk->z);
hipDeviceSynchronize();
if (argk->nblks > 1)
hipLaunchKernelGGL(( norm), dim3(1),dim3(argk->nblks), 0, 0, argk->z, 1*argk->nblks, argk->z);
hipDeviceSynchronize();
}
/************************
* Change by Colin Stone, Rest taken from examples.cu by Mark
* random number generator
************************/
__global__ void setup_kernel(uint n, hiprandState_t *state) {
uint myId = blockDim.x * blockIdx.x + threadIdx.x;
if(myId < n)
hiprand_init(1234, myId, 0, &state[myId]);
}
__global__ void rndm(uint *a, int m, hiprandState_t *state) {
uint i = blockDim.x * blockIdx.x + threadIdx.x;
for(int j = 0; j < m; j++) {
// printf("idx=%d seed=%d\n", i*m+j, i);
a[i*m + j] = hiprand_uniform(&state[i])*1000;
}
}
void do_rndm(void *void_arg) {
struct kernel_arg *argk = (struct kernel_arg *)(void_arg);
// printf("RUNNING RNDM nblks=%d tpb=%d m=%d\n", argk->nblks, argk->tpb, argk->m);
hipLaunchKernelGGL(( setup_kernel), dim3(argk->nblks),dim3(argk->n), 0, 0, argk->n, argk->dev_randState);
hipDeviceSynchronize();
hipLaunchKernelGGL(( rndm), dim3(argk->nblks),dim3(argk->tpb), 0, 0, argk->a, argk->m, argk->dev_randState);
hipDeviceSynchronize();
// int n = argk->n*argk->m;
// int size = n*sizeof(uint);
// uint *a = (uint *)malloc(size);
// hipMemcpy(a, argk->a, size, hipMemcpyDeviceToHost);
// print_vec(a, min(10, n), "%d", "a");
}
/************************
* Rest of Code
************************/
int main(int argc, char **argv) {
int nblks = 24; // default number of blocks in the grid
int m = 1000; // default number of "rounds" in kernel
int tpb = 256; // default threads per block
int whichKernel = 1; // default kernel to run
float *v, *dev_v, *dev_z;
uint *dev_a;
hiprandState_t *dev_randState;
hipDeviceProp_t prop;
struct kernel_arg argk;
struct time_it_raw *tr = time_it_create(10);
struct time_it_stats stats;
int ndev;
HANDLE_ERROR(hipGetDeviceCount(&ndev));
if(ndev < 1) {
fprintf(stderr, "No CUDA device found\n");
exit(-1);
}
HANDLE_ERROR(hipGetDeviceProperties(&prop, 0));
int sharedMemPerBlock = prop.sharedMemPerBlock;
int regsPerBlock = prop.regsPerBlock;
printf("GPU is a %s supporing CUDA level %d.%d\n", prop.name, prop.major, prop.minor);
printf(" It has %d SMs and a warp size of %d\n", prop.multiProcessorCount, prop.warpSize);
printf(" sharedMemPerBlock = %d, regsPerBlock = %d\n", sharedMemPerBlock, regsPerBlock);
printf(" clock rate = %d\n", prop.clockRate);
printf("Max Threads per Block = %d\n", prop.maxThreadsPerBlock);
for(int i = 1; i < argc; i++) {
if(strncmp(argv[i], "nblks=", strlen("nblks=")) == 0) {
nblks = atoi(argv[i]+strlen("nblks="));
if(nblks <= 0) {
fprintf(stderr, "bad option: %s, nblks must be positive\n", argv[i]);
exit(-1);
}
} else if(strncmp(argv[i], "m=", strlen("m=")) == 0) {
m = atoi(argv[i]+strlen("m="));
if(m < 0) {
fprintf(stderr, "bad option: %s, m must be non-negative\n", argv[i]);
exit(-1);
}
} else if(strncmp(argv[i], "tpb=", strlen("tpb=")) == 0) {
tpb = atoi(argv[i]+strlen("tpb="));
if(tpb <= 0) {
fprintf(stderr, "bad option: %s, tpb must be positive\n", argv[i]);
exit(-1);
}
} else if(strncmp(argv[i], "kern=", strlen("kern=")) == 0) {
whichKernel = atoi(argv[i]+strlen("kern="));
if((whichKernel < 1) || (2 < whichKernel)) {
fprintf(stderr, "bad option: %s, kern must be 1 or 2\n", argv[i]);
exit(-1);
}
} else {
fprintf(stderr, "unknown command-line argument: %s\n", argv[i]);
exit(-1);
}
}
// allocate and initialize v
int nv = nblks*tpb;
int szv = nv*sizeof(float);
v = (float *)malloc(szv);
v[0] = 0.123;
for(int i = 1; i < nv; i++)
v[i] = 3.8*v[i-1]*(1.0 - v[i-1]);
// allocate and initialize dev_v
HANDLE_ERROR(hipMalloc((void **)(&dev_v), szv));
HANDLE_ERROR(hipMemcpy(dev_v, v, szv, hipMemcpyHostToDevice));
HANDLE_ERROR(hipMalloc((void **)(&dev_z), szv));
HANDLE_ERROR(hipMalloc((void **)(&dev_a), szv*m));
HANDLE_ERROR(hipMalloc((void **)(&dev_randState), nv*sizeof(hiprandState_t))); //sizeof(hiprandState_t) = 48
// initialize argk
argk.n = nv;
argk.v = dev_v;
argk.z = dev_z;
argk.a = dev_a;
argk.m = m;
argk.nblks = nblks;
argk.tpb = tpb;
argk.warpSize = prop.warpSize;
argk.whichKernel = whichKernel;
argk.dev_randState = dev_randState;
// run the kernel and report timing info
time_it_run(tr, do_logmap, (void *)(&argk));
time_it_get_stats(tr, &stats);
HANDLE_ERROR(hipMemcpy(v, dev_v, szv, hipMemcpyDeviceToHost));
printf("mean(T) = %10.3e, std(T) = %10.3e\n", stats.mean, stats.std);
//clean up
hipFree(dev_v);
free(v);
time_it_free(tr);
exit(0);
}
| 51f686da1f6e7ba97ad9c38683c42f7875197da3.cu | #include <stdio.h>
#include <math.h>
#include <curand_kernel.h>
#include "time_it.h"
// HANDLE_ERROR is from "CUDA by Exmaple" by Sanders and Kandrot
// I found the actual source code at
// http://stackoverflow.com/questions/13245258/handle-error-not-found-error-in-cuda
// (The URL in the book for getting the source appears to be no longer
// available.)
static void HandleError( cudaError_t err,
const char *file,
int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
// struct for passing arguments through time_it_run to our kernel functions
struct kernel_arg {
uint n;
float *v, *z;
uint *a;
int m, nblks, tpb, warpSize, whichKernel;
curandState *dev_randState;
};
/*****************************************************
* print_vec: print the first few elements of a vector
******************************************************/
void print_vec(float *x, uint n, const char *fmt, const char *who) {
printf("%s = ", who);
for(int i = 0; i < n; i++) {
if(i > 0) printf(", ");
printf(fmt, x[i]);
}
if(n > 10) printf(", ...");
printf("\n");
}
void print_vec(uint *x, uint n, const char *fmt, const char *who) {
printf("%s = ", who);
for(int i = 0; i < n; i++) {
if(i > 0) printf(", ");
printf(fmt, x[i]);
}
if(n > 10) printf(", ...");
printf("\n");
}
/************************
* Change by Colin Stone, Rest taken from examples.cu by Mark R. Greenstreet
* logistic map
************************/
#define ALPHA 1.0f
__global__ void logmap(float *x, int n, int m) {
uint i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n)
for(int j = 0; j < m; j++)
x[i] = ALPHA * x[i] * (1.0f - x[i]);
}
void do_logmap(void *void_arg) {
struct kernel_arg *argk = (struct kernel_arg *)(void_arg);
// printf("RUNNING LOGMAP m=%d nblks=%d tpb=%d\n", argk->m, argk->nblks, argk->tpb);
logmap<<<argk->nblks,argk->tpb>>>(argk->v, argk->n, argk->m);
cudaDeviceSynchronize();
}
/************************
* Change by Colin Stone, Rest taken from examples.cu by Mark
* norm
************************/
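// reduce_sum_dev performs an in-place tree reduction of x[0..n-1] within one block:
// each pass folds the upper half (m = n/2 elements) onto the lower half, which also
// works when n is not a power of two, e.g. n = 5 -> 3 -> 2 -> 1.
// Every thread of the block must call it, since it uses __syncthreads().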
__device__ void reduce_sum_dev(uint n, float *x) {
uint myId = threadIdx.x;
for(uint m = n >> 1; m > 0; m = n >> 1) {
n -= m;
__syncthreads();
if(myId < m)
x[myId] += x[myId+n];
}
}
__global__ void norm(float *x, int n, float *z) {
uint i = blockDim.x * blockIdx.x + threadIdx.x;
uint blockBase = blockDim.x * blockIdx.x;
uint m = min(blockDim.x, n - blockBase);
if (i < n)
x[i] = pow(x[i], 2);
__syncthreads();
reduce_sum_dev(m, &(x[blockBase]));
if (i < n && threadIdx.x == 0)
z[blockIdx.x] = sqrt(x[i]);
}
void do_norm(void *void_arg) {
struct kernel_arg *argk = (struct kernel_arg *)(void_arg);
// printf("RUNNING NORM nblks=%d tpb=%d\n", argk->nblks, argk->tpb);
norm<<<argk->nblks,argk->tpb>>>(argk->v, argk->n, argk->z);
cudaDeviceSynchronize();
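// The extra passes below reduce the per-block partial results stored in z.
// As written they assume nblks <= 1024 so that one block of nblks threads can
// finish the job; the nblks > 1024 branch would exceed the threads-per-block limit.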
if (argk->nblks > 1024)
norm<<<1024,argk->nblks>>>(argk->z, 1024*argk->nblks, argk->z);
cudaDeviceSynchronize();
if (argk->nblks > 1)
norm<<<1,argk->nblks>>>(argk->z, 1*argk->nblks, argk->z);
cudaDeviceSynchronize();
}
/************************
* Change by Colin Stone, Rest taken from examples.cu by Mark
* random number generator
************************/
__global__ void setup_kernel(uint n, curandState *state) {
uint myId = blockDim.x * blockIdx.x + threadIdx.x;
if(myId < n)
curand_init(1234, myId, 0, &state[myId]);
}
__global__ void rndm(uint *a, int m, curandState *state) {
uint i = blockDim.x * blockIdx.x + threadIdx.x;
for(int j = 0; j < m; j++) {
// printf("idx=%d seed=%d\n", i*m+j, i);
a[i*m + j] = curand_uniform(&state[i])*1000;
}
}
void do_rndm(void *void_arg) {
struct kernel_arg *argk = (struct kernel_arg *)(void_arg);
// printf("RUNNING RNDM nblks=%d tpb=%d m=%d\n", argk->nblks, argk->tpb, argk->m);
setup_kernel<<<argk->nblks,argk->n>>>(argk->n, argk->dev_randState);
cudaDeviceSynchronize();
rndm<<<argk->nblks,argk->tpb>>>(argk->a, argk->m, argk->dev_randState);
cudaDeviceSynchronize();
// int n = argk->n*argk->m;
// int size = n*sizeof(uint);
// uint *a = (uint *)malloc(size);
// cudaMemcpy(a, argk->a, size, cudaMemcpyDeviceToHost);
// print_vec(a, min(10, n), "%d", "a");
}
/************************
* Rest of Code
************************/
int main(int argc, char **argv) {
int nblks = 24; // default number of blocks in the grid
int m = 1000; // default number of "rounds" in kernel
int tpb = 256; // default threads per block
int whichKernel = 1; // default kernel to run
float *v, *dev_v, *dev_z;
uint *dev_a;
curandState *dev_randState;
cudaDeviceProp prop;
struct kernel_arg argk;
struct time_it_raw *tr = time_it_create(10);
struct time_it_stats stats;
int ndev;
HANDLE_ERROR(cudaGetDeviceCount(&ndev));
if(ndev < 1) {
fprintf(stderr, "No CUDA device found\n");
exit(-1);
}
HANDLE_ERROR(cudaGetDeviceProperties(&prop, 0));
int sharedMemPerBlock = prop.sharedMemPerBlock;
int regsPerBlock = prop.regsPerBlock;
printf("GPU is a %s supporing CUDA level %d.%d\n", prop.name, prop.major, prop.minor);
printf(" It has %d SMs and a warp size of %d\n", prop.multiProcessorCount, prop.warpSize);
printf(" sharedMemPerBlock = %d, regsPerBlock = %d\n", sharedMemPerBlock, regsPerBlock);
printf(" clock rate = %d\n", prop.clockRate);
printf("Max Threads per Block = %d\n", prop.maxThreadsPerBlock);
for(int i = 1; i < argc; i++) {
if(strncmp(argv[i], "nblks=", strlen("nblks=")) == 0) {
nblks = atoi(argv[i]+strlen("nblks="));
if(nblks <= 0) {
fprintf(stderr, "bad option: %s, nblks must be positive\n", argv[i]);
exit(-1);
}
} else if(strncmp(argv[i], "m=", strlen("m=")) == 0) {
m = atoi(argv[i]+strlen("m="));
if(m < 0) {
fprintf(stderr, "bad option: %s, m must be non-negative\n", argv[i]);
exit(-1);
}
} else if(strncmp(argv[i], "tpb=", strlen("tpb=")) == 0) {
tpb = atoi(argv[i]+strlen("tpb="));
if(tpb <= 0) {
fprintf(stderr, "bad option: %s, tpb must be positive\n", argv[i]);
exit(-1);
}
} else if(strncmp(argv[i], "kern=", strlen("kern=")) == 0) {
whichKernel = atoi(argv[i]+strlen("kern="));
if((whichKernel < 1) || (2 < whichKernel)) {
fprintf(stderr, "bad option: %s, kern must be 1 or 2\n", argv[i]);
exit(-1);
}
} else {
fprintf(stderr, "unknown command-line argument: %s\n", argv[i]);
exit(-1);
}
}
// allocate and initialize v
int nv = nblks*tpb;
int szv = nv*sizeof(float);
v = (float *)malloc(szv);
v[0] = 0.123;
for(int i = 1; i < nv; i++)
v[i] = 3.8*v[i-1]*(1.0 - v[i-1]);
// allocate and initialize dev_v
HANDLE_ERROR(cudaMalloc((void **)(&dev_v), szv));
HANDLE_ERROR(cudaMemcpy(dev_v, v, szv, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMalloc((void **)(&dev_z), szv));
HANDLE_ERROR(cudaMalloc((void **)(&dev_a), szv*m));
HANDLE_ERROR(cudaMalloc((void **)(&dev_randState), nv*sizeof(curandState))); //sizeof(curandState) = 48
// initialize argk
argk.n = nv;
argk.v = dev_v;
argk.z = dev_z;
argk.a = dev_a;
argk.m = m;
argk.nblks = nblks;
argk.tpb = tpb;
argk.warpSize = prop.warpSize;
argk.whichKernel = whichKernel;
argk.dev_randState = dev_randState;
// run the kernel and report timing info
time_it_run(tr, do_logmap, (void *)(&argk));
time_it_get_stats(tr, &stats);
HANDLE_ERROR(cudaMemcpy(v, dev_v, szv, cudaMemcpyDeviceToHost));
printf("mean(T) = %10.3e, std(T) = %10.3e\n", stats.mean, stats.std);
//clean up
cudaFree(dev_v);
free(v);
time_it_free(tr);
exit(0);
}
|
e499a5e3c206af861d81a769dda0d06109e8a5f8.hip | // !!! This is a file automatically generated by hipify!!!
//build command : nvcc -std=c++11 -lcudnn -lcublas kernel.cu -o kernel
#include <stdio.h>
#include <assert.h>
#include <pthread.h>
#include "common/common.h"
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include "rocblas.h"
inline
hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
assert(result == hipSuccess);
}
#endif
return result;
}
///////////////////////////////////////////////////////////////////////
__global__ void staticReverse(int *d, int n)
{
__shared__ int s[64];
int t = threadIdx.x;
int trail = n-t-1;
s[t] = d[t];// write step : global to shared
__syncthreads();
d[t] = s[trail]; // read step : shared to global
}
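// dynamicReverse below does the same job but sizes s[] at launch time: the number
// of bytes is passed as the dynamic shared-memory argument of the launch
// (n*sizeof(int) in shared_memory_reverse).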
__global__ void dynamicReverse(int *d, int n)
{
extern __shared__ int s[];
int t = threadIdx.x;
int tr = n-t-1;
s[t] = d[t];
__syncthreads();
d[t] = s[tr];
}
int shared_memory_reverse(void)
{
const int n = 64;
int h_a[n], h_r[n], h_d[n];
for (int i = 0; i < n; i++) {
h_a[i] = i;
h_r[i] = n-i-1;
h_d[i] = 0;
}
printf("original array elemtns\n");
for (int i = 0; i < n; i++) {
printf("%d ",a[i]);
}
int *d_d;
hipMalloc(&d_d, n * sizeof(int));
//
hipMemcpy(d_d, h_a, n*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( staticReverse), dim3(1),dim3(n), 0, 0, d_d, n);
hipMemcpy(h_d, d_d, n*sizeof(int), hipMemcpyDeviceToHost);
for (int i = 0; i < n; i++)
if (h_d[i] != h_r[i]) printf("Error: d[%d]!=r[%d] (%d, %d)\n", i, i, h_d[i], h_r[i]);
//
hipMemcpy(d_d, h_a, n*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( dynamicReverse), dim3(1),dim3(n),n*sizeof(int), 0, d_d, n);
hipMemcpy(h_d, d_d, n * sizeof(int), hipMemcpyDeviceToHost);
printf("\nreverse results\n");
int flag=1;
for (int i = 0; i < n; i++)
if (h_d[i] != h_r[i]){ flag=0; printf("Error: d[%d]!=r[%d] (%d, %d)\n", i, i, h_d[i], h_r[i]);}
else printf("%d ",h_r[i]);
if(flag)printf("\nall array elements are correctly reversed\n");
hipFree(d_d);
return 0;
}
///////////////////////////////////////////////////////////////////////
__global__ void kernel(float *a, int offset)
{
int i = offset + threadIdx.x + blockIdx.x*blockDim.x;
float x = (float)i;
float s = sinf(x);
float c = cosf(x);
a[i] = a[i] + sqrtf(s*s+c*c);
}
float maxError(float *a, int n)
{
float maxE = 0;
for (int i = 0; i < n; i++) {
float error = fabs(a[i]-1.0f);
if (error > maxE) maxE = error;
}
return maxE;
}
int overlap(int argc, char **argv)
{
const int blockSize = 256, nStreams = 4;// blockSize=threadCount
const int n = 4 * 1024 * blockSize * nStreams;
const int streamSize = n / nStreams;// == one stream size == 4 * 1024 * blockSize
const int streamBytes = streamSize * sizeof(float);
const int total_bytes = n * sizeof(float);
int devId = 0;
if (argc > 1) devId = atoi(argv[1]);
hipDeviceProp_t prop;
checkCuda( hipGetDeviceProperties(&prop, devId));
printf("Device : %s\n", prop.name);
checkCuda( hipSetDevice(devId) );
// allocate pinned host memory and device memory
float *a, *d_a;
checkCuda( hipHostMalloc((void**)&a, total_bytes) ); // host pinned
checkCuda( hipMalloc((void**)&d_a, total_bytes) ); // device
float ms; // milliseconds
// create events and streams
hipEvent_t startEvent, stopEvent, dummyEvent;
hipStream_t stream[nStreams];
checkCuda( hipEventCreate(&startEvent) );
checkCuda( hipEventCreate(&stopEvent) );
checkCuda( hipEventCreate(&dummyEvent) );
for (int i = 0; i < nStreams; ++i)
checkCuda( hipStreamCreate(&stream[i]) );
// baseline case - sequential transfer and execute
memset(a, 0, total_bytes);
checkCuda( hipEventRecord(startEvent,0) );
checkCuda( hipMemcpy(d_a, a, total_bytes, hipMemcpyHostToDevice) );
hipLaunchKernelGGL(( kernel), dim3(n/blockSize), dim3(blockSize), 0, 0, d_a, 0);//gridSize=4*1024(blockCount)
checkCuda( hipMemcpy(a, d_a, total_bytes, hipMemcpyDeviceToHost) );
checkCuda( hipEventRecord(stopEvent, 0) );
checkCuda( hipEventSynchronize(stopEvent) );
checkCuda( hipEventElapsedTime(&ms, startEvent, stopEvent) );
printf("Time for sequential transfer and execute (ms): %f\n", ms);
printf(" max error: %e\n", maxError(a, n));
// asynchronous version 1: loop over {copy, kernel, copy}
memset(a, 0, total_bytes);
checkCuda( hipEventRecord(startEvent,0) );
for (int i = 0; i < nStreams; ++i) {
int offset = i * streamSize;
checkCuda( hipMemcpyAsync(&d_a[offset], &a[offset],
streamBytes, hipMemcpyHostToDevice,
stream[i]) );
hipLaunchKernelGGL(( kernel), dim3(streamSize/blockSize), dim3(blockSize), 0, stream[i], d_a, offset);
checkCuda( hipMemcpyAsync(&a[offset], &d_a[offset],
streamBytes, hipMemcpyDeviceToHost,
stream[i]) );
}
checkCuda( hipEventRecord(stopEvent, 0) );
checkCuda( hipEventSynchronize(stopEvent) );
checkCuda( hipEventElapsedTime(&ms, startEvent, stopEvent) );
printf("Time for asynchronous V1 transfer and execute (ms): %f\n", ms);
printf(" max error: %e\n", maxError(a, n));
// asynchronous version 2:
// loop over copy, loop over kernel, loop over copy
memset(a, 0, total_bytes);
checkCuda( hipEventRecord(startEvent,0) );
for (int i = 0; i < nStreams; ++i)
{
int offset = i * streamSize;
checkCuda( hipMemcpyAsync(&d_a[offset], &a[offset],
streamBytes, hipMemcpyHostToDevice,
stream[i]) );
}
for (int i = 0; i < nStreams; ++i)
{
int offset = i * streamSize;
hipLaunchKernelGGL(( kernel), dim3(streamSize/blockSize), dim3(blockSize), 0, stream[i], d_a, offset);
}
for (int i = 0; i < nStreams; ++i)
{
int offset = i * streamSize;
checkCuda( hipMemcpyAsync(&a[offset], &d_a[offset],
streamBytes, hipMemcpyDeviceToHost,
stream[i]) );
}
checkCuda( hipEventRecord(stopEvent, 0) );
checkCuda( hipEventSynchronize(stopEvent) );
checkCuda( hipEventElapsedTime(&ms, startEvent, stopEvent) );
printf("Time for asynchronous V2 transfer and execute (ms): %f\n", ms);
printf(" max error: %e\n", maxError(a, n));
// cleanup
checkCuda( hipEventDestroy(startEvent) );
checkCuda( hipEventDestroy(stopEvent) );
checkCuda( hipEventDestroy(dummyEvent) );
for (int i = 0; i < nStreams; ++i)
checkCuda( hipStreamDestroy(stream[i]) );
hipFree(d_a);
hipHostFree(a);
return 0;
}
///////////////////////////////////////////////////////////////////////
void profileCopies(float *h_a,
float *h_b,
float *d,
unsigned int n,
char *desc)
{
printf("\n%s transfers\n", desc);
unsigned int bytes = n * sizeof(float);
hipEvent_t startEvent, stopEvent;
checkCuda( hipEventCreate(&startEvent) );
checkCuda( hipEventCreate(&stopEvent) );
checkCuda( hipEventRecord(startEvent, 0) );
checkCuda( hipMemcpy(d, h_a, bytes, hipMemcpyHostToDevice) );
checkCuda( hipEventRecord(stopEvent, 0) );
checkCuda( hipEventSynchronize(stopEvent) );
float time;
checkCuda( hipEventElapsedTime(&time, startEvent, stopEvent) );
printf(" Host to Device bandwidth (GB/s): %f\n", bytes * 1e-6 / time);
checkCuda( hipEventRecord(startEvent, 0) );
checkCuda( hipMemcpy(h_b, d, bytes, hipMemcpyDeviceToHost) );
checkCuda( hipEventRecord(stopEvent, 0) );
checkCuda( hipEventSynchronize(stopEvent) );
checkCuda( hipEventElapsedTime(&time, startEvent, stopEvent) );
printf(" Device to Host bandwidth (GB/s): %f\n", bytes * 1e-6 / time);
for (int i = 0; i < n; ++i) {
if (h_a[i] != h_b[i]) {
printf("*** %s transfers failed ***", desc);
break;
}
}
// clean up events
checkCuda( hipEventDestroy(startEvent) );
checkCuda( hipEventDestroy(stopEvent) );
}
int data_transfer_pageable_vs_pinned()
{
unsigned int nElements = 4*1024*1024;
const unsigned int bytes = nElements * sizeof(float);
// host arrays
float *h_aPageable, *h_bPageable;
float *h_aPinned, *h_bPinned;
// device array
float *d_a;
// allocate and initialize host/device memory
h_aPageable = (float*)malloc(bytes); // pageable
h_bPageable = (float*)malloc(bytes); // pageable
checkCuda( hipHostMalloc((void**)&h_aPinned, bytes) ); // pinned
checkCuda( hipHostMalloc((void**)&h_bPinned, bytes) ); // pinned
checkCuda( hipMalloc((void**)&d_a, bytes) ); //
for (int i = 0; i < nElements; ++i) h_aPageable[i] = i;
memcpy(h_aPinned, h_aPageable, bytes);
memset(h_bPageable, 0, bytes);
memset(h_bPinned, 0, bytes);
hipDeviceProp_t prop;
checkCuda( hipGetDeviceProperties(&prop, 0) );
printf("\nDevice: %s\n", prop.name);
printf("Transfer size (MB): %d\n", bytes / (1024 * 1024));
// perform copies and report bandwidth
profileCopies(h_aPageable, h_bPageable, d_a, nElements, "Pageable");
profileCopies(h_aPinned, h_bPinned, d_a, nElements, "Pinned");
profileCopies(h_aPageable, h_bPageable, d_a, nElements, "Pageable");
profileCopies(h_aPinned, h_bPinned, d_a, nElements, "Pinned");
profileCopies(h_aPageable, h_bPageable, d_a, nElements, "Pageable");
profileCopies(h_aPinned, h_bPinned, d_a, nElements, "Pinned");
profileCopies(h_aPageable, h_bPageable, d_a, nElements, "Pageable");
profileCopies(h_aPinned, h_bPinned, d_a, nElements, "Pinned");
printf("\n");
// cleanup
hipFree(d_a);
hipHostFree(h_aPinned);
hipHostFree(h_bPinned);
free(h_aPageable);
free(h_bPageable);
return 0;
}
///////////////////////////////////////////////////////////////////////
template <typename T>
__global__ void offset(T* a, int s)
{
int i = blockDim.x * blockIdx.x + threadIdx.x + s;
a[i] = a[i] + 1;
}
template <typename T>
__global__ void stride(T* a, int s)
{
int i = (blockDim.x * blockIdx.x + threadIdx.x) * s;
a[i] = a[i] + 1;
}
template <typename T>
void runTest(int deviceId, int nMB)
{
int blockSize = 256;
float ms;
T *d_a;
hipEvent_t startEvent, stopEvent;
int n = nMB*1024*1024/sizeof(T);
// NB: d_a is allocated with 33*nMB elements so that offset/stride accesses up to 32 stay in bounds
checkCuda( hipMalloc(&d_a, n * 33 * sizeof(T)) );
checkCuda( hipEventCreate(&startEvent) );
checkCuda( hipEventCreate(&stopEvent) );
printf("Offset, Bandwidth (GB/s):\n");
hipLaunchKernelGGL(( offset), dim3(n/blockSize), dim3(blockSize), 0, 0, d_a, 0); // warm up
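// Offset test: the kernel accesses a[i + offset], so a non-zero offset makes each
// warp's 128-byte request misaligned with cache-line boundaries; the extra memory
// transactions show up as reduced effective bandwidth.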
for (int i = 0; i <= 32; i++) {
checkCuda( hipMemset(d_a, 0, n * sizeof(T)) );
checkCuda( hipEventRecord(startEvent,0) );
hipLaunchKernelGGL(( offset), dim3(n/blockSize), dim3(blockSize), 0, 0, d_a, i);
checkCuda( hipEventRecord(stopEvent,0) );
checkCuda( hipEventSynchronize(stopEvent) );
checkCuda( hipEventElapsedTime(&ms, startEvent, stopEvent) );
printf("%d, %f\n", i, 2*nMB/ms);
}
printf("\n");
printf("Stride, Bandwidth (GB/s):\n");
hipLaunchKernelGGL(( stride), dim3(n/blockSize), dim3(blockSize), 0, 0, d_a, 1); // warm up
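// Stride test: the kernel accesses a[i * stride]; as the stride grows, the 32
// accesses of a warp spread over more cache lines (up to one line per thread),
// so effective bandwidth drops roughly in proportion.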
for (int i = 1; i <= 32; i++) {
checkCuda( hipMemset(d_a, 0, n * sizeof(T)) );
checkCuda( hipEventRecord(startEvent,0) );
hipLaunchKernelGGL(( stride), dim3(n/blockSize), dim3(blockSize), 0, 0, d_a, i);
checkCuda( hipEventRecord(stopEvent,0) );
checkCuda( hipEventSynchronize(stopEvent) );
checkCuda( hipEventElapsedTime(&ms, startEvent, stopEvent) );
printf("%d, %f\n", i, 2*nMB/ms);
}
checkCuda( hipEventDestroy(startEvent) );
checkCuda( hipEventDestroy(stopEvent) );
hipFree(d_a);
}
//------------------------------------------------------------------
int coaleascing(int argc, char **argv)
{
int nMB = 4;
int deviceId = 0;
bool bFp64 = false;
for (int i = 1; i < argc; i++) {
if (!strncmp(argv[i], "dev=", 4))
deviceId = atoi((char*)(&argv[i][4]));
else if (!strcmp(argv[i], "fp64"))
bFp64 = true;
}
hipDeviceProp_t prop;
checkCuda( hipSetDevice(deviceId) );
checkCuda( hipGetDeviceProperties(&prop, deviceId) );
printf("Device: %s\n", prop.name);
printf("Transfer size (MB): %d\n", nMB);
printf("%s Precision\n", bFp64 ? "Double" : "Single");
if (bFp64) runTest<double>(deviceId, nMB);
else runTest<float>(deviceId, nMB);
}
///////////////////////////////////////////////////////////////////////
const int N = 1 << 20;
__global__ void kernel_target(float *x, int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < n; i += blockDim.x * gridDim.x) {
x[i] = sqrt(pow(3.14159,i));
}
}
void *launch_kernel(void *dummy){
float *data;
hipMalloc(&data, N * sizeof(float));
hipLaunchKernelGGL(( kernel_target), dim3(1), dim3(64), 0, 0, data, N);
hipStreamSynchronize(0);
return NULL;
}
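// Note: launch_kernel issues its kernel into the legacy default stream (stream 0).
// Unless the file is built with per-thread default streams (e.g. nvcc's
// --default-stream per-thread), kernels launched from the pthreads in multithread()
// below serialize on the device instead of running concurrently.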
int multithread(){
const int num_threads = 8;
pthread_t threads[num_threads];
for (int i = 0; i < num_threads; i++) {
if (pthread_create(&threads[i], NULL, launch_kernel, 0)) {
fprintf(stderr, "Error creating threadn");
}
}
for (int i = 0; i < num_threads; i++) {
if(pthread_join(threads[i], NULL)) {
fprintf(stderr, "Error joining threadn");
return 2;
}
}
hipDeviceReset();
return 0;
}
///////////////////////////////////////////////////////////////////////
int ROWS = 1024;
int COLS = 1024;
void generate_random_dense_matrix(int M, int N, float **outA)
{
int i, j;
double rand_max = (double)RAND_MAX;
float *A = (float *)malloc(sizeof(float) * M * N);
for (j = 0; j < N; j++){//
for (i = 0; i < M; i++){//
double drand = (double)rand();
A[j * M + i] = (drand / rand_max) * 100.0; //0-100
}
}
*outA = A;
}
int cublasMM(int argc, char **argv)
{
int i, j;
float *A, *dA;
float *B, *dB;
float *C, *dC;
float beta;
float alpha;
hipblasHandle_t handle = 0;
alpha = 3.0f;
beta = 4.0f;
int N = ROWS;
int M = COLS;
// generate inputs
srand(9384);
generate_random_dense_matrix(M, N, &A);
generate_random_dense_matrix(N, M, &B);
C = (float *)malloc(sizeof(float) * M * M);
memset(C, 0x00, sizeof(float) * M * M);
// create the cuBLAS handle
CHECK_CUBLAS(hipblasCreate(&handle));
// allocate device memory
CHECK(hipMalloc((void **)&dA, sizeof(float) * M * N));
CHECK(hipMalloc((void **)&dB, sizeof(float) * N * M));
CHECK(hipMalloc((void **)&dC, sizeof(float) * M * M));
// transfer inputs to the device
CHECK_CUBLAS(hipblasSetMatrix(M, N, sizeof(float), A, M, dA, M));
CHECK_CUBLAS(hipblasSetMatrix(N, M, sizeof(float), B, N, dB, N));
CHECK_CUBLAS(hipblasSetMatrix(M, M, sizeof(float), C, M, dC, M));
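// cuBLAS assumes column-major storage: generate_random_dense_matrix already fills
// A and B column by column, and M and N are passed as the leading dimensions of
// dA, dB and dC in the hipblasSgemm call below.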
// execute the matrix-matrix multiplication: C = alpha * A * B + beta * C
CHECK_CUBLAS(hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, M, M, N, &alpha,
dA, M, dB, N, &beta, dC, M));
// retrieve the output matrix from the device and print part of it
CHECK_CUBLAS(hipblasGetMatrix(M, M, sizeof(float), dC, M, C, M));
for (j = 0; j < 10; j++)
{
for (i = 0; i < 10; i++)
{
printf("%2.2f ", C[j * M + i]);
}
printf("...\n");
}
printf("...\n");
free(A);
free(B);
free(C);
CHECK(hipFree(dA));
CHECK(hipFree(dB));
CHECK(hipFree(dC));
CHECK_CUBLAS(hipblasDestroy(handle));
return 0;
}
///////////////////////////////////////////////////////////////////////
int cublasMMAsync(int argc, char **argv)
{
int i, j;
float *A, *dA;
float *B, *dB;
float *C, *dC;
float beta;
float alpha;
hipblasHandle_t handle = 0;
hipStream_t stream = 0;
alpha = 3.0f;
beta = 4.0f;
int N = ROWS;
int M = COLS;
// generate inputs
srand(9384);
generate_random_dense_matrix(M, N, &A);
generate_random_dense_matrix(N, M, &B);
C = (float *)malloc(sizeof(float) * M * M);
memset(C, 0x00, sizeof(float) * M * M);
// create the cuBLAS handle and bind it to a stream
CHECK_CUBLAS(hipblasCreate(&handle));
CHECK(hipStreamCreate(&stream));
CHECK_CUBLAS(hipblasSetStream(handle, stream));
// allocate device memory
CHECK(hipMalloc((void **)&dA, sizeof(float) * M * N));
CHECK(hipMalloc((void **)&dB, sizeof(float) * N * M));
CHECK(hipMalloc((void **)&dC, sizeof(float) * M * M));
// asynchronously transfer inputs to the device
CHECK_CUBLAS(hipblasSetMatrixAsync(M, N, sizeof(float), A, M, dA, M, stream));
CHECK_CUBLAS(hipblasSetMatrixAsync(N, M, sizeof(float), B, N, dB, N, stream));
CHECK_CUBLAS(hipblasSetMatrixAsync(M, M, sizeof(float), C, M, dC, M, stream));
// execute the matrix-matrix multiplication: C = alpha * A * B + beta * C
CHECK_CUBLAS(hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, M, M, N, &alpha,
dA, M, dB, N, &beta, dC, M));
// asynchronously retrieve the output matrix from the device
CHECK_CUBLAS(hipblasGetMatrixAsync(M, M, sizeof(float), dC, M, C, M,
stream));
CHECK(hipStreamSynchronize(stream));
for (j = 0; j < 10; j++)
{
for (i = 0; i < 10; i++)
{
printf("%2.2f ", C[j * M + i]);
}
printf("...\n");
}
printf("...\n");
free(A);
free(B);
free(C);
CHECK(hipFree(dA));
CHECK(hipFree(dB));
CHECK(hipFree(dC));
CHECK(hipStreamDestroy(stream));
CHECK_CUBLAS(hipblasDestroy(handle));
return 0;
}
///////////////////////////////////////////////////////////////////////
__global__ void kernel(float *g_data, float value)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
g_data[idx] = g_data[idx] + value;
}
int checkResult(float *data, const int n, const float x)
{
for (int i = 0; i < n; i++)
{
if (data[i] != x)
{
printf("Error! data[%d] = %f, ref = %f\n", i, data[i], x);
return 0;
}
}
return 1;
}
int async(int argc, char *argv[])
{
int devID = 0;
hipDeviceProp_t deviceProps;
CHECK(hipGetDeviceProperties(&deviceProps, devID));
printf("> %s running on", argv[0]);
printf(" CUDA device [%s]\n", deviceProps.name);
int num = 1 << 24;
int nbytes = num * sizeof(int);
float value = 10.0f;
// allocate pinned host memory
float *h_a = 0;
CHECK(hipHostMalloc((void **)&h_a, nbytes));
memset(h_a, 0, nbytes);
// allocate device memory
float *d_a = 0;
CHECK(hipMalloc((void **)&d_a, nbytes));
CHECK(hipMemset(d_a, 255, nbytes));
// set kernel launch configuration
dim3 block = dim3(512);
dim3 grid = dim3((num + block.x - 1) / block.x);
// create cuda event handle
hipEvent_t stop;
CHECK(hipEventCreate(&stop));
// asynchronously issue work to the GPU (all to stream 0)
CHECK(hipMemcpyAsync(d_a, h_a, nbytes, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kernel), dim3(grid), dim3(block), 0, 0, d_a, value);
CHECK(hipMemcpyAsync(h_a, d_a, nbytes, hipMemcpyDeviceToHost));
CHECK(hipEventRecord(stop));
// have the CPU do some work while waiting for the GPU to finish
unsigned long int counter = 0;
while (hipEventQuery(stop) == hipErrorNotReady) {
counter++;
}
printf("CPU executed %lu iterations while waiting for GPU to finish\n",
counter);
bool bFinalResults = (bool) checkResult(h_a, num, value);
CHECK(hipEventDestroy(stop));
CHECK(hipHostFree(h_a));
CHECK(hipFree(d_a));
CHECK(hipDeviceReset());
exit(bFinalResults ? EXIT_SUCCESS : EXIT_FAILURE);
}
///////////////////////////////////////////////////////////////////////
#define BDIMX 32
#define BDIMY 32
#define IPAD 1
void printData(char *msg, int *in, const int size)
{
printf("%s: ", msg);
for (int i = 0; i < size; i++)
{
printf("%5d", in[i]);
fflush(stdout);
}
printf("\n");
return;
}
__global__ void setRowReadRow (int *out)
{
// static shared memory
__shared__ int tile[BDIMY][BDIMX]; // x, y
unsigned int idx = threadIdx.y * blockDim.x + threadIdx.x;
tile[threadIdx.y][threadIdx.x] = idx; // x, y
__syncthreads();
out[idx] = tile[threadIdx.y][threadIdx.x] ;// x, y
}
__global__ void setColReadCol (int *out)
{
// static shared memory
__shared__ int tile[BDIMX][BDIMY]; // y, x
unsigned int idx = threadIdx.y * blockDim.x + threadIdx.x;
tile[threadIdx.x][threadIdx.y] = idx;// y, x
__syncthreads();
out[idx] = tile[threadIdx.x][threadIdx.y];// y, x
}
__global__ void setRowReadCol(int *out)
{
// static shared memory
__shared__ int tile[BDIMY][BDIMX];
unsigned int idx = threadIdx.y * blockDim.x + threadIdx.x;
tile[threadIdx.y][threadIdx.x] = idx;
__syncthreads();
out[idx] = tile[threadIdx.x][threadIdx.y];
}
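// In setRowReadCol the store is row-wise but the load tile[threadIdx.x][threadIdx.y]
// walks down a column: the 32 threads of a warp read words that are 32 elements apart,
// which all map to the same shared-memory bank (4-byte mode) and cause a 32-way
// bank conflict on the load.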
__global__ void setRowReadColDyn(int *out)
{
// dynamic shared memory
extern __shared__ int tile[];
unsigned int row_idx = threadIdx.y * blockDim.x + threadIdx.x;
unsigned int col_idx = threadIdx.x * blockDim.y + threadIdx.y;
tile[row_idx] = row_idx;
__syncthreads();
out[row_idx] = tile[col_idx];
}
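// Padding each row with IPAD extra element(s) makes consecutive rows start in
// different banks, so the column-wise load in the padded kernels below touches 32
// distinct banks and the conflict disappears at the cost of a little unused shared memory.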
__global__ void setRowReadColPad(int *out)
{
// static shared memory with one column of padding
__shared__ int tile[BDIMY][BDIMX + IPAD];
unsigned int idx = threadIdx.y * blockDim.x + threadIdx.x;
tile[threadIdx.y][threadIdx.x] = idx;
__syncthreads();
out[idx] = tile[threadIdx.x][threadIdx.y];
}
__global__ void setRowReadColDynPad(int *out)
{
// dynamic shared memory with one column of padding
extern __shared__ int tile[];
unsigned int row_idx = threadIdx.y * (blockDim.x + IPAD) + threadIdx.x;
unsigned int col_idx = threadIdx.x * (blockDim.x + IPAD) + threadIdx.y;
unsigned int g_idx = threadIdx.y * blockDim.x + threadIdx.x;
tile[row_idx] = g_idx;
__syncthreads();
out[g_idx] = tile[col_idx];
}
int smemSquare(int argc, char **argv)
{
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("%s at ", argv[0]);
printf("device %d: %s ", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
hipSharedMemConfig pConfig;
CHECK(hipDeviceGetSharedMemConfig ( &pConfig ));
printf("with Bank Mode:%s ", pConfig == 1 ? "4-Byte" : "8-Byte");
// set the array size
int nx = BDIMX;
int ny = BDIMY;
bool iprintf = 0;
if (argc > 1) iprintf = atoi(argv[1]);
size_t nBytes = nx * ny * sizeof(int);
// set up the execution configuration
dim3 block (BDIMX, BDIMY);
dim3 grid (1, 1);
printf("<<< grid (%d,%d) block (%d,%d)>>>\n", grid.x, grid.y, block.x,
block.y);
// allocate device memory
int *d_C;
CHECK(hipMalloc((int**)&d_C, nBytes));
int *gpuRef = (int *)malloc(nBytes);
CHECK(hipMemset(d_C, 0, nBytes));
hipLaunchKernelGGL(( setColReadCol), dim3(grid), dim3(block), 0, 0, d_C);
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
if(iprintf) printData("set col read col ", gpuRef, nx * ny);
CHECK(hipMemset(d_C, 0, nBytes));
hipLaunchKernelGGL(( setRowReadRow), dim3(grid), dim3(block), 0, 0, d_C);
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
if(iprintf) printData("set row read row ", gpuRef, nx * ny);
CHECK(hipMemset(d_C, 0, nBytes));
hipLaunchKernelGGL(( setRowReadCol), dim3(grid), dim3(block), 0, 0, d_C);
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
if(iprintf) printData("set row read col ", gpuRef, nx * ny);
CHECK(hipMemset(d_C, 0, nBytes));
hipLaunchKernelGGL(( setRowReadColDyn), dim3(grid), dim3(block), BDIMX*BDIMY*sizeof(int), 0, d_C);
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
if(iprintf) printData("set row read col dyn", gpuRef, nx * ny);
CHECK(hipMemset(d_C, 0, nBytes));
hipLaunchKernelGGL(( setRowReadColPad), dim3(grid), dim3(block), 0, 0, d_C);
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
if(iprintf) printData("set row read col pad", gpuRef, nx * ny);
CHECK(hipMemset(d_C, 0, nBytes));
hipLaunchKernelGGL(( setRowReadColDynPad), dim3(grid), dim3(block), (BDIMX + IPAD)*BDIMY*sizeof(int), 0, d_C);
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
if(iprintf) printData("set row read col DP ", gpuRef, nx * ny);
CHECK(hipFree(d_C));
free(gpuRef);
CHECK(hipDeviceReset());
return EXIT_SUCCESS;
}
///////////////////////////////////////////////////////////////////////
#define DIM 128
extern __shared__ int dsmem[];
int recursiveReduce(int *data, int const size)
{
if (size == 1) return data[0];
int const stride = size / 2;
for (int i = 0; i < stride; i++)
data[i] += data[i + stride];
return recursiveReduce(data, stride);
}
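// Note on the reduction kernels below: each block reduces its chunk in place
// and assumes blockDim.x is a power of two (at most 1024). The final 32
// elements are summed without __syncthreads() through a volatile pointer,
// which relies on implicit warp-synchronous execution; on Volta and newer
// architectures __syncwarp() or warp shuffles are the recommended replacement.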
// reduction in global memory with the last warp completely unrolled
__global__ void reduceGmem(int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
int *idata = g_idata + blockIdx.x * blockDim.x;
// boundary check
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n) return;
// in-place reduction in global memory
if (blockDim.x >= 1024 && tid < 512) idata[tid] += idata[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256) idata[tid] += idata[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128) idata[tid] += idata[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64) idata[tid] += idata[tid + 64];
__syncthreads();
// unrolling warp
if (tid < 32)
{
volatile int *vsmem = idata;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceSmem(int *g_idata, int *g_odata, unsigned int n)
{
__shared__ int smem[DIM];
// set thread ID
unsigned int tid = threadIdx.x;
// boundary check
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n) return;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
// set to smem by each threads
smem[tid] = idata[tid];
__syncthreads();
// in-place reduction in shared memory
if (blockDim.x >= 1024 && tid < 512) smem[tid] += smem[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256) smem[tid] += smem[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128) smem[tid] += smem[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64) smem[tid] += smem[tid + 64];
__syncthreads();
// unrolling warp
if (tid < 32)
{
volatile int *vsmem = smem;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
__global__ void reduceSmemDyn(int *g_idata, int *g_odata, unsigned int n)
{
extern __shared__ int smem[];
// set thread ID
unsigned int tid = threadIdx.x;
int *idata = g_idata + blockIdx.x * blockDim.x;
// set to smem by each threads
smem[tid] = idata[tid];
__syncthreads();
// in-place reduction in shared memory
if (blockDim.x >= 1024 && tid < 512) smem[tid] += smem[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256) smem[tid] += smem[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128) smem[tid] += smem[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64) smem[tid] += smem[tid + 64];
__syncthreads();
// unrolling warp
if (tid < 32)
{
volatile int *vsmem = smem;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
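// The *Unroll kernels below additionally have each thread load and pre-sum
// four input blocks (blockIdx.x * blockDim.x * 4 elements per block), so the
// host launches them with one quarter of the blocks (grid.x / 4).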
// unroll4 + complete unroll for loop + gmem
__global__ void reduceGmemUnroll(int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 4;
// unrolling 4
if (idx < n)
{
int a1, a2, a3, a4;
a1 = a2 = a3 = a4 = 0;
a1 = g_idata[idx];
if (idx + blockDim.x < n) a2 = g_idata[idx + blockDim.x];
if (idx + 2 * blockDim.x < n) a3 = g_idata[idx + 2 * blockDim.x];
if (idx + 3 * blockDim.x < n) a4 = g_idata[idx + 3 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4;
}
__syncthreads();
// in-place reduction in global memory
if (blockDim.x >= 1024 && tid < 512) idata[tid] += idata[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256) idata[tid] += idata[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128) idata[tid] += idata[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64) idata[tid] += idata[tid + 64];
__syncthreads();
// unrolling warp
if (tid < 32)
{
volatile int *vsmem = idata;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceSmemUnroll(int *g_idata, int *g_odata, unsigned int n)
{
// static shared memory
__shared__ int smem[DIM];
// set thread ID
unsigned int tid = threadIdx.x;
// global index, 4 blocks of input data processed at a time
unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x;
// unrolling 4 blocks
int tmpSum = 0;
// boundary check
if (idx < n)
{
int a1, a2, a3, a4;
a1 = a2 = a3 = a4 = 0;
a1 = g_idata[idx];
if (idx + blockDim.x < n) a2 = g_idata[idx + blockDim.x];
if (idx + 2 * blockDim.x < n) a3 = g_idata[idx + 2 * blockDim.x];
if (idx + 3 * blockDim.x < n) a4 = g_idata[idx + 3 * blockDim.x];
tmpSum = a1 + a2 + a3 + a4;
}
smem[tid] = tmpSum;
__syncthreads();
// in-place reduction in shared memory
if (blockDim.x >= 1024 && tid < 512) smem[tid] += smem[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256) smem[tid] += smem[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128) smem[tid] += smem[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64) smem[tid] += smem[tid + 64];
__syncthreads();
// unrolling warp
if (tid < 32)
{
volatile int *vsmem = smem;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
__global__ void reduceSmemUnrollDyn(int *g_idata, int *g_odata, unsigned int n)
{
extern __shared__ int smem[];
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x;
// unrolling 4
int tmpSum = 0;
if (idx < n)
{
int a1, a2, a3, a4;
a1 = a2 = a3 = a4 = 0;
a1 = g_idata[idx];
if (idx + blockDim.x < n) a2 = g_idata[idx + blockDim.x];
if (idx + 2 * blockDim.x < n) a3 = g_idata[idx + 2 * blockDim.x];
if (idx + 3 * blockDim.x < n) a4 = g_idata[idx + 3 * blockDim.x];
tmpSum = a1 + a2 + a3 + a4;
}
smem[tid] = tmpSum;
__syncthreads();
// in-place reduction in shared memory
if (blockDim.x >= 1024 && tid < 512) smem[tid] += smem[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256) smem[tid] += smem[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128) smem[tid] += smem[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64) smem[tid] += smem[tid + 64];
__syncthreads();
// unrolling warp
if (tid < 32)
{
volatile int *vsmem = smem;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
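// The two "Neighbored" kernels below are the naive interleaved-pair baseline:
// the (tid % (2 * stride)) == 0 test leaves only every other active thread
// doing useful work at each step, which causes heavy warp divergence; they
// are kept here for comparison against the unrolled versions above.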
__global__ void reduceNeighboredGmem(int *g_idata, int *g_odata,
unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
// boundary check
if (idx >= n) return;
// in-place reduction in global memory
for (int stride = 1; stride < blockDim.x; stride *= 2)
{
if ((tid % (2 * stride)) == 0)
{
idata[tid] += idata[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceNeighboredSmem(int *g_idata, int *g_odata,
unsigned int n)
{
__shared__ int smem[DIM];
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
// boundary check
if (idx >= n) return;
smem[tid] = idata[tid];
__syncthreads();
// in-place reduction in shared memory
for (int stride = 1; stride < blockDim.x; stride *= 2)
{
if ((tid % (2 * stride)) == 0)
{
smem[tid] += smem[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
int reduceSum(int argc, char **argv)
{
// set up the device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("%s starting reduction at ", argv[0]);
printf("device %d: %s ", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
bool bResult = false;
// initialization
int size = 1 << 22;
printf(" with array size %d ", size);
// set up the execution configuration
int blocksize = DIM;
dim3 block (blocksize, 1);
dim3 grid ((size + block.x - 1) / block.x, 1);
printf("grid %d block %d\n", grid.x, block.x);
// allocate host memory
size_t bytes = size * sizeof(int);
int *h_idata = (int *) malloc(bytes);
int *h_odata = (int *) malloc(grid.x * sizeof(int));
int *tmp = (int *) malloc(bytes);
// initialize the array
for (int i = 0; i < size; i++)
{
h_idata[i] = (int)( rand() & 0xFF );
}
memcpy (tmp, h_idata, bytes);
int gpu_sum = 0;
// allocate device memory
int *d_idata = NULL;
int *d_odata = NULL;
CHECK(hipMalloc((void **) &d_idata, bytes));
CHECK(hipMalloc((void **) &d_odata, grid.x * sizeof(int)));
int cpu_sum = recursiveReduce (tmp, size);
printf("cpu reduce : %d\n", cpu_sum);
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( reduceNeighboredGmem), dim3(grid.x), dim3(block), 0, 0, d_idata, d_odata, size);
CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int), hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf("reduceNeighboredGmem: %d <<<grid %d block %d>>>\n", gpu_sum, grid.x, block.x);
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( reduceNeighboredSmem), dim3(grid.x), dim3(block), 0, 0, d_idata, d_odata, size);
CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int), hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf("reduceNeighboredSmem: %d <<<grid %d block %d>>>\n", gpu_sum, grid.x, block.x);
// reduce gmem
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( reduceGmem), dim3(grid.x), dim3(block), 0, 0, d_idata, d_odata, size);
CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf("reduceGmem : %d <<<grid %d block %d>>>\n", gpu_sum, grid.x,
block.x);
// reduce smem
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( reduceSmem), dim3(grid.x), dim3(block), 0, 0, d_idata, d_odata, size);
CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf("reduceSmem : %d <<<grid %d block %d>>>\n", gpu_sum, grid.x,
block.x);
// reduce smem
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( reduceSmemDyn), dim3(grid.x), dim3(block), blocksize*sizeof(int), 0, d_idata, d_odata,
size);
CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf("reduceSmemDyn : %d <<<grid %d block %d>>>\n", gpu_sum, grid.x,
block.x);
// reduce gmem
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( reduceGmemUnroll), dim3(grid.x / 4), dim3(block), 0, 0, d_idata, d_odata, size);
CHECK(hipMemcpy(h_odata, d_odata, grid.x / 4 * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x / 4; i++) gpu_sum += h_odata[i];
printf("reduceGmemUnroll4 : %d <<<grid %d block %d>>>\n", gpu_sum,
grid.x / 4, block.x);
// reduce smem
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( reduceSmemUnroll), dim3(grid.x / 4), dim3(block), 0, 0, d_idata, d_odata, size);
CHECK(hipMemcpy(h_odata, d_odata, grid.x / 4 * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x / 4; i++) gpu_sum += h_odata[i];
printf("reduceSmemUnroll4 : %d <<<grid %d block %d>>>\n", gpu_sum,
grid.x / 4, block.x);
// reduce smem
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( reduceSmemUnrollDyn), dim3(grid.x / 4), dim3(block), DIM*sizeof(int), 0, d_idata,
d_odata, size);
CHECK(hipMemcpy(h_odata, d_odata, grid.x / 4 * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x / 4; i++) gpu_sum += h_odata[i];
printf("reduceSmemDynUnroll4: %d <<<grid %d block %d>>>\n", gpu_sum,
grid.x / 4, block.x);
// free memory
free(h_idata);
free(h_odata);
CHECK(hipFree(d_idata));
CHECK(hipFree(d_odata));
CHECK(hipDeviceReset());
bResult = (gpu_sum == cpu_sum);
if(!bResult) printf("Test failed!\n");
return EXIT_SUCCESS;
}
__device__ void clock_block(clock_t *d_o, clock_t clock_count)
{
unsigned int start_clock = (unsigned int)clock();
clock_t clock_offset = 0;
while (clock_offset < clock_count)
{
unsigned int end_clock = (unsigned int)clock();
// The code below should work like
// this (thanks to modular arithmetics):
//
// clock_offset = (clock_t) (end_clock > start_clock ?
// end_clock - start_clock :
// end_clock + (0xffffffffu - start_clock));
//
// Indeed, let m = 2^32 then
// end - start = end + m - start (mod m).
clock_offset = (clock_t)(end_clock - start_clock);
}
d_o[0] = clock_offset;
}
////////////////////////////////////////////////////////////////////////////////
// Kernels that call clock_block().
// Launching two of them into the same stream makes them depend on each other.
__global__ void kernel_A(clock_t *d_o, clock_t clock_count)
{
clock_block(d_o, clock_count);
}
__global__ void kernel_B(clock_t *d_o, clock_t clock_count)
{
clock_block(d_o, clock_count);
}
__global__ void kernel_C(clock_t *d_o, clock_t clock_count)
{
clock_block(d_o, clock_count);
}
__global__ void kernel_D(clock_t *d_o, clock_t clock_count)
{
clock_block(d_o, clock_count);
}
int simpleHyperQ(int argc, char **argv)
{
int nstreams = 8; // number of streams
float kernel_time = 10; // time each kernel should run, in ms
float elapsed_time;
int cuda_device = 0;
char * iname = "CUDA_DEVICE_MAX_CONNECTIONS";
setenv(iname, "4", 1); // 4 or 32
char *ivalue = getenv(iname);
printf("%s = %s\n", iname, ivalue);
hipDeviceProp_t deviceProp;
CHECK(hipGetDevice(&cuda_device));
CHECK(hipGetDeviceProperties(&deviceProp, cuda_device));
printf("> Detected Compute SM %d.%d hardware with %d multi-processors\n",
deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
// allocate host memory
clock_t *a = 0;
CHECK(hipHostMalloc((void **)&a, sizeof(clock_t)));
// allocate device memory
clock_t *d_a = 0;
CHECK(hipMalloc((void **)&d_a, 2 * nstreams * sizeof(clock_t)));
// allocate and create stream handles
hipStream_t *streams = (hipStream_t *)malloc(nstreams * sizeof(hipStream_t));
for (int i = 0; i < nstreams; i++)
{
CHECK(hipStreamCreate(&(streams[i])));
}
// create event handles
hipEvent_t start_event, stop_event;
CHECK(hipEventCreate(&start_event));
CHECK(hipEventCreate(&stop_event));
// Target time per kernel = kernel_time ms, clockRate = in KHz
// Target number of clocks = target time * clock frequency
#if defined(__arm__) || defined(__aarch64__)
clock_t time_clocks = (clock_t)(kernel_time * (deviceProp.clockRate / 1000));
#else
clock_t time_clocks = (clock_t)(kernel_time * deviceProp.clockRate);
#endif
clock_t total_clocks = 0;
CHECK(hipEventRecord(start_event, 0));
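// Depth-first launch: the four kernels issued into one stream run back to
// back, while different streams may overlap. How much they overlap depends
// on the number of hardware work queues chosen through
// CUDA_DEVICE_MAX_CONNECTIONS above; with fewer connections than streams,
// some streams share a queue and serialize against each other.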
for (int i = 0; i < nstreams; ++i)
{
hipLaunchKernelGGL(( kernel_A) , dim3(1), dim3(1), 0, streams[i] , &d_a[2 * i], time_clocks);
total_clocks += time_clocks;
hipLaunchKernelGGL(( kernel_B) , dim3(1), dim3(1), 0, streams[i] , &d_a[2 * i + 1], time_clocks);
total_clocks += time_clocks;
hipLaunchKernelGGL(( kernel_C) , dim3(1), dim3(1), 0, streams[i] , &d_a[2 * i], time_clocks);
total_clocks += time_clocks;
hipLaunchKernelGGL(( kernel_D) , dim3(1), dim3(1), 0, streams[i] , &d_a[2 * i + 1], time_clocks);
total_clocks += time_clocks;
}
// record the stop event on stream 0
CHECK(hipEventRecord(stop_event, 0));
// at this point the CPU can run work in parallel, independently of the GPU;
// here we simply wait until all GPU work is done.
CHECK(hipEventSynchronize(stop_event));
CHECK(hipEventElapsedTime(&elapsed_time, start_event, stop_event));
printf("Expected time for serial execution of %d sets of kernels is between approx. %.3fs and %.3fs\n", nstreams, (nstreams + 1) * kernel_time / 1000.0f, 2 * nstreams *kernel_time / 1000.0f);
printf("Expected time for fully concurrent execution of %d sets of kernels is approx. %.3fs\n", nstreams, 2 * kernel_time / 1000.0f);
printf("Measured time for sample = %.3fs\n", elapsed_time / 1000.0f);
bool bTestResult = (a[0] >= total_clocks);
for (int i = 0; i < nstreams; i++)
{
hipStreamDestroy(streams[i]);
}
free(streams);
hipEventDestroy(start_event);
hipEventDestroy(stop_event);
hipHostFree(a);
hipFree(d_a);
return (bTestResult ? EXIT_SUCCESS : EXIT_FAILURE);
}
/////////////////////////////////////////////////////////////////////////////////
#define LOOP_COUNT 3000000
void CUDART_CB my_callback(hipStream_t stream, hipError_t status, void *data)
{
printf("callback from stream %d\n", *((int *)data));
}
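// kernel_1 .. kernel_4 are identical busy-loop kernels used only to keep each
// stream occupied; the result of the tan() computation is never stored.
// The callback registered after them fires once all previously issued work
// in its stream has completed.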
__global__ void kernel_1()
{
double sum = 0.0;
for (int i = 0; i < LOOP_COUNT; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
__global__ void kernel_2()
{
double sum = 0.0;
for (int i = 0; i < LOOP_COUNT; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
__global__ void kernel_3()
{
double sum = 0.0;
for (int i = 0; i < LOOP_COUNT; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
__global__ void kernel_4()
{
double sum = 0.0;
for (int i = 0; i < LOOP_COUNT; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
int simpleCallback(int argc, char **argv)
{
int n_streams = 8;
if (argc > 2) n_streams = atoi(argv[2]);
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("> %s Starting...\n", argv[0]);
printf("> Using Device %d: %s\n", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
printf("> Compute Capability %d.%d hardware with %d multi-processors\n",
deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
// set the maximum number of device connections
char * iname = "CUDA_DEVICE_MAX_CONNECTIONS";
setenv(iname, "8", 1);
char *ivalue = getenv(iname);
printf("> %s = %s\n", iname, ivalue);
printf("> with streams = %d\n", n_streams);
// allocate and create stream handles
hipStream_t *streams = (hipStream_t *)malloc(n_streams * sizeof(
hipStream_t));
for (int i = 0; i < n_streams; i++)
{
CHECK(hipStreamCreate(&(streams[i])));
}
dim3 block(1);
dim3 grid(1);
hipEvent_t start_event, stop_event;
CHECK(hipEventCreate(&start_event));
CHECK(hipEventCreate(&stop_event));
int *stream_ids = (int *)malloc(n_streams * sizeof(int));
CHECK(hipEventRecord(start_event, 0));
for (int i = 0; i < n_streams; i++)
{
stream_ids[i] = i;
hipLaunchKernelGGL(( kernel_1) , dim3(grid), dim3(block), 0, streams[i] , );
hipLaunchKernelGGL(( kernel_2) , dim3(grid), dim3(block), 0, streams[i] , );
hipLaunchKernelGGL(( kernel_3) , dim3(grid), dim3(block), 0, streams[i] , );
hipLaunchKernelGGL(( kernel_4) , dim3(grid), dim3(block), 0, streams[i] , );
CHECK(hipStreamAddCallback(streams[i], my_callback,
(void *)(stream_ids + i), 0));
}
CHECK(hipEventRecord(stop_event, 0));
CHECK(hipEventSynchronize(stop_event));
float elapsed_time;
CHECK(hipEventElapsedTime(&elapsed_time, start_event, stop_event));
printf("Measured time for parallel execution = %.3fs\n", elapsed_time);
// release resources
for (int i = 0; i < n_streams; i++)
{
CHECK(hipStreamDestroy(streams[i]));
}
free(stream_ids);
free(streams);
CHECK(hipDeviceReset());
return 0;
}
int main(int argc, char* argv[]){
int ex=0;
if (argc < 2) { printf("usage: %s <example number 1-10>\n", argv[0]); return 0; }
ex=atoi(argv[1]);
printf("run ex : %d\n",ex);
switch(ex){
case 1:{
printf("multithread\n");//stream
multithread();
break;
}
case 2:{
printf("coaleascing\n");
coaleascing(argc, argv);
break;
}
case 3:{
printf("shared_memory_reverse\n");//simple smem + sync
shared_memory_reverse();
break;
}
case 4:{
printf("reduceSum\n");
reduceSum(argc,argv);
break;
}
case 5:{
printf("smemSquare\n");//smem + sync
smemSquare(argc,argv);
break;
}
case 6:{
printf("simpleHyperQ\n");//hyper q
simpleHyperQ(argc,argv);
break;
}
case 7:{
printf("simpleCallback\n");//stream
simpleCallback(argc,argv);
break;
}
case 8:{
printf("async\n");//simple async memcpy
async(argc,argv);
break;
}
case 9:{
printf("data_transfer_pageable_vs_pinned\n");
data_transfer_pageable_vs_pinned();
break;
}
case 10:{
printf("overlap\n");//stream
overlap(argc,argv);
break;
}
}
return 0;
} | e499a5e3c206af861d81a769dda0d06109e8a5f8.cu | //build command : nvcc -std=c++11 -lcudnn -lcublas kernel.cu -o kernel
#include <stdio.h>
#include <assert.h>
#include <pthread.h>
#include "common/common.h"
#include <stdlib.h>
#include <cuda.h>
#include "cublas_v2.h"
inline
cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
assert(result == cudaSuccess);
}
#endif
return result;
}
///////////////////////////////////////////////////////////////////////
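// staticReverse and dynamicReverse reverse a 64-element array inside a single
// block using shared memory; the __syncthreads() between the write phase and
// the read phase is what makes reading s[n-t-1] safe.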
__global__ void staticReverse(int *d, int n)
{
__shared__ int s[64];
int t = threadIdx.x;
int trail = n-t-1;
s[t] = d[t];// write step : global to shared
__syncthreads();
d[t] = s[trail]; // read step : shared to global
}
__global__ void dynamicReverse(int *d, int n)
{
extern __shared__ int s[];
int t = threadIdx.x;
int tr = n-t-1;
s[t] = d[t];
__syncthreads();
d[t] = s[tr];
}
int shared_memory_reverse(void)
{
const int n = 64;
int h_a[n], h_r[n], h_d[n];
for (int i = 0; i < n; i++) {
h_a[i] = i;
h_r[i] = n-i-1;
h_d[i] = 0;
}
printf("original array elemtns\n");
for (int i = 0; i < n; i++) {
printf("%d ",a[i]);
}
int *d_d;
cudaMalloc(&d_d, n * sizeof(int));
// 정적 공유 메모리 버전
cudaMemcpy(d_d, h_a, n*sizeof(int), cudaMemcpyHostToDevice);
staticReverse<<<1,n>>>(d_d, n);
cudaMemcpy(h_d, d_d, n*sizeof(int), cudaMemcpyDeviceToHost);
for (int i = 0; i < n; i++)
if (d[i] != r[i]) printf("Error: d[%d]!=r[%d] (%d, %d)\n", i, i, d[i], r[i]);
// 동적 공유 메모리 버전
cudaMemcpy(d_d, a, n*sizeof(int), cudaMemcpyHostToDevice);
dynamicReverse<<<1,n,n*sizeof(int)>>>(d_d, n);
cudaMemcpy(d, d_d, n * sizeof(int), cudaMemcpyDeviceToHost);
printf("\nreverse results\n");
int flag=1;
for (int i = 0; i < n; i++)
if (d[i] != r[i]){ flag=0; printf("Error: d[%d]!=r[%d] (%d, %d)\n", i, i, d[i], r[i]);}
else printf("%d ",r[i]);
if(flag)printf("\nall array elements are correctly reversed\n");
}
///////////////////////////////////////////////////////////////////////
__global__ void kernel(float *a, int offset)
{
int i = offset + threadIdx.x + blockIdx.x*blockDim.x;
float x = (float)i;
float s = sinf(x);
float c = cosf(x);
a[i] = a[i] + sqrtf(s*s+c*c);
}
float maxError(float *a, int n)
{
float maxE = 0;
for (int i = 0; i < n; i++) {
float error = fabs(a[i]-1.0f);
if (error > maxE) maxE = error;
}
return maxE;
}
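// overlap() compares three schedules for the same work: a sequential
// baseline, an async version that loops {H2D copy, kernel, D2H copy} per
// stream, and an async version that batches all H2D copies, then all
// kernels, then all D2H copies. The host buffer is allocated with
// cudaMallocHost because cudaMemcpyAsync can only overlap with kernel
// execution when the host memory is pinned.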
int overlap(int argc, char **argv)
{
const int blockSize = 256, nStreams = 4;// blockSize=threadCount
const int n = 4 * 1024 * blockSize * nStreams;
const int streamSize = n / nStreams;// == one stream size == 4 * 1024 * blockSize
const int streamBytes = streamSize * sizeof(float);
const int total_bytes = n * sizeof(float);
int devId = 0;
if (argc > 1) devId = atoi(argv[1]);
cudaDeviceProp prop;
checkCuda( cudaGetDeviceProperties(&prop, devId));
printf("Device : %s\n", prop.name);
checkCuda( cudaSetDevice(devId) );
// allocate pinned host memory and device memory
float *a, *d_a;
checkCuda( cudaMallocHost((void**)&a, total_bytes) ); // host pinned
checkCuda( cudaMalloc((void**)&d_a, total_bytes) ); // device
float ms; // elapsed time in milliseconds
// create events and streams
cudaEvent_t startEvent, stopEvent, dummyEvent;
cudaStream_t stream[nStreams];
checkCuda( cudaEventCreate(&startEvent) );
checkCuda( cudaEventCreate(&stopEvent) );
checkCuda( cudaEventCreate(&dummyEvent) );
for (int i = 0; i < nStreams; ++i)
checkCuda( cudaStreamCreate(&stream[i]) );
// baseline case - sequential transfer and kernel execution
memset(a, 0, total_bytes);
checkCuda( cudaEventRecord(startEvent,0) );
checkCuda( cudaMemcpy(d_a, a, total_bytes, cudaMemcpyHostToDevice) );
kernel<<<n/blockSize, blockSize>>>(d_a, 0);//gridSize=4*1024(blockCount)
checkCuda( cudaMemcpy(a, d_a, total_bytes, cudaMemcpyDeviceToHost) );
checkCuda( cudaEventRecord(stopEvent, 0) );
checkCuda( cudaEventSynchronize(stopEvent) );
checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent) );
printf("Time for sequential transfer and execute (ms): %f\n", ms);
printf(" max error: %e\n", maxError(a, n));
// asynchronous version 1: loop over {copy, kernel, copy} per stream
memset(a, 0, total_bytes);
checkCuda( cudaEventRecord(startEvent,0) );
for (int i = 0; i < nStreams; ++i) {
int offset = i * streamSize;
checkCuda( cudaMemcpyAsync(&d_a[offset], &a[offset],
streamBytes, cudaMemcpyHostToDevice,
stream[i]) );
kernel<<<streamSize/blockSize, blockSize, 0, stream[i]>>>(d_a, offset);
checkCuda( cudaMemcpyAsync(&a[offset], &d_a[offset],
streamBytes, cudaMemcpyDeviceToHost,
stream[i]) );
}
checkCuda( cudaEventRecord(stopEvent, 0) );
checkCuda( cudaEventSynchronize(stopEvent) );
checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent) );
printf("Time for asynchronous V1 transfer and execute (ms): %f\n", ms);
printf(" max error: %e\n", maxError(a, n));
// asynchronous version 2:
// run the copy loop, the kernel-launch loop, and the copy-back loop separately
memset(a, 0, total_bytes);
checkCuda( cudaEventRecord(startEvent,0) );
for (int i = 0; i < nStreams; ++i)
{
int offset = i * streamSize;
checkCuda( cudaMemcpyAsync(&d_a[offset], &a[offset],
streamBytes, cudaMemcpyHostToDevice,
stream[i]) );
}
for (int i = 0; i < nStreams; ++i)
{
int offset = i * streamSize;
kernel<<<streamSize/blockSize, blockSize, 0, stream[i]>>>(d_a, offset);
}
for (int i = 0; i < nStreams; ++i)
{
int offset = i * streamSize;
checkCuda( cudaMemcpyAsync(&a[offset], &d_a[offset],
streamBytes, cudaMemcpyDeviceToHost,
stream[i]) );
}
checkCuda( cudaEventRecord(stopEvent, 0) );
checkCuda( cudaEventSynchronize(stopEvent) );
checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent) );
printf("Time for asynchronous V2 transfer and execute (ms): %f\n", ms);
printf(" max error: %e\n", maxError(a, n));
// free resources
checkCuda( cudaEventDestroy(startEvent) );
checkCuda( cudaEventDestroy(stopEvent) );
checkCuda( cudaEventDestroy(dummyEvent) );
for (int i = 0; i < nStreams; ++i)
checkCuda( cudaStreamDestroy(stream[i]) );
cudaFree(d_a);
cudaFreeHost(a);
return 0;
}
///////////////////////////////////////////////////////////////////////
void profileCopies(float *h_a,
float *h_b,
float *d,
unsigned int n,
char *desc)
{
printf("\n%s transfers\n", desc);
unsigned int bytes = n * sizeof(float);
cudaEvent_t startEvent, stopEvent;
checkCuda( cudaEventCreate(&startEvent) );
checkCuda( cudaEventCreate(&stopEvent) );
checkCuda( cudaEventRecord(startEvent, 0) );
checkCuda( cudaMemcpy(d, h_a, bytes, cudaMemcpyHostToDevice) );
checkCuda( cudaEventRecord(stopEvent, 0) );
checkCuda( cudaEventSynchronize(stopEvent) );
float time;
checkCuda( cudaEventElapsedTime(&time, startEvent, stopEvent) );
printf(" Host to Device bandwidth (GB/s): %f\n", bytes * 1e-6 / time);
checkCuda( cudaEventRecord(startEvent, 0) );
checkCuda( cudaMemcpy(h_b, d, bytes, cudaMemcpyDeviceToHost) );
checkCuda( cudaEventRecord(stopEvent, 0) );
checkCuda( cudaEventSynchronize(stopEvent) );
checkCuda( cudaEventElapsedTime(&time, startEvent, stopEvent) );
printf(" Device to Host bandwidth (GB/s): %f\n", bytes * 1e-6 / time);
for (int i = 0; i < n; ++i) {
if (h_a[i] != h_b[i]) {
printf("*** %s transfers failed ***", desc);
break;
}
}
// clean up events
checkCuda( cudaEventDestroy(startEvent) );
checkCuda( cudaEventDestroy(stopEvent) );
}
int data_transfer_pageable_vs_pinned()
{
unsigned int nElements = 4*1024*1024;
const unsigned int bytes = nElements * sizeof(float);
// host arrays
float *h_aPageable, *h_bPageable;
float *h_aPinned, *h_bPinned;
// device array
float *d_a;
// allocate and initialize
h_aPageable = (float*)malloc(bytes); // host pageable memory
h_bPageable = (float*)malloc(bytes); // host pageable memory
checkCuda( cudaMallocHost((void**)&h_aPinned, bytes) ); // host pinned memory
checkCuda( cudaMallocHost((void**)&h_bPinned, bytes) ); // host pinned memory
checkCuda( cudaMalloc((void**)&d_a, bytes) ); // device memory
for (int i = 0; i < nElements; ++i) h_aPageable[i] = i;
memcpy(h_aPinned, h_aPageable, bytes);
memset(h_bPageable, 0, bytes);
memset(h_bPinned, 0, bytes);
cudaDeviceProp prop;
checkCuda( cudaGetDeviceProperties(&prop, 0) );
printf("\nDevice: %s\n", prop.name);
printf("Transfer size (MB): %d\n", bytes / (1024 * 1024));
// compare pageable vs. pinned transfer performance
profileCopies(h_aPageable, h_bPageable, d_a, nElements, "Pageable");
profileCopies(h_aPinned, h_bPinned, d_a, nElements, "Pinned");
profileCopies(h_aPageable, h_bPageable, d_a, nElements, "Pageable");
profileCopies(h_aPinned, h_bPinned, d_a, nElements, "Pinned");
profileCopies(h_aPageable, h_bPageable, d_a, nElements, "Pageable");
profileCopies(h_aPinned, h_bPinned, d_a, nElements, "Pinned");
profileCopies(h_aPageable, h_bPageable, d_a, nElements, "Pageable");
profileCopies(h_aPinned, h_bPinned, d_a, nElements, "Pinned");
printf("\n");
// free resources
cudaFree(d_a);
cudaFreeHost(h_aPinned);
cudaFreeHost(h_bPinned);
free(h_aPageable);
free(h_bPageable);
return 0;
}
///////////////////////////////////////////////////////////////////////
template <typename T>
__global__ void offset(T* a, int s)
{
int i = blockDim.x * blockIdx.x + threadIdx.x + s;
a[i] = a[i] + 1;
}
template <typename T>
__global__ void stride(T* a, int s)
{
int i = (blockDim.x * blockIdx.x + threadIdx.x) * s;
a[i] = a[i] + 1;
}
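// offset() shifts the starting index of each thread's access and stride()
// spreads accesses s elements apart; runTest() below times both over 0..32
// to show how misaligned and strided global loads reduce effective bandwidth.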
template <typename T>
void runTest(int deviceId, int nMB)
{
int blockSize = 256;
float ms;
T *d_a;
cudaEvent_t startEvent, stopEvent;
int n = nMB*1024*1024/sizeof(T);
// NB: d_a is allocated as 33*nMB so the offset/stride kernels (up to 32) stay in bounds
checkCuda( cudaMalloc(&d_a, n * 33 * sizeof(T)) );
checkCuda( cudaEventCreate(&startEvent) );
checkCuda( cudaEventCreate(&stopEvent) );
printf("Offset, Bandwidth (GB/s):\n");
offset<<<n/blockSize, blockSize>>>(d_a, 0); // warm up
for (int i = 0; i <= 32; i++) {
checkCuda( cudaMemset(d_a, 0, n * sizeof(T)) );
checkCuda( cudaEventRecord(startEvent,0) );
offset<<<n/blockSize, blockSize>>>(d_a, i);
checkCuda( cudaEventRecord(stopEvent,0) );
checkCuda( cudaEventSynchronize(stopEvent) );
checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent) );
printf("%d, %f\n", i, 2*nMB/ms);
}
printf("\n");
printf("Stride, Bandwidth (GB/s):\n");
stride<<<n/blockSize, blockSize>>>(d_a, 1); // warm up
for (int i = 1; i <= 32; i++) {
checkCuda( cudaMemset(d_a, 0, n * sizeof(T)) );
checkCuda( cudaEventRecord(startEvent,0) );
stride<<<n/blockSize, blockSize>>>(d_a, i);
checkCuda( cudaEventRecord(stopEvent,0) );
checkCuda( cudaEventSynchronize(stopEvent) );
checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent) );
printf("%d, %f\n", i, 2*nMB/ms);
}
checkCuda( cudaEventDestroy(startEvent) );
checkCuda( cudaEventDestroy(stopEvent) );
cudaFree(d_a);
}
//------------------------------------------------------------------
int coaleascing(int argc, char **argv)
{
int nMB = 4;
int deviceId = 0;
bool bFp64 = false;
for (int i = 1; i < argc; i++) {
if (!strncmp(argv[i], "dev=", 4))
deviceId = atoi((char*)(&argv[i][4]));
else if (!strcmp(argv[i], "fp64"))
bFp64 = true;
}
cudaDeviceProp prop;
checkCuda( cudaSetDevice(deviceId) );
checkCuda( cudaGetDeviceProperties(&prop, deviceId) );
printf("Device: %s\n", prop.name);
printf("Transfer size (MB): %d\n", nMB);
printf("%s Precision\n", bFp64 ? "Double" : "Single");
if (bFp64) runTest<double>(deviceId, nMB);
else runTest<float>(deviceId, nMB);
}
///////////////////////////////////////////////////////////////////////
const int N = 1 << 20;
__global__ void kernel_target(float *x, int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < n; i += blockDim.x * gridDim.x) {
x[i] = sqrt(pow(3.14159,i));
}
}
void *launch_kernel(void *dummy){
float *data;
cudaMalloc(&data, N * sizeof(float));
kernel_target<<<1, 64>>>(data, N);
cudaStreamSynchronize(0);
return NULL;
}
int multithread(){
const int num_threads = 8;
pthread_t threads[num_threads];
for (int i = 0; i < num_threads; i++) {
if (pthread_create(&threads[i], NULL, launch_kernel, 0)) {
fprintf(stderr, "Error creating threadn");
}
}
for (int i = 0; i < num_threads; i++) {
if(pthread_join(threads[i], NULL)) {
fprintf(stderr, "Error joining threadn");
return 2;
}
}
cudaDeviceReset();
return 0;
}
///////////////////////////////////////////////////////////////////////
int ROWS = 1024;
int COLS = 1024;
void generate_random_dense_matrix(int M, int N, float **outA)
{
int i, j;
double rand_max = (double)RAND_MAX;
float *A = (float *)malloc(sizeof(float) * M * N);
for (j = 0; j < N; j++){ // columns
for (i = 0; i < M; i++){ // rows
double drand = (double)rand();
A[j * M + i] = (drand / rand_max) * 100.0; // values between 0 and 100
}
}
*outA = A;
}
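// cuBLAS assumes column-major storage: the helper above fills each M x N
// matrix column by column, and the cublasSetMatrix / cublasSgemm calls below
// pass the matching leading dimensions (M for A and C, N for B).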
int cublasMM(int argc, char **argv)
{
int i, j;
float *A, *dA;
float *B, *dB;
float *C, *dC;
float beta;
float alpha;
cublasHandle_t handle = 0;
alpha = 3.0f;
beta = 4.0f;
int N = ROWS;
int M = COLS;
// initialize the input data
srand(9384);
generate_random_dense_matrix(M, N, &A);
generate_random_dense_matrix(N, M, &B);
C = (float *)malloc(sizeof(float) * M * M);
memset(C, 0x00, sizeof(float) * M * M);
// create the cuBLAS handle
CHECK_CUBLAS(cublasCreate(&handle));
// allocate device memory
CHECK(cudaMalloc((void **)&dA, sizeof(float) * M * N));
CHECK(cudaMalloc((void **)&dB, sizeof(float) * N * M));
CHECK(cudaMalloc((void **)&dC, sizeof(float) * M * M));
// transfer data to the device
CHECK_CUBLAS(cublasSetMatrix(M, N, sizeof(float), A, M, dA, M));
CHECK_CUBLAS(cublasSetMatrix(N, M, sizeof(float), B, N, dB, N));
CHECK_CUBLAS(cublasSetMatrix(M, M, sizeof(float), C, M, dC, M));
// perform the matrix-matrix multiplication
CHECK_CUBLAS(cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, M, M, N, &alpha,
dA, M, dB, N, &beta, dC, M));
// copy the result back and check it
CHECK_CUBLAS(cublasGetMatrix(M, M, sizeof(float), dC, M, C, M));
for (j = 0; j < 10; j++)
{
for (i = 0; i < 10; i++)
{
printf("%2.2f ", C[j * M + i]);
}
printf("...\n");
}
printf("...\n");
free(A);
free(B);
free(C);
CHECK(cudaFree(dA));
CHECK(cudaFree(dB));
CHECK(cudaFree(dC));
CHECK_CUBLAS(cublasDestroy(handle));
return 0;
}
///////////////////////////////////////////////////////////////////////
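// cublasMMAsync repeats the same GEMM but binds the cuBLAS handle to a
// user-created stream via cublasSetStream and uses the Async matrix transfer
// routines, so the host must call cudaStreamSynchronize before reading C.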
int cublasMMAsync(int argc, char **argv)
{
int i, j;
float *A, *dA;
float *B, *dB;
float *C, *dC;
float beta;
float alpha;
cublasHandle_t handle = 0;
cudaStream_t stream = 0;
alpha = 3.0f;
beta = 4.0f;
int N = ROWS;
int M = COLS;
// initialize the input data
srand(9384);
generate_random_dense_matrix(M, N, &A);
generate_random_dense_matrix(N, M, &B);
C = (float *)malloc(sizeof(float) * M * M);
memset(C, 0x00, sizeof(float) * M * M);
// create the cuBLAS handle
CHECK_CUBLAS(cublasCreate(&handle));
CHECK(cudaStreamCreate(&stream));
CHECK_CUBLAS(cublasSetStream(handle, stream));
// allocate device memory
CHECK(cudaMalloc((void **)&dA, sizeof(float) * M * N));
CHECK(cudaMalloc((void **)&dB, sizeof(float) * N * M));
CHECK(cudaMalloc((void **)&dC, sizeof(float) * M * M));
// asynchronously transfer data to the device
CHECK_CUBLAS(cublasSetMatrixAsync(M, N, sizeof(float), A, M, dA, M, stream));
CHECK_CUBLAS(cublasSetMatrixAsync(N, M, sizeof(float), B, N, dB, N, stream));
CHECK_CUBLAS(cublasSetMatrixAsync(M, M, sizeof(float), C, M, dC, M, stream));
// perform the matrix-matrix multiplication
CHECK_CUBLAS(cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, M, M, N, &alpha,
dA, M, dB, N, &beta, dC, M));
// copy the result back and check it
CHECK_CUBLAS(cublasGetMatrixAsync(M, M, sizeof(float), dC, M, C, M,
stream));
CHECK(cudaStreamSynchronize(stream));
for (j = 0; j < 10; j++)
{
for (i = 0; i < 10; i++)
{
printf("%2.2f ", C[j * M + i]);
}
printf("...\n");
}
printf("...\n");
free(A);
free(B);
free(C);
CHECK(cudaFree(dA));
CHECK(cudaFree(dB));
CHECK(cudaFree(dC));
CHECK(cudaStreamDestroy(stream));
CHECK_CUBLAS(cublasDestroy(handle));
return 0;
}
///////////////////////////////////////////////////////////////////////
__global__ void kernel(float *g_data, float value)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
g_data[idx] = g_data[idx] + value;
}
int checkResult(float *data, const int n, const float x)
{
for (int i = 0; i < n; i++)
{
if (data[i] != x)
{
printf("Error! data[%d] = %f, ref = %f\n", i, data[i], x);
return 0;
}
}
return 1;
}
int async(int argc, char *argv[])
{
int devID = 0;
cudaDeviceProp deviceProps;
CHECK(cudaGetDeviceProperties(&deviceProps, devID));
printf("> %s running on", argv[0]);
printf(" CUDA device [%s]\n", deviceProps.name);
int num = 1 << 24;
int nbytes = num * sizeof(int);
float value = 10.0f;
// allocate host memory
float *h_a = 0;
CHECK(cudaMallocHost((void **)&h_a, nbytes));
memset(h_a, 0, nbytes);
// allocate device memory
float *d_a = 0;
CHECK(cudaMalloc((void **)&d_a, nbytes));
CHECK(cudaMemset(d_a, 255, nbytes));
// set up the thread layout
dim3 block = dim3(512);
dim3 grid = dim3((num + block.x - 1) / block.x);
// create an event handle
cudaEvent_t stop;
CHECK(cudaEventCreate(&stop));
// asynchronous memcpy and kernel launch (all on stream 0)
CHECK(cudaMemcpyAsync(d_a, h_a, nbytes, cudaMemcpyHostToDevice));
kernel<<<grid, block>>>(d_a, value);
CHECK(cudaMemcpyAsync(h_a, d_a, nbytes, cudaMemcpyDeviceToHost));
CHECK(cudaEventRecord(stop));
// have the CPU do some work while the GPU is busy
unsigned long int counter = 0;
while (cudaEventQuery(stop) == cudaErrorNotReady) {
counter++;
}
printf("CPU executed %lu iterations while waiting for GPU to finish\n",
counter);
bool bFinalResults = (bool) checkResult(h_a, num, value);
CHECK(cudaEventDestroy(stop));
CHECK(cudaFreeHost(h_a));
CHECK(cudaFree(d_a));
CHECK(cudaDeviceReset());
exit(bFinalResults ? EXIT_SUCCESS : EXIT_FAILURE);
}
///////////////////////////////////////////////////////////////////////
#define BDIMX 32
#define BDIMY 32
#define IPAD 1
void printData(char *msg, int *in, const int size)
{
printf("%s: ", msg);
for (int i = 0; i < size; i++)
{
printf("%5d", in[i]);
fflush(stdout);
}
printf("\n");
return;
}
__global__ void setRowReadRow (int *out)
{
// static shared memory
__shared__ int tile[BDIMY][BDIMX]; // x, y
unsigned int idx = threadIdx.y * blockDim.x + threadIdx.x;
tile[threadIdx.y][threadIdx.x] = idx; // x, y
__syncthreads();
out[idx] = tile[threadIdx.y][threadIdx.x] ;// x, y
}
__global__ void setColReadCol (int *out)
{
// static shared memory
__shared__ int tile[BDIMX][BDIMY]; // y, x
unsigned int idx = threadIdx.y * blockDim.x + threadIdx.x;
tile[threadIdx.x][threadIdx.y] = idx;// y, x
__syncthreads();
out[idx] = tile[threadIdx.x][threadIdx.y];// y, x
}
__global__ void setRowReadCol(int *out)
{
// static shared memory
__shared__ int tile[BDIMY][BDIMX];
unsigned int idx = threadIdx.y * blockDim.x + threadIdx.x;
tile[threadIdx.y][threadIdx.x] = idx;
__syncthreads();
out[idx] = tile[threadIdx.x][threadIdx.y];
}
__global__ void setRowReadColDyn(int *out)
{
// dynamic shared memory
extern __shared__ int tile[];
unsigned int row_idx = threadIdx.y * blockDim.x + threadIdx.x;
unsigned int col_idx = threadIdx.x * blockDim.y + threadIdx.y;
tile[row_idx] = row_idx;
__syncthreads();
out[row_idx] = tile[col_idx];
}
__global__ void setRowReadColPad(int *out)
{
// static shared memory with padding
__shared__ int tile[BDIMY][BDIMX + IPAD];
unsigned int idx = threadIdx.y * blockDim.x + threadIdx.x;
tile[threadIdx.y][threadIdx.x] = idx;
__syncthreads();
out[idx] = tile[threadIdx.x][threadIdx.y];
}
__global__ void setRowReadColDynPad(int *out)
{
// dynamic shared memory with padding
extern __shared__ int tile[];
unsigned int row_idx = threadIdx.y * (blockDim.x + IPAD) + threadIdx.x;
unsigned int col_idx = threadIdx.x * (blockDim.x + IPAD) + threadIdx.y;
unsigned int g_idx = threadIdx.y * blockDim.x + threadIdx.x;
tile[row_idx] = g_idx;
__syncthreads();
out[g_idx] = tile[col_idx];
}
int smemSquare(int argc, char **argv)
{
// set up the device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("%s at ", argv[0]);
printf("device %d: %s ", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
cudaSharedMemConfig pConfig;
CHECK(cudaDeviceGetSharedMemConfig ( &pConfig ));
printf("with Bank Mode:%s ", pConfig == 1 ? "4-Byte" : "8-Byte");
// set the array size
int nx = BDIMX;
int ny = BDIMY;
bool iprintf = 0;
if (argc > 1) iprintf = atoi(argv[1]);
size_t nBytes = nx * ny * sizeof(int);
// set up the execution configuration
dim3 block (BDIMX, BDIMY);
dim3 grid (1, 1);
printf("<<< grid (%d,%d) block (%d,%d)>>>\n", grid.x, grid.y, block.x,
block.y);
// allocate device memory
int *d_C;
CHECK(cudaMalloc((int**)&d_C, nBytes));
int *gpuRef = (int *)malloc(nBytes);
CHECK(cudaMemset(d_C, 0, nBytes));
setColReadCol<<<grid, block>>>(d_C);
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
if(iprintf) printData("set col read col ", gpuRef, nx * ny);
CHECK(cudaMemset(d_C, 0, nBytes));
setRowReadRow<<<grid, block>>>(d_C);
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
if(iprintf) printData("set row read row ", gpuRef, nx * ny);
CHECK(cudaMemset(d_C, 0, nBytes));
setRowReadCol<<<grid, block>>>(d_C);
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
if(iprintf) printData("set row read col ", gpuRef, nx * ny);
CHECK(cudaMemset(d_C, 0, nBytes));
setRowReadColDyn<<<grid, block, BDIMX*BDIMY*sizeof(int)>>>(d_C);
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
if(iprintf) printData("set row read col dyn", gpuRef, nx * ny);
CHECK(cudaMemset(d_C, 0, nBytes));
setRowReadColPad<<<grid, block>>>(d_C);
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
if(iprintf) printData("set row read col pad", gpuRef, nx * ny);
CHECK(cudaMemset(d_C, 0, nBytes));
setRowReadColDynPad<<<grid, block, (BDIMX + IPAD)*BDIMY*sizeof(int)>>>(d_C);
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
if(iprintf) printData("set row read col DP ", gpuRef, nx * ny);
CHECK(cudaFree(d_C));
free(gpuRef);
CHECK(cudaDeviceReset());
return EXIT_SUCCESS;
}
///////////////////////////////////////////////////////////////////////
#define DIM 128
extern __shared__ int dsmem[];
int recursiveReduce(int *data, int const size)
{
if (size == 1) return data[0];
int const stride = size / 2;
for (int i = 0; i < stride; i++)
data[i] += data[i + stride];
return recursiveReduce(data, stride);
}
// reduction in global memory with the last warp completely unrolled
__global__ void reduceGmem(int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
int *idata = g_idata + blockIdx.x * blockDim.x;
// boundary check
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n) return;
// in-place reduction in global memory
if (blockDim.x >= 1024 && tid < 512) idata[tid] += idata[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256) idata[tid] += idata[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128) idata[tid] += idata[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64) idata[tid] += idata[tid + 64];
__syncthreads();
// unrolling warp
if (tid < 32)
{
volatile int *vsmem = idata;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceSmem(int *g_idata, int *g_odata, unsigned int n)
{
__shared__ int smem[DIM];
// set thread ID
unsigned int tid = threadIdx.x;
// boundary check
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n) return;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
// set to smem by each threads
smem[tid] = idata[tid];
__syncthreads();
// in-place reduction in shared memory
if (blockDim.x >= 1024 && tid < 512) smem[tid] += smem[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256) smem[tid] += smem[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128) smem[tid] += smem[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64) smem[tid] += smem[tid + 64];
__syncthreads();
// unrolling warp
if (tid < 32)
{
volatile int *vsmem = smem;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
__global__ void reduceSmemDyn(int *g_idata, int *g_odata, unsigned int n)
{
extern __shared__ int smem[];
// set thread ID
unsigned int tid = threadIdx.x;
int *idata = g_idata + blockIdx.x * blockDim.x;
// set to smem by each threads
smem[tid] = idata[tid];
__syncthreads();
// in-place reduction in shared memory
if (blockDim.x >= 1024 && tid < 512) smem[tid] += smem[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256) smem[tid] += smem[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128) smem[tid] += smem[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64) smem[tid] += smem[tid + 64];
__syncthreads();
// unrolling warp
if (tid < 32)
{
volatile int *vsmem = smem;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
// unroll4 + complete unroll for loop + gmem
__global__ void reduceGmemUnroll(int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 4;
// unrolling 4
if (idx < n)
{
int a1, a2, a3, a4;
a1 = a2 = a3 = a4 = 0;
a1 = g_idata[idx];
if (idx + blockDim.x < n) a2 = g_idata[idx + blockDim.x];
if (idx + 2 * blockDim.x < n) a3 = g_idata[idx + 2 * blockDim.x];
if (idx + 3 * blockDim.x < n) a4 = g_idata[idx + 3 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4;
}
__syncthreads();
// in-place reduction in global memory
if (blockDim.x >= 1024 && tid < 512) idata[tid] += idata[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256) idata[tid] += idata[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128) idata[tid] += idata[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64) idata[tid] += idata[tid + 64];
__syncthreads();
// unrolling warp
if (tid < 32)
{
volatile int *vsmem = idata;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceSmemUnroll(int *g_idata, int *g_odata, unsigned int n)
{
// static shared memory
__shared__ int smem[DIM];
// set thread ID
unsigned int tid = threadIdx.x;
// global index, 4 blocks of input data processed at a time
unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x;
// unrolling 4 blocks
int tmpSum = 0;
// boundary check
if (idx < n)
{
int a1, a2, a3, a4;
a1 = a2 = a3 = a4 = 0;
a1 = g_idata[idx];
if (idx + blockDim.x < n) a2 = g_idata[idx + blockDim.x];
if (idx + 2 * blockDim.x < n) a3 = g_idata[idx + 2 * blockDim.x];
if (idx + 3 * blockDim.x < n) a4 = g_idata[idx + 3 * blockDim.x];
tmpSum = a1 + a2 + a3 + a4;
}
smem[tid] = tmpSum;
__syncthreads();
// in-place reduction in shared memory
if (blockDim.x >= 1024 && tid < 512) smem[tid] += smem[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256) smem[tid] += smem[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128) smem[tid] += smem[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64) smem[tid] += smem[tid + 64];
__syncthreads();
// unrolling warp
if (tid < 32)
{
volatile int *vsmem = smem;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
__global__ void reduceSmemUnrollDyn(int *g_idata, int *g_odata, unsigned int n)
{
extern __shared__ int smem[];
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x;
// unrolling 4
int tmpSum = 0;
if (idx < n)
{
int a1, a2, a3, a4;
a1 = a2 = a3 = a4 = 0;
a1 = g_idata[idx];
if (idx + blockDim.x < n) a2 = g_idata[idx + blockDim.x];
if (idx + 2 * blockDim.x < n) a3 = g_idata[idx + 2 * blockDim.x];
if (idx + 3 * blockDim.x < n) a4 = g_idata[idx + 3 * blockDim.x];
tmpSum = a1 + a2 + a3 + a4;
}
smem[tid] = tmpSum;
__syncthreads();
// in-place reduction in shared memory
if (blockDim.x >= 1024 && tid < 512) smem[tid] += smem[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256) smem[tid] += smem[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128) smem[tid] += smem[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64) smem[tid] += smem[tid + 64];
__syncthreads();
// unrolling warp
if (tid < 32)
{
volatile int *vsmem = smem;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
__global__ void reduceNeighboredGmem(int *g_idata, int *g_odata,
unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
// boundary check
if (idx >= n) return;
// in-place reduction in global memory
for (int stride = 1; stride < blockDim.x; stride *= 2)
{
if ((tid % (2 * stride)) == 0)
{
idata[tid] += idata[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceNeighboredSmem(int *g_idata, int *g_odata,
unsigned int n)
{
__shared__ int smem[DIM];
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
// boundary check
if (idx >= n) return;
smem[tid] = idata[tid];
__syncthreads();
// in-place reduction in shared memory
for (int stride = 1; stride < blockDim.x; stride *= 2)
{
if ((tid % (2 * stride)) == 0)
{
smem[tid] += smem[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
int reduceSum(int argc, char **argv)
{
// set up the device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("%s starting reduction at ", argv[0]);
printf("device %d: %s ", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
bool bResult = false;
// initialization
int size = 1 << 22;
printf(" with array size %d ", size);
// set up the execution configuration
int blocksize = DIM;
dim3 block (blocksize, 1);
dim3 grid ((size + block.x - 1) / block.x, 1);
printf("grid %d block %d\n", grid.x, block.x);
// allocate host memory
size_t bytes = size * sizeof(int);
int *h_idata = (int *) malloc(bytes);
int *h_odata = (int *) malloc(grid.x * sizeof(int));
int *tmp = (int *) malloc(bytes);
// initialize the array
for (int i = 0; i < size; i++)
{
h_idata[i] = (int)( rand() & 0xFF );
}
memcpy (tmp, h_idata, bytes);
int gpu_sum = 0;
// allocate device memory
int *d_idata = NULL;
int *d_odata = NULL;
CHECK(cudaMalloc((void **) &d_idata, bytes));
CHECK(cudaMalloc((void **) &d_odata, grid.x * sizeof(int)));
int cpu_sum = recursiveReduce (tmp, size);
printf("cpu reduce : %d\n", cpu_sum);
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
reduceNeighboredGmem<<<grid.x, block>>>(d_idata, d_odata, size);
CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf("reduceNeighboredGmem: %d <<<grid %d block %d>>>\n", gpu_sum, grid.x, block.x);
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
reduceNeighboredSmem<<<grid.x, block>>>(d_idata, d_odata, size);
CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf("reduceNeighboredSmem: %d <<<grid %d block %d>>>\n", gpu_sum, grid.x, block.x);
// reduce gmem
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
reduceGmem<<<grid.x, block>>>(d_idata, d_odata, size);
CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int),
cudaMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf("reduceGmem : %d <<<grid %d block %d>>>\n", gpu_sum, grid.x,
block.x);
// reduce smem
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
reduceSmem<<<grid.x, block>>>(d_idata, d_odata, size);
CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int),
cudaMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf("reduceSmem : %d <<<grid %d block %d>>>\n", gpu_sum, grid.x,
block.x);
// reduce smem
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
reduceSmemDyn<<<grid.x, block, blocksize*sizeof(int)>>>(d_idata, d_odata,
size);
CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int),
cudaMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf("reduceSmemDyn : %d <<<grid %d block %d>>>\n", gpu_sum, grid.x,
block.x);
// reduce gmem
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
reduceGmemUnroll<<<grid.x / 4, block>>>(d_idata, d_odata, size);
CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 4 * sizeof(int),
cudaMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x / 4; i++) gpu_sum += h_odata[i];
printf("reduceGmemUnroll4 : %d <<<grid %d block %d>>>\n", gpu_sum,
grid.x / 4, block.x);
// reduce smem
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
reduceSmemUnroll<<<grid.x / 4, block>>>(d_idata, d_odata, size);
CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 4 * sizeof(int),
cudaMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x / 4; i++) gpu_sum += h_odata[i];
printf("reduceSmemUnroll4 : %d <<<grid %d block %d>>>\n", gpu_sum,
grid.x / 4, block.x);
// reduce smem
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
reduceSmemUnrollDyn<<<grid.x / 4, block, DIM*sizeof(int)>>>(d_idata,
d_odata, size);
CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 4 * sizeof(int),
cudaMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x / 4; i++) gpu_sum += h_odata[i];
printf("reduceSmemDynUnroll4: %d <<<grid %d block %d>>>\n", gpu_sum,
grid.x / 4, block.x);
// free memory
free(h_idata);
free(h_odata);
CHECK(cudaFree(d_idata));
CHECK(cudaFree(d_odata));
CHECK(cudaDeviceReset());
bResult = (gpu_sum == cpu_sum);
if(!bResult) printf("Test failed!\n");
return EXIT_SUCCESS;
}
__device__ void clock_block(clock_t *d_o, clock_t clock_count)
{
unsigned int start_clock = (unsigned int)clock();
clock_t clock_offset = 0;
while (clock_offset < clock_count)
{
unsigned int end_clock = (unsigned int)clock();
// The code below should work like
// this (thanks to modular arithmetics):
//
// clock_offset = (clock_t) (end_clock > start_clock ?
// end_clock - start_clock :
// end_clock + (0xffffffffu - start_clock));
//
// Indeed, let m = 2^32 then
// end - start = end + m - start (mod m).
clock_offset = (clock_t)(end_clock - start_clock);
}
d_o[0] = clock_offset;
}
////////////////////////////////////////////////////////////////////////////////
// Kernels that call clock_block().
// Launching them into the same stream makes them depend on (serialize after) one another.
__global__ void kernel_A(clock_t *d_o, clock_t clock_count)
{
clock_block(d_o, clock_count);
}
__global__ void kernel_B(clock_t *d_o, clock_t clock_count)
{
clock_block(d_o, clock_count);
}
__global__ void kernel_C(clock_t *d_o, clock_t clock_count)
{
clock_block(d_o, clock_count);
}
__global__ void kernel_D(clock_t *d_o, clock_t clock_count)
{
clock_block(d_o, clock_count);
}
int simpleHyperQ(int argc, char **argv)
{
    int nstreams = 8;             // number of streams
    float kernel_time = 10;       // time, in ms, that each kernel should run
float elapsed_time;
int cuda_device = 0;
    const char * iname = "CUDA_DEVICE_MAX_CONNECTIONS";
setenv(iname, "4", 1); // 4 or 32
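    // CUDA_DEVICE_MAX_CONNECTIONS sets the number of hardware work queues;
    // with fewer connections, more streams share a queue and may serialize.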
char *ivalue = getenv(iname);
printf("%s = %s\n", iname, ivalue);
cudaDeviceProp deviceProp;
CHECK(cudaGetDevice(&cuda_device));
CHECK(cudaGetDeviceProperties(&deviceProp, cuda_device));
printf("> Detected Compute SM %d.%d hardware with %d multi-processors\n",
deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
    // Allocate pinned host memory
clock_t *a = 0;
CHECK(cudaMallocHost((void **)&a, sizeof(clock_t)));
    // Allocate device memory
clock_t *d_a = 0;
CHECK(cudaMalloc((void **)&d_a, 2 * nstreams * sizeof(clock_t)));
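    // Two clock_t slots per stream: kernels A/C write slot 2*i, kernels B/D write slot 2*i+1.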
    // Allocate and create the stream handles
cudaStream_t *streams = (cudaStream_t *)malloc(nstreams * sizeof(cudaStream_t));
for (int i = 0; i < nstreams; i++)
{
CHECK(cudaStreamCreate(&(streams[i])));
}
    // Create event handles
cudaEvent_t start_event, stop_event;
CHECK(cudaEventCreate(&start_event));
CHECK(cudaEventCreate(&stop_event));
// Target time per kernel = kernel_time ms, clockRate = in KHz
// Target number of clocks = target time * clock frequency
#if defined(__arm__) || defined(__aarch64__)
clock_t time_clocks = (clock_t)(kernel_time * (deviceProp.clockRate / 1000));
#else
clock_t time_clocks = (clock_t)(kernel_time * deviceProp.clockRate);
#endif
clock_t total_clocks = 0;
CHECK(cudaEventRecord(start_event, 0));
for (int i = 0; i < nstreams; ++i)
{
kernel_A <<<1, 1, 0, streams[i] >>>(&d_a[2 * i], time_clocks);
total_clocks += time_clocks;
kernel_B <<<1, 1, 0, streams[i] >>>(&d_a[2 * i + 1], time_clocks);
total_clocks += time_clocks;
kernel_C <<<1, 1, 0, streams[i] >>>(&d_a[2 * i], time_clocks);
total_clocks += time_clocks;
kernel_D <<<1, 1, 0, streams[i] >>>(&d_a[2 * i + 1], time_clocks);
total_clocks += time_clocks;
}
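    // Within each stream the four kernels run in order; across streams they can
    // run concurrently on HyperQ-capable devices when enough connections exist.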
    // Record the stop event on stream 0 (the legacy default stream)
CHECK(cudaEventRecord(stop_event, 0));
    // At this point the CPU could do independent work in parallel with the GPU.
    // Here we simply wait until all queued GPU work has completed.
CHECK(cudaEventSynchronize(stop_event));
CHECK(cudaEventElapsedTime(&elapsed_time, start_event, stop_event));
printf("Expected time for serial execution of %d sets of kernels is between approx. %.3fs and %.3fs\n", nstreams, (nstreams + 1) * kernel_time / 1000.0f, 2 * nstreams *kernel_time / 1000.0f);
printf("Expected time for fully concurrent execution of %d sets of kernels is approx. %.3fs\n", nstreams, 2 * kernel_time / 1000.0f);
printf("Measured time for sample = %.3fs\n", elapsed_time / 1000.0f);
bool bTestResult = (a[0] >= total_clocks);
for (int i = 0; i < nstreams; i++)
{
cudaStreamDestroy(streams[i]);
}
free(streams);
cudaEventDestroy(start_event);
cudaEventDestroy(stop_event);
cudaFreeHost(a);
cudaFree(d_a);
return (bTestResult ? EXIT_SUCCESS : EXIT_FAILURE);
}
/////////////////////////////////////////////////////////////////////////////////
#define LOOP_COUNT 3000000
void CUDART_CB my_callback(cudaStream_t stream, cudaError_t status, void *data)
{
printf("callback from stream %d\n", *((int *)data));
}
__global__ void kernel_1()
{
double sum = 0.0;
for (int i = 0; i < LOOP_COUNT; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
__global__ void kernel_2()
{
double sum = 0.0;
for (int i = 0; i < LOOP_COUNT; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
__global__ void kernel_3()
{
double sum = 0.0;
for (int i = 0; i < LOOP_COUNT; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
__global__ void kernel_4()
{
double sum = 0.0;
for (int i = 0; i < LOOP_COUNT; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
int simpleCallback(int argc, char **argv)
{
int n_streams = 8;
if (argc > 2) n_streams = atoi(argv[2]);
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("> %s Starting...\n", argv[0]);
printf("> Using Device %d: %s\n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
printf("> Compute Capability %d.%d hardware with %d multi-processors\n",
deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
    // Set the maximum number of device connections (hardware work queues)
    const char * iname = "CUDA_DEVICE_MAX_CONNECTIONS";
setenv(iname, "8", 1);
char *ivalue = getenv(iname);
printf("> %s = %s\n", iname, ivalue);
printf("> with streams = %d\n", n_streams);
    // Allocate and initialize the streams
cudaStream_t *streams = (cudaStream_t *)malloc(n_streams * sizeof(
cudaStream_t));
for (int i = 0; i < n_streams; i++)
{
CHECK(cudaStreamCreate(&(streams[i])));
}
dim3 block(1);
dim3 grid(1);
cudaEvent_t start_event, stop_event;
CHECK(cudaEventCreate(&start_event));
CHECK(cudaEventCreate(&stop_event));
    // one id per stream (n_streams can exceed 4, so size the array dynamically)
    int *stream_ids = (int *)malloc(n_streams * sizeof(int));
CHECK(cudaEventRecord(start_event, 0));
for (int i = 0; i < n_streams; i++)
{
stream_ids[i] = i;
kernel_1 <<<grid, block, 0, streams[i] >>>();
kernel_2 <<<grid, block, 0, streams[i] >>>();
kernel_3 <<<grid, block, 0, streams[i] >>>();
kernel_4 <<<grid, block, 0, streams[i] >>>();
CHECK(cudaStreamAddCallback(streams[i], my_callback,
(void *)(stream_ids + i), 0));
}
CHECK(cudaEventRecord(stop_event, 0));
CHECK(cudaEventSynchronize(stop_event));
float elapsed_time;
CHECK(cudaEventElapsedTime(&elapsed_time, start_event, stop_event));
printf("Measured time for parallel execution = %.3fs\n", elapsed_time);
    // Destroy the streams
for (int i = 0; i < n_streams; i++)
{
CHECK(cudaStreamDestroy(streams[i]));
}
    free(streams);
    free(stream_ids);
CHECK(cudaDeviceReset());
return 0;
}
int main(int argc, char* argv[]){
    int ex=0;
    if(argc < 2){
        printf("usage: %s <example number 1-10>\n", argv[0]);
        return 1;
    }
    ex=atoi(argv[1]);
printf("run ex : %d\n",ex);
switch(ex){
case 1:{
printf("multithread\n");//stream
multithread();
break;
}
case 2:{
printf("coaleascing\n");
coaleascing(argc, argv);
break;
}
case 3:{
printf("shared_memory_reverse\n");//simple smem + sync
shared_memory_reverse();
break;
}
case 4:{
printf("reduceSum\n");
reduceSum(argc,argv);
break;
}
case 5:{
printf("smemSquare\n");//smem + sync
smemSquare(argc,argv);
break;
}
case 6:{
printf("simpleHyperQ\n");//hyper q
simpleHyperQ(argc,argv);
break;
}
case 7:{
printf("simpleCallback\n");//stream
simpleCallback(argc,argv);
break;
}
case 8:{
printf("async\n");//simple async memcpy
async(argc,argv);
break;
}
case 9:{
printf("data_transfer_pageable_vs_pinned\n");
data_transfer_pageable_vs_pinned();
break;
}
case 10:{
printf("overlap\n");//stream
overlap(argc,argv);
break;
}
}
return 0;
} |
877414c64108a4e17ad1943f41661819475f792f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Author : H.M.Gamaarachchi
C file for adding of 2 matrices
*/
/*#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <stdlib.h>
#include "myhelpers.h"
#define BLOCK 16*/
//adding kernel
__global__ void cuda_sub(float *dev_c,float *dev_a,float *dev_b,int width,int height){
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x<width && y<height){
int n = y * width + x;
dev_c[n]=dev_a[n]-dev_b[n];
}
}
//addition abstraction
hipError_t substract(float *c,float *a,float *b,int width,int height){
float *dev_a=0;
float *dev_b=0;
float *dev_c=0;
hipError_t cudastatus;
//memory allocation
cudastatus=hipMalloc((void**)&dev_a,width*height*sizeof(float));
if (cudastatus!=hipSuccess)
return cudastatus;
cudastatus=hipMalloc((void**)&dev_b,width*height*sizeof(float));
if (cudastatus!=hipSuccess)
return cudastatus;
cudastatus=hipMalloc((void**)&dev_c,width*height*sizeof(float));
if (cudastatus!=hipSuccess)
return cudastatus;
//copying
cudastatus=hipMemcpy(dev_a,a,sizeof(float)*width*height,hipMemcpyHostToDevice);
if(cudastatus!=hipSuccess)
return cudastatus;
cudastatus=hipMemcpy(dev_b,b,sizeof(float)*width*height,hipMemcpyHostToDevice);
if(cudastatus!=hipSuccess)
return cudastatus;
cudastatus=hipMemcpy(dev_c,c,sizeof(float)*width*height,hipMemcpyHostToDevice);
if(cudastatus!=hipSuccess)
return cudastatus;
dim3 grid(ceil(width/(float)BLOCK),ceil(height/(float)BLOCK));
dim3 block(BLOCK,BLOCK);
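	//one BLOCK x BLOCK thread block per tile; ceil() rounds up so the in-kernel
	//bounds check covers edge pixels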
//Time
//hipEvent_t start,stop;
//float elapsedtime;
//hipEventCreate(&start);
//hipEventRecord(start,0);
//function
hipLaunchKernelGGL(( cuda_sub), dim3(grid),dim3(block), 0, 0, dev_c,dev_a,dev_b,width,height);
checkCudaError(hipGetLastError());
hipDeviceSynchronize();
checkCudaError(hipGetLastError());
//Time
/*hipEventCreate(&stop);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedtime,start,stop);
printf("Time spent for calculation in CUDA : %.10f s\n",elapsedtime/(float)1000);*/
cudastatus=hipGetLastError();
if (cudastatus!=hipSuccess)
return cudastatus;
//copyback
cudastatus=hipMemcpy(c,dev_c,width*height*sizeof(float),hipMemcpyDeviceToHost);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return cudastatus;
}
/*int main(int argc, char *argv[]){
int width,height;
//checking args and getting args
if(argc<5){
printf("Please enter all args eg: ./add file1.txt file2.txt rows cols ans.txt");
exit(1);
}
//char matf1[]=argv[1];
width=atoi(argv[4]);
height=atoi(argv[3]);
//allocating
float *mat1=(float *)malloc(width*height*sizeof(float));
isMemoryFull(mat1);
float *mat2=(float *)malloc(width*height*sizeof(float));
isMemoryFull(mat2);
float *ans=(float *)malloc(width*height*sizeof(float));
isMemoryFull(ans);
//reading files
int i,j;
FILE *fp;
fp=fopen(argv[1],"r");
isFileOK(fp);
for (i=0;i<width*height;i++){
fscanf(fp,"%f",&mat1[i]);
}
fclose(fp);
//printf("reading mat 1 finished\n");
fp=fopen(argv[2],"r");
isFileOK(fp);
for (i=0;i<width*height;i++){
fscanf(fp,"%f",&mat2[i]);
}
fclose(fp);
//printf("reading mat 2 finished\n");
//add
clock_t start=clock();
hipError_t status=add(ans,mat1,mat2,width,height);
checkCudaError(status);
clock_t stop=clock();
double cputime=(double)((stop-start)/(float)CLOCKS_PER_SEC);
printf("Time for calculation with memory transfer overhead : %1.10f s\n",cputime);
//writing to file
fp=fopen(argv[5],"w");
isFileOK(fp);
for (i=0;i<height;i++){
for (j=0;j<width;j++){
fprintf(fp,"%f ",ans[width*i+j]);
}
fprintf(fp,"\n");
}
fclose(fp);
return 0;
}*/
| 877414c64108a4e17ad1943f41661819475f792f.cu | /*
Author : H.M.Gamaarachchi
C file for adding of 2 matrices
*/
/*#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <stdlib.h>
#include "myhelpers.h"
#define BLOCK 16*/
//adding kernel
__global__ void cuda_sub(float *dev_c,float *dev_a,float *dev_b,int width,int height){
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x<width && y<height){
int n = y * width + x;
dev_c[n]=dev_a[n]-dev_b[n];
}
}
//addition abstraction
cudaError_t substract(float *c,float *a,float *b,int width,int height){
float *dev_a=0;
float *dev_b=0;
float *dev_c=0;
cudaError_t cudastatus;
//memory allocation
cudastatus=cudaMalloc((void**)&dev_a,width*height*sizeof(float));
if (cudastatus!=cudaSuccess)
return cudastatus;
cudastatus=cudaMalloc((void**)&dev_b,width*height*sizeof(float));
if (cudastatus!=cudaSuccess)
return cudastatus;
cudastatus=cudaMalloc((void**)&dev_c,width*height*sizeof(float));
if (cudastatus!=cudaSuccess)
return cudastatus;
//copying
cudastatus=cudaMemcpy(dev_a,a,sizeof(float)*width*height,cudaMemcpyHostToDevice);
if(cudastatus!=cudaSuccess)
return cudastatus;
cudastatus=cudaMemcpy(dev_b,b,sizeof(float)*width*height,cudaMemcpyHostToDevice);
if(cudastatus!=cudaSuccess)
return cudastatus;
cudastatus=cudaMemcpy(dev_c,c,sizeof(float)*width*height,cudaMemcpyHostToDevice);
if(cudastatus!=cudaSuccess)
return cudastatus;
dim3 grid(ceil(width/(float)BLOCK),ceil(height/(float)BLOCK));
dim3 block(BLOCK,BLOCK);
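	//one BLOCK x BLOCK thread block per tile; ceil() rounds up so the in-kernel
	//bounds check covers edge pixels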
//Time
//cudaEvent_t start,stop;
//float elapsedtime;
//cudaEventCreate(&start);
//cudaEventRecord(start,0);
//function
cuda_sub<<<grid,block>>>(dev_c,dev_a,dev_b,width,height);
checkCudaError(cudaGetLastError());
cudaDeviceSynchronize();
checkCudaError(cudaGetLastError());
//Time
/*cudaEventCreate(&stop);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedtime,start,stop);
printf("Time spent for calculation in CUDA : %.10f s\n",elapsedtime/(float)1000);*/
cudastatus=cudaGetLastError();
if (cudastatus!=cudaSuccess)
return cudastatus;
//copyback
cudastatus=cudaMemcpy(c,dev_c,width*height*sizeof(float),cudaMemcpyDeviceToHost);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return cudastatus;
}
/*int main(int argc, char *argv[]){
int width,height;
//checking args and getting args
if(argc<5){
printf("Please enter all args eg: ./add file1.txt file2.txt rows cols ans.txt");
exit(1);
}
//char matf1[]=argv[1];
width=atoi(argv[4]);
height=atoi(argv[3]);
//allocating
float *mat1=(float *)malloc(width*height*sizeof(float));
isMemoryFull(mat1);
float *mat2=(float *)malloc(width*height*sizeof(float));
isMemoryFull(mat2);
float *ans=(float *)malloc(width*height*sizeof(float));
isMemoryFull(ans);
//reading files
int i,j;
FILE *fp;
fp=fopen(argv[1],"r");
isFileOK(fp);
for (i=0;i<width*height;i++){
fscanf(fp,"%f",&mat1[i]);
}
fclose(fp);
//printf("reading mat 1 finished\n");
fp=fopen(argv[2],"r");
isFileOK(fp);
for (i=0;i<width*height;i++){
fscanf(fp,"%f",&mat2[i]);
}
fclose(fp);
//printf("reading mat 2 finished\n");
//add
clock_t start=clock();
cudaError_t status=add(ans,mat1,mat2,width,height);
checkCudaError(status);
clock_t stop=clock();
double cputime=(double)((stop-start)/(float)CLOCKS_PER_SEC);
printf("Time for calculation with memory transfer overhead : %1.10f s\n",cputime);
//writing to file
fp=fopen(argv[5],"w");
isFileOK(fp);
for (i=0;i<height;i++){
for (j=0;j<width;j++){
fprintf(fp,"%f ",ans[width*i+j]);
}
fprintf(fp,"\n");
}
fclose(fp);
return 0;
}*/
|
86e5639aa9b78e774845fd208be8fb9dfdcf75b4.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include <ATen/ATen.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <math.h>
#include <THH/THHDeviceUtils.cuh>
#include <THH/THHGeneral.h>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/TensorAccessor.h>
#if defined(__HIP_PLATFORM_HCC__)
constexpr int WARP_SIZE = 64;
#else
constexpr int WARP_SIZE = 32;
#endif
// The maximum number of threads in a block
#if defined(__HIP_PLATFORM_HCC__)
constexpr int MAX_BLOCK_SIZE = 256;
#else
constexpr int MAX_BLOCK_SIZE = 512;
#endif
// Number of threads in a block given an input size up to MAX_BLOCK_SIZE
static int getNumThreads(int nElem) {
#if defined(__HIP_PLATFORM_HCC__)
int threadSizes[5] = { 16, 32, 64, 128, MAX_BLOCK_SIZE };
#else
int threadSizes[5] = { 32, 64, 128, 256, MAX_BLOCK_SIZE };
#endif
for (int i = 0; i != 5; ++i) {
if (nElem <= threadSizes[i]) {
return threadSizes[i];
}
}
return MAX_BLOCK_SIZE;
}
// Returns the index of the most significant 1 bit in `val`.
__device__ __forceinline__ int getMSB(int val) {
return 31 - __clz(val);
}
template <typename scalar_t, typename accscalar_t>
struct Float2 {
accscalar_t v1, v2;
__device__ Float2() {}
__device__ Float2(scalar_t v1, scalar_t v2) : v1(static_cast<accscalar_t>(v1)), v2(static_cast<accscalar_t>(v2)) {}
__device__ Float2(int v) : v1(static_cast<accscalar_t>(v)), v2(static_cast<accscalar_t>(v)) {}
__device__ Float2& operator+=(const Float2& a) {
v1 += a.v1;
v2 += a.v2;
return *this;
}
};
template <typename scalar_t, typename accscalar_t, typename PTA>
struct SumOp {
__device__ SumOp(const PTA& t) : tensor(t) {}
__device__ __forceinline__ accscalar_t operator()(int batch, int plane, int n) {
return static_cast<accscalar_t>(tensor[batch][plane][n]);
}
const PTA& tensor;
};
template <typename scalar_t, typename accscalar_t, typename PTA>
struct VarOp {
__device__ VarOp(accscalar_t m, const PTA& t) : mean(m), tensor(t) {}
__device__ __forceinline__ accscalar_t operator()(int batch, int plane, int n) {
accscalar_t val = tensor[batch][plane][n];
return (val - mean) * (val - mean);
}
const accscalar_t mean;
const PTA& tensor;
};
template <typename scalar_t, typename accscalar_t, typename PTA>
struct GradOp {
__device__ GradOp(accscalar_t m, const PTA& i, const PTA& g)
: mean(m), input(i), grad_output(g) {}
__device__ __forceinline__ Float2<scalar_t, accscalar_t> operator()(int batch, int plane, int n) {
accscalar_t g = grad_output[batch][plane][n];
accscalar_t c = static_cast<accscalar_t>(input[batch][plane][n]) - mean;
return Float2<scalar_t, accscalar_t>(g, g * c);
}
const accscalar_t mean;
const PTA& input;
const PTA& grad_output;
};
// Sum across all threads within a warp
template <typename T>
static __device__ __forceinline__ T warpSum(T val) {
for (int i = 0; i < getMSB(WARP_SIZE); ++i) {
val += WARP_SHFL_XOR(val, 1 << i, WARP_SIZE);
}
return val;
}
template <typename scalar_t, typename accscalar_t>
static __device__ __forceinline__ Float2<scalar_t, accscalar_t> warpSum(Float2<scalar_t, accscalar_t> value) {
value.v1 = warpSum(value.v1);
value.v2 = warpSum(value.v2);
return value;
}
// Sum across (batch, x/y/z) applying Op() pointwise
// this works by first having each thread sum its part
// of the data. Then there is a double-shuffling reduction.
// First each warp (of WARP_SIZE threads) uses warpSum to reduce its
// data to the "warp leader", who writes its value into shared memory.
// Then a single warp reads the remaining (at most WARP_SIZE) items
// and reduces them using another warpSum.
// The implicit assumption is that there are no more
// than WARP_SIZE**2 threads.
template<typename scalar_t, typename Op, typename PTA>
__device__ scalar_t reduce(Op op, PTA tensor, int plane) {
// first the reductions each thread does separately
scalar_t sum = static_cast<scalar_t>(0);
for (int batch = threadIdx.y; batch < tensor.size(0); batch += blockDim.y) {
for (int x = threadIdx.x; x < tensor.size(2); x += blockDim.x) {
sum += op(batch, plane, x);
}
}
// first warpSum to get one value per thread to
// one value per warp
sum = warpSum(sum);
  // this writes each warp's item into shared memory
// there are at most WARP_SIZE items left because
// there are at most WARP_SIZE**2 threads at the beginning
__shared__ scalar_t shared[WARP_SIZE];
__syncthreads();
int tid = threadIdx.x + threadIdx.y * blockDim.x;
if (tid % WARP_SIZE == 0) {
shared[tid / WARP_SIZE] = sum;
}
if (tid >= blockDim.x * blockDim.y / WARP_SIZE && tid < WARP_SIZE) {
// zero out the other entries in shared
shared[tid] = (scalar_t)0;
}
__syncthreads();
// now have a second warpSum to reduce the intermediate values
// from shared memory to a single number. The very first
// thread writes it to shared memory.
if (tid / WARP_SIZE == 0) {
sum = warpSum(shared[tid]);
if (tid == 0) {
shared[0] = sum;
}
}
__syncthreads();
// Everyone picks it up, should be broadcast into the whole grad_input
return shared[0];
}
template <typename scalar_t, typename accscalar_t, bool train, typename index_t>
__global__ void batch_norm_transform_input_kernel(
const at::PackedTensorAccessor<scalar_t, 3, at::RestrictPtrTraits, index_t> input,
at::PackedTensorAccessor<scalar_t, 3, at::RestrictPtrTraits, index_t> output,
const at::PackedTensorAccessor<typename std::conditional<train, accscalar_t, scalar_t>::type, 1, at::RestrictPtrTraits, index_t> mean_,
const at::PackedTensorAccessor<typename std::conditional<train, accscalar_t, scalar_t>::type, 1, at::RestrictPtrTraits, index_t> var_or_std,
const at::PackedTensorAccessor<scalar_t, 1, at::RestrictPtrTraits, index_t> weight,
const at::PackedTensorAccessor<scalar_t, 1, at::RestrictPtrTraits, index_t> bias,
accscalar_t epsilon) {
index_t plane = blockIdx.x;
if (plane >= input.size(1)) {
return;
}
accscalar_t gamma = weight.size(0) > 0 ? static_cast<accscalar_t>(weight[plane]) : static_cast<accscalar_t>(1);
accscalar_t beta = bias.size(0) > 0 ? static_cast<accscalar_t>(bias[plane]) : static_cast<accscalar_t>(0);
accscalar_t mean = static_cast<accscalar_t>(mean_[plane]);
accscalar_t invstd = 1.0 / var_or_std[plane];
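  // var_or_std holds the per-channel standard deviation here (the caller passes
  // sqrt(E[x^2] - E[x]^2 + eps)), so its reciprocal is 1/std.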
index_t bs = input.size(0);
index_t fs = input.size(2);
index_t bstep = blockDim.y * gridDim.y;
for (index_t batch = threadIdx.y + blockIdx.y * blockDim.y; batch < bs; batch += bstep) {
auto o = output[batch][plane];
auto i = input[batch][plane];
for (index_t feature = threadIdx.x; feature < fs; feature += blockDim.x) {
o[feature] = static_cast<scalar_t>(gamma * (i[feature] - mean) * invstd + beta);
}
}
}
template <typename scalar_t, typename accscalar_t, typename index_t>
__global__ void batch_norm_collect_statistics_kernel(
const at::PackedTensorAccessor<scalar_t, 3, at::RestrictPtrTraits, index_t> input,
at::PackedTensorAccessor<accscalar_t, 1, at::RestrictPtrTraits, index_t> save_mean,
at::PackedTensorAccessor<accscalar_t, 1, at::RestrictPtrTraits, index_t> save_mean2) {
__shared__ int shared_n[2 * 2 * WARP_SIZE + WARP_SIZE];
int plane = blockIdx.x;
int N = input.size(0) * input.size(2);
int tid = threadIdx.x + threadIdx.y * blockDim.x;
// Compute the mean and variance across (batch, x/y/z)
// this uses the Welford (in the for loop)/parallel algorithm (to sum across the block)
// https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_Online_algorithm
// and the parallel algorithm on the same page.
// We use two shuffles to reduce across the entire block.
// https://devblogs.nvidia.com/faster-parallel-reductions-kepler/ has a description.
accscalar_t* shared_avg_var = (accscalar_t*) &shared_n[WARP_SIZE];
// first the reductions each thread does separately
accscalar_t avg = 0;
accscalar_t avg2 = 0;
int n = 0;
for (int batch = threadIdx.y; batch < input.size(0); batch += blockDim.y) {
for (int x = threadIdx.x; x < input.size(2); x += blockDim.x) {
accscalar_t v = input[batch][plane][x];
accscalar_t d1 = v - avg;
accscalar_t d2 = (v * v) - avg2;
n++;
avg += d1 / n;
avg2 += d2 / n;
}
}
// first warpSum to get one value per thread to
// one value per warp
for (int i = 0; i < getMSB(WARP_SIZE); ++i) {
accscalar_t o_avg = WARP_SHFL_XOR(avg, 1 << i, WARP_SIZE);
accscalar_t o_avg2 = WARP_SHFL_XOR(avg2, 1 << i, WARP_SIZE);
int o_n = WARP_SHFL_XOR(n, 1 << i, WARP_SIZE);
accscalar_t factor = 1.0 / fmaxf(1.0, n+o_n);
// var_n += WARP_SHFL_XOR(var_n, 1 << i, WARP_SIZE) + (avg - o_avg) * (avg - o_avg) * n * o_n * factor;
avg2 = (n * avg2 + o_n * o_avg2) * factor;
avg = (n * avg + o_n * o_avg) * factor;
n += o_n;
}
  // this writes each warp's item into shared memory
// there are at most WARP_SIZE items left because
// there are at most WARP_SIZE**2 threads at the beginning
__syncthreads();
if (tid % WARP_SIZE == 0) {
shared_n[tid / WARP_SIZE] = n;
shared_avg_var[tid / WARP_SIZE * 2] = avg;
shared_avg_var[tid / WARP_SIZE * 2 + 1] = avg2;
}
__syncthreads();
// now have a second warpSum to reduce the intermediate values
// from shared memory to a single number. The very first
// thread writes it to shared memory.
if (tid < WARP_SIZE) {
n = (tid < blockDim.x * blockDim.y / WARP_SIZE ? shared_n[tid] : 0);
avg = (tid < blockDim.x * blockDim.y / WARP_SIZE ? shared_avg_var[2 * tid] : 0);
avg2 = (tid < blockDim.x * blockDim.y / WARP_SIZE ? shared_avg_var[2 * tid + 1] : 0);
}
for (int i = 0; i < getMSB(WARP_SIZE); ++i) {
accscalar_t o_avg = WARP_SHFL_XOR(avg, 1 << i, WARP_SIZE);
accscalar_t o_avg2 = WARP_SHFL_XOR(avg2, 1 << i, WARP_SIZE);
int o_n = WARP_SHFL_XOR(n, 1 << i, WARP_SIZE);
accscalar_t factor = 1.0 / fmaxf(1.0, n+o_n);
// var_n += WARP_SHFL_XOR(var_n, 1 << i, WARP_SIZE) + (avg - o_avg) * (avg - o_avg) * n * o_n * factor;
avg2 = (n * avg2 + o_n * o_avg2) * factor;
avg = (n * avg + o_n * o_avg) * factor;
n += o_n;
}
// Save the mean, variance, and moving averages
if (tid == 0) {
/*
accscalar_t invstd = 0;
if (var_n != static_cast<accscalar_t>(0) || epsilon != static_cast<accscalar_t>(0)) {
invstd = static_cast<accscalar_t>(1) / device_sqrt(var_n / N + epsilon);
}
*/
save_mean[plane] = avg;
save_mean2[plane] = avg2;
}
}
template <typename scalar_t, typename accscalar_t, typename index_t>
__global__ void batch_norm_collect_grad_statistics_kernel(
const at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t> input,
const at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t> grad_output,
at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> grad_weight,
at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> grad_bias,
at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> grad_ex,
at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> grad_exs,
const at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> weight,
const at::PackedTensorAccessor<accscalar_t, 1, at::DefaultPtrTraits, index_t> save_mean,
const at::PackedTensorAccessor<accscalar_t, 1, at::DefaultPtrTraits, index_t> save_std,
accscalar_t epsilon) {
index_t plane = blockIdx.x;
index_t N = grad_output.size(0) * grad_output.size(2);
accscalar_t mean, invstd;
mean = save_mean[plane];
invstd = 1.0 / save_std[plane];
/*
if (train) {
mean = save_mean[plane];
invstd = 1.0 / save_std[plane];
} else {
mean = static_cast<accscalar_t>(running_mean[plane]);
invstd = static_cast<accscalar_t>(1) / device_sqrt(static_cast<accscalar_t>(running_var[plane]) + epsilon);
}
*/
accscalar_t weight_val = weight.size(0) > 0 ? static_cast<accscalar_t>(weight[plane]) : accscalar_t(1);
// accscalar_t norm = accscalar_t(1) / N;
// Compute two values across (batch, x/y/z) in one pass:
// 1. Sum(grad_output)
// 2. DotProduct(input - mean, grad_output)
GradOp<scalar_t, accscalar_t, at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t>> g(mean, input, grad_output);
Float2<scalar_t, accscalar_t> res = reduce<Float2<scalar_t, accscalar_t>, GradOp<scalar_t, accscalar_t,
at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t>>>(g, grad_output, plane);
accscalar_t grad_output_sum = res.v1;
accscalar_t dot_p = res.v2;
/*
accscalar_t grad_mean = grad_output_sum * norm;
accscalar_t proj_scale = dot_p * norm * invstd * invstd;
accscalar_t grad_scale = invstd * weight_val;
if (grad_input.data() != NULL) {
for (int batch = threadIdx.y; batch < grad_output.size(0); batch += blockDim.y) {
for (int x = threadIdx.x; x < grad_output.size(2); x += blockDim.x) {
scalar_t go = grad_output[batch][plane][x];
if (train) {
scalar_t inp = input[batch][plane][x];
accscalar_t proj = (inp - mean) * proj_scale;
grad_input[batch][plane][x] = static_cast<scalar_t>((go - proj - grad_mean) * grad_scale);
} else {
grad_input[batch][plane][x] = static_cast<scalar_t>(go * grad_scale);
}
}
}
}
*/
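  // Chain rule through invstd = (E[x^2] - E[x]^2 + eps)^(-1/2):
  //   dL/dE[x^2] = dot_p * weight * (-1/2) * invstd^3
  //   dL/dE[x]   = -grad_output_sum * weight * invstd + dot_p * weight * invstd^3 * mean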
if (threadIdx.x == 0) {
grad_exs[plane] = static_cast<scalar_t>(dot_p * weight_val * (-0.5) * pow(invstd, 3.0));
grad_ex[plane] = static_cast<scalar_t>(grad_output_sum * weight_val * (-1.0) * invstd + \
dot_p * weight_val * pow(invstd, 3.0) * mean);
}
if (grad_weight.size(0) > 0) {
if (threadIdx.x == 0) {
// printf("dot_p = %f, invstd = %f\n", dot_p, invstd);
grad_weight[plane] = static_cast<scalar_t>(dot_p * invstd);
}
}
if (grad_bias.size(0) > 0) {
if (threadIdx.x == 0) {
grad_bias[plane] = static_cast<scalar_t>(grad_output_sum);
}
}
}
template <typename scalar_t, typename accscalar_t, typename index_t>
__global__ void batch_norm_backward_kernel(
const at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t> input,
const at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t> grad_output,
at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t> grad_input,
at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> grad_weight,
at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> grad_bias,
const at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> weight,
const at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> running_mean,
const at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> running_var,
const at::PackedTensorAccessor<accscalar_t, 1, at::DefaultPtrTraits, index_t> save_mean,
const at::PackedTensorAccessor<accscalar_t, 1, at::DefaultPtrTraits, index_t> save_invstd,
bool train,
accscalar_t epsilon) {
index_t plane = blockIdx.x;
index_t N = grad_output.size(0) * grad_output.size(2);
accscalar_t mean, invstd;
if (train) {
mean = save_mean[plane];
invstd = save_invstd[plane];
} else {
mean = static_cast<accscalar_t>(running_mean[plane]);
invstd = static_cast<accscalar_t>(1) / device_sqrt(static_cast<accscalar_t>(running_var[plane]) + epsilon);
}
accscalar_t weight_val = weight.size(0) > 0 ? static_cast<accscalar_t>(weight[plane]) : accscalar_t(1);
accscalar_t norm = accscalar_t(1) / N;
// Compute two values across (batch, x/y/z) in one pass:
// 1. Sum(grad_output)
// 2. DotProduct(input - mean, grad_output)
GradOp<scalar_t, accscalar_t, at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t>> g(mean, input, grad_output);
Float2<scalar_t, accscalar_t> res = reduce<Float2<scalar_t, accscalar_t>, GradOp<scalar_t, accscalar_t,
at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t>>>(g, grad_output, plane);
accscalar_t grad_output_sum = res.v1;
accscalar_t dot_p = res.v2;
accscalar_t grad_mean = grad_output_sum * norm;
accscalar_t proj_scale = dot_p * norm * invstd * invstd;
accscalar_t grad_scale = invstd * weight_val;
if (grad_input.data() != NULL) {
for (int batch = threadIdx.y; batch < grad_output.size(0); batch += blockDim.y) {
for (int x = threadIdx.x; x < grad_output.size(2); x += blockDim.x) {
scalar_t go = grad_output[batch][plane][x];
if (train) {
scalar_t inp = input[batch][plane][x];
accscalar_t proj = (inp - mean) * proj_scale;
grad_input[batch][plane][x] = static_cast<scalar_t>((go - proj - grad_mean) * grad_scale);
} else {
grad_input[batch][plane][x] = static_cast<scalar_t>(go * grad_scale);
}
}
}
}
if (grad_weight.size(0) > 0) {
if (threadIdx.x == 0) {
grad_weight[plane] = static_cast<scalar_t>(dot_p * invstd);
}
}
if (grad_bias.size(0) > 0) {
if (threadIdx.x == 0) {
grad_bias[plane] = static_cast<scalar_t>(grad_output_sum);
}
}
}
template <typename scalar_t, int64_t dim, template <typename U> class PtrTraits = at::DefaultPtrTraits, typename index_t = int64_t>
static at::PackedTensorAccessor<scalar_t, dim, PtrTraits, index_t> packed_accessor_or_dummy(const at::Tensor& t) {
if (! t.defined()) {
const std::vector<index_t> zeros(dim);
return at::PackedTensorAccessor<scalar_t, dim, PtrTraits, index_t>(nullptr, zeros.data(), zeros.data());
}
return t.packed_accessor<scalar_t, dim, PtrTraits, index_t>();
}
std::vector<at::Tensor> batch_norm_collect_statistics_cuda(
const at::Tensor input) {
// const auto batch_size = input.size(0);
const auto channel_size = input.size(1);
// const auto dim_size = input.size(2);
auto input_reshaped = input.reshape({input.size(0), input.size(1), -1}); // internally we merge the feature dimensions
auto ex = at::empty({channel_size}, input.options());
auto exs = at::empty({channel_size}, input.options());
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const dim3 blocks(input_reshaped.size(1));
int tf = getNumThreads(input_reshaped.size(2));
dim3 threads(tf, std::max<int>(1, MAX_BLOCK_SIZE/tf));
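  // One block per channel (plane); inside the kernel threadIdx.x strides over the
  // flattened feature dimension and threadIdx.y over the batch dimension.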
AT_DISPATCH_FLOATING_TYPES(input.type(), "batch_norm_collect_statistics_cuda", ([&] {
using accscalar_t = at::acc_type<scalar_t, true>;
if (at::cuda::detail::canUse32BitIndexMath(input)) {
hipLaunchKernelGGL(( batch_norm_collect_statistics_kernel<scalar_t, accscalar_t, int32_t>), dim3(blocks), dim3(threads), 0, stream,
input_reshaped.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, int32_t>(),
ex.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, int32_t>(),
exs.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, int32_t>());
} else {
hipLaunchKernelGGL(( batch_norm_collect_statistics_kernel<scalar_t, accscalar_t, int64_t>), dim3(blocks), dim3(threads), 0, stream,
input_reshaped.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, int64_t>(),
ex.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, int64_t>(),
exs.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, int64_t>());
}
}));
THCudaCheck(hipGetLastError());
return {ex, exs};
}
at::Tensor batch_norm_transform_input_cuda(
const at::Tensor input,
const at::Tensor gamma,
const at::Tensor beta,
const at::Tensor ex,
const at::Tensor exs,
float eps) {
const auto channel_size = input.size(1);
auto input_reshaped = input.reshape({input.size(0), input.size(1), -1}); // internally we merge the feature dimensions
auto output_reshaped = at::empty_like(input_reshaped);
auto std = (exs - ex * ex + eps).sqrt();
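  // ex holds E[x] and exs holds E[x^2] per channel, so exs - ex*ex is the
  // (biased) variance and std = sqrt(var + eps).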
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
int tf = std::max<int>(getNumThreads(input_reshaped.size(2)/4),
std::min<int>(getNumThreads(input_reshaped.size(2)), 64));
int tb = std::max<int>(64/tf, 1);
dim3 blocks_trans(input_reshaped.size(1), std::max<int>(1, std::min<int>((256*1024)/input_reshaped.size(1),
(input_reshaped.size(0)+tb-1)/tb)));
dim3 threads_trans(tf, tb);
AT_DISPATCH_FLOATING_TYPES(input.type(), "batch_norm_transform_input_cuda", ([&] {
using accscalar_t = at::acc_type<scalar_t, true>;
if (at::cuda::detail::canUse32BitIndexMath(input)) {
hipLaunchKernelGGL(( batch_norm_transform_input_kernel<scalar_t, accscalar_t, true, int32_t>), dim3(blocks_trans), dim3(threads_trans), 0, stream,
input_reshaped.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, int32_t>(),
output_reshaped.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, int32_t>(),
ex.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, int32_t>(),
std.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, int32_t>(),
packed_accessor_or_dummy<scalar_t, 1, at::RestrictPtrTraits, int32_t>(gamma),
packed_accessor_or_dummy<scalar_t, 1, at::RestrictPtrTraits, int32_t>(beta),
eps);
} else {
hipLaunchKernelGGL(( batch_norm_transform_input_kernel<scalar_t, accscalar_t, true, int64_t>), dim3(blocks_trans), dim3(threads_trans), 0, stream,
input_reshaped.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, int64_t>(),
output_reshaped.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, int64_t>(),
ex.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, int64_t>(),
std.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, int64_t>(),
packed_accessor_or_dummy<scalar_t, 1, at::RestrictPtrTraits, int64_t>(gamma),
packed_accessor_or_dummy<scalar_t, 1, at::RestrictPtrTraits, int64_t>(beta),
eps);
}
}));
THCudaCheck(hipGetLastError());
return output_reshaped.view(input.sizes());
}
std::vector<at::Tensor> batch_norm_collect_grad_statistics_cuda(
const at::Tensor input,
const at::Tensor grad_output,
const at::Tensor weight,
const at::Tensor ex,
const at::Tensor exs,
float eps) {
const auto channel_size = input.size(1);
auto input_reshaped = input.reshape({input.size(0), input.size(1), -1}); // internally we merge the feature dimensions
auto grad_output_reshaped = grad_output.reshape(input_reshaped.sizes());
auto std = (exs - ex * ex + eps).sqrt();
auto grad_weight = at::empty_like(weight);
auto grad_bias = at::empty_like(weight);
auto grad_ex = at::empty_like(ex);
auto grad_exs = at::empty_like(exs);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const dim3 blocks(input_reshaped.size(1));
int tf = getNumThreads(input_reshaped.size(2));
dim3 threads(tf, std::max<int>(1, MAX_BLOCK_SIZE/tf));
AT_DISPATCH_FLOATING_TYPES(input.type(), "batch_norm_collect_grad_statistics_cuda", ([&] {
using accscalar_t = at::acc_type<scalar_t, true>;
if (at::cuda::detail::canUse32BitIndexMath(input)) {
hipLaunchKernelGGL(( batch_norm_collect_grad_statistics_kernel<scalar_t, accscalar_t, int32_t>), dim3(blocks), dim3(threads), 0, stream,
input_reshaped.packed_accessor<scalar_t, 3, at::DefaultPtrTraits, int32_t>(),
grad_output_reshaped.packed_accessor<scalar_t, 3, at::DefaultPtrTraits, int32_t>(),
packed_accessor_or_dummy<scalar_t, 1, at::DefaultPtrTraits, int32_t>(grad_weight),
packed_accessor_or_dummy<scalar_t, 1, at::DefaultPtrTraits, int32_t>(grad_bias),
packed_accessor_or_dummy<scalar_t, 1, at::DefaultPtrTraits, int32_t>(grad_ex),
packed_accessor_or_dummy<scalar_t, 1, at::DefaultPtrTraits, int32_t>(grad_exs),
packed_accessor_or_dummy<scalar_t, 1, at::DefaultPtrTraits, int32_t>(weight),
packed_accessor_or_dummy<accscalar_t, 1, at::DefaultPtrTraits, int32_t>(ex),
packed_accessor_or_dummy<accscalar_t, 1, at::DefaultPtrTraits, int32_t>(std),
eps);
} else {
hipLaunchKernelGGL(( batch_norm_collect_grad_statistics_kernel<scalar_t, accscalar_t, int64_t>), dim3(blocks), dim3(threads), 0, stream,
input_reshaped.packed_accessor<scalar_t, 3, at::DefaultPtrTraits, int64_t>(),
grad_output_reshaped.packed_accessor<scalar_t, 3, at::DefaultPtrTraits, int64_t>(),
packed_accessor_or_dummy<scalar_t, 1, at::DefaultPtrTraits, int64_t>(grad_weight),
packed_accessor_or_dummy<scalar_t, 1, at::DefaultPtrTraits, int64_t>(grad_bias),
packed_accessor_or_dummy<scalar_t, 1, at::DefaultPtrTraits, int64_t>(grad_ex),
packed_accessor_or_dummy<scalar_t, 1, at::DefaultPtrTraits, int64_t>(grad_exs),
packed_accessor_or_dummy<scalar_t, 1, at::DefaultPtrTraits, int64_t>(weight),
packed_accessor_or_dummy<accscalar_t, 1, at::DefaultPtrTraits, int64_t>(ex),
packed_accessor_or_dummy<accscalar_t, 1, at::DefaultPtrTraits, int64_t>(std),
eps);
}
}));
THCudaCheck(hipGetLastError());
return {grad_weight, grad_bias, grad_ex, grad_exs};
}
template <typename scalar_t, typename accscalar_t, typename index_t>
__global__ void batch_norm_input_backward_kernel(
const at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t> input,
const at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t> grad_output,
at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t> grad_input,
const at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> grad_ex,
const at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> grad_exs,
const at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> weight,
const at::PackedTensorAccessor<accscalar_t, 1, at::DefaultPtrTraits, index_t> save_mean,
const at::PackedTensorAccessor<accscalar_t, 1, at::DefaultPtrTraits, index_t> save_invstd,
accscalar_t epsilon) {
index_t plane = blockIdx.x;
index_t N = grad_output.size(0) * grad_output.size(2);
// accscalar_t mean, invstd;
// mean = save_mean[plane];
accscalar_t invstd;
invstd = 1.0 / save_invstd[plane];
accscalar_t weight_val = weight.size(0) > 0 ? static_cast<accscalar_t>(weight[plane]) : accscalar_t(1);
accscalar_t norm = accscalar_t(1) / N;
/*
// Compute two values across (batch, x/y/z) in one pass:
// 1. Sum(grad_output)
// 2. DotProduct(input - mean, grad_output)
GradOp<scalar_t, accscalar_t, at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t>> g(mean, input, grad_output);
Float2<scalar_t, accscalar_t> res = reduce<Float2<scalar_t, accscalar_t>, GradOp<scalar_t, accscalar_t,
at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t>>>(g, grad_output, plane);
accscalar_t grad_output_sum = res.v1;
accscalar_t dot_p = res.v2;
accscalar_t grad_mean = grad_output_sum * norm;
accscalar_t proj_scale = dot_p * norm * invstd * invstd;
accscalar_t grad_scale = invstd * weight_val;
*/
if (grad_input.data() != NULL) {
for (int batch = threadIdx.y; batch < grad_output.size(0); batch += blockDim.y) {
for (int x = threadIdx.x; x < grad_output.size(2); x += blockDim.x) {
grad_input[batch][plane][x] =
static_cast<scalar_t>(grad_output[batch][plane][x] * invstd * weight_val + grad_exs[plane] * 2.0 * input[batch][plane][x] * norm + \
grad_ex[plane] * norm);
}
}
}
}
at::Tensor batch_norm_input_backward_cuda(
const at::Tensor input,
const at::Tensor grad_output,
const at::Tensor weight,
const at::Tensor ex,
const at::Tensor exs,
const at::Tensor grad_ex,
const at::Tensor grad_exs,
float eps) {
auto input_reshaped = input.reshape({input.size(0), input.size(1), -1}); // internally we merge the feature dimensions
auto grad_output_reshaped = grad_output.reshape(input_reshaped.sizes());
auto std = (exs - ex * ex + eps).sqrt();
auto grad_input = at::empty_like(input);
auto grad_input_reshaped = grad_input.view(input_reshaped.sizes());
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const dim3 blocks(input_reshaped.size(1));
int tf = getNumThreads(input_reshaped.size(2));
dim3 threads(tf, std::max<int>(1, MAX_BLOCK_SIZE/tf));
AT_DISPATCH_FLOATING_TYPES(input.type(), "batch_norm_input_backward_cuda", ([&] {
using accscalar_t = at::acc_type<scalar_t, true>;
if (at::cuda::detail::canUse32BitIndexMath(input)) {
hipLaunchKernelGGL(( batch_norm_input_backward_kernel<scalar_t, accscalar_t, int32_t>), dim3(blocks), dim3(threads), 0, stream,
input_reshaped.packed_accessor<scalar_t, 3, at::DefaultPtrTraits, int32_t>(),
grad_output_reshaped.packed_accessor<scalar_t, 3, at::DefaultPtrTraits, int32_t>(),
packed_accessor_or_dummy<scalar_t, 3, at::DefaultPtrTraits, int32_t>(grad_input_reshaped),
grad_ex.packed_accessor<scalar_t, 1, at::DefaultPtrTraits, int32_t>(),
grad_exs.packed_accessor<scalar_t, 1, at::DefaultPtrTraits, int32_t>(),
packed_accessor_or_dummy<scalar_t, 1, at::DefaultPtrTraits, int32_t>(weight),
packed_accessor_or_dummy<accscalar_t, 1, at::DefaultPtrTraits, int32_t>(ex),
packed_accessor_or_dummy<accscalar_t, 1, at::DefaultPtrTraits, int32_t>(std),
eps);
} else {
hipLaunchKernelGGL(( batch_norm_input_backward_kernel<scalar_t, accscalar_t, int64_t>), dim3(blocks), dim3(threads), 0, stream,
input_reshaped.packed_accessor<scalar_t, 3, at::DefaultPtrTraits, int64_t>(),
grad_output_reshaped.packed_accessor<scalar_t, 3, at::DefaultPtrTraits, int64_t>(),
packed_accessor_or_dummy<scalar_t, 3, at::DefaultPtrTraits, int64_t>(grad_input_reshaped),
grad_ex.packed_accessor<scalar_t, 1, at::DefaultPtrTraits, int64_t>(),
grad_exs.packed_accessor<scalar_t, 1, at::DefaultPtrTraits, int64_t>(),
packed_accessor_or_dummy<scalar_t, 1, at::DefaultPtrTraits, int64_t>(weight),
packed_accessor_or_dummy<accscalar_t, 1, at::DefaultPtrTraits, int64_t>(ex),
packed_accessor_or_dummy<accscalar_t, 1, at::DefaultPtrTraits, int64_t>(std),
eps);
}
}));
THCudaCheck(hipGetLastError());
return grad_input;
} | 86e5639aa9b78e774845fd208be8fb9dfdcf75b4.cu | #include <vector>
#include <ATen/ATen.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <math.h>
#include <THC/THCDeviceUtils.cuh>
#include <THC/THCGeneral.h>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/TensorAccessor.h>
#if defined(__HIP_PLATFORM_HCC__)
constexpr int WARP_SIZE = 64;
#else
constexpr int WARP_SIZE = 32;
#endif
// The maximum number of threads in a block
#if defined(__HIP_PLATFORM_HCC__)
constexpr int MAX_BLOCK_SIZE = 256;
#else
constexpr int MAX_BLOCK_SIZE = 512;
#endif
// Number of threads in a block given an input size up to MAX_BLOCK_SIZE
static int getNumThreads(int nElem) {
#if defined(__HIP_PLATFORM_HCC__)
int threadSizes[5] = { 16, 32, 64, 128, MAX_BLOCK_SIZE };
#else
int threadSizes[5] = { 32, 64, 128, 256, MAX_BLOCK_SIZE };
#endif
for (int i = 0; i != 5; ++i) {
if (nElem <= threadSizes[i]) {
return threadSizes[i];
}
}
return MAX_BLOCK_SIZE;
}
// Returns the index of the most significant 1 bit in `val`.
__device__ __forceinline__ int getMSB(int val) {
return 31 - __clz(val);
}
template <typename scalar_t, typename accscalar_t>
struct Float2 {
accscalar_t v1, v2;
__device__ Float2() {}
__device__ Float2(scalar_t v1, scalar_t v2) : v1(static_cast<accscalar_t>(v1)), v2(static_cast<accscalar_t>(v2)) {}
__device__ Float2(int v) : v1(static_cast<accscalar_t>(v)), v2(static_cast<accscalar_t>(v)) {}
__device__ Float2& operator+=(const Float2& a) {
v1 += a.v1;
v2 += a.v2;
return *this;
}
};
template <typename scalar_t, typename accscalar_t, typename PTA>
struct SumOp {
__device__ SumOp(const PTA& t) : tensor(t) {}
__device__ __forceinline__ accscalar_t operator()(int batch, int plane, int n) {
return static_cast<accscalar_t>(tensor[batch][plane][n]);
}
const PTA& tensor;
};
template <typename scalar_t, typename accscalar_t, typename PTA>
struct VarOp {
__device__ VarOp(accscalar_t m, const PTA& t) : mean(m), tensor(t) {}
__device__ __forceinline__ accscalar_t operator()(int batch, int plane, int n) {
accscalar_t val = tensor[batch][plane][n];
return (val - mean) * (val - mean);
}
const accscalar_t mean;
const PTA& tensor;
};
template <typename scalar_t, typename accscalar_t, typename PTA>
struct GradOp {
__device__ GradOp(accscalar_t m, const PTA& i, const PTA& g)
: mean(m), input(i), grad_output(g) {}
__device__ __forceinline__ Float2<scalar_t, accscalar_t> operator()(int batch, int plane, int n) {
accscalar_t g = grad_output[batch][plane][n];
accscalar_t c = static_cast<accscalar_t>(input[batch][plane][n]) - mean;
return Float2<scalar_t, accscalar_t>(g, g * c);
}
const accscalar_t mean;
const PTA& input;
const PTA& grad_output;
};
// Sum across all threads within a warp
template <typename T>
static __device__ __forceinline__ T warpSum(T val) {
for (int i = 0; i < getMSB(WARP_SIZE); ++i) {
val += WARP_SHFL_XOR(val, 1 << i, WARP_SIZE);
}
return val;
}
template <typename scalar_t, typename accscalar_t>
static __device__ __forceinline__ Float2<scalar_t, accscalar_t> warpSum(Float2<scalar_t, accscalar_t> value) {
value.v1 = warpSum(value.v1);
value.v2 = warpSum(value.v2);
return value;
}
// Sum across (batch, x/y/z) applying Op() pointwise
// this works by first having each thread sum its part
// of the data. Then there is a double-shuffling reduction.
// First each warp (of WARP_SIZE threads) uses warpSum to reduce its
// data to the "warp leader", who writes its value into shared memory.
// Then a single warp reads the remaining (at most WARP_SIZE) items
// and reduces them using another warpSum.
// The implicit assumption is that there are no more
// than WARP_SIZE**2 threads.
template<typename scalar_t, typename Op, typename PTA>
__device__ scalar_t reduce(Op op, PTA tensor, int plane) {
// first the reductions each thread does separately
scalar_t sum = static_cast<scalar_t>(0);
for (int batch = threadIdx.y; batch < tensor.size(0); batch += blockDim.y) {
for (int x = threadIdx.x; x < tensor.size(2); x += blockDim.x) {
sum += op(batch, plane, x);
}
}
// first warpSum to get one value per thread to
// one value per warp
sum = warpSum(sum);
  // this writes each warp's item into shared memory
// there are at most WARP_SIZE items left because
// there are at most WARP_SIZE**2 threads at the beginning
__shared__ scalar_t shared[WARP_SIZE];
__syncthreads();
int tid = threadIdx.x + threadIdx.y * blockDim.x;
if (tid % WARP_SIZE == 0) {
shared[tid / WARP_SIZE] = sum;
}
if (tid >= blockDim.x * blockDim.y / WARP_SIZE && tid < WARP_SIZE) {
// zero out the other entries in shared
shared[tid] = (scalar_t)0;
}
__syncthreads();
// now have a second warpSum to reduce the intermediate values
// from shared memory to a single number. The very first
// thread writes it to shared memory.
if (tid / WARP_SIZE == 0) {
sum = warpSum(shared[tid]);
if (tid == 0) {
shared[0] = sum;
}
}
__syncthreads();
// Everyone picks it up, should be broadcast into the whole grad_input
return shared[0];
}
template <typename scalar_t, typename accscalar_t, bool train, typename index_t>
__global__ void batch_norm_transform_input_kernel(
const at::PackedTensorAccessor<scalar_t, 3, at::RestrictPtrTraits, index_t> input,
at::PackedTensorAccessor<scalar_t, 3, at::RestrictPtrTraits, index_t> output,
const at::PackedTensorAccessor<typename std::conditional<train, accscalar_t, scalar_t>::type, 1, at::RestrictPtrTraits, index_t> mean_,
const at::PackedTensorAccessor<typename std::conditional<train, accscalar_t, scalar_t>::type, 1, at::RestrictPtrTraits, index_t> var_or_std,
const at::PackedTensorAccessor<scalar_t, 1, at::RestrictPtrTraits, index_t> weight,
const at::PackedTensorAccessor<scalar_t, 1, at::RestrictPtrTraits, index_t> bias,
accscalar_t epsilon) {
index_t plane = blockIdx.x;
if (plane >= input.size(1)) {
return;
}
accscalar_t gamma = weight.size(0) > 0 ? static_cast<accscalar_t>(weight[plane]) : static_cast<accscalar_t>(1);
accscalar_t beta = bias.size(0) > 0 ? static_cast<accscalar_t>(bias[plane]) : static_cast<accscalar_t>(0);
accscalar_t mean = static_cast<accscalar_t>(mean_[plane]);
accscalar_t invstd = 1.0 / var_or_std[plane];
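  // var_or_std holds the per-channel standard deviation here (the caller passes
  // sqrt(E[x^2] - E[x]^2 + eps)), so its reciprocal is 1/std.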
index_t bs = input.size(0);
index_t fs = input.size(2);
index_t bstep = blockDim.y * gridDim.y;
for (index_t batch = threadIdx.y + blockIdx.y * blockDim.y; batch < bs; batch += bstep) {
auto o = output[batch][plane];
auto i = input[batch][plane];
for (index_t feature = threadIdx.x; feature < fs; feature += blockDim.x) {
o[feature] = static_cast<scalar_t>(gamma * (i[feature] - mean) * invstd + beta);
}
}
}
template <typename scalar_t, typename accscalar_t, typename index_t>
__global__ void batch_norm_collect_statistics_kernel(
const at::PackedTensorAccessor<scalar_t, 3, at::RestrictPtrTraits, index_t> input,
at::PackedTensorAccessor<accscalar_t, 1, at::RestrictPtrTraits, index_t> save_mean,
at::PackedTensorAccessor<accscalar_t, 1, at::RestrictPtrTraits, index_t> save_mean2) {
__shared__ int shared_n[2 * 2 * WARP_SIZE + WARP_SIZE];
int plane = blockIdx.x;
int N = input.size(0) * input.size(2);
int tid = threadIdx.x + threadIdx.y * blockDim.x;
// Compute the mean and variance across (batch, x/y/z)
// this uses the Welford (in the for loop)/parallel algorithm (to sum across the block)
// https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_Online_algorithm
// and the parallel algorithm on the same page.
// We use two shuffles to reduce across the entire block.
// https://devblogs.nvidia.com/faster-parallel-reductions-kepler/ has a description.
accscalar_t* shared_avg_var = (accscalar_t*) &shared_n[WARP_SIZE];
// first the reductions each thread does separately
accscalar_t avg = 0;
accscalar_t avg2 = 0;
int n = 0;
for (int batch = threadIdx.y; batch < input.size(0); batch += blockDim.y) {
for (int x = threadIdx.x; x < input.size(2); x += blockDim.x) {
accscalar_t v = input[batch][plane][x];
accscalar_t d1 = v - avg;
accscalar_t d2 = (v * v) - avg2;
n++;
avg += d1 / n;
avg2 += d2 / n;
}
}
// first warpSum to get one value per thread to
// one value per warp
for (int i = 0; i < getMSB(WARP_SIZE); ++i) {
accscalar_t o_avg = WARP_SHFL_XOR(avg, 1 << i, WARP_SIZE);
accscalar_t o_avg2 = WARP_SHFL_XOR(avg2, 1 << i, WARP_SIZE);
int o_n = WARP_SHFL_XOR(n, 1 << i, WARP_SIZE);
accscalar_t factor = 1.0 / fmaxf(1.0, n+o_n);
// var_n += WARP_SHFL_XOR(var_n, 1 << i, WARP_SIZE) + (avg - o_avg) * (avg - o_avg) * n * o_n * factor;
avg2 = (n * avg2 + o_n * o_avg2) * factor;
avg = (n * avg + o_n * o_avg) * factor;
n += o_n;
}
  // this writes each warp's item into shared memory
// there are at most WARP_SIZE items left because
// there are at most WARP_SIZE**2 threads at the beginning
__syncthreads();
if (tid % WARP_SIZE == 0) {
shared_n[tid / WARP_SIZE] = n;
shared_avg_var[tid / WARP_SIZE * 2] = avg;
shared_avg_var[tid / WARP_SIZE * 2 + 1] = avg2;
}
__syncthreads();
// now have a second warpSum to reduce the intermediate values
// from shared memory to a single number. The very first
// thread writes it to shared memory.
if (tid < WARP_SIZE) {
n = (tid < blockDim.x * blockDim.y / WARP_SIZE ? shared_n[tid] : 0);
avg = (tid < blockDim.x * blockDim.y / WARP_SIZE ? shared_avg_var[2 * tid] : 0);
avg2 = (tid < blockDim.x * blockDim.y / WARP_SIZE ? shared_avg_var[2 * tid + 1] : 0);
}
for (int i = 0; i < getMSB(WARP_SIZE); ++i) {
accscalar_t o_avg = WARP_SHFL_XOR(avg, 1 << i, WARP_SIZE);
accscalar_t o_avg2 = WARP_SHFL_XOR(avg2, 1 << i, WARP_SIZE);
int o_n = WARP_SHFL_XOR(n, 1 << i, WARP_SIZE);
accscalar_t factor = 1.0 / fmaxf(1.0, n+o_n);
// var_n += WARP_SHFL_XOR(var_n, 1 << i, WARP_SIZE) + (avg - o_avg) * (avg - o_avg) * n * o_n * factor;
avg2 = (n * avg2 + o_n * o_avg2) * factor;
avg = (n * avg + o_n * o_avg) * factor;
n += o_n;
}
// Save the mean, variance, and moving averages
if (tid == 0) {
/*
accscalar_t invstd = 0;
if (var_n != static_cast<accscalar_t>(0) || epsilon != static_cast<accscalar_t>(0)) {
invstd = static_cast<accscalar_t>(1) / device_sqrt(var_n / N + epsilon);
}
*/
save_mean[plane] = avg;
save_mean2[plane] = avg2;
}
}
template <typename scalar_t, typename accscalar_t, typename index_t>
__global__ void batch_norm_collect_grad_statistics_kernel(
const at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t> input,
const at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t> grad_output,
at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> grad_weight,
at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> grad_bias,
at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> grad_ex,
at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> grad_exs,
const at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> weight,
const at::PackedTensorAccessor<accscalar_t, 1, at::DefaultPtrTraits, index_t> save_mean,
const at::PackedTensorAccessor<accscalar_t, 1, at::DefaultPtrTraits, index_t> save_std,
accscalar_t epsilon) {
index_t plane = blockIdx.x;
index_t N = grad_output.size(0) * grad_output.size(2);
accscalar_t mean, invstd;
mean = save_mean[plane];
invstd = 1.0 / save_std[plane];
/*
if (train) {
mean = save_mean[plane];
invstd = 1.0 / save_std[plane];
} else {
mean = static_cast<accscalar_t>(running_mean[plane]);
invstd = static_cast<accscalar_t>(1) / device_sqrt(static_cast<accscalar_t>(running_var[plane]) + epsilon);
}
*/
accscalar_t weight_val = weight.size(0) > 0 ? static_cast<accscalar_t>(weight[plane]) : accscalar_t(1);
// accscalar_t norm = accscalar_t(1) / N;
// Compute two values across (batch, x/y/z) in one pass:
// 1. Sum(grad_output)
// 2. DotProduct(input - mean, grad_output)
GradOp<scalar_t, accscalar_t, at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t>> g(mean, input, grad_output);
Float2<scalar_t, accscalar_t> res = reduce<Float2<scalar_t, accscalar_t>, GradOp<scalar_t, accscalar_t,
at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t>>>(g, grad_output, plane);
accscalar_t grad_output_sum = res.v1;
accscalar_t dot_p = res.v2;
/*
accscalar_t grad_mean = grad_output_sum * norm;
accscalar_t proj_scale = dot_p * norm * invstd * invstd;
accscalar_t grad_scale = invstd * weight_val;
if (grad_input.data() != NULL) {
for (int batch = threadIdx.y; batch < grad_output.size(0); batch += blockDim.y) {
for (int x = threadIdx.x; x < grad_output.size(2); x += blockDim.x) {
scalar_t go = grad_output[batch][plane][x];
if (train) {
scalar_t inp = input[batch][plane][x];
accscalar_t proj = (inp - mean) * proj_scale;
grad_input[batch][plane][x] = static_cast<scalar_t>((go - proj - grad_mean) * grad_scale);
} else {
grad_input[batch][plane][x] = static_cast<scalar_t>(go * grad_scale);
}
}
}
}
*/
if (threadIdx.x == 0) {
grad_exs[plane] = static_cast<scalar_t>(dot_p * weight_val * (-0.5) * pow(invstd, 3.0));
grad_ex[plane] = static_cast<scalar_t>(grad_output_sum * weight_val * (-1.0) * invstd + \
dot_p * weight_val * pow(invstd, 3.0) * mean);
}
if (grad_weight.size(0) > 0) {
if (threadIdx.x == 0) {
// printf("dot_p = %f, invstd = %f\n", dot_p, invstd);
grad_weight[plane] = static_cast<scalar_t>(dot_p * invstd);
}
}
if (grad_bias.size(0) > 0) {
if (threadIdx.x == 0) {
grad_bias[plane] = static_cast<scalar_t>(grad_output_sum);
}
}
}
template <typename scalar_t, typename accscalar_t, typename index_t>
__global__ void batch_norm_backward_kernel(
const at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t> input,
const at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t> grad_output,
at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t> grad_input,
at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> grad_weight,
at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> grad_bias,
const at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> weight,
const at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> running_mean,
const at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> running_var,
const at::PackedTensorAccessor<accscalar_t, 1, at::DefaultPtrTraits, index_t> save_mean,
const at::PackedTensorAccessor<accscalar_t, 1, at::DefaultPtrTraits, index_t> save_invstd,
bool train,
accscalar_t epsilon) {
index_t plane = blockIdx.x;
index_t N = grad_output.size(0) * grad_output.size(2);
accscalar_t mean, invstd;
if (train) {
mean = save_mean[plane];
invstd = save_invstd[plane];
} else {
mean = static_cast<accscalar_t>(running_mean[plane]);
invstd = static_cast<accscalar_t>(1) / device_sqrt(static_cast<accscalar_t>(running_var[plane]) + epsilon);
}
accscalar_t weight_val = weight.size(0) > 0 ? static_cast<accscalar_t>(weight[plane]) : accscalar_t(1);
accscalar_t norm = accscalar_t(1) / N;
// Compute two values across (batch, x/y/z) in one pass:
// 1. Sum(grad_output)
// 2. DotProduct(input - mean, grad_output)
GradOp<scalar_t, accscalar_t, at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t>> g(mean, input, grad_output);
Float2<scalar_t, accscalar_t> res = reduce<Float2<scalar_t, accscalar_t>, GradOp<scalar_t, accscalar_t,
at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t>>>(g, grad_output, plane);
accscalar_t grad_output_sum = res.v1;
accscalar_t dot_p = res.v2;
accscalar_t grad_mean = grad_output_sum * norm;
accscalar_t proj_scale = dot_p * norm * invstd * invstd;
accscalar_t grad_scale = invstd * weight_val;
if (grad_input.data() != NULL) {
for (int batch = threadIdx.y; batch < grad_output.size(0); batch += blockDim.y) {
for (int x = threadIdx.x; x < grad_output.size(2); x += blockDim.x) {
scalar_t go = grad_output[batch][plane][x];
if (train) {
scalar_t inp = input[batch][plane][x];
accscalar_t proj = (inp - mean) * proj_scale;
grad_input[batch][plane][x] = static_cast<scalar_t>((go - proj - grad_mean) * grad_scale);
} else {
grad_input[batch][plane][x] = static_cast<scalar_t>(go * grad_scale);
}
}
}
}
if (grad_weight.size(0) > 0) {
if (threadIdx.x == 0) {
grad_weight[plane] = static_cast<scalar_t>(dot_p * invstd);
}
}
if (grad_bias.size(0) > 0) {
if (threadIdx.x == 0) {
grad_bias[plane] = static_cast<scalar_t>(grad_output_sum);
}
}
}
template <typename scalar_t, int64_t dim, template <typename U> class PtrTraits = at::DefaultPtrTraits, typename index_t = int64_t>
static at::PackedTensorAccessor<scalar_t, dim, PtrTraits, index_t> packed_accessor_or_dummy(const at::Tensor& t) {
if (! t.defined()) {
const std::vector<index_t> zeros(dim);
return at::PackedTensorAccessor<scalar_t, dim, PtrTraits, index_t>(nullptr, zeros.data(), zeros.data());
}
return t.packed_accessor<scalar_t, dim, PtrTraits, index_t>();
}
std::vector<at::Tensor> batch_norm_collect_statistics_cuda(
const at::Tensor input) {
// const auto batch_size = input.size(0);
const auto channel_size = input.size(1);
// const auto dim_size = input.size(2);
auto input_reshaped = input.reshape({input.size(0), input.size(1), -1}); // internally we merge the feature dimensions
auto ex = at::empty({channel_size}, input.options());
auto exs = at::empty({channel_size}, input.options());
auto stream = at::cuda::getCurrentCUDAStream();
const dim3 blocks(input_reshaped.size(1));
int tf = getNumThreads(input_reshaped.size(2));
dim3 threads(tf, std::max<int>(1, MAX_BLOCK_SIZE/tf));
AT_DISPATCH_FLOATING_TYPES(input.type(), "batch_norm_collect_statistics_cuda", ([&] {
using accscalar_t = at::acc_type<scalar_t, true>;
if (at::cuda::detail::canUse32BitIndexMath(input)) {
batch_norm_collect_statistics_kernel<scalar_t, accscalar_t, int32_t><<<blocks, threads, 0, stream>>>(
input_reshaped.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, int32_t>(),
ex.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, int32_t>(),
exs.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, int32_t>());
} else {
batch_norm_collect_statistics_kernel<scalar_t, accscalar_t, int64_t><<<blocks, threads, 0, stream>>>(
input_reshaped.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, int64_t>(),
ex.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, int64_t>(),
exs.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, int64_t>());
}
}));
THCudaCheck(cudaGetLastError());
return {ex, exs};
}
at::Tensor batch_norm_transform_input_cuda(
const at::Tensor input,
const at::Tensor gamma,
const at::Tensor beta,
const at::Tensor ex,
const at::Tensor exs,
float eps) {
const auto channel_size = input.size(1);
auto input_reshaped = input.reshape({input.size(0), input.size(1), -1}); // internally we merge the feature dimensions
auto output_reshaped = at::empty_like(input_reshaped);
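  // The per-channel std is reconstructed from the moments: Var[x] = E[x^2] - E[x]^2, with eps added for numerical stability.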
auto std = (exs - ex * ex + eps).sqrt();
auto stream = at::cuda::getCurrentCUDAStream();
int tf = std::max<int>(getNumThreads(input_reshaped.size(2)/4),
std::min<int>(getNumThreads(input_reshaped.size(2)), 64));
int tb = std::max<int>(64/tf, 1);
dim3 blocks_trans(input_reshaped.size(1), std::max<int>(1, std::min<int>((256*1024)/input_reshaped.size(1),
(input_reshaped.size(0)+tb-1)/tb)));
dim3 threads_trans(tf, tb);
AT_DISPATCH_FLOATING_TYPES(input.type(), "batch_norm_transform_input_cuda", ([&] {
using accscalar_t = at::acc_type<scalar_t, true>;
if (at::cuda::detail::canUse32BitIndexMath(input)) {
batch_norm_transform_input_kernel<scalar_t, accscalar_t, true, int32_t><<<blocks_trans, threads_trans, 0, stream>>>(
input_reshaped.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, int32_t>(),
output_reshaped.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, int32_t>(),
ex.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, int32_t>(),
std.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, int32_t>(),
packed_accessor_or_dummy<scalar_t, 1, at::RestrictPtrTraits, int32_t>(gamma),
packed_accessor_or_dummy<scalar_t, 1, at::RestrictPtrTraits, int32_t>(beta),
eps);
} else {
batch_norm_transform_input_kernel<scalar_t, accscalar_t, true, int64_t><<<blocks_trans, threads_trans, 0, stream>>>(
input_reshaped.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, int64_t>(),
output_reshaped.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, int64_t>(),
ex.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, int64_t>(),
std.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, int64_t>(),
packed_accessor_or_dummy<scalar_t, 1, at::RestrictPtrTraits, int64_t>(gamma),
packed_accessor_or_dummy<scalar_t, 1, at::RestrictPtrTraits, int64_t>(beta),
eps);
}
}));
THCudaCheck(cudaGetLastError());
return output_reshaped.view(input.sizes());
}
std::vector<at::Tensor> batch_norm_collect_grad_statistics_cuda(
const at::Tensor input,
const at::Tensor grad_output,
const at::Tensor weight,
const at::Tensor ex,
const at::Tensor exs,
float eps) {
const auto channel_size = input.size(1);
auto input_reshaped = input.reshape({input.size(0), input.size(1), -1}); // internally we merge the feature dimensions
auto grad_output_reshaped = grad_output.reshape(input_reshaped.sizes());
auto std = (exs - ex * ex + eps).sqrt();
auto grad_weight = at::empty_like(weight);
auto grad_bias = at::empty_like(weight);
auto grad_ex = at::empty_like(ex);
auto grad_exs = at::empty_like(exs);
auto stream = at::cuda::getCurrentCUDAStream();
const dim3 blocks(input_reshaped.size(1));
int tf = getNumThreads(input_reshaped.size(2));
dim3 threads(tf, std::max<int>(1, MAX_BLOCK_SIZE/tf));
AT_DISPATCH_FLOATING_TYPES(input.type(), "batch_norm_collect_grad_statistics_cuda", ([&] {
using accscalar_t = at::acc_type<scalar_t, true>;
if (at::cuda::detail::canUse32BitIndexMath(input)) {
batch_norm_collect_grad_statistics_kernel<scalar_t, accscalar_t, int32_t><<<blocks, threads, 0, stream>>>(
input_reshaped.packed_accessor<scalar_t, 3, at::DefaultPtrTraits, int32_t>(),
grad_output_reshaped.packed_accessor<scalar_t, 3, at::DefaultPtrTraits, int32_t>(),
packed_accessor_or_dummy<scalar_t, 1, at::DefaultPtrTraits, int32_t>(grad_weight),
packed_accessor_or_dummy<scalar_t, 1, at::DefaultPtrTraits, int32_t>(grad_bias),
packed_accessor_or_dummy<scalar_t, 1, at::DefaultPtrTraits, int32_t>(grad_ex),
packed_accessor_or_dummy<scalar_t, 1, at::DefaultPtrTraits, int32_t>(grad_exs),
packed_accessor_or_dummy<scalar_t, 1, at::DefaultPtrTraits, int32_t>(weight),
packed_accessor_or_dummy<accscalar_t, 1, at::DefaultPtrTraits, int32_t>(ex),
packed_accessor_or_dummy<accscalar_t, 1, at::DefaultPtrTraits, int32_t>(std),
eps);
} else {
batch_norm_collect_grad_statistics_kernel<scalar_t, accscalar_t, int64_t><<<blocks, threads, 0, stream>>>(
input_reshaped.packed_accessor<scalar_t, 3, at::DefaultPtrTraits, int64_t>(),
grad_output_reshaped.packed_accessor<scalar_t, 3, at::DefaultPtrTraits, int64_t>(),
packed_accessor_or_dummy<scalar_t, 1, at::DefaultPtrTraits, int64_t>(grad_weight),
packed_accessor_or_dummy<scalar_t, 1, at::DefaultPtrTraits, int64_t>(grad_bias),
packed_accessor_or_dummy<scalar_t, 1, at::DefaultPtrTraits, int64_t>(grad_ex),
packed_accessor_or_dummy<scalar_t, 1, at::DefaultPtrTraits, int64_t>(grad_exs),
packed_accessor_or_dummy<scalar_t, 1, at::DefaultPtrTraits, int64_t>(weight),
packed_accessor_or_dummy<accscalar_t, 1, at::DefaultPtrTraits, int64_t>(ex),
packed_accessor_or_dummy<accscalar_t, 1, at::DefaultPtrTraits, int64_t>(std),
eps);
}
}));
THCudaCheck(cudaGetLastError());
return {grad_weight, grad_bias, grad_ex, grad_exs};
}
template <typename scalar_t, typename accscalar_t, typename index_t>
__global__ void batch_norm_input_backward_kernel(
const at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t> input,
const at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t> grad_output,
at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t> grad_input,
const at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> grad_ex,
const at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> grad_exs,
const at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> weight,
const at::PackedTensorAccessor<accscalar_t, 1, at::DefaultPtrTraits, index_t> save_mean,
const at::PackedTensorAccessor<accscalar_t, 1, at::DefaultPtrTraits, index_t> save_invstd,
accscalar_t epsilon) {
index_t plane = blockIdx.x;
index_t N = grad_output.size(0) * grad_output.size(2);
// accscalar_t mean, invstd;
// mean = save_mean[plane];
accscalar_t invstd;
invstd = 1.0 / save_invstd[plane];
accscalar_t weight_val = weight.size(0) > 0 ? static_cast<accscalar_t>(weight[plane]) : accscalar_t(1);
accscalar_t norm = accscalar_t(1) / N;
/*
// Compute two values across (batch, x/y/z) in one pass:
// 1. Sum(grad_output)
// 2. DotProduct(input - mean, grad_output)
GradOp<scalar_t, accscalar_t, at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t>> g(mean, input, grad_output);
Float2<scalar_t, accscalar_t> res = reduce<Float2<scalar_t, accscalar_t>, GradOp<scalar_t, accscalar_t,
at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t>>>(g, grad_output, plane);
accscalar_t grad_output_sum = res.v1;
accscalar_t dot_p = res.v2;
accscalar_t grad_mean = grad_output_sum * norm;
accscalar_t proj_scale = dot_p * norm * invstd * invstd;
accscalar_t grad_scale = invstd * weight_val;
*/
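  // Chain rule through the moments:
  //   dL/dx = dy * w * invstd + dL/dE[x^2] * (2x / N) + dL/dE[x] * (1 / N)
  // which is exactly what the loop below writes (norm = 1/N).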
if (grad_input.data() != NULL) {
for (int batch = threadIdx.y; batch < grad_output.size(0); batch += blockDim.y) {
for (int x = threadIdx.x; x < grad_output.size(2); x += blockDim.x) {
grad_input[batch][plane][x] =
static_cast<scalar_t>(grad_output[batch][plane][x] * invstd * weight_val + grad_exs[plane] * 2.0 * input[batch][plane][x] * norm + \
grad_ex[plane] * norm);
}
}
}
}
at::Tensor batch_norm_input_backward_cuda(
const at::Tensor input,
const at::Tensor grad_output,
const at::Tensor weight,
const at::Tensor ex,
const at::Tensor exs,
const at::Tensor grad_ex,
const at::Tensor grad_exs,
float eps) {
auto input_reshaped = input.reshape({input.size(0), input.size(1), -1}); // internally we merge the feature dimensions
auto grad_output_reshaped = grad_output.reshape(input_reshaped.sizes());
auto std = (exs - ex * ex + eps).sqrt();
auto grad_input = at::empty_like(input);
auto grad_input_reshaped = grad_input.view(input_reshaped.sizes());
auto stream = at::cuda::getCurrentCUDAStream();
const dim3 blocks(input_reshaped.size(1));
int tf = getNumThreads(input_reshaped.size(2));
dim3 threads(tf, std::max<int>(1, MAX_BLOCK_SIZE/tf));
AT_DISPATCH_FLOATING_TYPES(input.type(), "batch_norm_input_backward_cuda", ([&] {
using accscalar_t = at::acc_type<scalar_t, true>;
if (at::cuda::detail::canUse32BitIndexMath(input)) {
batch_norm_input_backward_kernel<scalar_t, accscalar_t, int32_t><<<blocks, threads, 0, stream>>>(
input_reshaped.packed_accessor<scalar_t, 3, at::DefaultPtrTraits, int32_t>(),
grad_output_reshaped.packed_accessor<scalar_t, 3, at::DefaultPtrTraits, int32_t>(),
packed_accessor_or_dummy<scalar_t, 3, at::DefaultPtrTraits, int32_t>(grad_input_reshaped),
grad_ex.packed_accessor<scalar_t, 1, at::DefaultPtrTraits, int32_t>(),
grad_exs.packed_accessor<scalar_t, 1, at::DefaultPtrTraits, int32_t>(),
packed_accessor_or_dummy<scalar_t, 1, at::DefaultPtrTraits, int32_t>(weight),
packed_accessor_or_dummy<accscalar_t, 1, at::DefaultPtrTraits, int32_t>(ex),
packed_accessor_or_dummy<accscalar_t, 1, at::DefaultPtrTraits, int32_t>(std),
eps);
} else {
batch_norm_input_backward_kernel<scalar_t, accscalar_t, int64_t><<<blocks, threads, 0, stream>>>(
input_reshaped.packed_accessor<scalar_t, 3, at::DefaultPtrTraits, int64_t>(),
grad_output_reshaped.packed_accessor<scalar_t, 3, at::DefaultPtrTraits, int64_t>(),
packed_accessor_or_dummy<scalar_t, 3, at::DefaultPtrTraits, int64_t>(grad_input_reshaped),
grad_ex.packed_accessor<scalar_t, 1, at::DefaultPtrTraits, int64_t>(),
grad_exs.packed_accessor<scalar_t, 1, at::DefaultPtrTraits, int64_t>(),
packed_accessor_or_dummy<scalar_t, 1, at::DefaultPtrTraits, int64_t>(weight),
packed_accessor_or_dummy<accscalar_t, 1, at::DefaultPtrTraits, int64_t>(ex),
packed_accessor_or_dummy<accscalar_t, 1, at::DefaultPtrTraits, int64_t>(std),
eps);
}
}));
THCudaCheck(cudaGetLastError());
return grad_input;
} |
3efc7b0f759ce8800adfd6efc83cfb2aa3d08524.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/Context.h>
#define CUDA_NUM_THREADS 512
#define THREADS_PER_BLOCK 64
#define DIM0(TENSOR) ((TENSOR).x)
#define DIM1(TENSOR) ((TENSOR).y)
#define DIM2(TENSOR) ((TENSOR).z)
#define DIM3(TENSOR) ((TENSOR).w)
#define DIM3_INDEX(TENSOR, xx, yy, zz, ww) ((TENSOR)[((xx) * (TENSOR##_stride.x)) + ((yy) * (TENSOR##_stride.y)) + ((zz) * (TENSOR##_stride.z)) + ((ww) * (TENSOR##_stride.w))])
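// DIM3_INDEX addresses element (xx, yy, zz, ww) of a 4-D tensor through its companion
// TENSOR_stride long4, so non-contiguous layouts are indexed correctly.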
template <typename scalar_t>
__global__ void kernel_resample2d_update_output(const int n,
const scalar_t* __restrict__ input1, const long4 input1_size, const long4 input1_stride,
const scalar_t* __restrict__ input2, const long4 input2_size, const long4 input2_stride,
scalar_t* __restrict__ output, const long4 output_size, const long4 output_stride, int kernel_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
scalar_t val = 0.0f;
int dim_b = DIM0(output_size);
int dim_c = DIM1(output_size);
int dim_h = DIM2(output_size);
int dim_w = DIM3(output_size);
int dim_chw = dim_c * dim_h * dim_w;
int dim_hw = dim_h * dim_w;
int b = ( index / dim_chw ) % dim_b;
int c = ( index / dim_hw ) % dim_c;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
scalar_t dx = DIM3_INDEX(input2, b, 0, y, x);
scalar_t dy = DIM3_INDEX(input2, b, 1, y, x);
scalar_t xf = static_cast<scalar_t>(x) + dx;
scalar_t yf = static_cast<scalar_t>(y) + dy;
scalar_t alpha = xf - floor(xf); // alpha
scalar_t beta = yf - floor(yf); // beta
int xL = max(min( int (floor(xf)), dim_w-1), 0);
int xR = max(min( int (floor(xf)+1), dim_w -1), 0);
int yT = max(min( int (floor(yf)), dim_h-1), 0);
int yB = max(min( int (floor(yf)+1), dim_h-1), 0);
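    // Bilinear sample at (xf, yf): (alpha, beta) weight the four clamped neighbours,
    // and the (fx, fy) loops repeat the sample over the kernel_size x kernel_size window.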
for (int fy = 0; fy < kernel_size; fy += 1) {
for (int fx = 0; fx < kernel_size; fx += 1) {
val += static_cast<float>((1. - alpha)*(1. - beta) * DIM3_INDEX(input1, b, c, yT + fy, xL + fx));
val += static_cast<float>((alpha)*(1. - beta) * DIM3_INDEX(input1, b, c, yT + fy, xR + fx));
val += static_cast<float>((1. - alpha)*(beta) * DIM3_INDEX(input1, b, c, yB + fy, xL + fx));
val += static_cast<float>((alpha)*(beta) * DIM3_INDEX(input1, b, c, yB + fy, xR + fx));
}
}
output[index] = val;
}
template <typename scalar_t>
__global__ void kernel_resample2d_backward_input1(
const int n, const scalar_t* __restrict__ input1, const long4 input1_size, const long4 input1_stride,
const scalar_t* __restrict__ input2, const long4 input2_size, const long4 input2_stride,
const scalar_t* __restrict__ gradOutput, const long4 gradOutput_size, const long4 gradOutput_stride,
scalar_t* __restrict__ gradInput, const long4 gradInput_size, const long4 gradInput_stride, int kernel_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
int dim_b = DIM0(gradOutput_size);
int dim_c = DIM1(gradOutput_size);
int dim_h = DIM2(gradOutput_size);
int dim_w = DIM3(gradOutput_size);
int dim_chw = dim_c * dim_h * dim_w;
int dim_hw = dim_h * dim_w;
int b = ( index / dim_chw ) % dim_b;
int c = ( index / dim_hw ) % dim_c;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
scalar_t dx = DIM3_INDEX(input2, b, 0, y, x);
scalar_t dy = DIM3_INDEX(input2, b, 1, y, x);
scalar_t xf = static_cast<scalar_t>(x) + dx;
scalar_t yf = static_cast<scalar_t>(y) + dy;
scalar_t alpha = xf - int(xf); // alpha
scalar_t beta = yf - int(yf); // beta
int idim_h = DIM2(input1_size);
int idim_w = DIM3(input1_size);
int xL = max(min( int (floor(xf)), idim_w-1), 0);
int xR = max(min( int (floor(xf)+1), idim_w -1), 0);
int yT = max(min( int (floor(yf)), idim_h-1), 0);
int yB = max(min( int (floor(yf)+1), idim_h-1), 0);
for (int fy = 0; fy < kernel_size; fy += 1) {
for (int fx = 0; fx < kernel_size; fx += 1) {
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yT + fy), (xL + fx)), (1-alpha)*(1-beta) * DIM3_INDEX(gradOutput, b, c, y, x));
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yT + fy), (xR + fx)), (alpha)*(1-beta) * DIM3_INDEX(gradOutput, b, c, y, x));
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yB + fy), (xL + fx)), (1-alpha)*(beta) * DIM3_INDEX(gradOutput, b, c, y, x));
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yB + fy), (xR + fx)), (alpha)*(beta) * DIM3_INDEX(gradOutput, b, c, y, x));
}
}
}
template <typename scalar_t>
__global__ void kernel_resample2d_backward_input2(
const int n, const scalar_t* __restrict__ input1, const long4 input1_size, const long4 input1_stride,
const scalar_t* __restrict__ input2, const long4 input2_size, const long4 input2_stride,
const scalar_t* __restrict__ gradOutput, const long4 gradOutput_size, const long4 gradOutput_stride,
scalar_t* __restrict__ gradInput, const long4 gradInput_size, const long4 gradInput_stride, int kernel_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
scalar_t output = 0.0;
int kernel_rad = (kernel_size - 1)/2;
int dim_b = DIM0(gradInput_size);
int dim_c = DIM1(gradInput_size);
int dim_h = DIM2(gradInput_size);
int dim_w = DIM3(gradInput_size);
int dim_chw = dim_c * dim_h * dim_w;
int dim_hw = dim_h * dim_w;
int b = ( index / dim_chw ) % dim_b;
int c = ( index / dim_hw ) % dim_c;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
int odim_c = DIM1(gradOutput_size);
scalar_t dx = DIM3_INDEX(input2, b, 0, y, x);
scalar_t dy = DIM3_INDEX(input2, b, 1, y, x);
scalar_t xf = static_cast<scalar_t>(x) + dx;
scalar_t yf = static_cast<scalar_t>(y) + dy;
int xL = max(min( int (floor(xf)), dim_w-1), 0);
int xR = max(min( int (floor(xf)+1), dim_w -1), 0);
int yT = max(min( int (floor(yf)), dim_h-1), 0);
int yB = max(min( int (floor(yf)+1), dim_h-1), 0);
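    // input2 stores the flow as (dx, dy) in channels 0 and 1. Even output channels accumulate
    // d(output)/d(dx), odd channels d(output)/d(dy): each branch sums the derivative of the
    // bilinear weights against input1 over all image channels.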
if (c % 2) {
float gamma = 1 - (xf - floor(xf)); // alpha
for (int i = 0; i <= 2*kernel_rad; ++i) {
for (int j = 0; j <= 2*kernel_rad; ++j) {
for (int ch = 0; ch < odim_c; ++ch) {
output += (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xL + i));
output -= (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xL + i));
output += (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xR + i));
output -= (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xR + i));
}
}
}
}
else {
float gamma = 1 - (yf - floor(yf)); // alpha
for (int i = 0; i <= 2*kernel_rad; ++i) {
for (int j = 0; j <= 2*kernel_rad; ++j) {
for (int ch = 0; ch < odim_c; ++ch) {
output += (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xR + i));
output -= (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xL + i));
output += (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xR + i));
output -= (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xL + i));
}
}
}
}
gradInput[index] = output;
}
void resample2d_kernel_forward(
at::Tensor& input1,
at::Tensor& input2,
at::Tensor& output,
int kernel_size) {
int n = output.numel();
const long4 input1_size = make_long4(input1.size(0), input1.size(1), input1.size(2), input1.size(3));
const long4 input1_stride = make_long4(input1.stride(0), input1.stride(1), input1.stride(2), input1.stride(3));
const long4 input2_size = make_long4(input2.size(0), input2.size(1), input2.size(2), input2.size(3));
const long4 input2_stride = make_long4(input2.stride(0), input2.stride(1), input2.stride(2), input2.stride(3));
const long4 output_size = make_long4(output.size(0), output.size(1), output.size(2), output.size(3));
const long4 output_stride = make_long4(output.stride(0), output.stride(1), output.stride(2), output.stride(3));
// TODO: when atomicAdd gets resolved, change to AT_DISPATCH_FLOATING_TYPES_AND_HALF
// AT_DISPATCH_FLOATING_TYPES(input1.type(), "resample_forward_kernel", ([&] {
hipLaunchKernelGGL(( kernel_resample2d_update_output<float>), dim3((n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA() ,
n,
input1.data<float>(),
input1_size,
input1_stride,
input2.data<float>(),
input2_size,
input2_stride,
output.data<float>(),
output_size,
output_stride,
kernel_size);
// }));
// TODO: ATen-equivalent check
// THCudaCheck(hipGetLastError());
}
void resample2d_kernel_backward(
at::Tensor& input1,
at::Tensor& input2,
at::Tensor& gradOutput,
at::Tensor& gradInput1,
at::Tensor& gradInput2,
int kernel_size) {
int n = gradOutput.numel();
const long4 input1_size = make_long4(input1.size(0), input1.size(1), input1.size(2), input1.size(3));
const long4 input1_stride = make_long4(input1.stride(0), input1.stride(1), input1.stride(2), input1.stride(3));
const long4 input2_size = make_long4(input2.size(0), input2.size(1), input2.size(2), input2.size(3));
const long4 input2_stride = make_long4(input2.stride(0), input2.stride(1), input2.stride(2), input2.stride(3));
const long4 gradOutput_size = make_long4(gradOutput.size(0), gradOutput.size(1), gradOutput.size(2), gradOutput.size(3));
const long4 gradOutput_stride = make_long4(gradOutput.stride(0), gradOutput.stride(1), gradOutput.stride(2), gradOutput.stride(3));
const long4 gradInput1_size = make_long4(gradInput1.size(0), gradInput1.size(1), gradInput1.size(2), gradInput1.size(3));
const long4 gradInput1_stride = make_long4(gradInput1.stride(0), gradInput1.stride(1), gradInput1.stride(2), gradInput1.stride(3));
// AT_DISPATCH_FLOATING_TYPES(input1.type(), "resample_backward_input1", ([&] {
hipLaunchKernelGGL(( kernel_resample2d_backward_input1<float>), dim3((n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA() ,
n,
input1.data<float>(),
input1_size,
input1_stride,
input2.data<float>(),
input2_size,
input2_stride,
gradOutput.data<float>(),
gradOutput_size,
gradOutput_stride,
gradInput1.data<float>(),
gradInput1_size,
gradInput1_stride,
kernel_size
);
// }));
const long4 gradInput2_size = make_long4(gradInput2.size(0), gradInput2.size(1), gradInput2.size(2), gradInput2.size(3));
const long4 gradInput2_stride = make_long4(gradInput2.stride(0), gradInput2.stride(1), gradInput2.stride(2), gradInput2.stride(3));
n = gradInput2.numel();
// AT_DISPATCH_FLOATING_TYPES(gradInput2.type(), "resample_backward_input2", ([&] {
hipLaunchKernelGGL(( kernel_resample2d_backward_input2<float>), dim3((n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA() ,
n,
input1.data<float>(),
input1_size,
input1_stride,
input2.data<float>(),
input2_size,
input2_stride,
gradOutput.data<float>(),
gradOutput_size,
gradOutput_stride,
gradInput2.data<float>(),
gradInput2_size,
gradInput2_stride,
kernel_size
);
// }));
// TODO: Use the ATen equivalent to get last error
// THCudaCheck(hipGetLastError());
}
| 3efc7b0f759ce8800adfd6efc83cfb2aa3d08524.cu | #include <ATen/ATen.h>
#include <ATen/Context.h>
#define CUDA_NUM_THREADS 512
#define THREADS_PER_BLOCK 64
#define DIM0(TENSOR) ((TENSOR).x)
#define DIM1(TENSOR) ((TENSOR).y)
#define DIM2(TENSOR) ((TENSOR).z)
#define DIM3(TENSOR) ((TENSOR).w)
#define DIM3_INDEX(TENSOR, xx, yy, zz, ww) ((TENSOR)[((xx) * (TENSOR##_stride.x)) + ((yy) * (TENSOR##_stride.y)) + ((zz) * (TENSOR##_stride.z)) + ((ww) * (TENSOR##_stride.w))])
template <typename scalar_t>
__global__ void kernel_resample2d_update_output(const int n,
const scalar_t* __restrict__ input1, const long4 input1_size, const long4 input1_stride,
const scalar_t* __restrict__ input2, const long4 input2_size, const long4 input2_stride,
scalar_t* __restrict__ output, const long4 output_size, const long4 output_stride, int kernel_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
scalar_t val = 0.0f;
int dim_b = DIM0(output_size);
int dim_c = DIM1(output_size);
int dim_h = DIM2(output_size);
int dim_w = DIM3(output_size);
int dim_chw = dim_c * dim_h * dim_w;
int dim_hw = dim_h * dim_w;
int b = ( index / dim_chw ) % dim_b;
int c = ( index / dim_hw ) % dim_c;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
scalar_t dx = DIM3_INDEX(input2, b, 0, y, x);
scalar_t dy = DIM3_INDEX(input2, b, 1, y, x);
scalar_t xf = static_cast<scalar_t>(x) + dx;
scalar_t yf = static_cast<scalar_t>(y) + dy;
scalar_t alpha = xf - floor(xf); // alpha
scalar_t beta = yf - floor(yf); // beta
int xL = max(min( int (floor(xf)), dim_w-1), 0);
int xR = max(min( int (floor(xf)+1), dim_w -1), 0);
int yT = max(min( int (floor(yf)), dim_h-1), 0);
int yB = max(min( int (floor(yf)+1), dim_h-1), 0);
for (int fy = 0; fy < kernel_size; fy += 1) {
for (int fx = 0; fx < kernel_size; fx += 1) {
val += static_cast<float>((1. - alpha)*(1. - beta) * DIM3_INDEX(input1, b, c, yT + fy, xL + fx));
val += static_cast<float>((alpha)*(1. - beta) * DIM3_INDEX(input1, b, c, yT + fy, xR + fx));
val += static_cast<float>((1. - alpha)*(beta) * DIM3_INDEX(input1, b, c, yB + fy, xL + fx));
val += static_cast<float>((alpha)*(beta) * DIM3_INDEX(input1, b, c, yB + fy, xR + fx));
}
}
output[index] = val;
}
template <typename scalar_t>
__global__ void kernel_resample2d_backward_input1(
const int n, const scalar_t* __restrict__ input1, const long4 input1_size, const long4 input1_stride,
const scalar_t* __restrict__ input2, const long4 input2_size, const long4 input2_stride,
const scalar_t* __restrict__ gradOutput, const long4 gradOutput_size, const long4 gradOutput_stride,
scalar_t* __restrict__ gradInput, const long4 gradInput_size, const long4 gradInput_stride, int kernel_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
int dim_b = DIM0(gradOutput_size);
int dim_c = DIM1(gradOutput_size);
int dim_h = DIM2(gradOutput_size);
int dim_w = DIM3(gradOutput_size);
int dim_chw = dim_c * dim_h * dim_w;
int dim_hw = dim_h * dim_w;
int b = ( index / dim_chw ) % dim_b;
int c = ( index / dim_hw ) % dim_c;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
scalar_t dx = DIM3_INDEX(input2, b, 0, y, x);
scalar_t dy = DIM3_INDEX(input2, b, 1, y, x);
scalar_t xf = static_cast<scalar_t>(x) + dx;
scalar_t yf = static_cast<scalar_t>(y) + dy;
scalar_t alpha = xf - int(xf); // alpha
scalar_t beta = yf - int(yf); // beta
int idim_h = DIM2(input1_size);
int idim_w = DIM3(input1_size);
int xL = max(min( int (floor(xf)), idim_w-1), 0);
int xR = max(min( int (floor(xf)+1), idim_w -1), 0);
int yT = max(min( int (floor(yf)), idim_h-1), 0);
int yB = max(min( int (floor(yf)+1), idim_h-1), 0);
for (int fy = 0; fy < kernel_size; fy += 1) {
for (int fx = 0; fx < kernel_size; fx += 1) {
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yT + fy), (xL + fx)), (1-alpha)*(1-beta) * DIM3_INDEX(gradOutput, b, c, y, x));
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yT + fy), (xR + fx)), (alpha)*(1-beta) * DIM3_INDEX(gradOutput, b, c, y, x));
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yB + fy), (xL + fx)), (1-alpha)*(beta) * DIM3_INDEX(gradOutput, b, c, y, x));
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yB + fy), (xR + fx)), (alpha)*(beta) * DIM3_INDEX(gradOutput, b, c, y, x));
}
}
}
template <typename scalar_t>
__global__ void kernel_resample2d_backward_input2(
const int n, const scalar_t* __restrict__ input1, const long4 input1_size, const long4 input1_stride,
const scalar_t* __restrict__ input2, const long4 input2_size, const long4 input2_stride,
const scalar_t* __restrict__ gradOutput, const long4 gradOutput_size, const long4 gradOutput_stride,
scalar_t* __restrict__ gradInput, const long4 gradInput_size, const long4 gradInput_stride, int kernel_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
scalar_t output = 0.0;
int kernel_rad = (kernel_size - 1)/2;
int dim_b = DIM0(gradInput_size);
int dim_c = DIM1(gradInput_size);
int dim_h = DIM2(gradInput_size);
int dim_w = DIM3(gradInput_size);
int dim_chw = dim_c * dim_h * dim_w;
int dim_hw = dim_h * dim_w;
int b = ( index / dim_chw ) % dim_b;
int c = ( index / dim_hw ) % dim_c;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
int odim_c = DIM1(gradOutput_size);
scalar_t dx = DIM3_INDEX(input2, b, 0, y, x);
scalar_t dy = DIM3_INDEX(input2, b, 1, y, x);
scalar_t xf = static_cast<scalar_t>(x) + dx;
scalar_t yf = static_cast<scalar_t>(y) + dy;
int xL = max(min( int (floor(xf)), dim_w-1), 0);
int xR = max(min( int (floor(xf)+1), dim_w -1), 0);
int yT = max(min( int (floor(yf)), dim_h-1), 0);
int yB = max(min( int (floor(yf)+1), dim_h-1), 0);
if (c % 2) {
float gamma = 1 - (xf - floor(xf)); // alpha
for (int i = 0; i <= 2*kernel_rad; ++i) {
for (int j = 0; j <= 2*kernel_rad; ++j) {
for (int ch = 0; ch < odim_c; ++ch) {
output += (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xL + i));
output -= (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xL + i));
output += (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xR + i));
output -= (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xR + i));
}
}
}
}
else {
float gamma = 1 - (yf - floor(yf)); // alpha
for (int i = 0; i <= 2*kernel_rad; ++i) {
for (int j = 0; j <= 2*kernel_rad; ++j) {
for (int ch = 0; ch < odim_c; ++ch) {
output += (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xR + i));
output -= (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xL + i));
output += (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xR + i));
output -= (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xL + i));
}
}
}
}
gradInput[index] = output;
}
void resample2d_kernel_forward(
at::Tensor& input1,
at::Tensor& input2,
at::Tensor& output,
int kernel_size) {
int n = output.numel();
const long4 input1_size = make_long4(input1.size(0), input1.size(1), input1.size(2), input1.size(3));
const long4 input1_stride = make_long4(input1.stride(0), input1.stride(1), input1.stride(2), input1.stride(3));
const long4 input2_size = make_long4(input2.size(0), input2.size(1), input2.size(2), input2.size(3));
const long4 input2_stride = make_long4(input2.stride(0), input2.stride(1), input2.stride(2), input2.stride(3));
const long4 output_size = make_long4(output.size(0), output.size(1), output.size(2), output.size(3));
const long4 output_stride = make_long4(output.stride(0), output.stride(1), output.stride(2), output.stride(3));
// TODO: when atomicAdd gets resolved, change to AT_DISPATCH_FLOATING_TYPES_AND_HALF
// AT_DISPATCH_FLOATING_TYPES(input1.type(), "resample_forward_kernel", ([&] {
kernel_resample2d_update_output<float><<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream() >>>(
n,
input1.data<float>(),
input1_size,
input1_stride,
input2.data<float>(),
input2_size,
input2_stride,
output.data<float>(),
output_size,
output_stride,
kernel_size);
// }));
// TODO: ATen-equivalent check
// THCudaCheck(cudaGetLastError());
}
void resample2d_kernel_backward(
at::Tensor& input1,
at::Tensor& input2,
at::Tensor& gradOutput,
at::Tensor& gradInput1,
at::Tensor& gradInput2,
int kernel_size) {
int n = gradOutput.numel();
const long4 input1_size = make_long4(input1.size(0), input1.size(1), input1.size(2), input1.size(3));
const long4 input1_stride = make_long4(input1.stride(0), input1.stride(1), input1.stride(2), input1.stride(3));
const long4 input2_size = make_long4(input2.size(0), input2.size(1), input2.size(2), input2.size(3));
const long4 input2_stride = make_long4(input2.stride(0), input2.stride(1), input2.stride(2), input2.stride(3));
const long4 gradOutput_size = make_long4(gradOutput.size(0), gradOutput.size(1), gradOutput.size(2), gradOutput.size(3));
const long4 gradOutput_stride = make_long4(gradOutput.stride(0), gradOutput.stride(1), gradOutput.stride(2), gradOutput.stride(3));
const long4 gradInput1_size = make_long4(gradInput1.size(0), gradInput1.size(1), gradInput1.size(2), gradInput1.size(3));
const long4 gradInput1_stride = make_long4(gradInput1.stride(0), gradInput1.stride(1), gradInput1.stride(2), gradInput1.stride(3));
// AT_DISPATCH_FLOATING_TYPES(input1.type(), "resample_backward_input1", ([&] {
kernel_resample2d_backward_input1<float><<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream() >>>(
n,
input1.data<float>(),
input1_size,
input1_stride,
input2.data<float>(),
input2_size,
input2_stride,
gradOutput.data<float>(),
gradOutput_size,
gradOutput_stride,
gradInput1.data<float>(),
gradInput1_size,
gradInput1_stride,
kernel_size
);
// }));
const long4 gradInput2_size = make_long4(gradInput2.size(0), gradInput2.size(1), gradInput2.size(2), gradInput2.size(3));
const long4 gradInput2_stride = make_long4(gradInput2.stride(0), gradInput2.stride(1), gradInput2.stride(2), gradInput2.stride(3));
n = gradInput2.numel();
// AT_DISPATCH_FLOATING_TYPES(gradInput2.type(), "resample_backward_input2", ([&] {
kernel_resample2d_backward_input2<float><<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream() >>>(
n,
input1.data<float>(),
input1_size,
input1_stride,
input2.data<float>(),
input2_size,
input2_stride,
gradOutput.data<float>(),
gradOutput_size,
gradOutput_stride,
gradInput2.data<float>(),
gradInput2_size,
gradInput2_stride,
kernel_size
);
// }));
// TODO: Use the ATen equivalent to get last error
// THCudaCheck(cudaGetLastError());
}
|
d76b524a29908d547525408cb6499b8862fa6dc2.hip | // !!! This is a file automatically generated by hipify!!!
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <assert.h>
#include <utility>
#include <algorithm>
#include <limits>
#include <hip/hip_runtime.h>
#include "helper_cuda.h"
#include "../mylib.h"
/*
* Helper Kernels
*/
/// CUDA kernel that encodes byte-per-cell data to bit-per-cell data.
/// Needs to be invoked for each byte in encoded data (cells / 8).
/*
* bitLifeEncodeKernel
* Encode the life data of 8 cells into a single byte
*/
__global__ void bitLifeEncodeKernel(
const ubyte* lifeData,
size_t encWorldSize,
ubyte* resultEncodedLifeData
) {
for (size_t outputBucketId = blockIdx.x * blockDim.x + threadIdx.x;
outputBucketId < encWorldSize;
outputBucketId += blockDim.x * gridDim.x) {
size_t cellId = outputBucketId << 3;
ubyte result = lifeData[cellId] << 7 | lifeData[cellId + 1] << 6
| lifeData[cellId + 2] << 5 | lifeData[cellId + 3] << 4
| lifeData[cellId + 4] << 3 | lifeData[cellId + 5] << 2
| lifeData[cellId + 6] << 1 | lifeData[cellId + 7];
resultEncodedLifeData[outputBucketId] = result;
}
}
/// Runs a kernel that encodes byte-per-cell data to bit-per-cell data.
void runBitLifeEncodeKernel(const ubyte* d_lifeData, world *gameWorld, ubyte* d_encodedLife) {
assert(gameWorld->worldWidth % 8 == 0);
size_t worldEncDataWidth = gameWorld->worldWidth / 8;
size_t encWorldSize = worldEncDataWidth * gameWorld->worldHeight;
ushort threadsCount = 256;
assert(encWorldSize % threadsCount == 0);
size_t reqBlocksCount = encWorldSize / threadsCount;
ushort blocksCount = (ushort)::min((size_t)32768, reqBlocksCount);
hipLaunchKernelGGL(( bitLifeEncodeKernel), dim3(blocksCount), dim3(threadsCount), 0, 0, d_lifeData, encWorldSize, d_encodedLife);
checkCudaErrors(hipDeviceSynchronize());
}
/// CUDA kernel that decodes data from bit-per-cell to byte-per-cell format.
/// Needs to be invoked for each byte in encoded data (cells / 8).
/*
* bitLifeDecodeKernel
 * Decode the life data of 8 cells contained in a single byte into eight
 * separate bytes.
*/
__global__ void bitLifeDecodeKernel(
const ubyte* encodedLifeData,
uint encWorldSize,
ubyte* resultDecodedlifeData
) {
for (uint outputBucketId = blockIdx.x * blockDim.x + threadIdx.x;
outputBucketId < encWorldSize;
outputBucketId += blockDim.x * gridDim.x) {
uint cellId = outputBucketId << 3;
ubyte dataBucket = encodedLifeData[outputBucketId];
resultDecodedlifeData[cellId] = dataBucket >> 7;
resultDecodedlifeData[cellId + 1] = (dataBucket >> 6) & 0x01;
resultDecodedlifeData[cellId + 2] = (dataBucket >> 5) & 0x01;
resultDecodedlifeData[cellId + 3] = (dataBucket >> 4) & 0x01;
resultDecodedlifeData[cellId + 4] = (dataBucket >> 3) & 0x01;
resultDecodedlifeData[cellId + 5] = (dataBucket >> 2) & 0x01;
resultDecodedlifeData[cellId + 6] = (dataBucket >> 1) & 0x01;
resultDecodedlifeData[cellId + 7] = dataBucket & 0x01;
}
}
/// Runs a kernel that decodes data from bit-per-cell to byte-per-cell format.
void runBitLifeDecodeKernel(const ubyte* d_encodedLife, world *gameWorld, ubyte* d_lifeData) {
assert(gameWorld->worldWidth % 8 == 0);
uint worldEncDataWidth = gameWorld->worldWidth / 8;
uint encWorldSize = worldEncDataWidth * gameWorld->worldHeight;
ushort threadsCount = 256;
assert(encWorldSize % threadsCount == 0);
uint reqBlocksCount = encWorldSize / threadsCount;
ushort blocksCount = ushort(::min(32768u, reqBlocksCount));
// decode life data back to byte per cell format
hipLaunchKernelGGL(( bitLifeDecodeKernel), dim3(blocksCount), dim3(threadsCount), 0, 0, d_encodedLife, encWorldSize, d_lifeData);
checkCudaErrors(hipDeviceSynchronize());
}
/*
* bitLife Kernel
* Compute array and bit offsets required to access all cells in the Moore
* Neighbourhood and determine the result state of the cell under evaluation.
*
* This kernel is executed once per iteration by as many threads and blocks as
* required to complete the iteration.
*
* The number of bytes worth of cell data that each thread processes is
* variable.
*/
__global__ void bitLifeKernel(
const ubyte* lifeData,
uint worldDataWidth,
uint worldHeight,
uint bytesPerThread,
ubyte* resultLifeData) {
uint worldSize = (worldDataWidth * worldHeight);
for (uint cellId = (__mul24(blockIdx.x, blockDim.x) + threadIdx.x)
* bytesPerThread;
cellId < worldSize;
cellId += blockDim.x * gridDim.x * bytesPerThread) {
// Calculate data offsets
// Start at block x - 1.
uint x = (cellId + worldDataWidth - 1) % worldDataWidth;
uint yAbs = (cellId / worldDataWidth) * worldDataWidth;
uint yAbsUp = (yAbs + worldSize - worldDataWidth) % worldSize;
uint yAbsDown = (yAbs + worldDataWidth) % worldSize;
// Initialize data with previous byte and current byte.
uint data0 = (uint)lifeData[x + yAbsUp] << 16;
uint data1 = (uint)lifeData[x + yAbs] << 16;
uint data2 = (uint)lifeData[x + yAbsDown] << 16;
x = (x + 1) % worldDataWidth;
data0 |= (uint)lifeData[x + yAbsUp] << 8;
data1 |= (uint)lifeData[x + yAbs] << 8;
data2 |= (uint)lifeData[x + yAbsDown] << 8;
for (uint i = 0; i < bytesPerThread; ++i) {
// get the bit coordinate of the cell under evaluation.
uint oldX = x; // old x is referring to current center cell
x = (x + 1) % worldDataWidth;
// extract state of the cell under evaluation.
data0 |= (uint)lifeData[x + yAbsUp];
data1 |= (uint)lifeData[x + yAbs];
data2 |= (uint)lifeData[x + yAbsDown];
// evaluate cell iteratively.
uint result = 0;
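      // Bit 15 of data1 is the cell being evaluated; bits 16 and 14 of each row are its
      // left/right neighbours. The 0x14000 masks count those six side neighbours, and the
      // two vertical neighbours come from bit 15 of data0 and data2.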
for (uint j = 0; j < 8; ++j) {
uint aliveCells = (data0 & 0x14000) + (data1 & 0x14000)
+ (data2 & 0x14000);
aliveCells >>= 14;
aliveCells = (aliveCells & 0x3) + (aliveCells >> 2)
+ ((data0 >> 15) & 0x1u) + ((data2 >> 15) & 0x1u);
result = result << 1
| (aliveCells == 3 || (aliveCells == 2 && (data1 & 0x8000u))
? 1u
: 0u);
data0 <<= 1;
data1 <<= 1;
data2 <<= 1;
}
// write result
resultLifeData[oldX + yAbs] = result;
}
}
}
/// Runs a kernel that evaluates given world of bit-per-cell density using algorithm specified by parameters.
bool runBitLifeKernel(
ubyte *&d_encodedLifeData,
ubyte *&d_encodedlifeDataBuffer,
world *gameWorld,
size_t iterationsCount,
ushort threadsCount,
uint bytesPerThread
) {
// World has to fit into 8 bits of every byte exactly.
if (gameWorld->worldWidth % 8 != 0) {
fprintf(stderr, "World has to fit into 8 bits of every byte exactly.\n");
return false;
}
size_t worldEncDataWidth = gameWorld->worldWidth / 8;
if (worldEncDataWidth % bytesPerThread != 0) {
fprintf(stderr, "bytesPerThread must align with world size.\n");
return false;
}
size_t encWorldSize = worldEncDataWidth * gameWorld->worldHeight;
if (encWorldSize > std::numeric_limits<uint>::max()) {
fprintf(stderr, "World is too big to fit into a uint\n");
return false;
}
if ((encWorldSize / bytesPerThread) % threadsCount != 0) {
fprintf(stderr, "Number of threads must align with world size and bytesPerThread.\n");
return false;
}
size_t reqBlocksCount = (encWorldSize / bytesPerThread) / threadsCount;
ushort blocksCount = ushort(::min(size_t(32768), reqBlocksCount));
// exec kernel
for (size_t i = 0; i < iterationsCount; ++i) {
hipLaunchKernelGGL(( bitLifeKernel), dim3(blocksCount), dim3(threadsCount), 0, 0,
d_encodedLifeData,
uint(worldEncDataWidth),
uint(gameWorld->worldHeight),
bytesPerThread,
d_encodedlifeDataBuffer
);
std::swap(d_encodedLifeData, d_encodedlifeDataBuffer);
}
checkCudaErrors(hipDeviceSynchronize());
fprintf(stderr, "bitLife Kernel executed successfully.\n");
return true;
}
bool fullBitLifeKernel (board *gameBoard, size_t iterationsCount, ushort threadsCount, uint bytesPerThread, float *milli) {
world *gameWorld = gameBoard->_world;
ubyte *d_encodedData;
ubyte *d_encodedDataBuffer;
ubyte *d_data;
uint worldEncDataWidth = gameWorld->worldWidth / 8;
uint encWorldSize = worldEncDataWidth * gameWorld->worldHeight;
checkCudaErrors(hipMalloc((ubyte**)&d_data, gameWorld->dataLength));
checkCudaErrors(hipMemset(d_data, 0, gameWorld->dataLength));
checkCudaErrors(hipMemcpy(d_data, gameBoard->data, gameWorld->dataLength, hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc((ubyte**)&d_encodedData, encWorldSize));
checkCudaErrors(hipMemset(d_encodedData, 0, encWorldSize));
runBitLifeEncodeKernel(d_data, gameWorld, d_encodedData);
checkCudaErrors(hipFree(d_data));
checkCudaErrors(hipMalloc((ubyte**)&d_encodedDataBuffer, encWorldSize));
checkCudaErrors(hipMemset(d_encodedDataBuffer, 0, encWorldSize));
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start); // start timing
bool ret = runBitLifeKernel(d_encodedData, d_encodedDataBuffer, gameWorld, iterationsCount, threadsCount, bytesPerThread);
hipEventRecord(stop); // stop timing
hipEventSynchronize(stop);
hipEventElapsedTime(milli, start, stop);
checkCudaErrors(hipFree(d_encodedDataBuffer));
checkCudaErrors(hipMalloc((ubyte**)&d_data, gameWorld->dataLength));
checkCudaErrors(hipMemset(d_data, 0, gameWorld->dataLength));
runBitLifeDecodeKernel(d_encodedData, gameWorld, d_data);
checkCudaErrors(hipFree(d_encodedData));
checkCudaErrors(hipMemcpy(gameBoard->data, d_data, gameWorld->dataLength, hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(d_data));
hipEventDestroy(start);
hipEventDestroy(stop);
return ret;
}
/*
* Main
*/
board *gameBoard;
world *gameWorld;
FILE *out_file;
int main (int argc, char **argv)
{
int iterations = 10000;
ushort threadsCount = 64;
char *in_filename = NULL;
char *out_filename = NULL;
size_t board_size = 48;
size_t bytesPerThread = 8;
float milli;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
gameBoard = (board*)malloc(sizeof(board));
gameWorld = (world*)malloc(sizeof(world));
opterr = 0;
int c;
while ((c = getopt (argc, argv, (const char*)"o:f:i:t:s:b:")) != -1) {
switch (c) {
case 'i':
iterations = atoi(optarg);
break;
case 'f':
in_filename = optarg;
break;
case 'o':
out_filename = optarg;
break;
case 't':
threadsCount = atoi(optarg);
break;
case 'b':
bytesPerThread = (size_t)atoi(optarg);
break;
case 's':
board_size = atoi(optarg);
break;
case '?':
break;
default:
break;
}
}
// printf("iterations: %d\n", iterations);
// printf("in_file: %s\n", in_filename);
// printf("out_file: %s\n", out_filename);
// printf("threadsCount: %u\n", threadsCount);
// printf("\n");
if (!in_filename) {
printf("Please specify a board file\n");
exit(1);
}
initWorld(board_size, board_size, gameWorld);
initBoard(fopen(in_filename, "r"), gameBoard, gameWorld);
if (out_filename) out_file = fopen(out_filename, "w+");
fullBitLifeKernel(gameBoard, iterations, threadsCount, bytesPerThread, &milli);
reportTime(iterations, board_size, threadsCount, milli);
// checkCudaErrors(hipMemcpy(gameBoard->data, d_data, BOARD_BYTES, hipMemcpyDeviceToHost));
// printBoard(gameBoard->data, gameWorld);
hipEventDestroy(start);
hipEventDestroy(stop);
free(gameBoard->data);
free(gameBoard->resultData);
if (out_filename) fclose(out_file);
// printf("\n");
checkCudaErrors(hipDeviceReset());
}
| d76b524a29908d547525408cb6499b8862fa6dc2.cu |
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <assert.h>
#include <utility>
#include <algorithm>
#include <limits>
#include <cuda_runtime.h>
#include "helper_cuda.h"
#include "../mylib.h"
/*
* Helper Kernels
*/
/// CUDA kernel that encodes byte-per-cell data to bit-per-cell data.
/// Needs to be invoked for each byte in encoded data (cells / 8).
/*
* bitLifeEncodeKernel
* Encode the life data of 8 cells into a single byte
*/
__global__ void bitLifeEncodeKernel(
const ubyte* lifeData,
size_t encWorldSize,
ubyte* resultEncodedLifeData
) {
for (size_t outputBucketId = blockIdx.x * blockDim.x + threadIdx.x;
outputBucketId < encWorldSize;
outputBucketId += blockDim.x * gridDim.x) {
size_t cellId = outputBucketId << 3;
ubyte result = lifeData[cellId] << 7 | lifeData[cellId + 1] << 6
| lifeData[cellId + 2] << 5 | lifeData[cellId + 3] << 4
| lifeData[cellId + 4] << 3 | lifeData[cellId + 5] << 2
| lifeData[cellId + 6] << 1 | lifeData[cellId + 7];
resultEncodedLifeData[outputBucketId] = result;
}
}
/// Runs a kernel that encodes byte-per-cell data to bit-per-cell data.
void runBitLifeEncodeKernel(const ubyte* d_lifeData, world *gameWorld, ubyte* d_encodedLife) {
assert(gameWorld->worldWidth % 8 == 0);
size_t worldEncDataWidth = gameWorld->worldWidth / 8;
size_t encWorldSize = worldEncDataWidth * gameWorld->worldHeight;
ushort threadsCount = 256;
assert(encWorldSize % threadsCount == 0);
size_t reqBlocksCount = encWorldSize / threadsCount;
ushort blocksCount = (ushort)std::min((size_t)32768, reqBlocksCount);
bitLifeEncodeKernel<<<blocksCount, threadsCount>>>(d_lifeData, encWorldSize, d_encodedLife);
checkCudaErrors(cudaDeviceSynchronize());
}
/// CUDA kernel that decodes data from bit-per-cell to byte-per-cell format.
/// Needs to be invoked for each byte in encoded data (cells / 8).
/*
* bitLifeDecodeKernel
 * Decode the life data of 8 cells contained in a single byte into eight
 * separate bytes.
*/
__global__ void bitLifeDecodeKernel(
const ubyte* encodedLifeData,
uint encWorldSize,
ubyte* resultDecodedlifeData
) {
for (uint outputBucketId = blockIdx.x * blockDim.x + threadIdx.x;
outputBucketId < encWorldSize;
outputBucketId += blockDim.x * gridDim.x) {
uint cellId = outputBucketId << 3;
ubyte dataBucket = encodedLifeData[outputBucketId];
resultDecodedlifeData[cellId] = dataBucket >> 7;
resultDecodedlifeData[cellId + 1] = (dataBucket >> 6) & 0x01;
resultDecodedlifeData[cellId + 2] = (dataBucket >> 5) & 0x01;
resultDecodedlifeData[cellId + 3] = (dataBucket >> 4) & 0x01;
resultDecodedlifeData[cellId + 4] = (dataBucket >> 3) & 0x01;
resultDecodedlifeData[cellId + 5] = (dataBucket >> 2) & 0x01;
resultDecodedlifeData[cellId + 6] = (dataBucket >> 1) & 0x01;
resultDecodedlifeData[cellId + 7] = dataBucket & 0x01;
}
}
/// Runs a kernel that decodes data from bit-per-cell to byte-per-cell format.
void runBitLifeDecodeKernel(const ubyte* d_encodedLife, world *gameWorld, ubyte* d_lifeData) {
assert(gameWorld->worldWidth % 8 == 0);
uint worldEncDataWidth = gameWorld->worldWidth / 8;
uint encWorldSize = worldEncDataWidth * gameWorld->worldHeight;
ushort threadsCount = 256;
assert(encWorldSize % threadsCount == 0);
uint reqBlocksCount = encWorldSize / threadsCount;
ushort blocksCount = ushort(std::min(32768u, reqBlocksCount));
// decode life data back to byte per cell format
bitLifeDecodeKernel<<<blocksCount, threadsCount>>>(d_encodedLife, encWorldSize, d_lifeData);
checkCudaErrors(cudaDeviceSynchronize());
}
/*
* bitLife Kernel
* Compute array and bit offsets required to access all cells in the Moore
* Neighbourhood and determine the result state of the cell under evaluation.
*
* This kernel is executed once per iteration by as many threads and blocks as
* required to complete the iteration.
*
* The number of bytes worth of cell data that each thread processes is
* variable.
*/
__global__ void bitLifeKernel(
const ubyte* lifeData,
uint worldDataWidth,
uint worldHeight,
uint bytesPerThread,
ubyte* resultLifeData) {
uint worldSize = (worldDataWidth * worldHeight);
for (uint cellId = (__mul24(blockIdx.x, blockDim.x) + threadIdx.x)
* bytesPerThread;
cellId < worldSize;
cellId += blockDim.x * gridDim.x * bytesPerThread) {
// Calculate data offsets
// Start at block x - 1.
uint x = (cellId + worldDataWidth - 1) % worldDataWidth;
uint yAbs = (cellId / worldDataWidth) * worldDataWidth;
uint yAbsUp = (yAbs + worldSize - worldDataWidth) % worldSize;
uint yAbsDown = (yAbs + worldDataWidth) % worldSize;
// Initialize data with previous byte and current byte.
uint data0 = (uint)lifeData[x + yAbsUp] << 16;
uint data1 = (uint)lifeData[x + yAbs] << 16;
uint data2 = (uint)lifeData[x + yAbsDown] << 16;
x = (x + 1) % worldDataWidth;
data0 |= (uint)lifeData[x + yAbsUp] << 8;
data1 |= (uint)lifeData[x + yAbs] << 8;
data2 |= (uint)lifeData[x + yAbsDown] << 8;
for (uint i = 0; i < bytesPerThread; ++i) {
// get the bit coordinate of the cell under evaluation.
uint oldX = x; // old x is referring to current center cell
x = (x + 1) % worldDataWidth;
// extract state of the cell under evaluation.
data0 |= (uint)lifeData[x + yAbsUp];
data1 |= (uint)lifeData[x + yAbs];
data2 |= (uint)lifeData[x + yAbsDown];
// evaluate cell iteratively.
uint result = 0;
for (uint j = 0; j < 8; ++j) {
uint aliveCells = (data0 & 0x14000) + (data1 & 0x14000)
+ (data2 & 0x14000);
aliveCells >>= 14;
aliveCells = (aliveCells & 0x3) + (aliveCells >> 2)
+ ((data0 >> 15) & 0x1u) + ((data2 >> 15) & 0x1u);
result = result << 1
| (aliveCells == 3 || (aliveCells == 2 && (data1 & 0x8000u))
? 1u
: 0u);
data0 <<= 1;
data1 <<= 1;
data2 <<= 1;
}
// write result
resultLifeData[oldX + yAbs] = result;
}
}
}
/// Runs a kernel that evaluates given world of bit-per-cell density using algorithm specified by parameters.
bool runBitLifeKernel(
ubyte *&d_encodedLifeData,
ubyte *&d_encodedlifeDataBuffer,
world *gameWorld,
size_t iterationsCount,
ushort threadsCount,
uint bytesPerThread
) {
// World has to fit into 8 bits of every byte exactly.
if (gameWorld->worldWidth % 8 != 0) {
fprintf(stderr, "World has to fit into 8 bits of every byte exactly.\n");
return false;
}
size_t worldEncDataWidth = gameWorld->worldWidth / 8;
if (worldEncDataWidth % bytesPerThread != 0) {
fprintf(stderr, "bytesPerThread must align with world size.\n");
return false;
}
size_t encWorldSize = worldEncDataWidth * gameWorld->worldHeight;
if (encWorldSize > std::numeric_limits<uint>::max()) {
fprintf(stderr, "World is too big to fit into a uint\n");
return false;
}
if ((encWorldSize / bytesPerThread) % threadsCount != 0) {
fprintf(stderr, "Number of threads must align with world size and bytesPerThread.\n");
return false;
}
size_t reqBlocksCount = (encWorldSize / bytesPerThread) / threadsCount;
ushort blocksCount = ushort(std::min(size_t(32768), reqBlocksCount));
// exec kernel
for (size_t i = 0; i < iterationsCount; ++i) {
bitLifeKernel<<<blocksCount, threadsCount>>>(
d_encodedLifeData,
uint(worldEncDataWidth),
uint(gameWorld->worldHeight),
bytesPerThread,
d_encodedlifeDataBuffer
);
std::swap(d_encodedLifeData, d_encodedlifeDataBuffer);
}
checkCudaErrors(cudaDeviceSynchronize());
fprintf(stderr, "bitLife Kernel executed successfully.\n");
return true;
}
bool fullBitLifeKernel (board *gameBoard, size_t iterationsCount, ushort threadsCount, uint bytesPerThread, float *milli) {
world *gameWorld = gameBoard->_world;
ubyte *d_encodedData;
ubyte *d_encodedDataBuffer;
ubyte *d_data;
uint worldEncDataWidth = gameWorld->worldWidth / 8;
uint encWorldSize = worldEncDataWidth * gameWorld->worldHeight;
checkCudaErrors(cudaMalloc((ubyte**)&d_data, gameWorld->dataLength));
checkCudaErrors(cudaMemset(d_data, 0, gameWorld->dataLength));
checkCudaErrors(cudaMemcpy(d_data, gameBoard->data, gameWorld->dataLength, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc((ubyte**)&d_encodedData, encWorldSize));
checkCudaErrors(cudaMemset(d_encodedData, 0, encWorldSize));
runBitLifeEncodeKernel(d_data, gameWorld, d_encodedData);
checkCudaErrors(cudaFree(d_data));
checkCudaErrors(cudaMalloc((ubyte**)&d_encodedDataBuffer, encWorldSize));
checkCudaErrors(cudaMemset(d_encodedDataBuffer, 0, encWorldSize));
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start); // start timing
bool ret = runBitLifeKernel(d_encodedData, d_encodedDataBuffer, gameWorld, iterationsCount, threadsCount, bytesPerThread);
cudaEventRecord(stop); // stop timing
cudaEventSynchronize(stop);
cudaEventElapsedTime(milli, start, stop);
checkCudaErrors(cudaFree(d_encodedDataBuffer));
checkCudaErrors(cudaMalloc((ubyte**)&d_data, gameWorld->dataLength));
checkCudaErrors(cudaMemset(d_data, 0, gameWorld->dataLength));
runBitLifeDecodeKernel(d_encodedData, gameWorld, d_data);
checkCudaErrors(cudaFree(d_encodedData));
checkCudaErrors(cudaMemcpy(gameBoard->data, d_data, gameWorld->dataLength, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_data));
cudaEventDestroy(start);
cudaEventDestroy(stop);
return ret;
}
/*
* Main
*/
board *gameBoard;
world *gameWorld;
FILE *out_file;
int main (int argc, char **argv)
{
int iterations = 10000;
ushort threadsCount = 64;
char *in_filename = NULL;
char *out_filename = NULL;
size_t board_size = 48;
size_t bytesPerThread = 8;
float milli;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
gameBoard = (board*)malloc(sizeof(board));
gameWorld = (world*)malloc(sizeof(world));
opterr = 0;
int c;
while ((c = getopt (argc, argv, (const char*)"o:f:i:t:s:b:")) != -1) {
switch (c) {
case 'i':
iterations = atoi(optarg);
break;
case 'f':
in_filename = optarg;
break;
case 'o':
out_filename = optarg;
break;
case 't':
threadsCount = atoi(optarg);
break;
case 'b':
bytesPerThread = (size_t)atoi(optarg);
break;
case 's':
board_size = atoi(optarg);
break;
case '?':
break;
default:
break;
}
}
// printf("iterations: %d\n", iterations);
// printf("in_file: %s\n", in_filename);
// printf("out_file: %s\n", out_filename);
// printf("threadsCount: %u\n", threadsCount);
// printf("\n");
if (!in_filename) {
printf("Please specify a board file\n");
exit(1);
}
initWorld(board_size, board_size, gameWorld);
initBoard(fopen(in_filename, "r"), gameBoard, gameWorld);
if (out_filename) out_file = fopen(out_filename, "w+");
fullBitLifeKernel(gameBoard, iterations, threadsCount, bytesPerThread, &milli);
reportTime(iterations, board_size, threadsCount, milli);
// checkCudaErrors(cudaMemcpy(gameBoard->data, d_data, BOARD_BYTES, cudaMemcpyDeviceToHost));
// printBoard(gameBoard->data, gameWorld);
cudaEventDestroy(start);
cudaEventDestroy(stop);
free(gameBoard->data);
free(gameBoard->resultData);
if (out_filename) fclose(out_file);
// printf("\n");
checkCudaErrors(cudaDeviceReset());
}
|
aae96f8ce36a1f74ce3a0ac57573cb552e98797e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <wb.h>
#define NUM_BINS 4096
#define NUM_THREADS 512
#define WARP_SIZE 32
#define SM_SIZE 12288
#define BLOCKS_PER_SM 8
#define HIST_SIZE 128
#define CUDA_CHECK(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line,\
bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code),\
file, line);
if (abort)
exit(code);
}
}
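// Added note: privatized histogram kernel. Each block keeps R sub-histograms of
// (NUM_BINS + 1) bins in shared memory (the extra bin is padding, presumably to
// reduce bank conflicts). Each thread accumulates into the copy selected by
// per_block_offset, and the copies are summed into the global histogram at the end.
// Note that with NUM_BINS = 4096, R copies exceed the shared memory available per
// block on most GPUs, so R/NUM_BINS have to be chosen consistently.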
__global__ void histAlgoMemPerBlock(unsigned int* devIn, \
unsigned int* devOut, int inputLength, int R)
{
extern __shared__ int hist_per_block[]; // dynamically sized: (NUM_BINS + 1) * R ints, passed at launch
int warp_id = threadIdx.x / WARP_SIZE;
int lane = threadIdx.x % WARP_SIZE;
int warp_block = blockIdx.x / WARP_SIZE;
int per_block_offset = (NUM_BINS + 1) * (threadIdx.x % R);
int start = (inputLength / warp_block) * warp_id + WARP_SIZE * blockIdx.x + lane;
int finish = (inputLength / warp_block) * (warp_id + 1);
int step = WARP_SIZE * gridDim.x;
int i, sum, j;
for(i = threadIdx.x; i < (NUM_BINS + 1) * R; i+= blockDim.x)
hist_per_block[i] = 0;
__syncthreads();
for(i = start; i < finish; i += step)
atomicAdd(&hist_per_block[per_block_offset + devIn[i]], 1);
__syncthreads();
for(i = threadIdx.x; i < NUM_BINS; i += blockDim.x)
{
sum = 0;
for(j = 0; j < (NUM_BINS + 1) * R; j += NUM_BINS + 1)
sum += hist_per_block[i + j];
atomicAdd(devOut + i, sum);
}
}
int main(int argc, char *argv[])
{
wbArg_t args;
int inputLength;
unsigned int *hostInput;
unsigned int *hostBins;
unsigned int *deviceInput;
unsigned int *deviceBins;
int R;
/* Read input arguments here */
if(argc != 3)
{
printf("Usage: ./a.out <input.raw> <output.raw>\n");
exit(1);
}
args = {argc, argv};
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput = (unsigned int *)wbImport(wbArg_getInputFile(args, 0), \
&inputLength, "Integer");
hostBins = (unsigned int *)malloc(NUM_BINS * sizeof(unsigned int));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", inputLength);
wbLog(TRACE, "The number of bins is ", NUM_BINS);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
hipMalloc((void **)&deviceInput, sizeof(unsigned int) * inputLength);
hipMalloc((void **)&deviceBins, sizeof(unsigned int) * NUM_BINS);
CUDA_CHECK(hipDeviceSynchronize());
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
hipMemcpy(deviceInput, hostInput, sizeof(unsigned int) * inputLength, \
hipMemcpyHostToDevice);
hipMemset(deviceBins, 0, sizeof(unsigned int) * NUM_BINS);
CUDA_CHECK(hipDeviceSynchronize());
wbTime_stop(GPU, "Copying input memory to the GPU.");
R = SM_SIZE / (BLOCKS_PER_SM * (HIST_SIZE + 1));
// Launch kernel
// ----------------------------------------------------------
wbLog(TRACE, "Launching kernel");
wbTime_start(Compute, "Performing CUDA computation");
//@@ Perform kernel computation here
hipLaunchKernelGGL(( histAlgoMemPerBlock) , dim3((int)ceil((float)inputLength / NUM_THREADS)), dim3(NUM_THREADS), (NUM_BINS + 1) * R * sizeof(int), 0, \
deviceInput, deviceBins, inputLength, R);
wbTime_stop(Compute, "Performing CUDA computation");
// ----------------------------------------------------------
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
hipMemcpy(hostBins, deviceBins, sizeof(unsigned int) * NUM_BINS, \
hipMemcpyDeviceToHost);
CUDA_CHECK(hipDeviceSynchronize());
wbTime_stop(Copy, "Copying output memory to the CPU");
// Verify correctness
// -----------------------------------------------------
wbSolution(args, hostBins, NUM_BINS);
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
hipFree(deviceBins);
hipFree(deviceInput);
wbTime_stop(GPU, "Freeing GPU Memory");
free(hostBins);
free(hostInput);
return 0;
}
| aae96f8ce36a1f74ce3a0ac57573cb552e98797e.cu | #include <wb.h>
#define NUM_BINS 4096
#define NUM_THREADS 512
#define WARP_SIZE 32
#define SM_SIZE 12288
#define BLOCKS_PER_SM 8
#define HIST_SIZE 128
#define CUDA_CHECK(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line,\
bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code),\
file, line);
if (abort)
exit(code);
}
}
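// Added note: privatized histogram kernel. Each block keeps R sub-histograms of
// (NUM_BINS + 1) bins in shared memory (the extra bin is padding, presumably to
// reduce bank conflicts). Each thread accumulates into the copy selected by
// per_block_offset, and the copies are summed into the global histogram at the end.
// Note that with NUM_BINS = 4096, R copies exceed the shared memory available per
// block on most GPUs, so R/NUM_BINS have to be chosen consistently.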
__global__ void histAlgoMemPerBlock(unsigned int* devIn, \
unsigned int* devOut, int inputLength, int R)
{
extern __shared__ int hist_per_block[]; // dynamically sized: (NUM_BINS + 1) * R ints, passed at launch
int warp_id = threadIdx.x / WARP_SIZE;
int lane = threadIdx.x % WARP_SIZE;
int warp_block = blockIdx.x / WARP_SIZE;
int per_block_offset = (NUM_BINS + 1) * (threadIdx.x % R);
int start = (inputLength / warp_block) * warp_id + WARP_SIZE * blockIdx.x + lane;
int finish = (inputLength / warp_block) * (warp_id + 1);
int step = WARP_SIZE * gridDim.x;
int i, sum, j;
for(i = threadIdx.x; i < (NUM_BINS + 1) * R; i+= blockDim.x)
hist_per_block[i] = 0;
__syncthreads();
for(i = start; i < finish; i += step)
atomicAdd(&hist_per_block[per_block_offset + devIn[i]], 1);
__syncthreads();
for(i = threadIdx.x; i < NUM_BINS; i += blockDim.x)
{
sum = 0;
for(j = 0; j < (NUM_BINS + 1) * R; j += NUM_BINS + 1)
sum += hist_per_block[i + j];
atomicAdd(devOut + i, sum);
}
}
int main(int argc, char *argv[])
{
wbArg_t args;
int inputLength;
unsigned int *hostInput;
unsigned int *hostBins;
unsigned int *deviceInput;
unsigned int *deviceBins;
int R;
/* Read input arguments here */
if(argc != 3)
{
printf("Usage: ./a.out <input.raw> <output.raw>\n");
exit(1);
}
args = {argc, argv};
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput = (unsigned int *)wbImport(wbArg_getInputFile(args, 0), \
&inputLength, "Integer");
hostBins = (unsigned int *)malloc(NUM_BINS * sizeof(unsigned int));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", inputLength);
wbLog(TRACE, "The number of bins is ", NUM_BINS);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
cudaMalloc((void **)&deviceInput, sizeof(unsigned int) * inputLength);
cudaMalloc((void **)&deviceBins, sizeof(unsigned int) * NUM_BINS);
CUDA_CHECK(cudaDeviceSynchronize());
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
cudaMemcpy(deviceInput, hostInput, sizeof(unsigned int) * inputLength, \
cudaMemcpyHostToDevice);
cudaMemset(deviceBins, 0, sizeof(unsigned int) * NUM_BINS);
CUDA_CHECK(cudaDeviceSynchronize());
wbTime_stop(GPU, "Copying input memory to the GPU.");
R = SM_SIZE / (BLOCKS_PER_SM * (HIST_SIZE + 1));
// Launch kernel
// ----------------------------------------------------------
wbLog(TRACE, "Launching kernel");
wbTime_start(Compute, "Performing CUDA computation");
//@@ Perform kernel computation here
histAlgoMemPerBlock <<<(int)ceil((float)inputLength / NUM_THREADS), NUM_THREADS, (NUM_BINS + 1) * R * sizeof(int)>>> \
(deviceInput, deviceBins, inputLength, R);
wbTime_stop(Compute, "Performing CUDA computation");
// ----------------------------------------------------------
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
cudaMemcpy(hostBins, deviceBins, sizeof(unsigned int) * NUM_BINS, \
cudaMemcpyDeviceToHost);
CUDA_CHECK(cudaDeviceSynchronize());
wbTime_stop(Copy, "Copying output memory to the CPU");
// Verify correctness
// -----------------------------------------------------
wbSolution(args, hostBins, NUM_BINS);
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
cudaFree(deviceBins);
cudaFree(deviceInput);
wbTime_stop(GPU, "Freeing GPU Memory");
free(hostBins);
free(hostInput);
return 0;
}
|
135ccd5cbf63cf8e091d9e378175d6eea88861ba.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/IndexFlat.h>
#include <faiss/gpu/GpuDistance.h>
#include <faiss/gpu/StandardGpuResources.h>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/CopyUtils.cuh>
#include <faiss/gpu/utils/Transpose.cuh>
#include <faiss/gpu/test/TestUtils.h>
#include <gtest/gtest.h>
#include <sstream>
#include <vector>
void testTransposition(bool colMajorVecs,
bool colMajorQueries,
faiss::MetricType metric,
float metricArg = 0) {
int device = faiss::gpu::randVal(0, faiss::gpu::getNumDevices() - 1);
faiss::gpu::StandardGpuResources res;
res.noTempMemory();
int dim = faiss::gpu::randVal(20, 150);
int numVecs = faiss::gpu::randVal(10, 30000);
int numQuery = faiss::gpu::randVal(1, 1024);
int k = std::min(numVecs, faiss::gpu::randVal(20, 70));
// Input data for CPU
std::vector<float> vecs = faiss::gpu::randVecs(numVecs, dim);
std::vector<float> queries = faiss::gpu::randVecs(numQuery, dim);
if (metric == faiss::MetricType::METRIC_JensenShannon) {
// make values positive
for (auto& v : vecs) {
v = std::abs(v);
if (v == 0) {
v = 1e-6;
}
}
for (auto& q : queries) {
q = std::abs(q);
if (q == 0) {
q = 1e-6;
}
}
}
// The CPU index is our reference for the results
faiss::IndexFlat cpuIndex(dim, metric);
cpuIndex.metric_arg = metricArg;
cpuIndex.add(numVecs, vecs.data());
std::vector<float> cpuDistance(numQuery * k, 0);
std::vector<faiss::Index::idx_t> cpuIndices(numQuery * k, -1);
cpuIndex.search(numQuery, queries.data(), k,
cpuDistance.data(), cpuIndices.data());
// The transpose and distance code assumes the desired device is already set
faiss::gpu::DeviceScope scope(device);
auto stream = res.getDefaultStream(device);
// Copy input data to GPU, and pre-transpose both vectors and queries for
// passing
auto gpuVecs = faiss::gpu::toDevice<float, 2>(
nullptr, device, vecs.data(), stream, {numVecs, dim});
auto gpuQueries = faiss::gpu::toDevice<float, 2>(
nullptr, device, queries.data(), stream, {numQuery, dim});
faiss::gpu::DeviceTensor<float, 2, true> vecsT({dim, numVecs});
faiss::gpu::runTransposeAny(gpuVecs, 0, 1, vecsT, stream);
faiss::gpu::DeviceTensor<float, 2, true> queriesT({dim, numQuery});
faiss::gpu::runTransposeAny(gpuQueries, 0, 1, queriesT, stream);
std::vector<float> gpuDistance(numQuery * k, 0);
std::vector<faiss::Index::idx_t> gpuIndices(numQuery * k, -1);
faiss::gpu::GpuDistanceParams args;
args.metric = metric;
args.metricArg = metricArg;
args.k = k;
args.dims = dim;
args.vectors = colMajorVecs ? vecsT.data() : gpuVecs.data();
args.vectorsRowMajor = !colMajorVecs;
args.numVectors = numVecs;
args.queries = colMajorQueries ? queriesT.data() : gpuQueries.data();
args.queriesRowMajor = !colMajorQueries;
args.numQueries = numQuery;
args.outDistances = gpuDistance.data();
args.outIndices = gpuIndices.data();
faiss::gpu::bfKnn(&res, args);
std::stringstream str;
str << "metric " << metric
<< " colMajorVecs " << colMajorVecs
<< " colMajorQueries " << colMajorQueries;
faiss::gpu::compareLists(cpuDistance.data(),
cpuIndices.data(),
gpuDistance.data(),
gpuIndices.data(),
numQuery, k,
str.str(),
false, false, true,
6e-3f, 0.1f, 0.015f);
}
// Test different memory layouts for brute-force k-NN
TEST(TestGpuDistance, Transposition_RR) {
testTransposition(false, false, faiss::MetricType::METRIC_L2);
testTransposition(false, false, faiss::MetricType::METRIC_INNER_PRODUCT);
}
TEST(TestGpuDistance, Transposition_RC) {
testTransposition(false, true, faiss::MetricType::METRIC_L2);
}
TEST(TestGpuDistance, Transposition_CR) {
testTransposition(true, false, faiss::MetricType::METRIC_L2);
}
TEST(TestGpuDistance, Transposition_CC) {
testTransposition(true, true, faiss::MetricType::METRIC_L2);
}
TEST(TestGpuDistance, L1) {
testTransposition(false, false, faiss::MetricType::METRIC_L1);
}
// Test other transpositions with the general distance kernel
TEST(TestGpuDistance, L1_RC) {
testTransposition(false, true, faiss::MetricType::METRIC_L1);
}
TEST(TestGpuDistance, L1_CR) {
testTransposition(true, false, faiss::MetricType::METRIC_L1);
}
TEST(TestGpuDistance, L1_CC) {
testTransposition(true, true, faiss::MetricType::METRIC_L1);
}
// Test remainder of metric types
TEST(TestGpuDistance, Linf) {
testTransposition(false, false, faiss::MetricType::METRIC_Linf);
}
TEST(TestGpuDistance, Lp) {
testTransposition(false, false, faiss::MetricType::METRIC_Lp, 3);
}
TEST(TestGpuDistance, Canberra) {
testTransposition(false, false, faiss::MetricType::METRIC_Canberra);
}
TEST(TestGpuDistance, BrayCurtis) {
testTransposition(false, false, faiss::MetricType::METRIC_BrayCurtis);
}
TEST(TestGpuDistance, JensenShannon) {
testTransposition(false, false, faiss::MetricType::METRIC_JensenShannon);
}
int main(int argc, char** argv) {
testing::InitGoogleTest(&argc, argv);
// just run with a fixed test seed
faiss::gpu::setTestSeed(100);
return RUN_ALL_TESTS();
}
| 135ccd5cbf63cf8e091d9e378175d6eea88861ba.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/IndexFlat.h>
#include <faiss/gpu/GpuDistance.h>
#include <faiss/gpu/StandardGpuResources.h>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/CopyUtils.cuh>
#include <faiss/gpu/utils/Transpose.cuh>
#include <faiss/gpu/test/TestUtils.h>
#include <gtest/gtest.h>
#include <sstream>
#include <vector>
void testTransposition(bool colMajorVecs,
bool colMajorQueries,
faiss::MetricType metric,
float metricArg = 0) {
int device = faiss::gpu::randVal(0, faiss::gpu::getNumDevices() - 1);
faiss::gpu::StandardGpuResources res;
res.noTempMemory();
int dim = faiss::gpu::randVal(20, 150);
int numVecs = faiss::gpu::randVal(10, 30000);
int numQuery = faiss::gpu::randVal(1, 1024);
int k = std::min(numVecs, faiss::gpu::randVal(20, 70));
// Input data for CPU
std::vector<float> vecs = faiss::gpu::randVecs(numVecs, dim);
std::vector<float> queries = faiss::gpu::randVecs(numQuery, dim);
if (metric == faiss::MetricType::METRIC_JensenShannon) {
// make values positive
for (auto& v : vecs) {
v = std::abs(v);
if (v == 0) {
v = 1e-6;
}
}
for (auto& q : queries) {
q = std::abs(q);
if (q == 0) {
q = 1e-6;
}
}
}
// The CPU index is our reference for the results
faiss::IndexFlat cpuIndex(dim, metric);
cpuIndex.metric_arg = metricArg;
cpuIndex.add(numVecs, vecs.data());
std::vector<float> cpuDistance(numQuery * k, 0);
std::vector<faiss::Index::idx_t> cpuIndices(numQuery * k, -1);
cpuIndex.search(numQuery, queries.data(), k,
cpuDistance.data(), cpuIndices.data());
// The transpose and distance code assumes the desired device is already set
faiss::gpu::DeviceScope scope(device);
auto stream = res.getDefaultStream(device);
// Copy input data to GPU, and pre-transpose both vectors and queries for
// passing
auto gpuVecs = faiss::gpu::toDevice<float, 2>(
nullptr, device, vecs.data(), stream, {numVecs, dim});
auto gpuQueries = faiss::gpu::toDevice<float, 2>(
nullptr, device, queries.data(), stream, {numQuery, dim});
faiss::gpu::DeviceTensor<float, 2, true> vecsT({dim, numVecs});
faiss::gpu::runTransposeAny(gpuVecs, 0, 1, vecsT, stream);
faiss::gpu::DeviceTensor<float, 2, true> queriesT({dim, numQuery});
faiss::gpu::runTransposeAny(gpuQueries, 0, 1, queriesT, stream);
std::vector<float> gpuDistance(numQuery * k, 0);
std::vector<faiss::Index::idx_t> gpuIndices(numQuery * k, -1);
faiss::gpu::GpuDistanceParams args;
args.metric = metric;
args.metricArg = metricArg;
args.k = k;
args.dims = dim;
args.vectors = colMajorVecs ? vecsT.data() : gpuVecs.data();
args.vectorsRowMajor = !colMajorVecs;
args.numVectors = numVecs;
args.queries = colMajorQueries ? queriesT.data() : gpuQueries.data();
args.queriesRowMajor = !colMajorQueries;
args.numQueries = numQuery;
args.outDistances = gpuDistance.data();
args.outIndices = gpuIndices.data();
faiss::gpu::bfKnn(&res, args);
std::stringstream str;
str << "metric " << metric
<< " colMajorVecs " << colMajorVecs
<< " colMajorQueries " << colMajorQueries;
faiss::gpu::compareLists(cpuDistance.data(),
cpuIndices.data(),
gpuDistance.data(),
gpuIndices.data(),
numQuery, k,
str.str(),
false, false, true,
6e-3f, 0.1f, 0.015f);
}
// Test different memory layouts for brute-force k-NN
TEST(TestGpuDistance, Transposition_RR) {
testTransposition(false, false, faiss::MetricType::METRIC_L2);
testTransposition(false, false, faiss::MetricType::METRIC_INNER_PRODUCT);
}
TEST(TestGpuDistance, Transposition_RC) {
testTransposition(false, true, faiss::MetricType::METRIC_L2);
}
TEST(TestGpuDistance, Transposition_CR) {
testTransposition(true, false, faiss::MetricType::METRIC_L2);
}
TEST(TestGpuDistance, Transposition_CC) {
testTransposition(true, true, faiss::MetricType::METRIC_L2);
}
TEST(TestGpuDistance, L1) {
testTransposition(false, false, faiss::MetricType::METRIC_L1);
}
// Test other transpositions with the general distance kernel
TEST(TestGpuDistance, L1_RC) {
testTransposition(false, true, faiss::MetricType::METRIC_L1);
}
TEST(TestGpuDistance, L1_CR) {
testTransposition(true, false, faiss::MetricType::METRIC_L1);
}
TEST(TestGpuDistance, L1_CC) {
testTransposition(true, true, faiss::MetricType::METRIC_L1);
}
// Test remainder of metric types
TEST(TestGpuDistance, Linf) {
testTransposition(false, false, faiss::MetricType::METRIC_Linf);
}
TEST(TestGpuDistance, Lp) {
testTransposition(false, false, faiss::MetricType::METRIC_Lp, 3);
}
TEST(TestGpuDistance, Canberra) {
testTransposition(false, false, faiss::MetricType::METRIC_Canberra);
}
TEST(TestGpuDistance, BrayCurtis) {
testTransposition(false, false, faiss::MetricType::METRIC_BrayCurtis);
}
TEST(TestGpuDistance, JensenShannon) {
testTransposition(false, false, faiss::MetricType::METRIC_JensenShannon);
}
int main(int argc, char** argv) {
testing::InitGoogleTest(&argc, argv);
// just run with a fixed test seed
faiss::gpu::setTestSeed(100);
return RUN_ALL_TESTS();
}
|
8f7492a0a45b0459a666cad13e04983bf0328e44.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Discrete Cosine/Sine Transform(DCT/DST and IDCT/IDST one to four-all in one)
* DCT/DST and IDCT/IDST I ---> IV
* This CUDA code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}
* GpuArray/cpuArray output, B=Discrete_Transform(A, type of transform (sine or cosine), type of transform (direct/inverse), type of DCT/DST or IDCT/IDST, dimensions).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "Discrete_TransformD.cu"
#include "Discrete_TransformS.cu"
extern "C" static void mexTransD(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
extern "C" static void mexTransS(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
int nDevices;
hipError_t errCode =hipGetDeviceCount(&nDevices);
//int nDevices;
//hipGetDeviceCount(&nDevices);
if (errCode != hipSuccess){
printf("Error! No CUDA devices found! \n");
return;
}
if (nrhs==5 && nlhs==1) {
if (mxIsGPUArray(prhs[0])) {
mxGPUArray const *tempGPU;
tempGPU = mxGPUCreateFromMxArray(prhs[0]);
if (mxGPUGetClassID(tempGPU) == mxDOUBLE_CLASS && mxGPUGetComplexity(tempGPU) == mxREAL){
mexTransD(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU);
return;
}
else if (mxGPUGetClassID(tempGPU) == mxSINGLE_CLASS && mxGPUGetComplexity(tempGPU) == mxREAL){
mexTransS(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU);
return;
}
else{
mxGPUDestroyGPUArray(tempGPU);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
//
else if(!mxIsGPUArray(prhs[0])) {
if (mxGetClassID(prhs[0]) == mxDOUBLE_CLASS && (!mxIsComplex(prhs[0]))){
mexTransD(nlhs, plhs,
nrhs, prhs);
return;
}
else if (mxGetClassID(prhs[0]) == mxSINGLE_CLASS && (!mxIsComplex(prhs[0]))){
mexTransS(nlhs, plhs,
nrhs, prhs);
return;
}
else{
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
}
else if ((nrhs<5) || (nrhs>5) || (nlhs<1) || (nlhs>1) ) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input/output arguments! input argument must be five and output arguments must be one\n");
return;
}
} | 8f7492a0a45b0459a666cad13e04983bf0328e44.cu |
/*
* Discrete Cosine/Sine Transform(DCT/DST and IDCT/IDST one to four-all in one)
* DCT/DST and IDCT/IDST I ---> IV
* This CUDA code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}
* GpuArray/cpuArray output, B=Discrete_Transform(A, type of transform (sine or cosine), type of transform (direct/inverse), type of DCT/DST or IDCT/IDST, dimensions).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include "Discrete_TransformD.cu"
#include "Discrete_TransformS.cu"
extern "C" static void mexTransD(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
extern "C" static void mexTransS(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
int nDevices;
cudaError_t errCode =cudaGetDeviceCount(&nDevices);
//int nDevices;
//cudaGetDeviceCount(&nDevices);
if (errCode != cudaSuccess){
printf("Error! No CUDA devices found! \n");
return;
}
if (nrhs==5 && nlhs==1) {
if (mxIsGPUArray(prhs[0])) {
mxGPUArray const *tempGPU;
tempGPU = mxGPUCreateFromMxArray(prhs[0]);
if (mxGPUGetClassID(tempGPU) == mxDOUBLE_CLASS && mxGPUGetComplexity(tempGPU) == mxREAL){
mexTransD(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU);
return;
}
else if (mxGPUGetClassID(tempGPU) == mxSINGLE_CLASS && mxGPUGetComplexity(tempGPU) == mxREAL){
mexTransS(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU);
return;
}
else{
mxGPUDestroyGPUArray(tempGPU);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
//
else if(!mxIsGPUArray(prhs[0])) {
if (mxGetClassID(prhs[0]) == mxDOUBLE_CLASS && (!mxIsComplex(prhs[0]))){
mexTransD(nlhs, plhs,
nrhs, prhs);
return;
}
else if (mxGetClassID(prhs[0]) == mxSINGLE_CLASS && (!mxIsComplex(prhs[0]))){
mexTransS(nlhs, plhs,
nrhs, prhs);
return;
}
else{
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
}
else if ((nrhs<5) || (nrhs>5) || (nlhs<1) || (nlhs>1) ) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input/output arguments! input argument must be five and output arguments must be one\n");
return;
}
} |
19e47a590ce594f50960c7a3455e13702c090b4c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.2) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date May 2015
@precisions normal z -> c d s
*/
#include "common_magma.h"
__global__ void
magma_zlobpcg_shift_kernel(
magma_int_t num_rows,
magma_int_t num_vecs,
magma_int_t shift,
magmaDoubleComplex * x )
{
int idx = threadIdx.x ; // thread in row
int row = blockIdx.y * gridDim.x + blockIdx.x; // global block index
if( row<num_rows){
magmaDoubleComplex tmp = x[idx];
__syncthreads();
if( idx > shift-1 ){
idx-=shift;
x[idx] = tmp;
__syncthreads();
}
}
}
/**
Purpose
-------
For a Block-LOBPCG, the set of residuals (entries consecutive in memory)
shrinks and the vectors are shifted in case shift residuals drop below
threshold. The memory layout of x is:
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
x = | x1[2] x2[2] x3[2] | = x1[0] x2[0] x3[0] x1[1] x2[1] x3[1] x1[2] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
num_vecs magma_int_t
number of vectors
@param[in]
shift magma_int_t
shift number
@param[in,out]
x magmaDoubleComplex_ptr
input/output vector x
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zaux
********************************************************************/
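/*
    Added illustrative note (not part of the original MAGMA comment): with
    shift = 1 the entries  x1[i] x2[i] x3[i]  of a row are intended to become
    x2[i] x3[i] ..., i.e. every entry at index >= shift moves shift slots to
    the left, dropping the first shift residuals while keeping the remaining
    ones consecutive in memory; the trailing slots are left untouched.
*/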
extern "C" magma_int_t
magma_zlobpcg_shift(
magma_int_t num_rows,
magma_int_t num_vecs,
magma_int_t shift,
magmaDoubleComplex_ptr x,
magma_queue_t queue )
{
magma_int_t num_threads = num_vecs;
// every thread handles one row containing the
if ( num_threads > 1024 )
printf("error: too many threads requested.\n");
int Ms = num_threads * sizeof( magmaDoubleComplex );
if ( Ms > 1024*8 )
printf("error: too much shared memory requested.\n");
dim3 block( num_threads, 1, 1 );
int dimgrid1 = (int) sqrt( (double) num_rows);
int dimgrid2 = magma_ceildiv( num_rows, dimgrid1 );
dim3 grid( dimgrid1, dimgrid2, 1);
hipLaunchKernelGGL(( magma_zlobpcg_shift_kernel), dim3(grid), dim3(block), Ms, queue ,
num_rows, num_vecs, shift, x );
return MAGMA_SUCCESS;
}
| 19e47a590ce594f50960c7a3455e13702c090b4c.cu | /*
-- MAGMA (version 1.6.2) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date May 2015
@precisions normal z -> c d s
*/
#include "common_magma.h"
__global__ void
magma_zlobpcg_shift_kernel(
magma_int_t num_rows,
magma_int_t num_vecs,
magma_int_t shift,
magmaDoubleComplex * x )
{
int idx = threadIdx.x ; // thread in row
int row = blockIdx.y * gridDim.x + blockIdx.x; // global block index
if( row<num_rows){
magmaDoubleComplex tmp = x[idx];
__syncthreads();
if( idx > shift-1 ){
idx-=shift;
x[idx] = tmp;
__syncthreads();
}
}
}
/**
Purpose
-------
For a Block-LOBPCG, the set of residuals (entries consecutive in memory)
shrinks and the vectors are shifted in case shift residuals drop below
threshold. The memory layout of x is:
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
x = | x1[2] x2[2] x3[2] | = x1[0] x2[0] x3[0] x1[1] x2[1] x3[1] x1[2] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
num_vecs magma_int_t
number of vectors
@param[in]
shift magma_int_t
shift number
@param[in,out]
x magmaDoubleComplex_ptr
input/output vector x
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zaux
********************************************************************/
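/*
    Added illustrative note (not part of the original MAGMA comment): with
    shift = 1 the entries  x1[i] x2[i] x3[i]  of a row are intended to become
    x2[i] x3[i] ..., i.e. every entry at index >= shift moves shift slots to
    the left, dropping the first shift residuals while keeping the remaining
    ones consecutive in memory; the trailing slots are left untouched.
*/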
extern "C" magma_int_t
magma_zlobpcg_shift(
magma_int_t num_rows,
magma_int_t num_vecs,
magma_int_t shift,
magmaDoubleComplex_ptr x,
magma_queue_t queue )
{
magma_int_t num_threads = num_vecs;
// every thread handles one row containing the
if ( num_threads > 1024 )
printf("error: too many threads requested.\n");
int Ms = num_threads * sizeof( magmaDoubleComplex );
if ( Ms > 1024*8 )
printf("error: too much shared memory requested.\n");
dim3 block( num_threads, 1, 1 );
int dimgrid1 = (int) sqrt( (double) num_rows);
int dimgrid2 = magma_ceildiv( num_rows, dimgrid1 );
dim3 grid( dimgrid1, dimgrid2, 1);
magma_zlobpcg_shift_kernel<<< grid, block, Ms, queue >>>
( num_rows, num_vecs, shift, x );
return MAGMA_SUCCESS;
}
|
dce1d3a6937c9ba4eabeca091351faa7af7eeb7c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void what_is_my_id_2d_A(
unsigned int* const block_x,
unsigned int* const block_y,
unsigned int* const thread,
unsigned int* const calc_thread,
unsigned int* const x_thread,
unsigned int* const y_thread,
unsigned int* const grid_dimx,
unsigned int* const block_dimx,
unsigned int* const grid_dimy,
unsigned int* const block_dimy)
{
const unsigned int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
const unsigned int idy = (blockIdx.y * blockDim.y) + threadIdx.y;
const unsigned int thread_idx = ((gridDim.x * blockDim.x) * idy) + idx;
block_x[thread_idx] = blockIdx.x;
block_y[thread_idx] = blockIdx.y;
thread[thread_idx] = threadIdx.x;
calc_thread[thread_idx] = thread_idx;
x_thread[thread_idx] = idx;
y_thread[thread_idx] = idy;
grid_dimx[thread_idx] = gridDim.x;
block_dimx[thread_idx] = blockDim.x;
grid_dimy[thread_idx] = gridDim.y;
block_dimy[thread_idx] = blockDim.y;
}
#ifndef ARRAY_SIZE_X
#define ARRAY_SIZE_X 32
#endif // !ARRAY_SIZE_X
#ifndef ARRAY_SIZE_Y
#define ARRAY_SIZE_Y 16
#endif // !ARRAY_SIZE_Y
#define ARRAY_SIZE_IN_BYTES ((ARRAY_SIZE_X) * (ARRAY_SIZE_Y) * (sizeof(unsigned int)))
/* Declare statically six arrays of ARRAY_SIZE each */
unsigned int cpu_block_x[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_block_y[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_thread[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_warp[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_calc_thread[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_xthread[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_ythread[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_grid_dimx[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_block_dimx[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_grid_dimy[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_block_dimy[ARRAY_SIZE_Y][ARRAY_SIZE_X];
int main(void)
{
/* Total thread count = 32 * 4 = 128 */
const dim3 threads_rect(32, 4);
const dim3 blocks_rect(1, 4);
/* Total thread count = 16 * 8 = 128 */
const dim3 threads_square(16, 8); /* 16 * 8 */
const dim3 blocks_square(2, 2);
/* Total thread count = 8 * 16 = 128 */
const dim3 reverse_threads_square(8, 16); /* 8 * 16 */
const dim3 reverse_blocks_square(2, 2);
/* Total thread count = 4 * 32 = 128 */
const dim3 reverse_threads_rect(4, 32); /* 4 * 32 */
const dim3 reverse_blocks_rect(4, 1);
/* Total thread count = 4 * 2 = 8 */
const dim3 few_threads_rect(4, 2); /* 4 * 2 */
const dim3 many_blocks_rect(8, 2);
/* Total thread count = 1 * 512 = 512 */
const dim3 many_threads(1, 512); /* 1 * 512 */
const dim3 one_blocks_rect(1, 1);
/* Needed to wait for a character at exit */
char ch;
/* Declare statically six arrays of ARRAY_SIZE each */
unsigned int* gpu_block_x;
unsigned int* gpu_block_y;
unsigned int* gpu_thread;
unsigned int* gpu_warp;
unsigned int* gpu_calc_thread;
unsigned int* gpu_xthread;
unsigned int* gpu_ythread;
unsigned int* gpu_grid_dimx;
unsigned int* gpu_block_dimx;
unsigned int* gpu_grid_dimy;
unsigned int* gpu_block_dimy;
/* Allocate arrays on the GPU */
hipMalloc((void**)&gpu_block_x, ARRAY_SIZE_IN_BYTES);
hipMalloc((void**)&gpu_block_y, ARRAY_SIZE_IN_BYTES);
hipMalloc((void**)&gpu_thread, ARRAY_SIZE_IN_BYTES);
hipMalloc((void**)&gpu_warp, ARRAY_SIZE_IN_BYTES);
hipMalloc((void**)&gpu_calc_thread, ARRAY_SIZE_IN_BYTES);
hipMalloc((void**)&gpu_xthread, ARRAY_SIZE_IN_BYTES);
hipMalloc((void**)&gpu_ythread, ARRAY_SIZE_IN_BYTES);
hipMalloc((void**)&gpu_grid_dimx, ARRAY_SIZE_IN_BYTES);
hipMalloc((void**)&gpu_block_dimx, ARRAY_SIZE_IN_BYTES);
hipMalloc((void**)&gpu_grid_dimy, ARRAY_SIZE_IN_BYTES);
hipMalloc((void**)&gpu_block_dimy, ARRAY_SIZE_IN_BYTES);
for (int kernel = 0; kernel < 6; kernel++)
{
dim3 threads_dim;
dim3 blocks_dim;
switch (kernel)
{
case 0:
{
blocks_dim = blocks_rect;
threads_dim = threads_rect;
/* Execute our kernel */
what_is_my_id_2d_A << <blocks_rect, threads_rect >> > (gpu_block_x, gpu_block_y,
gpu_thread, gpu_calc_thread, gpu_xthread, gpu_ythread, gpu_grid_dimx, gpu_block_dimx,
gpu_grid_dimy, gpu_block_dimy);
} break;
case 1:
{
blocks_dim = blocks_square;
threads_dim = threads_square;
/* Execute our kernel */
what_is_my_id_2d_A << <blocks_square, threads_square >> > (gpu_block_x, gpu_block_y,
gpu_thread, gpu_calc_thread, gpu_xthread, gpu_ythread, gpu_grid_dimx, gpu_block_dimx,
gpu_grid_dimy, gpu_block_dimy);
} break;
case 2:
{
blocks_dim = reverse_blocks_square;
threads_dim = reverse_threads_square;
/* Execute our kernel */
what_is_my_id_2d_A << <reverse_blocks_square, reverse_threads_square >> > (gpu_block_x, gpu_block_y,
gpu_thread, gpu_calc_thread, gpu_xthread, gpu_ythread, gpu_grid_dimx, gpu_block_dimx,
gpu_grid_dimy, gpu_block_dimy);
} break;
case 3:
{
blocks_dim = reverse_blocks_square;
threads_dim = reverse_threads_rect;
/* Execute our kernel */
what_is_my_id_2d_A << <reverse_blocks_square, reverse_threads_rect >> > (gpu_block_x, gpu_block_y,
gpu_thread, gpu_calc_thread, gpu_xthread, gpu_ythread, gpu_grid_dimx, gpu_block_dimx,
gpu_grid_dimy, gpu_block_dimy);
} break;
case 4:
{
blocks_dim = many_blocks_rect;
threads_dim = few_threads_rect;
/* Execute our kernel */
what_is_my_id_2d_A << <many_blocks_rect, few_threads_rect >> > (gpu_block_x, gpu_block_y,
gpu_thread, gpu_calc_thread, gpu_xthread, gpu_ythread, gpu_grid_dimx, gpu_block_dimx,
gpu_grid_dimy, gpu_block_dimy);
} break;
case 5:
{
blocks_dim = many_threads;
threads_dim = one_blocks_rect;
/* Execute our kernel */
what_is_my_id_2d_A << <many_threads, one_blocks_rect >> > (gpu_block_x, gpu_block_y,
gpu_thread, gpu_calc_thread, gpu_xthread, gpu_ythread, gpu_grid_dimx, gpu_block_dimx,
gpu_grid_dimy, gpu_block_dimy);
} break;
default: exit(1); break;
}
/* Copy back the gpu results to the CPU */
hipMemcpy(cpu_block_x, gpu_block_x, ARRAY_SIZE_IN_BYTES, hipMemcpyDeviceToHost);
hipMemcpy(cpu_block_y, gpu_block_y, ARRAY_SIZE_IN_BYTES, hipMemcpyDeviceToHost);
hipMemcpy(cpu_thread, gpu_thread, ARRAY_SIZE_IN_BYTES, hipMemcpyDeviceToHost);
hipMemcpy(cpu_calc_thread, gpu_calc_thread, ARRAY_SIZE_IN_BYTES, hipMemcpyDeviceToHost);
hipMemcpy(cpu_xthread, gpu_xthread, ARRAY_SIZE_IN_BYTES, hipMemcpyDeviceToHost);
hipMemcpy(cpu_ythread, gpu_ythread, ARRAY_SIZE_IN_BYTES, hipMemcpyDeviceToHost);
hipMemcpy(cpu_grid_dimx, gpu_grid_dimx, ARRAY_SIZE_IN_BYTES, hipMemcpyDeviceToHost);
hipMemcpy(cpu_block_dimx, gpu_block_dimx, ARRAY_SIZE_IN_BYTES, hipMemcpyDeviceToHost);
hipMemcpy(cpu_grid_dimy, gpu_grid_dimy, ARRAY_SIZE_IN_BYTES, hipMemcpyDeviceToHost);
hipMemcpy(cpu_block_dimy, gpu_block_dimy, ARRAY_SIZE_IN_BYTES, hipMemcpyDeviceToHost);
printf("\nKernel %d\n", kernel);
printf("Block Dimensions: x= %2d, y=%2d, z=%2d\n", blocks_dim.x, blocks_dim.y, blocks_dim.z);
printf("Thread Dimensions: x= %2d, y=%2d, z=%2d\n", threads_dim.x, threads_dim.y, threads_dim.z);
/* Iterate through the arrays and print */
for (int y = 0; y < ARRAY_SIZE_Y; y++)
{
for (int x = 0; x < ARRAY_SIZE_X; x++)
{
printf("CT: %3u BKX: %2u BKY: %2u TID: %3u YTID: %2u XTID: %2u GDX: %1u BDX: %1u GDY: %1u BDY: %1u\n",
cpu_calc_thread[y][x], cpu_block_x[y][x], cpu_block_y[y][x], cpu_thread[y][x], cpu_ythread[y][x],
cpu_xthread[y][x], cpu_grid_dimx[y][x], cpu_block_dimx[y][x], cpu_grid_dimy[y][x], cpu_block_dimy[y][x]);
}
}
}
/* Free the arrays on the GPU as now we're done with them */
hipFree(gpu_block_x);
hipFree(gpu_block_y);
hipFree(gpu_thread);
hipFree(gpu_warp);
hipFree(gpu_calc_thread);
hipFree(gpu_xthread);
hipFree(gpu_ythread);
hipFree(gpu_grid_dimy);
hipFree(gpu_block_dimy);
}
| dce1d3a6937c9ba4eabeca091351faa7af7eeb7c.cu | #include <stdio.h>
__global__ void what_is_my_id_2d_A(
unsigned int* const block_x,
unsigned int* const block_y,
unsigned int* const thread,
unsigned int* const calc_thread,
unsigned int* const x_thread,
unsigned int* const y_thread,
unsigned int* const grid_dimx,
unsigned int* const block_dimx,
unsigned int* const grid_dimy,
unsigned int* const block_dimy)
{
const unsigned int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
const unsigned int idy = (blockIdx.y * blockDim.y) + threadIdx.y;
const unsigned int thread_idx = ((gridDim.x * blockDim.x) * idy) + idx;
block_x[thread_idx] = blockIdx.x;
block_y[thread_idx] = blockIdx.y;
thread[thread_idx] = threadIdx.x;
calc_thread[thread_idx] = thread_idx;
x_thread[thread_idx] = idx;
y_thread[thread_idx] = idy;
grid_dimx[thread_idx] = gridDim.x;
block_dimx[thread_idx] = blockDim.x;
grid_dimy[thread_idx] = gridDim.y;
block_dimy[thread_idx] = blockDim.y;
}
#ifndef ARRAY_SIZE_X
#define ARRAY_SIZE_X 32
#endif // !ARRAY_SIZE_X
#ifndef ARRAY_SIZE_Y
#define ARRAY_SIZE_Y 16
#endif // !ARRAY_SIZE_Y
#define ARRAY_SIZE_IN_BYTES ((ARRAY_SIZE_X) * (ARRAY_SIZE_Y) * (sizeof(unsigned int)))
/* Declare statically six arrays of ARRAY_SIZE each */
unsigned int cpu_block_x[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_block_y[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_thread[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_warp[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_calc_thread[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_xthread[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_ythread[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_grid_dimx[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_block_dimx[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_grid_dimy[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_block_dimy[ARRAY_SIZE_Y][ARRAY_SIZE_X];
int main(void)
{
/* Total thread count = 32 * 4 = 128 */
const dim3 threads_rect(32, 4);
const dim3 blocks_rect(1, 4);
/* Total thread count = 16 * 8 = 128 */
const dim3 threads_square(16, 8); /* 16 * 8 */
const dim3 blocks_square(2, 2);
/* Total thread count = 8 * 16 = 128 */
const dim3 reverse_threads_square(8, 16); /* 8 * 16 */
const dim3 reverse_blocks_square(2, 2);
/* Total thread count = 4 * 32 = 128 */
const dim3 reverse_threads_rect(4, 32); /* 4 * 32 */
const dim3 reverse_blocks_rect(4, 1);
/* Total thread count = 4 * 2 = 8 */
const dim3 few_threads_rect(4, 2); /* 4 * 2 */
const dim3 many_blocks_rect(8, 2);
/* Total thread count = 1 * 512 = 512 */
const dim3 many_threads(1, 512); /* 1 * 512 */
const dim3 one_blocks_rect(1, 1);
/* Needed to wait for a character at exit */
char ch;
/* Declare statically six arrays of ARRAY_SIZE each */
unsigned int* gpu_block_x;
unsigned int* gpu_block_y;
unsigned int* gpu_thread;
unsigned int* gpu_warp;
unsigned int* gpu_calc_thread;
unsigned int* gpu_xthread;
unsigned int* gpu_ythread;
unsigned int* gpu_grid_dimx;
unsigned int* gpu_block_dimx;
unsigned int* gpu_grid_dimy;
unsigned int* gpu_block_dimy;
/* Allocate arrays on the GPU */
cudaMalloc((void**)&gpu_block_x, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void**)&gpu_block_y, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void**)&gpu_thread, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void**)&gpu_warp, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void**)&gpu_calc_thread, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void**)&gpu_xthread, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void**)&gpu_ythread, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void**)&gpu_grid_dimx, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void**)&gpu_block_dimx, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void**)&gpu_grid_dimy, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void**)&gpu_block_dimy, ARRAY_SIZE_IN_BYTES);
for (int kernel = 0; kernel < 6; kernel++)
{
dim3 threads_dim;
dim3 blocks_dim;
switch (kernel)
{
case 0:
{
blocks_dim = blocks_rect;
threads_dim = threads_rect;
/* Execute our kernel */
what_is_my_id_2d_A << <blocks_rect, threads_rect >> > (gpu_block_x, gpu_block_y,
gpu_thread, gpu_calc_thread, gpu_xthread, gpu_ythread, gpu_grid_dimx, gpu_block_dimx,
gpu_grid_dimy, gpu_block_dimy);
} break;
case 1:
{
blocks_dim = blocks_square;
threads_dim = threads_square;
/* Execute our kernel */
what_is_my_id_2d_A << <blocks_square, threads_square >> > (gpu_block_x, gpu_block_y,
gpu_thread, gpu_calc_thread, gpu_xthread, gpu_ythread, gpu_grid_dimx, gpu_block_dimx,
gpu_grid_dimy, gpu_block_dimy);
} break;
case 2:
{
blocks_dim = reverse_blocks_square;
threads_dim = reverse_threads_square;
/* Execute our kernel */
what_is_my_id_2d_A << <reverse_blocks_square, reverse_threads_square >> > (gpu_block_x, gpu_block_y,
gpu_thread, gpu_calc_thread, gpu_xthread, gpu_ythread, gpu_grid_dimx, gpu_block_dimx,
gpu_grid_dimy, gpu_block_dimy);
} break;
case 3:
{
blocks_dim = reverse_blocks_square;
threads_dim = reverse_threads_rect;
/* Execute our kernel */
what_is_my_id_2d_A << <reverse_blocks_square, reverse_threads_rect >> > (gpu_block_x, gpu_block_y,
gpu_thread, gpu_calc_thread, gpu_xthread, gpu_ythread, gpu_grid_dimx, gpu_block_dimx,
gpu_grid_dimy, gpu_block_dimy);
} break;
case 4:
{
blocks_dim = many_blocks_rect;
threads_dim = few_threads_rect;
/* Execute our kernel */
what_is_my_id_2d_A << <many_blocks_rect, few_threads_rect >> > (gpu_block_x, gpu_block_y,
gpu_thread, gpu_calc_thread, gpu_xthread, gpu_ythread, gpu_grid_dimx, gpu_block_dimx,
gpu_grid_dimy, gpu_block_dimy);
} break;
case 5:
{
blocks_dim = many_threads;
threads_dim = one_blocks_rect;
/* Execute our kernel */
what_is_my_id_2d_A << <many_threads, one_blocks_rect >> > (gpu_block_x, gpu_block_y,
gpu_thread, gpu_calc_thread, gpu_xthread, gpu_ythread, gpu_grid_dimx, gpu_block_dimx,
gpu_grid_dimy, gpu_block_dimy);
} break;
default: exit(1); break;
}
/* Copy back the gpu results to the CPU */
cudaMemcpy(cpu_block_x, gpu_block_x, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
cudaMemcpy(cpu_block_y, gpu_block_y, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
cudaMemcpy(cpu_thread, gpu_thread, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
cudaMemcpy(cpu_calc_thread, gpu_calc_thread, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
cudaMemcpy(cpu_xthread, gpu_xthread, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
cudaMemcpy(cpu_ythread, gpu_ythread, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
cudaMemcpy(cpu_grid_dimx, gpu_grid_dimx, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
cudaMemcpy(cpu_block_dimx, gpu_block_dimx, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
cudaMemcpy(cpu_grid_dimy, gpu_grid_dimy, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
cudaMemcpy(cpu_block_dimy, gpu_block_dimy, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
printf("\nKernel %d\n", kernel);
printf("Block Dimensions: x= %2d, y=%2d, z=%2d\n", blocks_dim.x, blocks_dim.y, blocks_dim.z);
printf("Thread Dimensions: x= %2d, y=%2d, z=%2d\n", threads_dim.x, threads_dim.y, threads_dim.z);
/* Iterate through the arrays and print */
for (int y = 0; y < ARRAY_SIZE_Y; y++)
{
for (int x = 0; x < ARRAY_SIZE_X; x++)
{
printf("CT: %3u BKX: %2u BKY: %2u TID: %3u YTID: %2u XTID: %2u GDX: %1u BDX: %1u GDY: %1u BDY: %1u\n",
cpu_calc_thread[y][x], cpu_block_x[y][x], cpu_block_y[y][x], cpu_thread[y][x], cpu_ythread[y][x],
cpu_xthread[y][x], cpu_grid_dimx[y][x], cpu_block_dimx[y][x], cpu_grid_dimy[y][x], cpu_block_dimy[y][x]);
}
}
}
/* Free the arrays on the GPU as now we're done with them */
cudaFree(gpu_block_x);
cudaFree(gpu_block_y);
cudaFree(gpu_thread);
cudaFree(gpu_warp);
cudaFree(gpu_calc_thread);
cudaFree(gpu_xthread);
cudaFree(gpu_ythread);
cudaFree(gpu_grid_dimy);
cudaFree(gpu_block_dimy);
}
|
74daa1c33b7144357ef09f9f014501db58d64bac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// This program will demo how to use CUDA to accelerate inner-product
#include <iostream>
#include <cstdlib>
using namespace std;
#define VECNUM 50000
#define VECLEN 1000
#define HALFNUM 25000000
int *inputA, *inputB;
int *devInputA, *devInputB, *devOut;
int *outCPU, *outGPU;
void init()
{
delete[] outGPU;
int i, j, idx;
inputA = new int[VECNUM * VECLEN];
inputB = new int[VECNUM * VECLEN];
for(i = 0; i < VECNUM; i++){
for(j = 0; j < VECLEN; j++){
idx = i*VECLEN + j;
if(idx<HALFNUM){
if(idx%2==0){ //if(idx=even number) =>set value=1
inputA[idx] =1;
inputB[idx] =1;
}
else{ //if(idx=odd number) =>set value=0
inputA[idx] =0;
inputB[idx] =0;
}
}
else{
inputA[idx] =3;
inputB[idx] =3;
}
}
}
outCPU = new int[VECNUM]();
outGPU = new int[VECNUM]();
}
void initGPU()
{
int inputSize = sizeof(int)*VECNUM*VECLEN;
hipMalloc(&devInputA, inputSize);
hipMalloc(&devInputB, inputSize);
hipMalloc(&devOut, sizeof(int)*VECNUM);
hipMemcpy(devInputA, inputA, inputSize, hipMemcpyHostToDevice);
hipMemcpy(devInputB, inputB, inputSize, hipMemcpyHostToDevice);
}
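// Added note: in the kernel below each block handles one vector pair; every thread
// writes one element-wise product into shared memory, then thread 0 alone sums the
// VECLEN partial products. This is simple but serial -- a tree reduction or warp
// shuffle would be the usual optimization here.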
__global__
void innerProductGPU(int *A, int *B, int *out)
{
int y = blockIdx.x;
int x = threadIdx.x;
__shared__ int tmp[VECLEN];
int idx = y * VECLEN + x;
tmp[x] = A[idx] * B[idx];
__syncthreads();
if(x == 0){
int i, sum = 0;
for(i = 0; i < VECLEN; i++)
sum += tmp[i];
out[y] = sum;
}
}
void innerProductCPU()
{
int i, j, acc, idx;
for(i = 0; i < VECNUM; i++){
acc = 0;
for(j = 0; j < VECLEN; j++){
idx = i*VECLEN + j;
acc += inputA[idx] * inputB[idx];
}
outCPU[i] = acc;
}
}
bool checker(){
int i;
for(i = 0; i < VECNUM; i++){
if(outCPU[i] != outGPU[i]){
cout << "The element: " << i << " is wrong!\n";
cout << "outCPU[" << i << "] = " << outCPU[i] << endl;
cout << "outGPU[" << i << "] = " << outGPU[i] << endl;
return false;
}
}
return true;
}
int timespec_diff_us(timespec& t1, timespec& t2)
{
return (t2.tv_sec - t1.tv_sec) * 1e6 + (t2.tv_nsec - t1.tv_nsec) / 1e3;
}
int main()
{
int outSize = sizeof(int)*VECNUM;
init();
initGPU();
timespec time_begin, time_end;
clock_gettime(CLOCK_REALTIME, &time_begin);
innerProductCPU();
clock_gettime(CLOCK_REALTIME, &time_end);
// cout << "CPU time for executing inner-product = " << timespec_diff_us(time_begin, time_end) / 1000 << "ms" << endl;
// GPU static version
dim3 threadsPerBlock(VECLEN);
dim3 numBlocks(VECNUM);
clock_gettime(CLOCK_REALTIME, &time_begin);
hipLaunchKernelGGL(( innerProductGPU), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, devInputA, devInputB, devOut);
hipDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, &time_end);
cout << "GPU time for executing static inner-product = " << timespec_diff_us(time_begin, time_end) << "us" << endl;
//data copy from GPU to CPU
hipMemcpy(outGPU, devOut, outSize, hipMemcpyDeviceToHost);
//check
if(checker())
cout << "Congratulations! You pass the check." << endl;
else
cout << "Sorry! Your result is wrong." << endl;
// release space
hipFree(devInputA);
hipFree(devInputB);
hipFree(devOut);
delete[] outGPU;
return 0;
}
| 74daa1c33b7144357ef09f9f014501db58d64bac.cu | // This program will demo how to use CUDA to accelerate inner-product
#include <iostream>
#include <cstdlib>
using namespace std;
#define VECNUM 50000
#define VECLEN 1000
#define HALFNUM 25000000
int *inputA, *inputB;
int *devInputA, *devInputB, *devOut;
int *outCPU, *outGPU;
void init()
{
delete[] outGPU;
int i, j, idx;
inputA = new int[VECNUM * VECLEN];
inputB = new int[VECNUM * VECLEN];
for(i = 0; i < VECNUM; i++){
for(j = 0; j < VECLEN; j++){
idx = i*VECLEN + j;
if(idx<HALFNUM){
if(idx%2==0){ //if(idx=even number) =>set value=1
inputA[idx] =1;
inputB[idx] =1;
}
else{ //if(idx=odd number) =>set value=0
inputA[idx] =0;
inputB[idx] =0;
}
}
else{
inputA[idx] =3;
inputB[idx] =3;
}
}
}
outCPU = new int[VECNUM]();
outGPU = new int[VECNUM]();
}
void initGPU()
{
int inputSize = sizeof(int)*VECNUM*VECLEN;
cudaMalloc(&devInputA, inputSize);
cudaMalloc(&devInputB, inputSize);
cudaMalloc(&devOut, sizeof(int)*VECNUM);
cudaMemcpy(devInputA, inputA, inputSize, cudaMemcpyHostToDevice);
cudaMemcpy(devInputB, inputB, inputSize, cudaMemcpyHostToDevice);
}
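// Added note: in the kernel below each block handles one vector pair; every thread
// writes one element-wise product into shared memory, then thread 0 alone sums the
// VECLEN partial products. This is simple but serial -- a tree reduction or warp
// shuffle would be the usual optimization here.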
__global__
void innerProductGPU(int *A, int *B, int *out)
{
int y = blockIdx.x;
int x = threadIdx.x;
__shared__ int tmp[VECLEN];
int idx = y * VECLEN + x;
tmp[x] = A[idx] * B[idx];
__syncthreads();
if(x == 0){
int i, sum = 0;
for(i = 0; i < VECLEN; i++)
sum += tmp[i];
out[y] = sum;
}
}
void innerProductCPU()
{
int i, j, acc, idx;
for(i = 0; i < VECNUM; i++){
acc = 0;
for(j = 0; j < VECLEN; j++){
idx = i*VECLEN + j;
acc += inputA[idx] * inputB[idx];
}
outCPU[i] = acc;
}
}
bool checker(){
int i;
for(i = 0; i < VECNUM; i++){
if(outCPU[i] != outGPU[i]){
cout << "The element: " << i << " is wrong!\n";
cout << "outCPU[" << i << "] = " << outCPU[i] << endl;
cout << "outGPU[" << i << "] = " << outGPU[i] << endl;
return false;
}
}
return true;
}
int timespec_diff_us(timespec& t1, timespec& t2)
{
return (t2.tv_sec - t1.tv_sec) * 1e6 + (t2.tv_nsec - t1.tv_nsec) / 1e3;
}
int main()
{
int outSize = sizeof(int)*VECNUM;
init();
initGPU();
timespec time_begin, time_end;
clock_gettime(CLOCK_REALTIME, &time_begin);
innerProductCPU();
clock_gettime(CLOCK_REALTIME, &time_end);
// cout << "CPU time for executing inner-product = " << timespec_diff_us(time_begin, time_end) / 1000 << "ms" << endl;
// GPU static version
dim3 threadsPerBlock(VECLEN);
dim3 numBlocks(VECNUM);
clock_gettime(CLOCK_REALTIME, &time_begin);
innerProductGPU<<<numBlocks, threadsPerBlock>>>(devInputA, devInputB, devOut);
cudaDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, &time_end);
cout << "GPU time for executing static inner-product = " << timespec_diff_us(time_begin, time_end) << "us" << endl;
//data copy from GPU to CPU
cudaMemcpy(outGPU, devOut, outSize, cudaMemcpyDeviceToHost);
//check
if(checker())
cout << "Congratulations! You pass the check." << endl;
else
cout << "Sorry! Your result is wrong." << endl;
// release space
cudaFree(devInputA);
cudaFree(devInputB);
cudaFree(devOut);
delete[] outGPU;
return 0;
}
|
0067a20e46c17a6c70136018ac16da9c66061ddf.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "hip/hip_runtime.h"
#include "../common/book.h"
#include "../common/cpu_bitmap.h"
#define DIM 1024
#define PI 3.1415926535897932f
__global__ void kernel( unsigned char *ptr ) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
__shared__ float shared[16][16];
// now calculate the value at that position
const float period = 128.0f;
shared[threadIdx.x][threadIdx.y] =
255 * (sinf(x*2.0f*PI/ period) + 1.0f) *
(sinf(y*2.0f*PI/ period) + 1.0f) / 4.0f;
// removing this syncthreads shows graphically what happens
// when it doesn't exist. this is an example of why we need it.
// __syncthreads();
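// Added note: without the barrier, the read of shared[15-threadIdx.x][15-threadIdx.y]
// below can run before the thread that owns that element has written it, so parts of
// each 16x16 tile come out with stale or uninitialized values.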
ptr[offset*4 + 0] = 0;
ptr[offset*4 + 1] = shared[15-threadIdx.x][15-threadIdx.y];
ptr[offset*4 + 2] = 0;
ptr[offset*4 + 3] = 255;
}
// globals needed by the update routine
struct DataBlock {
unsigned char *dev_bitmap;
};
int main( void ) {
DataBlock data;
CPUBitmap bitmap( DIM, DIM, &data );
unsigned char *dev_bitmap;
HANDLE_ERROR( hipMalloc( (void**)&dev_bitmap,
bitmap.image_size() ) );
data.dev_bitmap = dev_bitmap;
dim3 grids(DIM/16,DIM/16);
dim3 threads(16,16);
hipLaunchKernelGGL(( kernel), dim3(grids),dim3(threads), 0, 0, dev_bitmap );
HANDLE_ERROR( hipMemcpy( bitmap.get_ptr(), dev_bitmap,
bitmap.image_size(),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipFree( dev_bitmap ) );
bitmap.display_and_exit();
}
| 0067a20e46c17a6c70136018ac16da9c66061ddf.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "cuda.h"
#include "../common/book.h"
#include "../common/cpu_bitmap.h"
#define DIM 1024
#define PI 3.1415926535897932f
__global__ void kernel( unsigned char *ptr ) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
__shared__ float shared[16][16];
// now calculate the value at that position
const float period = 128.0f;
shared[threadIdx.x][threadIdx.y] =
255 * (sinf(x*2.0f*PI/ period) + 1.0f) *
(sinf(y*2.0f*PI/ period) + 1.0f) / 4.0f;
// removing this syncthreads shows graphically what happens
// when it doesn't exist. this is an example of why we need it.
// __syncthreads();
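// Added note: without the barrier, the read of shared[15-threadIdx.x][15-threadIdx.y]
// below can run before the thread that owns that element has written it, so parts of
// each 16x16 tile come out with stale or uninitialized values.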
ptr[offset*4 + 0] = 0;
ptr[offset*4 + 1] = shared[15-threadIdx.x][15-threadIdx.y];
ptr[offset*4 + 2] = 0;
ptr[offset*4 + 3] = 255;
}
// globals needed by the update routine
struct DataBlock {
unsigned char *dev_bitmap;
};
int main( void ) {
DataBlock data;
CPUBitmap bitmap( DIM, DIM, &data );
unsigned char *dev_bitmap;
HANDLE_ERROR( cudaMalloc( (void**)&dev_bitmap,
bitmap.image_size() ) );
data.dev_bitmap = dev_bitmap;
dim3 grids(DIM/16,DIM/16);
dim3 threads(16,16);
kernel<<<grids,threads>>>( dev_bitmap );
HANDLE_ERROR( cudaMemcpy( bitmap.get_ptr(), dev_bitmap,
bitmap.image_size(),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaFree( dev_bitmap ) );
bitmap.display_and_exit();
}
|
06ae9be4a4da621231d8dbb81f21c601e073e11d.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
#include "Logging.hpp"
#include "tensor/Tensor.hpp"
using namespace DLFS;
using namespace std;
/**
* Declarations
*/
__global__ void
SoftmaxCrossEntropyFloat(TensorInfoCUDA logitShape, float *logits,
TensorInfoCUDA labelShape, uint32_t *labels,
TensorInfoCUDA outputShape, float *output, bool reduce_mean);
__global__ void
SoftmaxCrossEntropyBackwardFloat(TensorInfoCUDA logitShape, float *logits,
TensorInfoCUDA labelShape, uint32_t *labels,
TensorInfoCUDA outputShape, float *output);
/**
* Kernels
*/
extern "C" void
LaunchSoftmaxCEKernel(CustomOpDataType dataType,
TensorShape logitShape, void *logits,
TensorShape labelShape, void *labels,
TensorShape outputShape, void *output, bool reduce_mean) {
int threads = logitShape[0];
TensorInfoCUDA ti[3] = {TensorInfoCUDA(logitShape),
TensorInfoCUDA(labelShape), TensorInfoCUDA(outputShape)};
switch (dataType) {
case CustomOpDataType::Float:
LOG.DEBUG() << "Launching softmax cross entropy (float) kernel with "
<< threads << " threads";
hipLaunchKernelGGL(( SoftmaxCrossEntropyFloat), dim3(1), dim3(threads), threads * sizeof(float), 0, ti[0], (float *)logits, ti[1],
(uint32_t *)labels, ti[2],
(float *)output, reduce_mean);
break;
default:
throw std::runtime_error("Not implemented.");
}
}
extern "C" void
LaunchSoftmaxCEBackwardKernel(CustomOpDataType dataType, TensorShape logitShape,
void *logits, TensorShape labelShape,
void *labels, TensorShape outputShape,
void *output) {
int threads = logitShape[0];
TensorInfoCUDA ti[3] = {TensorInfoCUDA(logitShape),
TensorInfoCUDA(labelShape), TensorInfoCUDA(outputShape)};
switch (dataType) {
case CustomOpDataType::Float:
LOG.DEBUG()
<< "Launching softmax cross entropy backward (float) kernel with "
<< threads << " threads";
hipLaunchKernelGGL(( SoftmaxCrossEntropyBackwardFloat), dim3(1), dim3(threads), 0, 0,
ti[0], (float *)logits, ti[1], (uint32_t *)labels, ti[2],
(float *)output);
break;
default:
throw std::runtime_error("Not implemented.");
}
}
/**
* SoftmaxCrossEntropyFloat
* Logits - of shape batch_size x 1 x 1 x num_classes
* labels - of shape batch_size x 1 x 1 x 1
*
* Parallelized over the batch dimension.
*/
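// Added note: for each example i the kernel below computes the standard softmax
// cross-entropy  loss_i = log(sum_j exp(z_ij)) - z_{i, y_i}, where z are the logits
// and y_i the integer label; with reduce_mean it then averages the per-example
// losses using a tree reduction that assumes the batch size is a power of two.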
__global__ void
SoftmaxCrossEntropyFloat(TensorInfoCUDA logitShape, float *logits,
TensorInfoCUDA labelShape, uint32_t *labels,
TensorInfoCUDA outputShape, float *output, bool reduce_mean) {
unsigned int batchIdx = threadIdx.x;
extern __shared__ float sdata[];
// Check to make sure we are not out of bounds.
if (batchIdx > logitShape.n-1)
return;
// float normalization = logitShape.n*logitShape.c;
float exp_sum = 0.0f;
float loss = 0.0f;
for (unsigned int classIdx = 0; classIdx < logitShape.c; classIdx++) {
unsigned int index = batchIdx * logitShape.c + classIdx;
exp_sum += expf(logits[index]);
}
loss = logf(exp_sum)-logits[batchIdx*logitShape.c+labels[batchIdx]];
sdata[batchIdx] = loss;
if(!reduce_mean){
output[batchIdx] = sdata[batchIdx];
return;
}
// Parallel reduction - requires batch to be power of 2
__syncthreads();
for(unsigned int stride = 1; stride < logitShape.n; stride*=2){
if((threadIdx.x %(stride*2))==0){
sdata[threadIdx.x] += sdata[threadIdx.x+stride];
}
__syncthreads(); // Sync must happen at every level of the pyramid;
}
if (threadIdx.x == 0){
output[0] = sdata[0] / static_cast<float>(logitShape.n);
}
}
/**
* SoftmaxCrossEntropyBackwardFloat
* Logits - of shape batch_size x 1 x 1 x num_classes
* labels - of shape batch_size x 1 x 1 x 1
*
* Parallelized over the batch dimension.
*/
__global__ void
SoftmaxCrossEntropyBackwardFloat(TensorInfoCUDA logitShape, float *logits,
TensorInfoCUDA labelShape, uint32_t *labels,
TensorInfoCUDA outputShape, float *output) {
unsigned int batchIdx = threadIdx.x;
extern __shared__ float sdata[];
// Check to make sure we are not out of bounds.
if (batchIdx > logitShape.n-1)
return;
// float normalization = logitShape.n*logitShape.c;
float exp_sum = 0.0f;
float loss = 0.0f;
for (unsigned int classIdx = 0; classIdx < logitShape.c; classIdx++) {
unsigned int index = batchIdx * logitShape.c + classIdx;
exp_sum += expf(logits[index]);
}
loss = logf(exp_sum)-logits[batchIdx*logitShape.c+labels[batchIdx]];
output[batchIdx] = loss;
} | 06ae9be4a4da621231d8dbb81f21c601e073e11d.cu | #include <cuda_runtime.h>
#include <iostream>
#include "Logging.hpp"
#include "tensor/Tensor.hpp"
using namespace DLFS;
using namespace std;
/**
* Declarations
*/
__global__ void
SoftmaxCrossEntropyFloat(TensorInfoCUDA logitShape, float *logits,
TensorInfoCUDA labelShape, uint32_t *labels,
TensorInfoCUDA outputShape, float *output, bool reduce_mean);
__global__ void
SoftmaxCrossEntropyBackwardFloat(TensorInfoCUDA logitShape, float *logits,
TensorInfoCUDA labelShape, uint32_t *labels,
TensorInfoCUDA outputShape, float *output);
/**
* Kernels
*/
extern "C" void
LaunchSoftmaxCEKernel(CustomOpDataType dataType,
TensorShape logitShape, void *logits,
TensorShape labelShape, void *labels,
TensorShape outputShape, void *output, bool reduce_mean) {
int threads = logitShape[0];
TensorInfoCUDA ti[3] = {TensorInfoCUDA(logitShape),
TensorInfoCUDA(labelShape), TensorInfoCUDA(outputShape)};
switch (dataType) {
case CustomOpDataType::Float:
LOG.DEBUG() << "Launching softmax cross entropy (float) kernel with "
<< threads << " threads";
        // dynamic shared memory: one float per sample for the loss reduction
        SoftmaxCrossEntropyFloat<<<1, threads, threads * sizeof(float)>>>(ti[0], (float *)logits, ti[1],
(uint32_t *)labels, ti[2],
(float *)output, reduce_mean);
break;
default:
throw std::runtime_error("Not implemented.");
}
}
extern "C" void
LaunchSoftmaxCEBackwardKernel(CustomOpDataType dataType, TensorShape logitShape,
void *logits, TensorShape labelShape,
void *labels, TensorShape outputShape,
void *output) {
int threads = logitShape[0];
TensorInfoCUDA ti[3] = {TensorInfoCUDA(logitShape),
TensorInfoCUDA(labelShape), TensorInfoCUDA(outputShape)};
switch (dataType) {
case CustomOpDataType::Float:
LOG.DEBUG()
<< "Launching softmax cross entropy backward (float) kernel with "
<< threads << " threads";
SoftmaxCrossEntropyBackwardFloat<<<1, threads>>>(
ti[0], (float *)logits, ti[1], (uint32_t *)labels, ti[2],
(float *)output);
break;
default:
throw std::runtime_error("Not implemented.");
}
}
/**
 * SoftmaxCrossEntropyFloat
* Logits - of shape batch_size x 1 x 1 x num_classes
* labels - of shape batch_size x 1 x 1 x 1
*
* Parallelized over the batch dimension.
*/
__global__ void
SoftmaxCrossEntropyFloat(TensorInfoCUDA logitShape, float *logits,
TensorInfoCUDA labelShape, uint32_t *labels,
TensorInfoCUDA outputShape, float *output, bool reduce_mean) {
unsigned int batchIdx = threadIdx.x;
extern __shared__ float sdata[];
// Check to make sure we are not out of bounds.
if (batchIdx > logitShape.n-1)
return;
// float normalization = logitShape.n*logitShape.c;
float exp_sum = 0.0f;
float loss = 0.0f;
for (unsigned int classIdx = 0; classIdx < logitShape.c; classIdx++) {
unsigned int index = batchIdx * logitShape.c + classIdx;
exp_sum += expf(logits[index]);
}
loss = logf(exp_sum)-logits[batchIdx*logitShape.c+labels[batchIdx]];
sdata[batchIdx] = loss;
if(!reduce_mean){
output[batchIdx] = sdata[batchIdx];
return;
}
// Parallel reduction - requires batch to be power of 2
__syncthreads();
for(unsigned int stride = 1; stride < logitShape.n; stride*=2){
if((threadIdx.x %(stride*2))==0){
sdata[threadIdx.x] += sdata[threadIdx.x+stride];
}
__syncthreads(); // Sync must happen at every level of the pyramid;
}
if (threadIdx.x == 0){
output[0] = sdata[0] / static_cast<float>(logitShape.n);
}
}
/**
 * SoftmaxCrossEntropyBackwardFloat
* Logits - of shape batch_size x 1 x 1 x num_classes
* labels - of shape batch_size x 1 x 1 x 1
*
* Parallelized over the batch dimension.
*/
__global__ void
SoftmaxCrossEntropyBackwardFloat(TensorInfoCUDA logitShape, float *logits,
TensorInfoCUDA labelShape, uint32_t *labels,
TensorInfoCUDA outputShape, float *output) {
unsigned int batchIdx = threadIdx.x;
extern __shared__ float sdata[];
// Check to make sure we are not out of bounds.
if (batchIdx > logitShape.n-1)
return;
// float normalization = logitShape.n*logitShape.c;
float exp_sum = 0.0f;
float loss = 0.0f;
for (unsigned int classIdx = 0; classIdx < logitShape.c; classIdx++) {
unsigned int index = batchIdx * logitShape.c + classIdx;
exp_sum += expf(logits[index]);
}
loss = logf(exp_sum)-logits[batchIdx*logitShape.c+labels[batchIdx]];
output[batchIdx] = loss;
} |
aed6a59a8f0df200a5fdcd589179b9c3df47936c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
__global__ void Muld(double* A, double* B, int wA, int wB, double* C){
int aEnd = wA * BLOCK_SIZE * blockIdx.y + wA - 1;
double Csub = 0;
for (int a = wA * BLOCK_SIZE * blockIdx.y, b = BLOCK_SIZE * blockIdx.x; a <= aEnd; a += BLOCK_SIZE, b += BLOCK_SIZE * wB) {
__shared__ double As[BLOCK_SIZE][BLOCK_SIZE], Bs[BLOCK_SIZE][BLOCK_SIZE];
As[threadIdx.y][threadIdx.x] = A[a + wA * threadIdx.y + threadIdx.x];
Bs[threadIdx.y][threadIdx.x] = B[b + wB * threadIdx.y + threadIdx.x];
__syncthreads();
for (int k = 0; k < BLOCK_SIZE; ++k)
Csub += As[threadIdx.y][k] * Bs[k][threadIdx.x];
__syncthreads();
}
C[wB * BLOCK_SIZE * blockIdx.y + BLOCK_SIZE * blockIdx.x + wB*threadIdx.y + threadIdx.x] += Csub;
}
| aed6a59a8f0df200a5fdcd589179b9c3df47936c.cu | #include "kernel.h"
__global__ void Muld(double* A, double* B, int wA, int wB, double* C){
int aEnd = wA * BLOCK_SIZE * blockIdx.y + wA - 1;
double Csub = 0;
for (int a = wA * BLOCK_SIZE * blockIdx.y, b = BLOCK_SIZE * blockIdx.x; a <= aEnd; a += BLOCK_SIZE, b += BLOCK_SIZE * wB) {
__shared__ double As[BLOCK_SIZE][BLOCK_SIZE], Bs[BLOCK_SIZE][BLOCK_SIZE];
As[threadIdx.y][threadIdx.x] = A[a + wA * threadIdx.y + threadIdx.x];
Bs[threadIdx.y][threadIdx.x] = B[b + wB * threadIdx.y + threadIdx.x];
__syncthreads();
for (int k = 0; k < BLOCK_SIZE; ++k)
Csub += As[threadIdx.y][k] * Bs[k][threadIdx.x];
__syncthreads();
}
C[wB * BLOCK_SIZE * blockIdx.y + BLOCK_SIZE * blockIdx.x + wB*threadIdx.y + threadIdx.x] += Csub;
}
|
941e5bce6b57047fa97475e29e04ded74fee8965.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
#include <algorithm>
#include <vector>
#include <string>
#include <assert.h>
#include <map>
#include <test.hpp>
#include <dcsrmv.hpp>
#include <csr.hpp>
int main()
{
{
Test t("Init");
size_t m = 10;
size_t n = 10;
size_t nnz = 20;
hCSR<double, int> A2(m, n, nnz);
dCSR<double, int> dA2(m, n, nnz);
test(t, A2.m == m);
test(t, A2.n == n);
test(t, A2.nnz == nnz);
test(t, dA2.m == m);
test(t, dA2.n == n);
test(t, dA2.nnz == nnz);
}
{
Test t("Read/write");
hCSR<double, int> A1(10, 10, 10);
A1.row[0] = 1;
A1.col[0] = 1;
A1.val[0] = 1.0;
write("csr.bin", A1);
write("csr.bin", A1, 1);
hCSR<double, int> A2 = read("csr.bin");
hCSR<double, int> A3 = read("csr.bin", 1);
test(t, A1.row[0] == A2.row[0]);
test(t, A1.col[0] == A2.col[0]);
test(t, A1.val[0] == A2.val[0]);
}
{
Test t("Transfer to host/device");
size_t m = 10;
size_t n = 10;
size_t nnz = 20;
hCSR<double, int> A1(m, n, nnz);
A1.row[0] = 1;
A1.col[0] = 1;
A1.val[0] = 1.0;
dCSR<double, int> dA1 = htod(A1);
hCSR<double, int> A2 = dtoh(dA1);
test(t, A1.row[0] == A2.row[0]);
test(t, A1.col[0] == A2.col[0]);
test(t, A1.val[0] == A2.val[0]);
}
{
Test t("Matrix vector multiplication (double)");
hipsparseHandle_t cusparseH;
cusparseErrCheck(hipsparseCreate(&cusparseH));
hCSR<double, int> A1(1, 10, 1);
A1.row[0] = 0;
A1.row[1] = 1;
A1.col[0] = 0;
A1.val[0] = 1.0;
dCSR<double, int> dA = htod(A1);
hArray<double> x(A1.n);
x[0] = -1.0;
hArray<double> y(A1.m);
dArray<double> dx = htod(x);
dArray<double> dy = htod(y);
mv(cusparseH, dy, dA, dx);
hArray<double> y2 = dtoh(dy);
test(t, y2[0] == x[0]);
cusparseErrCheck(hipsparseDestroy(cusparseH));
}
{
Test t("Matrix vector multiplication (float)");
hipsparseHandle_t cusparseH;
cusparseErrCheck(hipsparseCreate(&cusparseH));
hCSR<float, int> A1(1, 10, 1);
A1.row[0] = 0;
A1.row[1] = 1;
A1.col[0] = 0;
A1.val[0] = 1.0;
dCSR<float, int> dA = htod(A1);
hArray<float> x(A1.n);
x[0] = -1.0;
hArray<float> y(A1.m);
dArray<float> dx = htod(x);
dArray<float> dy = htod(y);
mv(cusparseH, dy, dA, dx);
hArray<float> y2 = dtoh(dy);
test(t, y2[0] == x[0]);
cusparseErrCheck(hipsparseDestroy(cusparseH));
}
return test_status();
}
| 941e5bce6b57047fa97475e29e04ded74fee8965.cu | #include <iostream>
#include <fstream>
#include <algorithm>
#include <vector>
#include <string>
#include <assert.h>
#include <map>
#include <test.hpp>
#include <dcsrmv.hpp>
#include <csr.hpp>
int main()
{
{
Test t("Init");
size_t m = 10;
size_t n = 10;
size_t nnz = 20;
hCSR<double, int> A2(m, n, nnz);
dCSR<double, int> dA2(m, n, nnz);
test(t, A2.m == m);
test(t, A2.n == n);
test(t, A2.nnz == nnz);
test(t, dA2.m == m);
test(t, dA2.n == n);
test(t, dA2.nnz == nnz);
}
{
Test t("Read/write");
hCSR<double, int> A1(10, 10, 10);
A1.row[0] = 1;
A1.col[0] = 1;
A1.val[0] = 1.0;
write("csr.bin", A1);
write("csr.bin", A1, 1);
hCSR<double, int> A2 = read("csr.bin");
hCSR<double, int> A3 = read("csr.bin", 1);
test(t, A1.row[0] == A2.row[0]);
test(t, A1.col[0] == A2.col[0]);
test(t, A1.val[0] == A2.val[0]);
}
{
Test t("Transfer to host/device");
size_t m = 10;
size_t n = 10;
size_t nnz = 20;
hCSR<double, int> A1(m, n, nnz);
A1.row[0] = 1;
A1.col[0] = 1;
A1.val[0] = 1.0;
dCSR<double, int> dA1 = htod(A1);
hCSR<double, int> A2 = dtoh(dA1);
test(t, A1.row[0] == A2.row[0]);
test(t, A1.col[0] == A2.col[0]);
test(t, A1.val[0] == A2.val[0]);
}
{
Test t("Matrix vector multiplication (double)");
cusparseHandle_t cusparseH;
cusparseErrCheck(cusparseCreate(&cusparseH));
hCSR<double, int> A1(1, 10, 1);
A1.row[0] = 0;
A1.row[1] = 1;
A1.col[0] = 0;
A1.val[0] = 1.0;
dCSR<double, int> dA = htod(A1);
hArray<double> x(A1.n);
x[0] = -1.0;
hArray<double> y(A1.m);
dArray<double> dx = htod(x);
dArray<double> dy = htod(y);
mv(cusparseH, dy, dA, dx);
hArray<double> y2 = dtoh(dy);
test(t, y2[0] == x[0]);
cusparseErrCheck(cusparseDestroy(cusparseH));
}
{
Test t("Matrix vector multiplication (float)");
cusparseHandle_t cusparseH;
cusparseErrCheck(cusparseCreate(&cusparseH));
hCSR<float, int> A1(1, 10, 1);
A1.row[0] = 0;
A1.row[1] = 1;
A1.col[0] = 0;
A1.val[0] = 1.0;
dCSR<float, int> dA = htod(A1);
hArray<float> x(A1.n);
x[0] = -1.0;
hArray<float> y(A1.m);
dArray<float> dx = htod(x);
dArray<float> dy = htod(y);
mv(cusparseH, dy, dA, dx);
hArray<float> y2 = dtoh(dy);
test(t, y2[0] == x[0]);
cusparseErrCheck(cusparseDestroy(cusparseH));
}
return test_status();
}
|
9345294b844330d7aa26c4cebb609750f143ab28.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <map>
#include <set>
#include <vector>
#include <algorithm>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
using namespace std;
#define CUDA_SAFE_CALL( err ) (safe_call(err, __LINE__))
#define MAX_THREADS_PER_BLOCK 1024
void safe_call(hipError_t ret, int line)
{
if(ret!=hipSuccess)
{
printf("Error at line %d : %s\n",line,hipGetErrorString(ret));
exit(-1);
}
}
typedef struct __graph
{
int E;
int *from;
int *to;
} graph_t;
__device__ bool d_over;
__global__ void reset()
{
d_over = false;
}
// Print the graph
/*__global__ void temp_kernel(graph_t * graph)
{
int id = blockDim.x*blockIdx.x + threadIdx.x;
if(id == 0)
{
int j;
for(j=0; j<graph->adj_prefix_sum[graph->V-1]; j++)
printf("%d ",graph->adj[j]);
printf("\n");
}
}*/
__global__ void init(int * vertices, int starting_vertex, int num_vertices)
{
int v = blockDim.x*blockIdx.x + threadIdx.x;
if (v==starting_vertex)
vertices[v] = 0;
else if(v < num_vertices)
vertices[v] = -1;
}
__global__ void bfs(const graph_t * graph, int * vertices, int current_depth)
{
int id = blockDim.x*blockIdx.x + threadIdx.x;
if(id < graph->E)
{
int f = graph->from[id];
if(vertices[f] == current_depth)
{
int e = graph->to[id];
if(vertices[e] == -1)
{
vertices[e] = current_depth+1;
d_over = true;
}
}
}
}
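/*
 * Note: this kernel is edge-parallel -- one thread per edge, relaxing an edge
 * only when its source vertex lies on the current frontier
 * (vertices[f] == current_depth). The host loop in main() below relaunches it
 * once per BFS level until no vertex is updated (d_over stays false), so the
 * total work is on the order of |E| times the number of levels.
 */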
int main(int argc, char * argv[])
{
static char * filename;
if(argc>2)
{
printf("./a.out <filename>\n");
exit(-1);
}
else if(argc==2)
{
filename = argv[1];
}
else
{
filename = "../data/input.txt";
}
FILE * fp = fopen(filename,"r");
if(!fp)
{
printf("Error reading file.\n");
exit(-1);
}
/* Set cuda device to K40 */
CUDA_SAFE_CALL(hipSetDevice(0));
/* Get graph from file into CPU memory */
int num_vertices, num_edges, i, j;
fscanf(fp,"%d %d",&num_vertices,&num_edges);
graph_t *graph_host;
CUDA_SAFE_CALL(hipMallocManaged((void **)&graph_host, sizeof(graph_t)));
graph_host->E = num_edges;
CUDA_SAFE_CALL(hipMallocManaged((void **)&(graph_host->from), num_edges*sizeof(int)));
	CUDA_SAFE_CALL(hipMallocManaged((void **)&(graph_host->to), num_edges*sizeof(int))); // 'to' holds vertex ids (int), not pointers
set<int> vertices;
vector< pair<int,int> > edges;
int s,d;
for(i=0; i<num_edges; i++)
{
fscanf(fp,"%d",&s);
fscanf(fp,"%d",&d);
vertices.insert(s);
vertices.insert(d);
edges.push_back(make_pair(s,d));
}
sort(edges.begin(),edges.end());
i=0;
//int l=0,r=0;
//set<int>::iterator fe=vertices.begin();
//set<int>::iterator se=vertices.begin();
for(vector< pair<int,int> >::iterator it = edges.begin() ; it != edges.end(); ++it)
{
/* while((*fe)!=(*it).first && fe!=vertices.end())
{
l++;
se = vertices.begin();
r=0;
}
while((*se)!=(*it).second && se!=vertices.end())
{
r++;
}
*/
int l = distance(vertices.begin(),vertices.find((*it).first)); // C++ set stores in sorted order by default
int r = distance(vertices.begin(),vertices.find((*it).second));
graph_host->from[i]=l;
graph_host->to[i]=r;
i++;
}
/*****************************************************
XXX: GPU does not know the size of each adjacency list.
For that, a new struct containing size of list and list
has to be created and passed to GPU memory. Too much hassle.
OR
Create 1-D array in the graph itself which contains the
size of each list.
*****************************************************/
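// One possible shape for the second option above (a hypothetical sketch only,
// not used by this file, which stores a plain edge list instead):
//
// typedef struct __graph_csr
// {
//     int V;           // number of vertices
//     int E;           // number of edges
//     int *adj_offset; // V+1 prefix sums into adj; list v has
//                      // adj_offset[v+1] - adj_offset[v] neighbours
//     int *adj;        // concatenated adjacency lists
// } graph_csr_t;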
//temp_kernel<<<1,1>>>(graph_device);
int num_of_blocks = 1;
int num_of_threads_per_block = num_edges;
if(num_edges > MAX_THREADS_PER_BLOCK)
{
num_of_blocks = (int)ceil(num_edges/(double)MAX_THREADS_PER_BLOCK);
num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
}
int * vertices_host;
CUDA_SAFE_CALL(hipMallocManaged((void **)&vertices_host, num_vertices*sizeof(int)));
dim3 grid( num_of_blocks, 1, 1);
dim3 threads( num_of_threads_per_block, 1, 1);
hipEvent_t start,end;
float diff;
double time = 0;
CUDA_SAFE_CALL(hipEventCreate(&start));
CUDA_SAFE_CALL(hipEventCreate(&end));
hipLaunchKernelGGL(( init), dim3(grid),dim3(threads), 0, 0, vertices_host, 0, num_vertices);
bool stop;
int k=0;
do
{
stop = false;
CUDA_SAFE_CALL(hipMemcpyToSymbol(d_over, &stop, sizeof(bool),0, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipDeviceSynchronize());
CUDA_SAFE_CALL(hipEventRecord(start,0));
hipLaunchKernelGGL(( bfs), dim3(grid), dim3(threads), 0, 0, graph_host, vertices_host, k);
CUDA_SAFE_CALL(hipDeviceSynchronize());
CUDA_SAFE_CALL(hipEventRecord(end,0));
CUDA_SAFE_CALL(hipEventSynchronize(end));
CUDA_SAFE_CALL(hipEventElapsedTime(&diff, start, end));
time += diff*1.0e-3;
CUDA_SAFE_CALL(hipMemcpyFromSymbol(&stop, d_over, sizeof(bool),0, hipMemcpyDeviceToHost));
k++;
}while(stop);
printf("Number of iterations : %d\n",k);
for(int i = 0; i < num_vertices; i++)
{
printf("Vertex %d Distance %d\n",i,vertices_host[i]);
}
printf("Time: %f ms\n",time);
CUDA_SAFE_CALL(hipFree(vertices_host));
CUDA_SAFE_CALL(hipFree(graph_host->from));
CUDA_SAFE_CALL(hipFree(graph_host->to));
CUDA_SAFE_CALL(hipFree(graph_host));
CUDA_SAFE_CALL(hipEventDestroy(start));
CUDA_SAFE_CALL(hipEventDestroy(end));
return 0;
}
| 9345294b844330d7aa26c4cebb609750f143ab28.cu | #include <iostream>
#include <map>
#include <set>
#include <vector>
#include <algorithm>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
using namespace std;
#define CUDA_SAFE_CALL( err ) (safe_call(err, __LINE__))
#define MAX_THREADS_PER_BLOCK 1024
void safe_call(cudaError_t ret, int line)
{
if(ret!=cudaSuccess)
{
printf("Error at line %d : %s\n",line,cudaGetErrorString(ret));
exit(-1);
}
}
typedef struct __graph
{
int E;
int *from;
int *to;
} graph_t;
__device__ bool d_over;
__global__ void reset()
{
d_over = false;
}
// Print the graph
/*__global__ void temp_kernel(graph_t * graph)
{
int id = blockDim.x*blockIdx.x + threadIdx.x;
if(id == 0)
{
int j;
for(j=0; j<graph->adj_prefix_sum[graph->V-1]; j++)
printf("%d ",graph->adj[j]);
printf("\n");
}
}*/
__global__ void init(int * vertices, int starting_vertex, int num_vertices)
{
int v = blockDim.x*blockIdx.x + threadIdx.x;
if (v==starting_vertex)
vertices[v] = 0;
else if(v < num_vertices)
vertices[v] = -1;
}
__global__ void bfs(const graph_t * graph, int * vertices, int current_depth)
{
int id = blockDim.x*blockIdx.x + threadIdx.x;
if(id < graph->E)
{
int f = graph->from[id];
if(vertices[f] == current_depth)
{
int e = graph->to[id];
if(vertices[e] == -1)
{
vertices[e] = current_depth+1;
d_over = true;
}
}
}
}
int main(int argc, char * argv[])
{
static char * filename;
if(argc>2)
{
printf("./a.out <filename>\n");
exit(-1);
}
else if(argc==2)
{
filename = argv[1];
}
else
{
filename = "../data/input.txt";
}
FILE * fp = fopen(filename,"r");
if(!fp)
{
printf("Error reading file.\n");
exit(-1);
}
/* Set cuda device to K40 */
CUDA_SAFE_CALL(cudaSetDevice(0));
/* Get graph from file into CPU memory */
int num_vertices, num_edges, i, j;
fscanf(fp,"%d %d",&num_vertices,&num_edges);
graph_t *graph_host;
CUDA_SAFE_CALL(cudaMallocManaged((void **)&graph_host, sizeof(graph_t)));
graph_host->E = num_edges;
CUDA_SAFE_CALL(cudaMallocManaged((void **)&(graph_host->from), num_edges*sizeof(int)));
	CUDA_SAFE_CALL(cudaMallocManaged((void **)&(graph_host->to), num_edges*sizeof(int))); // 'to' holds vertex ids (int), not pointers
set<int> vertices;
vector< pair<int,int> > edges;
int s,d;
for(i=0; i<num_edges; i++)
{
fscanf(fp,"%d",&s);
fscanf(fp,"%d",&d);
vertices.insert(s);
vertices.insert(d);
edges.push_back(make_pair(s,d));
}
sort(edges.begin(),edges.end());
i=0;
//int l=0,r=0;
//set<int>::iterator fe=vertices.begin();
//set<int>::iterator se=vertices.begin();
for(vector< pair<int,int> >::iterator it = edges.begin() ; it != edges.end(); ++it)
{
/* while((*fe)!=(*it).first && fe!=vertices.end())
{
l++;
se = vertices.begin();
r=0;
}
while((*se)!=(*it).second && se!=vertices.end())
{
r++;
}
*/
int l = distance(vertices.begin(),vertices.find((*it).first)); // C++ set stores in sorted order by default
int r = distance(vertices.begin(),vertices.find((*it).second));
graph_host->from[i]=l;
graph_host->to[i]=r;
i++;
}
/*****************************************************
XXX: GPU does not know the size of each adjacency list.
For that, a new struct containing size of list and list
has to be created and passed to GPU memory. Too much hassle.
OR
Create 1-D array in the graph itself which contains the
size of each list.
*****************************************************/
//temp_kernel<<<1,1>>>(graph_device);
int num_of_blocks = 1;
int num_of_threads_per_block = num_edges;
if(num_edges > MAX_THREADS_PER_BLOCK)
{
num_of_blocks = (int)ceil(num_edges/(double)MAX_THREADS_PER_BLOCK);
num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
}
int * vertices_host;
CUDA_SAFE_CALL(cudaMallocManaged((void **)&vertices_host, num_vertices*sizeof(int)));
dim3 grid( num_of_blocks, 1, 1);
dim3 threads( num_of_threads_per_block, 1, 1);
cudaEvent_t start,end;
float diff;
double time = 0;
CUDA_SAFE_CALL(cudaEventCreate(&start));
CUDA_SAFE_CALL(cudaEventCreate(&end));
init<<<grid,threads>>> (vertices_host, 0, num_vertices);
bool stop;
int k=0;
do
{
stop = false;
CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_over, &stop, sizeof(bool),0, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaDeviceSynchronize());
CUDA_SAFE_CALL(cudaEventRecord(start,0));
bfs<<<grid, threads>>> (graph_host, vertices_host, k);
CUDA_SAFE_CALL(cudaDeviceSynchronize());
CUDA_SAFE_CALL(cudaEventRecord(end,0));
CUDA_SAFE_CALL(cudaEventSynchronize(end));
CUDA_SAFE_CALL(cudaEventElapsedTime(&diff, start, end));
time += diff*1.0e-3;
CUDA_SAFE_CALL(cudaMemcpyFromSymbol(&stop, d_over, sizeof(bool),0, cudaMemcpyDeviceToHost));
k++;
}while(stop);
printf("Number of iterations : %d\n",k);
for(int i = 0; i < num_vertices; i++)
{
printf("Vertex %d Distance %d\n",i,vertices_host[i]);
}
printf("Time: %f ms\n",time);
CUDA_SAFE_CALL(cudaFree(vertices_host));
CUDA_SAFE_CALL(cudaFree(graph_host->from));
CUDA_SAFE_CALL(cudaFree(graph_host->to));
CUDA_SAFE_CALL(cudaFree(graph_host));
CUDA_SAFE_CALL(cudaEventDestroy(start));
CUDA_SAFE_CALL(cudaEventDestroy(end));
return 0;
}
|
fd987758327286c19bb362671116e77f151e0a5e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2014, Julian Straub <[email protected]>
* Licensed under the MIT license. See the license file LICENSE.
*/
#include <stdint.h>
#include <stdio.h>
#include <nvidia/helper_cuda.h>
#define PI 3.141592653589793f
#define BLOCK_SIZE 256
// step size of the normals
// for PointXYZI
#define X_STEP 8
#define X_OFFSET 0
// for PointXYZ
//#define X_STEP 4
//#define X_OFFSET 0
__constant__ float c_rgbForMFaxes[7];
extern "C" void loadRGBvaluesForMFaxes()
{
float h_rgbForMFaxes[7];
for (uint32_t k=0; k<7; ++k)
{
union{
uint8_t asByte[4];
uint32_t asInt;
float asFloat;
} rgb;
rgb.asInt = 0;
if(k ==0)
rgb.asInt = (255 << 16) | (0 << 8) | 0;
else if (k ==1)
rgb.asInt = (255 << 16) | (100 << 8) | 100;
else if (k ==2)
rgb.asInt = (0 << 16) | (255 << 8) | 0;
else if (k ==3)
rgb.asInt = (100 << 16) | (255 << 8) | 100;
else if (k ==4)
rgb.asInt = (0 << 16) | (0 << 8) | 255;
else if (k ==5)
rgb.asInt = (100 << 16) | (100 << 8) | 255;
else if (k ==6)
rgb.asInt = (200 << 16) | (200 << 8) | 200;
h_rgbForMFaxes[k] = rgb.asFloat; //*(float *)(&rgb);
}
hipMemcpyToSymbol(c_rgbForMFaxes, h_rgbForMFaxes , 7* sizeof(float));
}
/*
 * Given assignments z of normals x to MF axes, compute the cost function value
*/
__global__ void robustSquaredAngleCostFct(float *cost, float *x,
unsigned short *z, float *mu, float sigma_sq, int w, int h) //, float *dbg)
{
const int DIM = 3;
//__shared__ float xi[BLOCK_SIZE*3];
__shared__ float mui[DIM*6];
__shared__ float rho[BLOCK_SIZE];
//const int tid = threadIdx.x;
const int tid = threadIdx.x + blockDim.x * threadIdx.y;
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
const int idy = threadIdx.y + blockDim.y * blockIdx.y;
const int id = idx + idy*w;
// caching
if(tid < DIM*6) mui[tid] = mu[tid];
rho[tid] = 0.0f;
__syncthreads(); // make sure that ys have been cached
if ((idx<w) && (idy<h))
{
// xi[tid*3] = x[tid];
// xi[tid*3+1] = x[tid+Nx];
// xi[tid*3+2] = x[tid+Nx*2];
unsigned short k = z[id];
if (k<6)
{
// k==6 means that xi is nan
float xiTy = x[id*X_STEP+X_OFFSET]*mui[k] + x[id*X_STEP+X_OFFSET+1]*mui[k+6]
+ x[id*X_STEP+X_OFFSET+2]*mui[k+12];
float err = acosf(max(-1.0f,min(1.0f,xiTy)));
//float errSq = err*err;
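      // rho is a Geman-McClure-style robust penalty: roughly err^2/sigma_sq
      // for small errors, saturating towards 1 for outliers.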
rho[tid] = (err*err)/(err*err+sigma_sq);
}
}
//reduction.....
// TODO: make it faster!
__syncthreads(); //sync the threads
#pragma unroll
for(int s=(BLOCK_SIZE)/2; s>0; s>>=1) {
if(tid < s)
rho[tid] += rho[tid + s];
__syncthreads();
}
if(tid==0 && rho[0]!=0 ) {
atomicAdd(&cost[0],rho[0]);
}
}
//#ifndef WEIGHTED
//extern "C" void robustSquaredAngleCostFctGPU(float *h_cost, float *d_cost,
// float *d_x, uint16_t *d_z, float *d_mu, float sigma_sq, int w, int h)
//{
//
//// float *d_dbg;
//// checkCudaErrors(hipMalloc((void **)&d_dbg, w * h * sizeof(float)));
//
// for(uint32_t k=0; k<6; ++k)
// h_cost[k] =0.0f;
// checkCudaErrors(hipMemcpy(d_cost, h_cost, 6* sizeof(float),
// hipMemcpyHostToDevice));
//
// dim3 threads(16,16,1);
// dim3 blocks(w/16+(w%16>0?1:0), h/16+(h%16>0?1:0),1);
// robustSquaredAngleCostFct<<<blocks,threads>>>(d_cost,d_x,d_z,d_mu,
// sigma_sq,w,h);//,d_dbg);
// checkCudaErrors(hipDeviceSynchronize());
//
// checkCudaErrors(hipMemcpy(h_cost, d_cost, 6*sizeof(float),
// hipMemcpyDeviceToHost));
//
//// float dbg[w*h];
//// checkCudaErrors(hipMemcpy(dbg, d_dbg, w*h* sizeof(float),
//// hipMemcpyDeviceToHost));
//// printf("%.2f, %.2f, %.2f, %.2f, %.2f, %.2f \n",dbg[0],dbg[1],dbg[2],dbg[3],dbg[4],dbg[5]);
//}
//#endif
//
/*
 * compute the Jacobian of the robust squared cost function
*/
__global__ void robustSquaredAngleCostFctJacobian(float *J, float *x,
unsigned short *z, float *mu, float sigma_sq, int w, int h)//, float *dbg)
{
const int DIM = 3;
__shared__ float mui[DIM*6];
// one J per column; BLOCK_SIZE columns; per column first 3 first col of J,
// second 3 columns second cols of J
__shared__ float J_shared[BLOCK_SIZE*3*3];
const int tid = threadIdx.x + blockDim.x*threadIdx.y;
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
const int idy = threadIdx.y + blockDim.y * blockIdx.y;
const int id = idx + idy*w;
// caching
if(tid < DIM*6) mui[tid] = mu[tid];
#pragma unroll
for(int s=0; s<3*3; ++s) {
J_shared[tid+BLOCK_SIZE*s] = 0.0f;
}
__syncthreads(); // make sure that ys have been cached
if ((idx<w) && (idy<h))
{
float xi[3];
xi[0] = x[id*X_STEP+X_OFFSET+0];
xi[1] = x[id*X_STEP+X_OFFSET+1];
xi[2] = x[id*X_STEP+X_OFFSET+2];
unsigned short k = z[id]; // which MF axis does it belong to
if (k<6)// && k!=4 && k!=5)
{
int j = k/2; // which of the rotation columns does this belong to
float sign = (- float(k%2) +0.5f)*2.0f; // sign of the axis
float xiTy = xi[0]*mui[k] + xi[1]*mui[k+6]
+ xi[2]*mui[k+12];
xiTy = max(-1.0f,min(1.0f,xiTy));
float J_ =0.0f;
if (xiTy > 1.0f-1e-10)
{
// limit according to mathematica
J_ = -2.0f/sigma_sq;
}else{
float err = acosf(xiTy);
float err_sq = err*err;
float a = sqrtf(1.0f - xiTy*xiTy);
float b = (sigma_sq + err_sq);
// obtained using Mathematica
J_ = 2.0f*( (err*err_sq/(a*b*b)) - (err/(a*b)) );
// TODO could be simplified: see writeup!
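        // Note: algebraically this reduces to J_ = -2.0f*err*sigma_sq/(a*b*b),
        // since err*err_sq/(a*b*b) - err/(a*b) = err*(err_sq - b)/(a*b*b) and
        // err_sq - b = -sigma_sq; the longer form is kept to match the
        // Mathematica output referenced above.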
}
//dbg[id] = J_;
J_shared[tid+(j*3+0)*BLOCK_SIZE] = sign*J_*xi[0];
J_shared[tid+(j*3+1)*BLOCK_SIZE] = sign*J_*xi[1];
J_shared[tid+(j*3+2)*BLOCK_SIZE] = sign*J_*xi[2];
}else{
//dbg[id] = 9999.0f;
}
}
//reduction.....
__syncthreads(); //sync the threads
#pragma unroll
for(int s=(BLOCK_SIZE)/2; s>0; s>>=1) {
if(tid < s)
#pragma unroll
for( int k=0; k<3*3; ++k) {
int tidk = k*BLOCK_SIZE+tid;
J_shared[tidk] += J_shared[tidk + s];
}
__syncthreads();
}
#pragma unroll
for( int k=0; k<3*3; ++k) {
if(tid==k && J_shared[k*BLOCK_SIZE]!=0 ) {
atomicAdd(&J[k],J_shared[k*BLOCK_SIZE]);
}
}
// //reduction.....
//#pragma unroll
// for( int k=0; k<3*3; ++k) {
// int tidk = k*BLOCK_SIZE+tid;
// __syncthreads(); //sync the threads
//#pragma unroll
// for(int s=(BLOCK_SIZE)/2; s>0; s>>=1) {
// if(tid < s)
// J_shared[tidk] += J_shared[tidk + s];
// __syncthreads();
// }
//
// if(tid==0 && J_shared[k*BLOCK_SIZE]!=0 ) {
// atomicAdd(&J[k],J_shared[k*BLOCK_SIZE]);
// }
// }
}
//#ifndef WEIGHTED
//extern "C" void robustSquaredAngleCostFctJacobianGPU(float *h_J, float *d_J,
// float *d_x, unsigned short *d_z, float *d_mu, float sigma_sq, int w, int h)
//{
//// float *d_dbg;
//// checkCudaErrors(hipMalloc((void **)&d_dbg, w * h * sizeof(float)));
//
// for(uint32_t k=0; k<3*3; ++k)
// h_J[k] =0.0f;
// checkCudaErrors(hipMemcpy(d_J, h_J, 3*3* sizeof(float),
// hipMemcpyHostToDevice));
//
// dim3 threads(16,16,1);
// dim3 blocks(w/16+(w%16>0?1:0), h/16+(h%16>0?1:0),1);
// robustSquaredAngleCostFctJacobian<<<blocks,threads>>>(d_J,d_x,d_z,d_mu,
// sigma_sq,w,h);//,d_dbg);
// checkCudaErrors(hipDeviceSynchronize());
//
// checkCudaErrors(hipMemcpy(h_J, d_J, 3*3*sizeof(float),
// hipMemcpyDeviceToHost));
//
//// float dbg[w*h];
//// checkCudaErrors(hipMemcpy(dbg, d_dbg, w*h* sizeof(float),
//// hipMemcpyDeviceToHost));
//// for (int i=20; i<h-20; ++i)
//// {
//// int offset = w*i + w/2;
//// printf("%.2f, %.2f, %.2f, %.2f, %.2f, %.2f \n",dbg[offset+0],dbg[offset+1],dbg[offset+2],dbg[offset+3],dbg[offset+4],dbg[offset+5]);
//// }
//}
//#endif
/*
 * compute normal assignments as well as the cost function value under that
 * assignment. Normal assignments are computed based on the nearest
 * distance in the arc-length sense.
*/
__global__ void robustSquaredAngleCostFctAssignment(float *cost, uint32_t* N,
float *x, unsigned short *z, float* errs, float *mu, float sigma_sq,
int w, int h)
{
const int DIM = 3;
//__shared__ float xi[BLOCK_SIZE*3];
__shared__ float mui[DIM*6];
__shared__ float rho[BLOCK_SIZE];
__shared__ uint32_t Ni[BLOCK_SIZE];
const int tid = threadIdx.x + blockDim.x * threadIdx.y;
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
const int idy = threadIdx.y + blockDim.y * blockIdx.y;
const int id = idx + idy*w;
// caching
if(tid < DIM*6) mui[tid] = mu[tid];
rho[tid] = 0.0f;
Ni[tid] = 0;
__syncthreads(); // make sure that ys have been cached
if ((idx<w) && (idy<h))
{
float xi[3];
xi[0] = x[id*X_STEP+X_OFFSET+0];
xi[1] = x[id*X_STEP+X_OFFSET+1];
xi[2] = x[id*X_STEP+X_OFFSET+2];
float err_min = 9999999.0f;
unsigned short k_min = 6;
if((xi[0]!=xi[0] || xi[1]!=xi[1] || xi[2]!=xi[2])
|| xi[0]*xi[0]+xi[1]*xi[1]+xi[2]*xi[2] < 0.9f )
{
// if nan
k_min = 6;
err_min = .1f;
//if(X_STEP == 8) x[id*X_STEP+4] = 6.0f;
}else{
#pragma unroll
for (unsigned short k=0; k<6; ++k)
{
float xiTy = xi[0]*mui[k] + xi[1]*mui[k+6] + xi[2]*mui[k+12];
float err = acosf(max(-1.0f,min(1.0f,xiTy)));
if(err_min > err)
{
err_min = err;
k_min = k;
}
}
rho[tid] = (err_min*err_min)/(err_min*err_min+sigma_sq);
Ni[tid] = 1;
}
z[id] = k_min;
errs[id] = err_min;
if(X_STEP == 8)
{
x[id*X_STEP+X_OFFSET+4] = c_rgbForMFaxes[k_min];//float(k_min);
x[id*X_STEP+X_OFFSET+5] = float(k_min);//xi[0]; //float(k_min);
x[id*X_STEP+X_OFFSET+6] = err_min; //rgb;//xi[1]; //err_min;
// x[id*X_STEP+X_OFFSET+7] = 0.0f;//err_min; //err_min;
}
}
//reduction.....
// TODO: make it faster!
__syncthreads(); //sync the threads
#pragma unroll
for(int s=(BLOCK_SIZE)/2; s>0; s>>=1) {
if(tid < s)
{
rho[tid] += rho[tid + s];
Ni[tid] += Ni[tid + s];
}
__syncthreads();
}
if(tid==0 && rho[0]!=0.0f) {
atomicAdd(&cost[0],rho[0]);
}
if(tid==1 && Ni[0]!=0 ) {
atomicAdd(N,Ni[0]);
}
// __syncthreads(); //sync the threads
//#pragma unroll
// for(int s=(BLOCK_SIZE)/2; s>0; s>>=1) {
// if(tid < s)
// Ni[tid] += Ni[tid + s];
// __syncthreads();
// }
//
// if(tid==0 && Ni[0]!=0 ) {
// atomicAdd(N,Ni[0]);
// }
}
//#ifndef WEIGHTED
//extern "C" void robustSquaredAngleCostFctAssignmentGPU(float *h_cost, float *d_cost,
// int *h_N, int *d_N, float *d_x, uint16_t *d_z, float *d_mu,
// float sigma_sq, int w, int h)
//{
//
// for(uint32_t k=0; k<6; ++k)
// h_cost[k] =0.0f;
// *h_N =0;
// checkCudaErrors(hipMemcpy(d_cost, h_cost, 6* sizeof(float),
// hipMemcpyHostToDevice));
// checkCudaErrors(hipMemcpy(d_N, h_N, sizeof(int),
// hipMemcpyHostToDevice));
//
// dim3 threads(16,16,1);
// dim3 blocks(w/16+(w%16>0?1:0), h/16+(h%16>0?1:0),1);
// robustSquaredAngleCostFctAssignment<<<blocks,threads>>>(d_cost,d_N,d_x,d_z,d_mu,
// sigma_sq,w,h);
// checkCudaErrors(hipDeviceSynchronize());
//
// checkCudaErrors(hipMemcpy(h_cost, d_cost, 6*sizeof(float),
// hipMemcpyDeviceToHost));
// checkCudaErrors(hipMemcpy(h_N, d_N, sizeof(int),
// hipMemcpyDeviceToHost));
//
//}
//#endif
#include "optimizationSO3_weighted.cu"
| fd987758327286c19bb362671116e77f151e0a5e.cu | /* Copyright (c) 2014, Julian Straub <[email protected]>
* Licensed under the MIT license. See the license file LICENSE.
*/
#include <stdint.h>
#include <stdio.h>
#include <nvidia/helper_cuda.h>
#define PI 3.141592653589793f
#define BLOCK_SIZE 256
// step size of the normals
// for PointXYZI
#define X_STEP 8
#define X_OFFSET 0
// for PointXYZ
//#define X_STEP 4
//#define X_OFFSET 0
__constant__ float c_rgbForMFaxes[7];
extern "C" void loadRGBvaluesForMFaxes()
{
float h_rgbForMFaxes[7];
for (uint32_t k=0; k<7; ++k)
{
union{
uint8_t asByte[4];
uint32_t asInt;
float asFloat;
} rgb;
rgb.asInt = 0;
if(k ==0)
rgb.asInt = (255 << 16) | (0 << 8) | 0;
else if (k ==1)
rgb.asInt = (255 << 16) | (100 << 8) | 100;
else if (k ==2)
rgb.asInt = (0 << 16) | (255 << 8) | 0;
else if (k ==3)
rgb.asInt = (100 << 16) | (255 << 8) | 100;
else if (k ==4)
rgb.asInt = (0 << 16) | (0 << 8) | 255;
else if (k ==5)
rgb.asInt = (100 << 16) | (100 << 8) | 255;
else if (k ==6)
rgb.asInt = (200 << 16) | (200 << 8) | 200;
h_rgbForMFaxes[k] = rgb.asFloat; //*(float *)(&rgb);
}
cudaMemcpyToSymbol(c_rgbForMFaxes, h_rgbForMFaxes , 7* sizeof(float));
}
/*
 * Given assignments z of normals x to MF axes, compute the cost function value
*/
__global__ void robustSquaredAngleCostFct(float *cost, float *x,
unsigned short *z, float *mu, float sigma_sq, int w, int h) //, float *dbg)
{
const int DIM = 3;
//__shared__ float xi[BLOCK_SIZE*3];
__shared__ float mui[DIM*6];
__shared__ float rho[BLOCK_SIZE];
//const int tid = threadIdx.x;
const int tid = threadIdx.x + blockDim.x * threadIdx.y;
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
const int idy = threadIdx.y + blockDim.y * blockIdx.y;
const int id = idx + idy*w;
// caching
if(tid < DIM*6) mui[tid] = mu[tid];
rho[tid] = 0.0f;
__syncthreads(); // make sure that ys have been cached
if ((idx<w) && (idy<h))
{
// xi[tid*3] = x[tid];
// xi[tid*3+1] = x[tid+Nx];
// xi[tid*3+2] = x[tid+Nx*2];
unsigned short k = z[id];
if (k<6)
{
// k==6 means that xi is nan
float xiTy = x[id*X_STEP+X_OFFSET]*mui[k] + x[id*X_STEP+X_OFFSET+1]*mui[k+6]
+ x[id*X_STEP+X_OFFSET+2]*mui[k+12];
float err = acosf(max(-1.0f,min(1.0f,xiTy)));
//float errSq = err*err;
rho[tid] = (err*err)/(err*err+sigma_sq);
}
}
//reduction.....
// TODO: make it faster!
__syncthreads(); //sync the threads
#pragma unroll
for(int s=(BLOCK_SIZE)/2; s>0; s>>=1) {
if(tid < s)
rho[tid] += rho[tid + s];
__syncthreads();
}
if(tid==0 && rho[0]!=0 ) {
atomicAdd(&cost[0],rho[0]);
}
}
//#ifndef WEIGHTED
//extern "C" void robustSquaredAngleCostFctGPU(float *h_cost, float *d_cost,
// float *d_x, uint16_t *d_z, float *d_mu, float sigma_sq, int w, int h)
//{
//
//// float *d_dbg;
//// checkCudaErrors(cudaMalloc((void **)&d_dbg, w * h * sizeof(float)));
//
// for(uint32_t k=0; k<6; ++k)
// h_cost[k] =0.0f;
// checkCudaErrors(cudaMemcpy(d_cost, h_cost, 6* sizeof(float),
// cudaMemcpyHostToDevice));
//
// dim3 threads(16,16,1);
// dim3 blocks(w/16+(w%16>0?1:0), h/16+(h%16>0?1:0),1);
// robustSquaredAngleCostFct<<<blocks,threads>>>(d_cost,d_x,d_z,d_mu,
// sigma_sq,w,h);//,d_dbg);
// checkCudaErrors(cudaDeviceSynchronize());
//
// checkCudaErrors(cudaMemcpy(h_cost, d_cost, 6*sizeof(float),
// cudaMemcpyDeviceToHost));
//
//// float dbg[w*h];
//// checkCudaErrors(cudaMemcpy(dbg, d_dbg, w*h* sizeof(float),
//// cudaMemcpyDeviceToHost));
//// printf("%.2f, %.2f, %.2f, %.2f, %.2f, %.2f \n",dbg[0],dbg[1],dbg[2],dbg[3],dbg[4],dbg[5]);
//}
//#endif
//
/*
 * compute the Jacobian of the robust squared cost function
*/
__global__ void robustSquaredAngleCostFctJacobian(float *J, float *x,
unsigned short *z, float *mu, float sigma_sq, int w, int h)//, float *dbg)
{
const int DIM = 3;
__shared__ float mui[DIM*6];
// one J per column; BLOCK_SIZE columns; per column first 3 first col of J,
// second 3 columns second cols of J
__shared__ float J_shared[BLOCK_SIZE*3*3];
const int tid = threadIdx.x + blockDim.x*threadIdx.y;
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
const int idy = threadIdx.y + blockDim.y * blockIdx.y;
const int id = idx + idy*w;
// caching
if(tid < DIM*6) mui[tid] = mu[tid];
#pragma unroll
for(int s=0; s<3*3; ++s) {
J_shared[tid+BLOCK_SIZE*s] = 0.0f;
}
__syncthreads(); // make sure that ys have been cached
if ((idx<w) && (idy<h))
{
float xi[3];
xi[0] = x[id*X_STEP+X_OFFSET+0];
xi[1] = x[id*X_STEP+X_OFFSET+1];
xi[2] = x[id*X_STEP+X_OFFSET+2];
unsigned short k = z[id]; // which MF axis does it belong to
if (k<6)// && k!=4 && k!=5)
{
int j = k/2; // which of the rotation columns does this belong to
float sign = (- float(k%2) +0.5f)*2.0f; // sign of the axis
float xiTy = xi[0]*mui[k] + xi[1]*mui[k+6]
+ xi[2]*mui[k+12];
xiTy = max(-1.0f,min(1.0f,xiTy));
float J_ =0.0f;
if (xiTy > 1.0f-1e-10)
{
// limit according to mathematica
J_ = -2.0f/sigma_sq;
}else{
float err = acosf(xiTy);
float err_sq = err*err;
float a = sqrtf(1.0f - xiTy*xiTy);
float b = (sigma_sq + err_sq);
// obtained using Mathematica
J_ = 2.0f*( (err*err_sq/(a*b*b)) - (err/(a*b)) );
// TODO could be simplified: see writeup!
}
//dbg[id] = J_;
J_shared[tid+(j*3+0)*BLOCK_SIZE] = sign*J_*xi[0];
J_shared[tid+(j*3+1)*BLOCK_SIZE] = sign*J_*xi[1];
J_shared[tid+(j*3+2)*BLOCK_SIZE] = sign*J_*xi[2];
}else{
//dbg[id] = 9999.0f;
}
}
//reduction.....
__syncthreads(); //sync the threads
#pragma unroll
for(int s=(BLOCK_SIZE)/2; s>0; s>>=1) {
if(tid < s)
#pragma unroll
for( int k=0; k<3*3; ++k) {
int tidk = k*BLOCK_SIZE+tid;
J_shared[tidk] += J_shared[tidk + s];
}
__syncthreads();
}
#pragma unroll
for( int k=0; k<3*3; ++k) {
if(tid==k && J_shared[k*BLOCK_SIZE]!=0 ) {
atomicAdd(&J[k],J_shared[k*BLOCK_SIZE]);
}
}
// //reduction.....
//#pragma unroll
// for( int k=0; k<3*3; ++k) {
// int tidk = k*BLOCK_SIZE+tid;
// __syncthreads(); //sync the threads
//#pragma unroll
// for(int s=(BLOCK_SIZE)/2; s>0; s>>=1) {
// if(tid < s)
// J_shared[tidk] += J_shared[tidk + s];
// __syncthreads();
// }
//
// if(tid==0 && J_shared[k*BLOCK_SIZE]!=0 ) {
// atomicAdd(&J[k],J_shared[k*BLOCK_SIZE]);
// }
// }
}
//#ifndef WEIGHTED
//extern "C" void robustSquaredAngleCostFctJacobianGPU(float *h_J, float *d_J,
// float *d_x, unsigned short *d_z, float *d_mu, float sigma_sq, int w, int h)
//{
//// float *d_dbg;
//// checkCudaErrors(cudaMalloc((void **)&d_dbg, w * h * sizeof(float)));
//
// for(uint32_t k=0; k<3*3; ++k)
// h_J[k] =0.0f;
// checkCudaErrors(cudaMemcpy(d_J, h_J, 3*3* sizeof(float),
// cudaMemcpyHostToDevice));
//
// dim3 threads(16,16,1);
// dim3 blocks(w/16+(w%16>0?1:0), h/16+(h%16>0?1:0),1);
// robustSquaredAngleCostFctJacobian<<<blocks,threads>>>(d_J,d_x,d_z,d_mu,
// sigma_sq,w,h);//,d_dbg);
// checkCudaErrors(cudaDeviceSynchronize());
//
// checkCudaErrors(cudaMemcpy(h_J, d_J, 3*3*sizeof(float),
// cudaMemcpyDeviceToHost));
//
//// float dbg[w*h];
//// checkCudaErrors(cudaMemcpy(dbg, d_dbg, w*h* sizeof(float),
//// cudaMemcpyDeviceToHost));
//// for (int i=20; i<h-20; ++i)
//// {
//// int offset = w*i + w/2;
//// printf("%.2f, %.2f, %.2f, %.2f, %.2f, %.2f \n",dbg[offset+0],dbg[offset+1],dbg[offset+2],dbg[offset+3],dbg[offset+4],dbg[offset+5]);
//// }
//}
//#endif
/*
 * compute normal assignments as well as the cost function value under that
 * assignment. Normal assignments are computed based on the nearest
 * distance in the arc-length sense.
*/
__global__ void robustSquaredAngleCostFctAssignment(float *cost, uint32_t* N,
float *x, unsigned short *z, float* errs, float *mu, float sigma_sq,
int w, int h)
{
const int DIM = 3;
//__shared__ float xi[BLOCK_SIZE*3];
__shared__ float mui[DIM*6];
__shared__ float rho[BLOCK_SIZE];
__shared__ uint32_t Ni[BLOCK_SIZE];
const int tid = threadIdx.x + blockDim.x * threadIdx.y;
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
const int idy = threadIdx.y + blockDim.y * blockIdx.y;
const int id = idx + idy*w;
// caching
if(tid < DIM*6) mui[tid] = mu[tid];
rho[tid] = 0.0f;
Ni[tid] = 0;
__syncthreads(); // make sure that ys have been cached
if ((idx<w) && (idy<h))
{
float xi[3];
xi[0] = x[id*X_STEP+X_OFFSET+0];
xi[1] = x[id*X_STEP+X_OFFSET+1];
xi[2] = x[id*X_STEP+X_OFFSET+2];
float err_min = 9999999.0f;
unsigned short k_min = 6;
if((xi[0]!=xi[0] || xi[1]!=xi[1] || xi[2]!=xi[2])
|| xi[0]*xi[0]+xi[1]*xi[1]+xi[2]*xi[2] < 0.9f )
{
// if nan
k_min = 6;
err_min = .1f;
//if(X_STEP == 8) x[id*X_STEP+4] = 6.0f;
}else{
#pragma unroll
for (unsigned short k=0; k<6; ++k)
{
float xiTy = xi[0]*mui[k] + xi[1]*mui[k+6] + xi[2]*mui[k+12];
float err = acosf(max(-1.0f,min(1.0f,xiTy)));
if(err_min > err)
{
err_min = err;
k_min = k;
}
}
rho[tid] = (err_min*err_min)/(err_min*err_min+sigma_sq);
Ni[tid] = 1;
}
z[id] = k_min;
errs[id] = err_min;
if(X_STEP == 8)
{
x[id*X_STEP+X_OFFSET+4] = c_rgbForMFaxes[k_min];//float(k_min);
x[id*X_STEP+X_OFFSET+5] = float(k_min);//xi[0]; //float(k_min);
x[id*X_STEP+X_OFFSET+6] = err_min; //rgb;//xi[1]; //err_min;
// x[id*X_STEP+X_OFFSET+7] = 0.0f;//err_min; //err_min;
}
}
//reduction.....
// TODO: make it faster!
__syncthreads(); //sync the threads
#pragma unroll
for(int s=(BLOCK_SIZE)/2; s>0; s>>=1) {
if(tid < s)
{
rho[tid] += rho[tid + s];
Ni[tid] += Ni[tid + s];
}
__syncthreads();
}
if(tid==0 && rho[0]!=0.0f) {
atomicAdd(&cost[0],rho[0]);
}
if(tid==1 && Ni[0]!=0 ) {
atomicAdd(N,Ni[0]);
}
// __syncthreads(); //sync the threads
//#pragma unroll
// for(int s=(BLOCK_SIZE)/2; s>0; s>>=1) {
// if(tid < s)
// Ni[tid] += Ni[tid + s];
// __syncthreads();
// }
//
// if(tid==0 && Ni[0]!=0 ) {
// atomicAdd(N,Ni[0]);
// }
}
//#ifndef WEIGHTED
//extern "C" void robustSquaredAngleCostFctAssignmentGPU(float *h_cost, float *d_cost,
// int *h_N, int *d_N, float *d_x, uint16_t *d_z, float *d_mu,
// float sigma_sq, int w, int h)
//{
//
// for(uint32_t k=0; k<6; ++k)
// h_cost[k] =0.0f;
// *h_N =0;
// checkCudaErrors(cudaMemcpy(d_cost, h_cost, 6* sizeof(float),
// cudaMemcpyHostToDevice));
// checkCudaErrors(cudaMemcpy(d_N, h_N, sizeof(int),
// cudaMemcpyHostToDevice));
//
// dim3 threads(16,16,1);
// dim3 blocks(w/16+(w%16>0?1:0), h/16+(h%16>0?1:0),1);
// robustSquaredAngleCostFctAssignment<<<blocks,threads>>>(d_cost,d_N,d_x,d_z,d_mu,
// sigma_sq,w,h);
// checkCudaErrors(cudaDeviceSynchronize());
//
// checkCudaErrors(cudaMemcpy(h_cost, d_cost, 6*sizeof(float),
// cudaMemcpyDeviceToHost));
// checkCudaErrors(cudaMemcpy(h_N, d_N, sizeof(int),
// cudaMemcpyDeviceToHost));
//
//}
//#endif
#include "optimizationSO3_weighted.cu"
|
01e42d1ddccc2d5c207dd30bd2f8bb447b0fb31e.hip | // !!! This is a file automatically generated by hipify!!!
#include <cuda_runtimecu.h>
#include <sentinel.h>
#include <stdlibcu.h>
#include <stdiocu.h>
hipError_t crtdefs_test1();
hipError_t ctype_test1();
hipError_t dirent_test1();
hipError_t errno_test1();
hipError_t fcntl_test1();
hipError_t fsystem_test1();
hipError_t grp_test1();
hipError_t host_test1();
hipError_t pwd_test1();
hipError_t regex_test1();
hipError_t sentinel_test1();
hipError_t setjmp_test1();
hipError_t stddef_test1();
hipError_t stdio_test1();
hipError_t stdio_64bit();
hipError_t stdio_ganging();
hipError_t stdio_scanf();
hipError_t stdlib_test1();
hipError_t stdlib_strtol();
hipError_t stdlib_strtoq();
hipError_t string_test1();
hipError_t time_test1();
hipError_t unistd_test1();
#if _HASPAUSE
#define mainPause(fmt) { printf(fmt"\n"); char c; scanf("%c", &c); }
#else
#define mainPause(fmt) { printf(fmt"\n"); }
#endif
int main(int argc, char **argv) {
int testId = argv[1] ? atoi(argv[1]) : 11; // 23;
// Choose which GPU to run on, change this on a multi-GPU system.
hipError_t cudaStatus = hipSetDevice(gpuGetMaxGflopsDevice());
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
goto Error;
}
cudaErrorCheck(hipDeviceSetLimit(hipLimitStackSize, 1024 * 5));
sentinelServerInitialize();
sentinelRegisterFileUtils();
// Launch test
switch (testId) {
case 0: mainPause("Press any key to continue."); break;
case 1: cudaStatus = crtdefs_test1(); break;
case 2: cudaStatus = ctype_test1(); break;
case 3: cudaStatus = dirent_test1(); break;
case 4: cudaStatus = errno_test1(); break;
case 5: cudaStatus = fcntl_test1(); break;
case 6: cudaStatus = fsystem_test1(); break;
case 7: cudaStatus = grp_test1(); break;
case 8: cudaStatus = host_test1(); break;
case 9: cudaStatus = pwd_test1(); break;
case 10: cudaStatus = regex_test1(); break;
case 11: cudaStatus = sentinel_test1(); break;
case 12: cudaStatus = setjmp_test1(); break;
case 13: cudaStatus = stddef_test1(); break;
case 14: cudaStatus = stdio_test1(); break; // assert
case 15: cudaStatus = stdio_64bit(); break;
case 16: cudaStatus = stdio_ganging(); break;
case 17: cudaStatus = stdio_scanf(); break;
case 18: cudaStatus = stdlib_test1(); break;
case 19: cudaStatus = stdlib_strtol(); break;
case 20: cudaStatus = stdlib_strtoq(); break;
case 21: cudaStatus = string_test1(); break;
case 22: cudaStatus = time_test1(); break;
case 23: cudaStatus = unistd_test1(); break; //failed --missing device, throws on fast run
// default
default: cudaStatus = crtdefs_test1(); break;
}
if (cudaStatus != hipSuccess) {
fprintf(stderr, "failed! %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// finish
mainPause("SUCCESS");
Error:
sentinelServerShutdown();
// close
if (cudaStatus != hipSuccess) {
// finish
mainPause("ERROR");
return 1;
}
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!\n");
return 1;
}
return 0;
}
| 01e42d1ddccc2d5c207dd30bd2f8bb447b0fb31e.cu | #include <cuda_runtimecu.h>
#include <sentinel.h>
#include <stdlibcu.h>
#include <stdiocu.h>
cudaError_t crtdefs_test1();
cudaError_t ctype_test1();
cudaError_t dirent_test1();
cudaError_t errno_test1();
cudaError_t fcntl_test1();
cudaError_t fsystem_test1();
cudaError_t grp_test1();
cudaError_t host_test1();
cudaError_t pwd_test1();
cudaError_t regex_test1();
cudaError_t sentinel_test1();
cudaError_t setjmp_test1();
cudaError_t stddef_test1();
cudaError_t stdio_test1();
cudaError_t stdio_64bit();
cudaError_t stdio_ganging();
cudaError_t stdio_scanf();
cudaError_t stdlib_test1();
cudaError_t stdlib_strtol();
cudaError_t stdlib_strtoq();
cudaError_t string_test1();
cudaError_t time_test1();
cudaError_t unistd_test1();
#if _HASPAUSE
#define mainPause(fmt) { printf(fmt"\n"); char c; scanf("%c", &c); }
#else
#define mainPause(fmt) { printf(fmt"\n"); }
#endif
int main(int argc, char **argv) {
int testId = argv[1] ? atoi(argv[1]) : 11; // 23;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaError_t cudaStatus = cudaSetDevice(gpuGetMaxGflopsDevice());
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
goto Error;
}
cudaErrorCheck(cudaDeviceSetLimit(cudaLimitStackSize, 1024 * 5));
sentinelServerInitialize();
sentinelRegisterFileUtils();
// Launch test
switch (testId) {
case 0: mainPause("Press any key to continue."); break;
case 1: cudaStatus = crtdefs_test1(); break;
case 2: cudaStatus = ctype_test1(); break;
case 3: cudaStatus = dirent_test1(); break;
case 4: cudaStatus = errno_test1(); break;
case 5: cudaStatus = fcntl_test1(); break;
case 6: cudaStatus = fsystem_test1(); break;
case 7: cudaStatus = grp_test1(); break;
case 8: cudaStatus = host_test1(); break;
case 9: cudaStatus = pwd_test1(); break;
case 10: cudaStatus = regex_test1(); break;
case 11: cudaStatus = sentinel_test1(); break;
case 12: cudaStatus = setjmp_test1(); break;
case 13: cudaStatus = stddef_test1(); break;
case 14: cudaStatus = stdio_test1(); break; // assert
case 15: cudaStatus = stdio_64bit(); break;
case 16: cudaStatus = stdio_ganging(); break;
case 17: cudaStatus = stdio_scanf(); break;
case 18: cudaStatus = stdlib_test1(); break;
case 19: cudaStatus = stdlib_strtol(); break;
case 20: cudaStatus = stdlib_strtoq(); break;
case 21: cudaStatus = string_test1(); break;
case 22: cudaStatus = time_test1(); break;
case 23: cudaStatus = unistd_test1(); break; //failed --missing device, throws on fast run
// default
default: cudaStatus = crtdefs_test1(); break;
}
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "failed! %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// finish
mainPause("SUCCESS");
Error:
sentinelServerShutdown();
// close
if (cudaStatus != cudaSuccess) {
// finish
mainPause("ERROR");
return 1;
}
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!\n");
return 1;
}
return 0;
}
|
e72f7e3e32101fea05e13689a9d926f86e6b770a.hip | // !!! This is a file automatically generated by hipify!!!
//Example: Application using C++ and the CUSPARSE library
//-------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include "hipsparse.h"
#define CLEANUP(s) \
do { \
printf ("%s\n", s); \
if (yHostPtr) free(yHostPtr); \
if (zHostPtr) free(zHostPtr); \
if (xIndHostPtr) free(xIndHostPtr); \
if (xValHostPtr) free(xValHostPtr); \
if (cooRowIndexHostPtr) free(cooRowIndexHostPtr);\
if (cooColIndexHostPtr) free(cooColIndexHostPtr);\
if (cooValHostPtr) free(cooValHostPtr); \
if (y) hipFree(y); \
if (z) hipFree(z); \
if (xInd) hipFree(xInd); \
if (xVal) hipFree(xVal); \
if (csrRowPtr) hipFree(csrRowPtr); \
if (cooRowIndex) hipFree(cooRowIndex); \
if (cooColIndex) hipFree(cooColIndex); \
if (cooVal) hipFree(cooVal); \
if (descr) hipsparseDestroyMatDescr(descr);\
if (handle) hipsparseDestroy(handle); \
hipDeviceReset(); \
fflush (stdout); \
} while (0)
int main(){
hipError_t cudaStat1, cudaStat2, cudaStat3, cudaStat4, cudaStat5, cudaStat6;
hipsparseStatus_t status;
hipsparseHandle_t handle=0;
hipsparseMatDescr_t descr=0;
int * cooRowIndexHostPtr=0;
int * cooColIndexHostPtr=0;
double * cooValHostPtr=0;
int * cooRowIndex=0;
int * cooColIndex=0;
double * cooVal=0;
int * xIndHostPtr=0;
double * xValHostPtr=0;
double * yHostPtr=0;
int * xInd=0;
double * xVal=0;
double * y=0;
int * csrRowPtr=0;
double * zHostPtr=0;
double * z=0;
int n, nnz, nnz_vector;
double dzero =0.0;
double dtwo =2.0;
double dthree=3.0;
double dfive =5.0;
printf("testing example\n");
/* create the following sparse test matrix in COO format */
/* |1.0 2.0 3.0|
| 4.0 |
|5.0 6.0 7.0|
| 8.0 9.0| */
n=4; nnz=9;
cooRowIndexHostPtr = (int *) malloc(nnz*sizeof(cooRowIndexHostPtr[0]));
cooColIndexHostPtr = (int *) malloc(nnz*sizeof(cooColIndexHostPtr[0]));
cooValHostPtr = (double *)malloc(nnz*sizeof(cooValHostPtr[0]));
if ((!cooRowIndexHostPtr) || (!cooColIndexHostPtr) || (!cooValHostPtr)){
CLEANUP("Host malloc failed (matrix)");
return 1;
}
cooRowIndexHostPtr[0]=0; cooColIndexHostPtr[0]=0; cooValHostPtr[0]=1.0;
cooRowIndexHostPtr[1]=0; cooColIndexHostPtr[1]=2; cooValHostPtr[1]=2.0;
cooRowIndexHostPtr[2]=0; cooColIndexHostPtr[2]=3; cooValHostPtr[2]=3.0;
cooRowIndexHostPtr[3]=1; cooColIndexHostPtr[3]=1; cooValHostPtr[3]=4.0;
cooRowIndexHostPtr[4]=2; cooColIndexHostPtr[4]=0; cooValHostPtr[4]=5.0;
cooRowIndexHostPtr[5]=2; cooColIndexHostPtr[5]=2; cooValHostPtr[5]=6.0;
cooRowIndexHostPtr[6]=2; cooColIndexHostPtr[6]=3; cooValHostPtr[6]=7.0;
cooRowIndexHostPtr[7]=3; cooColIndexHostPtr[7]=1; cooValHostPtr[7]=8.0;
cooRowIndexHostPtr[8]=3; cooColIndexHostPtr[8]=3; cooValHostPtr[8]=9.0;
/* //print the matrix printf("Input data:\n");
for (int i=0; i<nnz; i++){
printf("cooRowIndexHostPtr[%d]=%d ",i,cooRowIndexHostPtr[i]);
printf("cooColIndexHostPtr[%d]=%d ",i,cooColIndexHostPtr[i]);
printf("cooValHostPtr[%d]=%f \n",i,cooValHostPtr[i]); }
*/
/* create a sparse and dense vector */
/* xVal= [100.0 200.0 400.0] (sparse)
xInd= [0 1 3 ]
y = [10.0 20.0 30.0 40.0 | 50.0 60.0 70.0 80.0] (dense)
*/
nnz_vector = 3;
xIndHostPtr = (int *) malloc(nnz_vector*sizeof(xIndHostPtr[0]));
xValHostPtr = (double *)malloc(nnz_vector*sizeof(xValHostPtr[0]));
yHostPtr = (double *)malloc(2*n *sizeof(yHostPtr[0]));
zHostPtr = (double *)malloc(2*(n+1) *sizeof(zHostPtr[0]));
if((!xIndHostPtr) || (!xValHostPtr) || (!yHostPtr) || (!zHostPtr)){
CLEANUP("Host malloc failed (vectors)");
return 1;
}
yHostPtr[0] = 10.0; xIndHostPtr[0]=0; xValHostPtr[0]=100.0;
yHostPtr[1] = 20.0; xIndHostPtr[1]=1; xValHostPtr[1]=200.0;
yHostPtr[2] = 30.0;
yHostPtr[3] = 40.0; xIndHostPtr[2]=3; xValHostPtr[2]=400.0;
yHostPtr[4] = 50.0;
yHostPtr[5] = 60.0;
yHostPtr[6] = 70.0;
yHostPtr[7] = 80.0;
/* //print the vectors
for (int j=0; j<2; j++){
for (int i=0; i<n; i++){
printf("yHostPtr[%d,%d]=%f\n",i,j,yHostPtr[i+n*j]);
}
}
for (int i=0; i<nnz_vector; i++){
printf("xIndHostPtr[%d]=%d ",i,xIndHostPtr[i]);
printf("xValHostPtr[%d]=%f\n",i,xValHostPtr[i]);
} */
/* allocate GPU memory and copy the matrix and vectors into it */
cudaStat1 = hipMalloc((void**)&cooRowIndex,nnz*sizeof(cooRowIndex[0]));
cudaStat2 = hipMalloc((void**)&cooColIndex,nnz*sizeof(cooColIndex[0]));
cudaStat3 = hipMalloc((void**)&cooVal, nnz*sizeof(cooVal[0]));
cudaStat4 = hipMalloc((void**)&y, 2*n*sizeof(y[0]));
cudaStat5 = hipMalloc((void**)&xInd,nnz_vector*sizeof(xInd[0]));
cudaStat6 = hipMalloc((void**)&xVal,nnz_vector*sizeof(xVal[0]));
if ((cudaStat1 != hipSuccess) ||
(cudaStat2 != hipSuccess) ||
(cudaStat3 != hipSuccess) ||
(cudaStat4 != hipSuccess) ||
(cudaStat5 != hipSuccess) ||
(cudaStat6 != hipSuccess)) {
CLEANUP("Device malloc failed");
return 1;
}
cudaStat1 = hipMemcpy(cooRowIndex, cooRowIndexHostPtr,
(size_t)(nnz*sizeof(cooRowIndex[0])),
hipMemcpyHostToDevice);
cudaStat2 = hipMemcpy(cooColIndex, cooColIndexHostPtr, (size_t)(nnz*sizeof(cooColIndex[0])), hipMemcpyHostToDevice);
cudaStat3 = hipMemcpy(cooVal, cooValHostPtr, (size_t)(nnz*sizeof(cooVal[0])), hipMemcpyHostToDevice);
cudaStat4 = hipMemcpy(y, yHostPtr, (size_t)(2*n*sizeof(y[0])), hipMemcpyHostToDevice);
cudaStat5 = hipMemcpy(xInd, xIndHostPtr, (size_t)(nnz_vector*sizeof(xInd[0])), hipMemcpyHostToDevice);
cudaStat6 = hipMemcpy(xVal, xValHostPtr, (size_t)(nnz_vector*sizeof(xVal[0])), hipMemcpyHostToDevice);
if ((cudaStat1 != hipSuccess) || (cudaStat2 != hipSuccess) || (cudaStat3 != hipSuccess) || (cudaStat4 != hipSuccess) || (cudaStat5 != hipSuccess) || (cudaStat6 != hipSuccess)) {
CLEANUP("Memcpy from Host to Device failed");
return 1;
}
/* initialize cusparse library */
status= hipsparseCreate(&handle);
if (status != HIPSPARSE_STATUS_SUCCESS) {
CLEANUP("CUSPARSE Library initialization failed");
return 1;
}
/* create and setup matrix descriptor */
status= hipsparseCreateMatDescr(&descr);
if (status != HIPSPARSE_STATUS_SUCCESS) {
CLEANUP("Matrix descriptor initialization failed");
return 1;
}
hipsparseSetMatType(descr,HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descr,HIPSPARSE_INDEX_BASE_ZERO);
/* exercise conversion routines (convert matrix from COO 2 CSR format) */
cudaStat1 = hipMalloc((void**)&csrRowPtr,(n+1)*sizeof(csrRowPtr[0]));
if (cudaStat1 != hipSuccess) {
CLEANUP("Device malloc failed (csrRowPtr)");
return 1;
}
status= hipsparseXcoo2csr(handle,cooRowIndex,nnz,n, csrRowPtr,HIPSPARSE_INDEX_BASE_ZERO);
if (status != HIPSPARSE_STATUS_SUCCESS) {
CLEANUP("Conversion from COO to CSR format failed");
return 1;
}
//csrRowPtr = [0 3 4 7 9]
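    /* Worked check: the COO row indices give per-row counts 3, 1, 3, 2 for
       rows 0..3, and csrRowPtr is their exclusive prefix sum with nnz
       appended: [0, 0+3, 3+1, 4+3, 7+2] = [0 3 4 7 9]. */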
// The following test only works for compute capability 1.3 and above
// because it needs double precision.
int devId;
hipDeviceProp_t prop;
hipError_t cudaStat;
cudaStat = hipGetDevice(&devId);
if (hipSuccess != cudaStat){
CLEANUP("hipGetDevice failed");
printf("Error: cudaStat %d, %s\n", cudaStat, hipGetErrorString(cudaStat));
return 1;
}
cudaStat = hipGetDeviceProperties( &prop, devId) ;
if (hipSuccess != cudaStat){
CLEANUP("hipGetDeviceProperties failed");
printf("Error: cudaStat %d, %s\n", cudaStat, hipGetErrorString(cudaStat));
return 1;
}
int cc = 100*prop.major + 10*prop.minor;
if (cc < 130){
CLEANUP("waive the test because only sm13 and above are supported\n");
printf("the device has compute capability %d\n", cc);
printf("example test WAIVED"); return 2;
}
/* exercise Level 1 routines (scatter vector elements) */
status= cusparseDsctr(handle, nnz_vector, xVal, xInd, &y[n], HIPSPARSE_INDEX_BASE_ZERO);
if (status != HIPSPARSE_STATUS_SUCCESS) {
CLEANUP("Scatter from sparse to dense vector failed");
return 1;
}
//y = [10 20 30 40 | 100 200 70 400]
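    /* Worked check: the scatter writes xVal[i] into y[n + xInd[i]], i.e.
       y[4]=100, y[5]=200, y[7]=400, while y[6] keeps its old value 70. */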
/* exercise Level 2 routines (csrmv) */
status= hipsparseDcsrmv(handle,HIPSPARSE_OPERATION_NON_TRANSPOSE, n, n, nnz,
&dtwo, descr, cooVal, csrRowPtr, cooColIndex,
&y[0], &dthree, &y[n]);
if (status != HIPSPARSE_STATUS_SUCCESS) {
CLEANUP("Matrix-vector multiplication failed");
return 1;
}
//y = [10 20 30 40 | 680 760 1230 2240]
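    /* Worked check: the csrmv call computes y[4..7] = 2*A*y[0..3] + 3*y[4..7]
       with y[0..3] = [10 20 30 40] and y[4..7] = [100 200 70 400]:
       row 0: 2*(1*10 + 2*30 + 3*40) + 3*100 = 380 + 300 = 680
       row 1: 2*(4*20) + 3*200 = 160 + 600 = 760
       row 2: 2*(5*10 + 6*30 + 7*40) + 3*70 = 1020 + 210 = 1230
       row 3: 2*(8*20 + 9*40) + 3*400 = 1040 + 1200 = 2240 */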
hipMemcpy(yHostPtr, y, (size_t)(2*n*sizeof(y[0])), hipMemcpyDeviceToHost);
/* printf("Intermediate results:\n");
for (int j=0; j<2; j++){
for (int i=0; i<n; i++){
printf("yHostPtr[%d,%d]=%f\n",i,j,yHostPtr[i+n*j]);
}
} */
/* exercise Level 3 routines (csrmm) */
cudaStat1 = hipMalloc((void**)&z, 2*(n+1)*sizeof(z[0]));
if (cudaStat1 != hipSuccess) {
CLEANUP("Device malloc failed (z)"); return 1;
}
cudaStat1 = hipMemset((void *)z,0, 2*(n+1)*sizeof(z[0]));
if (cudaStat1 != hipSuccess) {
CLEANUP("Memset on Device failed");
return 1;
}
status= hipsparseDcsrmm(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, 2, n,
nnz, &dfive, descr, cooVal, csrRowPtr, cooColIndex,
y, n, &dzero, z, n+1);
if (status != HIPSPARSE_STATUS_SUCCESS) {
CLEANUP("Matrix-matrix multiplication failed");
return 1;
}
/* print final results (z) */
cudaStat1 = hipMemcpy(zHostPtr, z, (size_t)(2*(n+1)*sizeof(z[0])), hipMemcpyDeviceToHost);
if (cudaStat1 != hipSuccess) {
CLEANUP("Memcpy from Device to Host failed");
return 1;
}
//z = [950 400 2550 2600 0 | 49300 15200 132300 131200 0]
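    /* Worked check: the csrmm call computes Z = 5*A*Y (beta is zero), where Y
       is the n x 2 dense matrix stored column-major in y, with columns
       [10 20 30 40] and [680 760 1230 2240]:
       column 1: 5*[190 80 510 520] = [950 400 2550 2600]
       column 2: 5*[9860 3040 26460 26240] = [49300 15200 132300 131200]
       The trailing zeros appear because z uses leading dimension n+1, so the
       fifth entry of each column keeps the zero written by the memset above. */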
/* printf("Final results:\n");
for (int j=0; j<2; j++){
for (int i=0; i<n+1; i++){
printf("z[%d,%d]=%f\n",i,j,zHostPtr[i+(n+1)*j]);
}
} */
/* destroy matrix descriptor */
status = hipsparseDestroyMatDescr(descr);
descr = 0;
if (status != HIPSPARSE_STATUS_SUCCESS) {
CLEANUP("Matrix descriptor destruction failed");
return 1;
}
/* destroy handle */
status = hipsparseDestroy(handle);
handle = 0;
if (status != HIPSPARSE_STATUS_SUCCESS) {
CLEANUP("CUSPARSE Library release of resources failed");
return 1;
}
/* check the results */
/* Notice that CLEANUP() contains a call to hipsparseDestroy(handle) */
if ((zHostPtr[0] != 950.0) ||
(zHostPtr[1] != 400.0) ||
(zHostPtr[2] != 2550.0) ||
(zHostPtr[3] != 2600.0) ||
(zHostPtr[4] != 0.0) ||
(zHostPtr[5] != 49300.0) ||
(zHostPtr[6] != 15200.0) ||
(zHostPtr[7] != 132300.0) ||
(zHostPtr[8] != 131200.0) ||
(zHostPtr[9] != 0.0) ||
(yHostPtr[0] != 10.0) ||
(yHostPtr[1] != 20.0) ||
(yHostPtr[2] != 30.0) ||
(yHostPtr[3] != 40.0) ||
(yHostPtr[4] != 680.0) ||
(yHostPtr[5] != 760.0) ||
(yHostPtr[6] != 1230.0) ||
(yHostPtr[7] != 2240.0)){
CLEANUP("example test FAILED"); return 1;
}
else{ CLEANUP("example test PASSED"); return 0; }
}
| e72f7e3e32101fea05e13689a9d926f86e6b770a.cu | //Example: Application using C++ and the CUSPARSE library
//-------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include "cusparse.h"
#define CLEANUP(s) \
do { \
printf ("%s\n", s); \
if (yHostPtr) free(yHostPtr); \
if (zHostPtr) free(zHostPtr); \
if (xIndHostPtr) free(xIndHostPtr); \
if (xValHostPtr) free(xValHostPtr); \
if (cooRowIndexHostPtr) free(cooRowIndexHostPtr);\
if (cooColIndexHostPtr) free(cooColIndexHostPtr);\
if (cooValHostPtr) free(cooValHostPtr); \
if (y) cudaFree(y); \
if (z) cudaFree(z); \
if (xInd) cudaFree(xInd); \
if (xVal) cudaFree(xVal); \
if (csrRowPtr) cudaFree(csrRowPtr); \
if (cooRowIndex) cudaFree(cooRowIndex); \
if (cooColIndex) cudaFree(cooColIndex); \
if (cooVal) cudaFree(cooVal); \
if (descr) cusparseDestroyMatDescr(descr);\
if (handle) cusparseDestroy(handle); \
cudaDeviceReset(); \
fflush (stdout); \
} while (0)
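/* wrapping the cleanup statements in do { ... } while (0) lets CLEANUP("...")
   be used like a single statement (e.g. as an if/else branch) and still take a
   trailing semicolon without changing control flow */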
int main(){
cudaError_t cudaStat1, cudaStat2, cudaStat3, cudaStat4, cudaStat5, cudaStat6;
cusparseStatus_t status;
cusparseHandle_t handle=0;
cusparseMatDescr_t descr=0;
int * cooRowIndexHostPtr=0;
int * cooColIndexHostPtr=0;
double * cooValHostPtr=0;
int * cooRowIndex=0;
int * cooColIndex=0;
double * cooVal=0;
int * xIndHostPtr=0;
double * xValHostPtr=0;
double * yHostPtr=0;
int * xInd=0;
double * xVal=0;
double * y=0;
int * csrRowPtr=0;
double * zHostPtr=0;
double * z=0;
int n, nnz, nnz_vector;
double dzero =0.0;
double dtwo =2.0;
double dthree=3.0;
double dfive =5.0;
printf("testing example\n");
/* create the following sparse test matrix in COO format */
/* |1.0 2.0 3.0|
| 4.0 |
|5.0 6.0 7.0|
| 8.0 9.0| */
n=4; nnz=9;
cooRowIndexHostPtr = (int *) malloc(nnz*sizeof(cooRowIndexHostPtr[0]));
cooColIndexHostPtr = (int *) malloc(nnz*sizeof(cooColIndexHostPtr[0]));
cooValHostPtr = (double *)malloc(nnz*sizeof(cooValHostPtr[0]));
if ((!cooRowIndexHostPtr) || (!cooColIndexHostPtr) || (!cooValHostPtr)){
CLEANUP("Host malloc failed (matrix)");
return 1;
}
cooRowIndexHostPtr[0]=0; cooColIndexHostPtr[0]=0; cooValHostPtr[0]=1.0;
cooRowIndexHostPtr[1]=0; cooColIndexHostPtr[1]=2; cooValHostPtr[1]=2.0;
cooRowIndexHostPtr[2]=0; cooColIndexHostPtr[2]=3; cooValHostPtr[2]=3.0;
cooRowIndexHostPtr[3]=1; cooColIndexHostPtr[3]=1; cooValHostPtr[3]=4.0;
cooRowIndexHostPtr[4]=2; cooColIndexHostPtr[4]=0; cooValHostPtr[4]=5.0;
cooRowIndexHostPtr[5]=2; cooColIndexHostPtr[5]=2; cooValHostPtr[5]=6.0;
cooRowIndexHostPtr[6]=2; cooColIndexHostPtr[6]=3; cooValHostPtr[6]=7.0;
cooRowIndexHostPtr[7]=3; cooColIndexHostPtr[7]=1; cooValHostPtr[7]=8.0;
cooRowIndexHostPtr[8]=3; cooColIndexHostPtr[8]=3; cooValHostPtr[8]=9.0;
/* //print the matrix printf("Input data:\n");
for (int i=0; i<nnz; i++){
printf("cooRowIndexHostPtr[%d]=%d ",i,cooRowIndexHostPtr[i]);
printf("cooColIndexHostPtr[%d]=%d ",i,cooColIndexHostPtr[i]);
printf("cooValHostPtr[%d]=%f \n",i,cooValHostPtr[i]); }
*/
/* create a sparse and dense vector */
/* xVal= [100.0 200.0 400.0] (sparse)
xInd= [0 1 3 ]
y = [10.0 20.0 30.0 40.0 | 50.0 60.0 70.0 80.0] (dense)
*/
nnz_vector = 3;
xIndHostPtr = (int *) malloc(nnz_vector*sizeof(xIndHostPtr[0]));
xValHostPtr = (double *)malloc(nnz_vector*sizeof(xValHostPtr[0]));
yHostPtr = (double *)malloc(2*n *sizeof(yHostPtr[0]));
zHostPtr = (double *)malloc(2*(n+1) *sizeof(zHostPtr[0]));
if((!xIndHostPtr) || (!xValHostPtr) || (!yHostPtr) || (!zHostPtr)){
CLEANUP("Host malloc failed (vectors)");
return 1;
}
yHostPtr[0] = 10.0; xIndHostPtr[0]=0; xValHostPtr[0]=100.0;
yHostPtr[1] = 20.0; xIndHostPtr[1]=1; xValHostPtr[1]=200.0;
yHostPtr[2] = 30.0;
yHostPtr[3] = 40.0; xIndHostPtr[2]=3; xValHostPtr[2]=400.0;
yHostPtr[4] = 50.0;
yHostPtr[5] = 60.0;
yHostPtr[6] = 70.0;
yHostPtr[7] = 80.0;
/* //print the vectors
for (int j=0; j<2; j++){
for (int i=0; i<n; i++){
printf("yHostPtr[%d,%d]=%f\n",i,j,yHostPtr[i+n*j]);
}
}
for (int i=0; i<nnz_vector; i++){
printf("xIndHostPtr[%d]=%d ",i,xIndHostPtr[i]);
printf("xValHostPtr[%d]=%f\n",i,xValHostPtr[i]);
} */
/* allocate GPU memory and copy the matrix and vectors into it */
cudaStat1 = cudaMalloc((void**)&cooRowIndex,nnz*sizeof(cooRowIndex[0]));
cudaStat2 = cudaMalloc((void**)&cooColIndex,nnz*sizeof(cooColIndex[0]));
cudaStat3 = cudaMalloc((void**)&cooVal, nnz*sizeof(cooVal[0]));
cudaStat4 = cudaMalloc((void**)&y, 2*n*sizeof(y[0]));
cudaStat5 = cudaMalloc((void**)&xInd,nnz_vector*sizeof(xInd[0]));
cudaStat6 = cudaMalloc((void**)&xVal,nnz_vector*sizeof(xVal[0]));
if ((cudaStat1 != cudaSuccess) ||
(cudaStat2 != cudaSuccess) ||
(cudaStat3 != cudaSuccess) ||
(cudaStat4 != cudaSuccess) ||
(cudaStat5 != cudaSuccess) ||
(cudaStat6 != cudaSuccess)) {
CLEANUP("Device malloc failed");
return 1;
}
cudaStat1 = cudaMemcpy(cooRowIndex, cooRowIndexHostPtr,
(size_t)(nnz*sizeof(cooRowIndex[0])),
cudaMemcpyHostToDevice);
cudaStat2 = cudaMemcpy(cooColIndex, cooColIndexHostPtr, (size_t)(nnz*sizeof(cooColIndex[0])), cudaMemcpyHostToDevice);
cudaStat3 = cudaMemcpy(cooVal, cooValHostPtr, (size_t)(nnz*sizeof(cooVal[0])), cudaMemcpyHostToDevice);
cudaStat4 = cudaMemcpy(y, yHostPtr, (size_t)(2*n*sizeof(y[0])), cudaMemcpyHostToDevice);
cudaStat5 = cudaMemcpy(xInd, xIndHostPtr, (size_t)(nnz_vector*sizeof(xInd[0])), cudaMemcpyHostToDevice);
cudaStat6 = cudaMemcpy(xVal, xValHostPtr, (size_t)(nnz_vector*sizeof(xVal[0])), cudaMemcpyHostToDevice);
if ((cudaStat1 != cudaSuccess) || (cudaStat2 != cudaSuccess) || (cudaStat3 != cudaSuccess) || (cudaStat4 != cudaSuccess) || (cudaStat5 != cudaSuccess) || (cudaStat6 != cudaSuccess)) {
CLEANUP("Memcpy from Host to Device failed");
return 1;
}
/* initialize cusparse library */
status= cusparseCreate(&handle);
if (status != CUSPARSE_STATUS_SUCCESS) {
CLEANUP("CUSPARSE Library initialization failed");
return 1;
}
/* create and setup matrix descriptor */
status= cusparseCreateMatDescr(&descr);
if (status != CUSPARSE_STATUS_SUCCESS) {
CLEANUP("Matrix descriptor initialization failed");
return 1;
}
cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ZERO);
/* exercise conversion routines (convert matrix from COO 2 CSR format) */
cudaStat1 = cudaMalloc((void**)&csrRowPtr,(n+1)*sizeof(csrRowPtr[0]));
if (cudaStat1 != cudaSuccess) {
CLEANUP("Device malloc failed (csrRowPtr)");
return 1;
}
status= cusparseXcoo2csr(handle,cooRowIndex,nnz,n, csrRowPtr,CUSPARSE_INDEX_BASE_ZERO);
if (status != CUSPARSE_STATUS_SUCCESS) {
CLEANUP("Conversion from COO to CSR format failed");
return 1;
}
//csrRowPtr = [0 3 4 7 9]
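//i.e. row i of the matrix occupies csrRowPtr[i]..csrRowPtr[i+1]-1 of the value
//array, so the four rows hold 3, 1, 3 and 2 nonzeros; cooColIndex and cooVal
//are reused unchanged as the CSR column-index and value arrays below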
// The following test only works for compute capability 1.3 and above
// because it needs double precision.
int devId;
cudaDeviceProp prop;
cudaError_t cudaStat;
cudaStat = cudaGetDevice(&devId);
if (cudaSuccess != cudaStat){
CLEANUP("cudaGetDevice failed");
printf("Error: cudaStat %d, %s\n", cudaStat, cudaGetErrorString(cudaStat));
return 1;
}
cudaStat = cudaGetDeviceProperties( &prop, devId) ;
if (cudaSuccess != cudaStat){
CLEANUP("cudaGetDeviceProperties failed");
printf("Error: cudaStat %d, %s\n", cudaStat, cudaGetErrorString(cudaStat));
return 1;
}
int cc = 100*prop.major + 10*prop.minor;
if (cc < 130){
CLEANUP("waive the test because only sm13 and above are supported\n");
printf("the device has compute capability %d\n", cc);
printf("example test WAIVED"); return 2;
}
/* exercise Level 1 routines (scatter vector elements) */
status= cusparseDsctr(handle, nnz_vector, xVal, xInd, &y[n], CUSPARSE_INDEX_BASE_ZERO);
if (status != CUSPARSE_STATUS_SUCCESS) {
CLEANUP("Scatter from sparse to dense vector failed");
return 1;
}
//y = [10 20 30 40 | 100 200 70 400]
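//i.e. Dsctr wrote xVal[k] into y[n + xInd[k]]: y[4]=100, y[5]=200, y[7]=400,
//while y[6] keeps its previous value 70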
/* exercise Level 2 routines (csrmv) */
status= cusparseDcsrmv(handle,CUSPARSE_OPERATION_NON_TRANSPOSE, n, n, nnz,
&dtwo, descr, cooVal, csrRowPtr, cooColIndex,
&y[0], &dthree, &y[n]);
if (status != CUSPARSE_STATUS_SUCCESS) {
CLEANUP("Matrix-vector multiplication failed");
return 1;
}
//y = [10 20 30 40 | 680 760 1230 2240]
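//i.e. csrmv computed y[n..2n) = 2*A*y[0..n) + 3*y[n..2n); for row 0:
//2*(1*10 + 2*30 + 3*40) + 3*100 = 380 + 300 = 680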
cudaMemcpy(yHostPtr, y, (size_t)(2*n*sizeof(y[0])), cudaMemcpyDeviceToHost);
/* printf("Intermediate results:\n");
for (int j=0; j<2; j++){
for (int i=0; i<n; i++){
printf("yHostPtr[%d,%d]=%f\n",i,j,yHostPtr[i+n*j]);
}
} */
/* exercise Level 3 routines (csrmm) */
cudaStat1 = cudaMalloc((void**)&z, 2*(n+1)*sizeof(z[0]));
if (cudaStat1 != cudaSuccess) {
CLEANUP("Device malloc failed (z)"); return 1;
}
cudaStat1 = cudaMemset((void *)z,0, 2*(n+1)*sizeof(z[0]));
if (cudaStat1 != cudaSuccess) {
CLEANUP("Memset on Device failed");
return 1;
}
status= cusparseDcsrmm(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, 2, n,
nnz, &dfive, descr, cooVal, csrRowPtr, cooColIndex,
y, n, &dzero, z, n+1);
if (status != CUSPARSE_STATUS_SUCCESS) {
CLEANUP("Matrix-matrix multiplication failed");
return 1;
}
/* print final results (z) */
cudaStat1 = cudaMemcpy(zHostPtr, z, (size_t)(2*(n+1)*sizeof(z[0])), cudaMemcpyDeviceToHost);
if (cudaStat1 != cudaSuccess) {
CLEANUP("Memcpy from Device to Host failed");
return 1;
}
//z = [950 400 2550 2600 0 | 49300 15200 132300 131200 0]
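//i.e. csrmm computed Z = 5*A*Y + 0*Z, where Y is y viewed as a column-major
//4x2 matrix with ldb = n and Z uses ldc = n+1 (hence the padding zeros);
//for example z[0] = 5*(1*10 + 2*30 + 3*40) = 950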
/* printf("Final results:\n");
for (int j=0; j<2; j++){
for (int i=0; i<n+1; i++){
printf("z[%d,%d]=%f\n",i,j,zHostPtr[i+(n+1)*j]);
}
} */
/* destroy matrix descriptor */
status = cusparseDestroyMatDescr(descr);
descr = 0;
if (status != CUSPARSE_STATUS_SUCCESS) {
CLEANUP("Matrix descriptor destruction failed");
return 1;
}
/* destroy handle */
status = cusparseDestroy(handle);
handle = 0;
if (status != CUSPARSE_STATUS_SUCCESS) {
CLEANUP("CUSPARSE Library release of resources failed");
return 1;
}
/* check the results */
/* Notice that CLEANUP() contains a call to cusparseDestroy(handle) */
if ((zHostPtr[0] != 950.0) ||
(zHostPtr[1] != 400.0) ||
(zHostPtr[2] != 2550.0) ||
(zHostPtr[3] != 2600.0) ||
(zHostPtr[4] != 0.0) ||
(zHostPtr[5] != 49300.0) ||
(zHostPtr[6] != 15200.0) ||
(zHostPtr[7] != 132300.0) ||
(zHostPtr[8] != 131200.0) ||
(zHostPtr[9] != 0.0) ||
(yHostPtr[0] != 10.0) ||
(yHostPtr[1] != 20.0) ||
(yHostPtr[2] != 30.0) ||
(yHostPtr[3] != 40.0) ||
(yHostPtr[4] != 680.0) ||
(yHostPtr[5] != 760.0) ||
(yHostPtr[6] != 1230.0) ||
(yHostPtr[7] != 2240.0)){
CLEANUP("example test FAILED"); return 1;
}
else{ CLEANUP("example test PASSED"); return 0; }
}
|
7345b6ce7bb9bc8dab7357ab402ae57399fa03e0.hip | // !!! This is a file automatically generated by hipify!!!
/**************************************************************************
*
* set up GPU for processing
*
**************************************************************************/
#include "gpu_main.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
// #include <hip/hip_runtime.h>
#define BackgroundRed 0.0f
#define BackgroundGreen 0.0f
#define BackgroundBlue 0.0f
#define AttractorRed 0.709f
#define AttractorGreen 0.500f
#define AttractorBlue 0.0
#define zInitialSize 3
#define zScale 1.1f
#define FadeSpeed 0.01f
#define HeatTransferSpeed 0.05f
texture<float, 2> texRed;
texture<float, 2> texGreen;
texture<float, 2> texBlue;
/******************************************************************************/
GPU_Palette initGPUPalette(unsigned int imageWidth, unsigned int imageHeight)
{
GPU_Palette X;
X.gThreads.x = 32; // 32 x 32 = 1024 threads per block
X.gThreads.y = 32;
X.gThreads.z = 1;
X.gBlocks.x = ceil(imageWidth / 32); // however many blocks needed for image
X.gBlocks.y = ceil(imageHeight / 32);
X.gBlocks.z = 1;
X.palette_width = imageWidth; // save this info
X.palette_height = imageHeight;
X.num_pixels = imageWidth * imageHeight;
// allocate memory on GPU corresponding to pixel colors:
hipError_t err;
err = hipMalloc((void**)&X.red, X.num_pixels * sizeof(float));
if (err != hipSuccess) {
printf("cuda error allocating red = %s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMalloc((void**)&X.green, X.num_pixels * sizeof(float)); // g
if (err != hipSuccess) {
printf("cuda error allocating green = %s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMalloc((void**)&X.blue, X.num_pixels * sizeof(float)); // b
if (err != hipSuccess) {
printf("cuda error allocating blue = %s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
hipBindTexture2D(NULL, texRed, X.red, desc, imageWidth, imageHeight, sizeof(float) * imageWidth);
hipBindTexture2D(NULL, texGreen, X.green, desc, imageWidth, imageHeight, sizeof(float) * imageWidth);
hipBindTexture2D(NULL, texBlue, X.blue, desc, imageWidth, imageHeight, sizeof(float) * imageWidth);
return X;
}
/******************************************************************************/
void freeGPUPalette(GPU_Palette* P)
{
hipUnbindTexture(texRed);
hipUnbindTexture(texGreen);
hipUnbindTexture(texBlue);
hipFree(P->red);
hipFree(P->green);
hipFree(P->blue);
}
/******************************************************************************/
int updatePalette(GPU_Palette* P, APoint (&points)[5])
// int updatePalette(GPU_Palette* P, int xIdx, int yIdx)
{
for (int i = 0; i < 5; i++) {
hipLaunchKernelGGL(( updateReds), dim3(P->gBlocks), dim3(P->gThreads), 0, 0, P->red, points[i].xIdx, points[i].yIdx, points[i].z);
hipLaunchKernelGGL(( updateGreens), dim3(P->gBlocks), dim3(P->gThreads), 0, 0, P->green, points[i].xIdx, points[i].yIdx, points[i].z);
hipLaunchKernelGGL(( updateBlues), dim3(P->gBlocks), dim3(P->gThreads), 0, 0, P->blue, points[i].xIdx, points[i].yIdx, points[i].z);
}
return 0;
}
/******************************************************************************/
__global__ void updateReds(float* red, int xIdx, int yIdx, float zIdx)
{
// float size = 5 + (zIdx * 0.1);
float size = zInitialSize + zIdx * zScale;
int x = threadIdx.x + (blockIdx.x * blockDim.x);
int y = threadIdx.y + (blockIdx.y * blockDim.y);
int vecIdx = x + (y * blockDim.x * gridDim.x);
float top, left, center, right, bot;
top = tex2D(texRed, x, y + 1);
left = tex2D(texRed, x - 1, y);
center = tex2D(texRed, x, y);
right = tex2D(texRed, x + 1, y);
bot = tex2D(texRed, x, y - 1);
// red[vecIdx] =(top + bot + right + left + center) / 5.0;
if (sqrtf(powf((x - xIdx), 2) + powf((y - yIdx), 2)) < size) {
red[vecIdx] = AttractorRed;
} else {
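// dividing the 5-cell sum by (5 - HeatTransferSpeed) rather than 5 gives the
// neighborhood average a slight gain, which offsets the FadeSpeed decay applied below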
float heat_average = (top + bot + right + left + center) / (5 - HeatTransferSpeed);
// if (heat_average > BackgroundRed) {
// red[vecIdx] += 0.001;
// }
if (heat_average >= AttractorRed) {
red[vecIdx] = AttractorRed / 2;
} else {
red[vecIdx] = heat_average;
}
red[vecIdx] -= FadeSpeed * red[vecIdx];
if (red[vecIdx] < BackgroundRed)
red[vecIdx] = BackgroundRed;
if (red[vecIdx] > AttractorRed)
red[vecIdx] = AttractorRed;
}
}
/******************************************************************************/
__global__ void updateGreens(float* green, int xIdx, int yIdx, float zIdx)
{
float size = zInitialSize + zIdx * zScale;
int x = threadIdx.x + (blockIdx.x * blockDim.x);
int y = threadIdx.y + (blockIdx.y * blockDim.y);
int vecIdx = x + (y * blockDim.x * gridDim.x);
float top, left, center, right, bot;
top = tex2D(texRed, x, y + 1);
left = tex2D(texRed, x - 1, y);
center = tex2D(texRed, x, y);
right = tex2D(texRed, x + 1, y);
bot = tex2D(texRed, x, y - 1);
// green[vecIdx] = center + HeatTransfered * center);
// green[vecIdx] =(top + bot + right + left + center) / 5.0;
if (sqrtf(powf((x - xIdx), 2) + powf((y - yIdx), 2)) < size) {
green[vecIdx] = AttractorGreen;
} else {
float heat_average = (top + bot + right + left + center) / (5 - HeatTransferSpeed);
if (heat_average >= AttractorGreen) {
green[vecIdx] = AttractorGreen / 2;
} else {
green[vecIdx] = heat_average;
}
green[vecIdx] -= FadeSpeed * green[vecIdx];
if (green[vecIdx] < BackgroundGreen)
green[vecIdx] = BackgroundGreen;
if (green[vecIdx] > AttractorGreen)
green[vecIdx] = AttractorGreen;
}
}
/******************************************************************************/
__global__ void updateBlues(float* blue, int xIdx, int yIdx, float zIdx)
{
float size = zInitialSize + zIdx * zScale;
int x = threadIdx.x + (blockIdx.x * blockDim.x);
int y = threadIdx.y + (blockIdx.y * blockDim.y);
int vecIdx = x + (y * blockDim.x * gridDim.x);
float top, left, center, right, bot;
top = tex2D(texRed, x, y + 1);
left = tex2D(texRed, x - 1, y);
center = tex2D(texRed, x, y);
right = tex2D(texRed, x + 1, y);
bot = tex2D(texRed, x, y - 1);
// blue[vecIdx] = center + FadeSpeed * (top + bot + right + left - 4 * center);
// blue[vecIdx] =(top + bot + right + left + center) / 5.0;
if (sqrtf(powf((x - xIdx), 2) + powf((y - yIdx), 2)) < size) {
blue[vecIdx] = AttractorBlue;
} else {
blue[vecIdx] -= FadeSpeed * blue[vecIdx];
if (blue[vecIdx] < BackgroundBlue)
blue[vecIdx] = BackgroundBlue;
// if (blue[vecIdx] > AttractorBlue)
// blue[vecIdx] = AttractorBlue;
}
}
/******************************************************************************/
| 7345b6ce7bb9bc8dab7357ab402ae57399fa03e0.cu | /**************************************************************************
*
* set up GPU for processing
*
**************************************************************************/
#include "gpu_main.h"
#include <cuda.h>
#include <stdio.h>
// #include <cuda_runtime.h>
#define BackgroundRed 0.0f
#define BackgroundGreen 0.0f
#define BackgroundBlue 0.0f
#define AttractorRed 0.709f
#define AttractorGreen 0.500f
#define AttractorBlue 0.0
#define zInitialSize 3
#define zScale 1.1f
#define FadeSpeed 0.01f
#define HeatTransferSpeed 0.05f
texture<float, 2> texRed;
texture<float, 2> texGreen;
texture<float, 2> texBlue;
/******************************************************************************/
GPU_Palette initGPUPalette(unsigned int imageWidth, unsigned int imageHeight)
{
GPU_Palette X;
X.gThreads.x = 32; // 32 x 32 = 1024 threads per block
X.gThreads.y = 32;
X.gThreads.z = 1;
X.gBlocks.x = ceil(imageWidth / 32); // however many blocks needed for image
X.gBlocks.y = ceil(imageHeight / 32);
X.gBlocks.z = 1;
X.palette_width = imageWidth; // save this info
X.palette_height = imageHeight;
X.num_pixels = imageWidth * imageHeight;
// allocate memory on GPU corresponding to pixel colors:
cudaError_t err;
err = cudaMalloc((void**)&X.red, X.num_pixels * sizeof(float));
if (err != cudaSuccess) {
printf("cuda error allocating red = %s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc((void**)&X.green, X.num_pixels * sizeof(float)); // g
if (err != cudaSuccess) {
printf("cuda error allocating green = %s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc((void**)&X.blue, X.num_pixels * sizeof(float)); // b
if (err != cudaSuccess) {
printf("cuda error allocating blue = %s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
cudaBindTexture2D(NULL, texRed, X.red, desc, imageWidth, imageHeight, sizeof(float) * imageWidth);
cudaBindTexture2D(NULL, texGreen, X.green, desc, imageWidth, imageHeight, sizeof(float) * imageWidth);
cudaBindTexture2D(NULL, texBlue, X.blue, desc, imageWidth, imageHeight, sizeof(float) * imageWidth);
return X;
}
/******************************************************************************/
void freeGPUPalette(GPU_Palette* P)
{
cudaUnbindTexture(texRed);
cudaUnbindTexture(texGreen);
cudaUnbindTexture(texBlue);
cudaFree(P->red);
cudaFree(P->green);
cudaFree(P->blue);
}
/******************************************************************************/
int updatePalette(GPU_Palette* P, APoint (&points)[5])
// int updatePalette(GPU_Palette* P, int xIdx, int yIdx)
{
for (int i = 0; i < 5; i++) {
updateReds<<<P->gBlocks, P->gThreads>>>(P->red, points[i].xIdx, points[i].yIdx, points[i].z);
updateGreens<<<P->gBlocks, P->gThreads>>>(P->green, points[i].xIdx, points[i].yIdx, points[i].z);
updateBlues<<<P->gBlocks, P->gThreads>>>(P->blue, points[i].xIdx, points[i].yIdx, points[i].z);
}
return 0;
}
/******************************************************************************/
__global__ void updateReds(float* red, int xIdx, int yIdx, float zIdx)
{
// float size = 5 + (zIdx * 0.1);
float size = zInitialSize + zIdx * zScale;
int x = threadIdx.x + (blockIdx.x * blockDim.x);
int y = threadIdx.y + (blockIdx.y * blockDim.y);
int vecIdx = x + (y * blockDim.x * gridDim.x);
float top, left, center, right, bot;
top = tex2D(texRed, x, y + 1);
left = tex2D(texRed, x - 1, y);
center = tex2D(texRed, x, y);
right = tex2D(texRed, x + 1, y);
bot = tex2D(texRed, x, y - 1);
// red[vecIdx] =(top + bot + right + left + center) / 5.0;
if (sqrtf(powf((x - xIdx), 2) + powf((y - yIdx), 2)) < size) {
red[vecIdx] = AttractorRed;
} else {
float heat_average = (top + bot + right + left + center) / (5 - HeatTransferSpeed);
// if (heat_average > BackgroundRed) {
// red[vecIdx] += 0.001;
// }
if (heat_average >= AttractorRed) {
red[vecIdx] = AttractorRed / 2;
} else {
red[vecIdx] = heat_average;
}
red[vecIdx] -= FadeSpeed * red[vecIdx];
if (red[vecIdx] < BackgroundRed)
red[vecIdx] = BackgroundRed;
if (red[vecIdx] > AttractorRed)
red[vecIdx] = AttractorRed;
}
}
/******************************************************************************/
__global__ void updateGreens(float* green, int xIdx, int yIdx, float zIdx)
{
float size = zInitialSize + zIdx * zScale;
int x = threadIdx.x + (blockIdx.x * blockDim.x);
int y = threadIdx.y + (blockIdx.y * blockDim.y);
int vecIdx = x + (y * blockDim.x * gridDim.x);
float top, left, center, right, bot;
top = tex2D(texRed, x, y + 1);
left = tex2D(texRed, x - 1, y);
center = tex2D(texRed, x, y);
right = tex2D(texRed, x + 1, y);
bot = tex2D(texRed, x, y - 1);
// green[vecIdx] = center + HeatTransfered * center);
// green[vecIdx] =(top + bot + right + left + center) / 5.0;
if (sqrtf(powf((x - xIdx), 2) + powf((y - yIdx), 2)) < size) {
green[vecIdx] = AttractorGreen;
} else {
float heat_average = (top + bot + right + left + center) / (5 - HeatTransferSpeed);
if (heat_average >= AttractorGreen) {
green[vecIdx] = AttractorGreen / 2;
} else {
green[vecIdx] = heat_average;
}
green[vecIdx] -= FadeSpeed * green[vecIdx];
if (green[vecIdx] < BackgroundGreen)
green[vecIdx] = BackgroundGreen;
if (green[vecIdx] > AttractorGreen)
green[vecIdx] = AttractorGreen;
}
}
/******************************************************************************/
__global__ void updateBlues(float* blue, int xIdx, int yIdx, float zIdx)
{
float size = zInitialSize + zIdx * zScale;
int x = threadIdx.x + (blockIdx.x * blockDim.x);
int y = threadIdx.y + (blockIdx.y * blockDim.y);
int vecIdx = x + (y * blockDim.x * gridDim.x);
float top, left, center, right, bot;
top = tex2D(texRed, x, y + 1);
left = tex2D(texRed, x - 1, y);
center = tex2D(texRed, x, y);
right = tex2D(texRed, x + 1, y);
bot = tex2D(texRed, x, y - 1);
// blue[vecIdx] = center + FadeSpeed * (top + bot + right + left - 4 * center);
// blue[vecIdx] =(top + bot + right + left + center) / 5.0;
if (sqrtf(powf((x - xIdx), 2) + powf((y - yIdx), 2)) < size) {
blue[vecIdx] = AttractorBlue;
} else {
blue[vecIdx] -= FadeSpeed * blue[vecIdx];
if (blue[vecIdx] < BackgroundBlue)
blue[vecIdx] = BackgroundBlue;
// if (blue[vecIdx] > AttractorBlue)
// blue[vecIdx] = AttractorBlue;
}
}
/******************************************************************************/
|
6e9805606b174b81d068242a2d5caf147ea4c62f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// The translation unit for reduction `max`
#include "reduction_functions_hip.cuh"
#include "simple_hip.cuh"
gdf_scalar cudf::reduction::max(gdf_column const& col, gdf_dtype const output_dtype, hipStream_t stream)
{
using reducer = cudf::reduction::simple::element_type_dispatcher<cudf::reduction::op::max>;
return cudf::type_dispatcher(col.dtype, reducer(), col, output_dtype, stream);
}
| 6e9805606b174b81d068242a2d5caf147ea4c62f.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// The translation unit for reduction `max`
#include "reduction_functions.cuh"
#include "simple.cuh"
gdf_scalar cudf::reduction::max(gdf_column const& col, gdf_dtype const output_dtype, cudaStream_t stream)
{
using reducer = cudf::reduction::simple::element_type_dispatcher<cudf::reduction::op::max>;
return cudf::type_dispatcher(col.dtype, reducer(), col, output_dtype, stream);
}
|
cafd7dd34df7923909ddc7f10d0d651af451eb50.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright (c) 2011-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "hip_helpers.cuh"
__device__ static inline float clamp(float v, float low, float high)
{
return min(max(v, low), high);
}
#define float3 Float3
struct Float3
{
float x,y,z;
__device__ friend Float3 operator+(const Float3 a, const Float3 b)
{
Float3 c;
c.x = a.x+b.x;
c.y = a.y+b.y;
c.z = a.z+b.z;
return c;
}
__device__ friend Float3 operator-(const Float3 a, const Float3 b)
{
Float3 c;
c.x = a.x-b.x;
c.y = a.y-b.y;
c.z = a.z-b.z;
return c;
}
__device__ friend Float3 operator/(const Float3 a, const Float3 b)
{
Float3 c;
c.x = a.x/b.x;
c.y = a.y/b.y;
c.z = a.z/b.z;
return c;
}
__device__ friend Float3 operator*(const Float3 a, const Float3 b)
{
Float3 c;
c.x = a.x*b.x;
c.y = a.y*b.y;
c.z = a.z*b.z;
return c;
}
__device__ friend Float3 operator*(const Float3 a, const float b)
{
Float3 c;
c.x = a.x*b;
c.y = a.y*b;
c.z = a.z*b;
return c;
}
};
struct Ray {
float3 origin, dir;
};
__device__ static void
generateRay(const float raster2camera[4][4],
const float camera2world[4][4],
float x, float y, Ray &ray) {
// transform raster coordinate (x, y, 0) to camera space
float camx = raster2camera[0][0] * x + raster2camera[0][1] * y + raster2camera[0][3];
float camy = raster2camera[1][0] * x + raster2camera[1][1] * y + raster2camera[1][3];
float camz = raster2camera[2][3];
float camw = raster2camera[3][3];
camx /= camw;
camy /= camw;
camz /= camw;
ray.dir.x = camera2world[0][0] * camx + camera2world[0][1] * camy + camera2world[0][2] * camz;
ray.dir.y = camera2world[1][0] * camx + camera2world[1][1] * camy + camera2world[1][2] * camz;
ray.dir.z = camera2world[2][0] * camx + camera2world[2][1] * camy + camera2world[2][2] * camz;
ray.origin.x = camera2world[0][3] / camera2world[3][3];
ray.origin.y = camera2world[1][3] / camera2world[3][3];
ray.origin.z = camera2world[2][3] / camera2world[3][3];
}
__device__ static inline bool
Inside(float3 p, float3 pMin, float3 pMax) {
return (p.x >= pMin.x && p.x <= pMax.x &&
p.y >= pMin.y && p.y <= pMax.y &&
p.z >= pMin.z && p.z <= pMax.z);
}
__device__ static bool
IntersectP(Ray ray, float3 pMin, float3 pMax, float &hit0, float &hit1) {
float t0 = -1e30f, t1 = 1e30f;
float3 tNear = (pMin - ray.origin) / ray.dir;
float3 tFar = (pMax - ray.origin) / ray.dir;
if (tNear.x > tFar.x) {
float tmp = tNear.x;
tNear.x = tFar.x;
tFar.x = tmp;
}
t0 = max(tNear.x, t0);
t1 = min(tFar.x, t1);
if (tNear.y > tFar.y) {
float tmp = tNear.y;
tNear.y = tFar.y;
tFar.y = tmp;
}
t0 = max(tNear.y, t0);
t1 = min(tFar.y, t1);
if (tNear.z > tFar.z) {
float tmp = tNear.z;
tNear.z = tFar.z;
tFar.z = tmp;
}
t0 = max(tNear.z, t0);
t1 = min(tFar.z, t1);
if (t0 <= t1) {
hit0 = t0;
hit1 = t1;
return true;
}
else
return false;
}
__device__ static inline float Lerp(float t, float a, float b) {
return (1.f - t) * a + t * b;
}
__device__ static inline float D(int x, int y, int z, int nVoxels[3],
float density[]) {
x = clamp(x, 0, nVoxels[0]-1);
y = clamp(y, 0, nVoxels[1]-1);
z = clamp(z, 0, nVoxels[2]-1);
return density[z*nVoxels[0]*nVoxels[1] + y*nVoxels[0] + x];
}
__device__ static inline float3 Offset(float3 p, float3 pMin, float3 pMax) {
return (p - pMin) / (pMax - pMin);
}
__device__ static inline float Density(float3 Pobj, float3 pMin, float3 pMax,
float density[], int nVoxels[3]) {
if (!Inside(Pobj, pMin, pMax))
return 0;
// Compute voxel coordinates and offsets for _Pobj_
float3 vox = Offset(Pobj, pMin, pMax);
vox.x = vox.x * nVoxels[0] - .5f;
vox.y = vox.y * nVoxels[1] - .5f;
vox.z = vox.z * nVoxels[2] - .5f;
int vx = (int)(vox.x), vy = (int)(vox.y), vz = (int)(vox.z);
float dx = vox.x - vx, dy = vox.y - vy, dz = vox.z - vz;
// Trilinearly interpolate density values to compute local density
float d00 = Lerp(dx, D(vx, vy, vz, nVoxels, density),
D(vx+1, vy, vz, nVoxels, density));
float d10 = Lerp(dx, D(vx, vy+1, vz, nVoxels, density),
D(vx+1, vy+1, vz, nVoxels, density));
float d01 = Lerp(dx, D(vx, vy, vz+1, nVoxels, density),
D(vx+1, vy, vz+1, nVoxels, density));
float d11 = Lerp(dx, D(vx, vy+1, vz+1, nVoxels, density),
D(vx+1, vy+1, vz+1, nVoxels, density));
float d0 = Lerp(dy, d00, d10);
float d1 = Lerp(dy, d01, d11);
return Lerp(dz, d0, d1);
}
/* Returns the transmittance between two points p0 and p1, in a volume
with extent (pMin,pMax) with transmittance coefficient sigma_t,
defined by nVoxels[3] voxels in each dimension in the given density
array. */
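/* Implementation note: tau below approximates the line integral of
   sigma_t * density over the in-volume segment using fixed 0.2-unit steps,
   and the returned value exp(-tau) is the Beer-Lambert transmittance. */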
__device__ static inline float
transmittance(float3 p0, float3 p1, float3 pMin,
float3 pMax, float sigma_t,
float density[], int nVoxels[3]) {
float rayT0, rayT1;
Ray ray;
ray.origin = p1;
ray.dir = p0 - p1;
// Find the parametric t range along the ray that is inside the volume.
if (!IntersectP(ray, pMin, pMax, rayT0, rayT1))
return 1.f;
rayT0 = max(rayT0, 0.f);
// Accumulate beam transmittance in tau
float tau = 0.0f;
float rayLength = sqrt(ray.dir.x * ray.dir.x + ray.dir.y * ray.dir.y +
ray.dir.z * ray.dir.z);
float stepDist = 0.2f;
float stepT = stepDist / rayLength;
float t = rayT0;
float3 pos = ray.origin + ray.dir * rayT0;
float3 dirStep = ray.dir * stepT;
while (t < rayT1) {
tau += stepDist * sigma_t * Density(pos, pMin, pMax, density, nVoxels);
pos = pos + dirStep;
t += stepT;
}
return exp(-tau);
}
__device__ static inline float
distanceSquared(float3 a, float3 b) {
float3 d = a-b;
return d.x*d.x + d.y*d.y + d.z*d.z;
}
__device__ static inline float
raymarch(float density[], int nVoxels[3], Ray ray) {
float rayT0, rayT1;
float3 pMin = {.3f, -.2f, .3f}, pMax = {1.8f, 2.3f, 1.8f};
float3 lightPos = { -1.f, 4., 1.5f };
if (!IntersectP(ray, pMin, pMax, rayT0, rayT1))
return 0.f;
rayT0 = max(rayT0, 0.f);
// Parameters that define the volume scattering characteristics and
// sampling rate for raymarching
float Le = .25f; // Emission coefficient
float sigma_a = 10.f; // Absorption coefficient
float sigma_s = 10.f; // Scattering coefficient
float stepDist = 0.025f; // Ray step amount
float lightIntensity = 40.0f; // Light source intensity
float tau = 0.f; // accumulated beam transmittance
float L = 0.f; // radiance along the ray
float rayLength = sqrt(ray.dir.x * ray.dir.x + ray.dir.y * ray.dir.y +
ray.dir.z * ray.dir.z);
float stepT = stepDist / rayLength;
float t = rayT0;
float3 pos = ray.origin + ray.dir * rayT0;
float3 dirStep = ray.dir * stepT;
while (t < rayT1)
{
float d = Density(pos, pMin, pMax, density, nVoxels);
// terminate once attenuation is high
float atten = exp(-tau);
if (atten < .005f)
break;
// direct lighting
float Li = lightIntensity / distanceSquared(lightPos, pos) *
transmittance(lightPos, pos, pMin, pMax, sigma_a + sigma_s,
density, nVoxels);
L += stepDist * atten * d * sigma_s * (Li + Le);
// update beam transmittance
tau += stepDist * (sigma_a + sigma_s) * d;
pos = pos + dirStep;
t += stepT;
}
// Gamma correction
return pow(L, 1.f / 2.2f);
}
/* Utility routine used by both the task-based and the single-core entrypoints.
Renders a tile of the image, covering [x0,x0) * [y0, y1), storing the
result into the image[] array.
*/
__device__ static void
volume_tile(int x0, int y0, int x1,
int y1, float density[], int nVoxels[3],
const float raster2camera[4][4],
const float camera2world[4][4],
int width, int height, float image[]) {
// Work on 8x8=64 pixel tiles of the image, split into four 4x4 pixel blocks
// and processed programCount pixels at a time; pixels that fall outside
// [x0,x1) x [y0,y1) are skipped by the bounds check before the image write.
for (int y = y0; y < y1; y += 8) {
for (int x = x0; x < x1; x += 8) {
for (int ob = 0; ob < 64; ob += programCount)
{
const int o = ob + programIndex;
// These two arrays encode the mapping from [0,15] to
// offsets within the 4x4 pixel block so that we render
// each pixel inside the block
const int xoffsets[16] = { 0, 1, 0, 1, 2, 3, 2, 3,
0, 1, 0, 1, 2, 3, 2, 3 };
const int yoffsets[16] = { 0, 0, 1, 1, 0, 0, 1, 1,
2, 2, 3, 3, 2, 2, 3, 3 };
const int xblock[4] = {0, 4, 0, 4};
const int yblock[4] = {0, 0, 4, 4};
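// o/16 picks one of the four 4x4 blocks inside the 8x8 tile via xblock/yblock,
// and o&15 picks the pixel within that block via xoffsets/yoffsets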
// Figure out the pixel to render for this program instance
const int xo = x + xblock[o/16] + xoffsets[o&15];
const int yo = y + yblock[o/16] + yoffsets[o&15];
// Use viewing parameters to compute the corresponding ray
// for the pixel
Ray ray;
generateRay(raster2camera, camera2world, xo, yo, ray);
// And raymarch through the volume to compute the pixel's
// value
int offset = yo * width + xo;
if (xo < x1 && yo < y1)
image[offset] = raymarch(density, nVoxels, ray);
}
}
}
}
__global__ void
volume_task(float density[], int _nVoxels[3],
const float _raster2camera[4][4],
const float _camera2world[4][4],
int width, int height, float image[]) {
if (taskIndex0 >= taskCount0) return;
#if 0
int nVoxels[3];
nVoxels[0] = _nVoxels[0];
nVoxels[1] = _nVoxels[1];
nVoxels[2] = _nVoxels[2];
float raster2camera[4][4];
raster2camera[0][0] = _raster2camera[0][0];
raster2camera[0][1] = _raster2camera[0][1];
raster2camera[0][2] = _raster2camera[0][2];
raster2camera[0][3] = _raster2camera[0][3];
raster2camera[1][0] = _raster2camera[1][0];
raster2camera[1][1] = _raster2camera[1][1];
raster2camera[1][2] = _raster2camera[1][2];
raster2camera[1][3] = _raster2camera[1][3];
raster2camera[2][0] = _raster2camera[2][0];
raster2camera[2][1] = _raster2camera[2][1];
raster2camera[2][2] = _raster2camera[2][2];
raster2camera[2][3] = _raster2camera[2][3];
raster2camera[3][0] = _raster2camera[3][0];
raster2camera[3][1] = _raster2camera[3][1];
raster2camera[3][2] = _raster2camera[3][2];
raster2camera[3][3] = _raster2camera[3][3];
float camera2world[4][4];
camera2world[0][0] = _camera2world[0][0];
camera2world[0][1] = _camera2world[0][1];
camera2world[0][2] = _camera2world[0][2];
camera2world[0][3] = _camera2world[0][3];
camera2world[1][0] = _camera2world[1][0];
camera2world[1][1] = _camera2world[1][1];
camera2world[1][2] = _camera2world[1][2];
camera2world[1][3] = _camera2world[1][3];
camera2world[2][0] = _camera2world[2][0];
camera2world[2][1] = _camera2world[2][1];
camera2world[2][2] = _camera2world[2][2];
camera2world[2][3] = _camera2world[2][3];
camera2world[3][0] = _camera2world[3][0];
camera2world[3][1] = _camera2world[3][1];
camera2world[3][2] = _camera2world[3][2];
camera2world[3][3] = _camera2world[3][3];
#else
#define nVoxels _nVoxels
#define raster2camera _raster2camera
#define camera2world _camera2world
#endif
int dx = 8, dy = 8; // must match value in volume_ispc_tasks
int xbuckets = (width + (dx-1)) / dx;
int ybuckets = (height + (dy-1)) / dy;
int x0 = (taskIndex % xbuckets) * dx;
int y0 = (taskIndex / xbuckets) * dy;
int x1 = x0 + dx, y1 = y0 + dy;
x1 = min(x1, width);
y1 = min(y1, height);
volume_tile(x0, y0, x1, y1, density, nVoxels, raster2camera,
camera2world, width, height, image);
}
extern "C"
__global__ void
volume_ispc_tasks___export( float density[], int nVoxels[3],
const float raster2camera[4][4],
const float camera2world[4][4],
int width, int height, float image[]) {
// Launch tasks to work on (dx,dy)-sized tiles of the image
int dx = 8, dy = 8;
int nTasks = ((width+(dx-1))/dx) * ((height+(dy-1))/dy);
launch(nTasks,1,1,volume_task)
(density, nVoxels, raster2camera, camera2world,
width, height, image);
hipDeviceSynchronize();
}
extern "C"
__host__ void
volume_ispc_tasks( float density[], int nVoxels[3],
const float raster2camera[4][4],
const float camera2world[4][4],
int width, int height, float image[]) {
hipLaunchKernelGGL(( volume_ispc_tasks___export), dim3(1),dim3(32), 0, 0, density, nVoxels, raster2camera, camera2world, width, height,image);
hipDeviceSynchronize();
}
| cafd7dd34df7923909ddc7f10d0d651af451eb50.cu | /*
Copyright (c) 2011-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "cuda_helpers.cuh"
__device__ static inline float clamp(float v, float low, float high)
{
return min(max(v, low), high);
}
#define float3 Float3
struct Float3
{
float x,y,z;
__device__ friend Float3 operator+(const Float3 a, const Float3 b)
{
Float3 c;
c.x = a.x+b.x;
c.y = a.y+b.y;
c.z = a.z+b.z;
return c;
}
__device__ friend Float3 operator-(const Float3 a, const Float3 b)
{
Float3 c;
c.x = a.x-b.x;
c.y = a.y-b.y;
c.z = a.z-b.z;
return c;
}
__device__ friend Float3 operator/(const Float3 a, const Float3 b)
{
Float3 c;
c.x = a.x/b.x;
c.y = a.y/b.y;
c.z = a.z/b.z;
return c;
}
__device__ friend Float3 operator*(const Float3 a, const Float3 b)
{
Float3 c;
c.x = a.x*b.x;
c.y = a.y*b.y;
c.z = a.z*b.z;
return c;
}
__device__ friend Float3 operator*(const Float3 a, const float b)
{
Float3 c;
c.x = a.x*b;
c.y = a.y*b;
c.z = a.z*b;
return c;
}
};
struct Ray {
float3 origin, dir;
};
__device__ static void
generateRay(const float raster2camera[4][4],
const float camera2world[4][4],
float x, float y, Ray &ray) {
// transform raster coordinate (x, y, 0) to camera space
float camx = raster2camera[0][0] * x + raster2camera[0][1] * y + raster2camera[0][3];
float camy = raster2camera[1][0] * x + raster2camera[1][1] * y + raster2camera[1][3];
float camz = raster2camera[2][3];
float camw = raster2camera[3][3];
camx /= camw;
camy /= camw;
camz /= camw;
ray.dir.x = camera2world[0][0] * camx + camera2world[0][1] * camy + camera2world[0][2] * camz;
ray.dir.y = camera2world[1][0] * camx + camera2world[1][1] * camy + camera2world[1][2] * camz;
ray.dir.z = camera2world[2][0] * camx + camera2world[2][1] * camy + camera2world[2][2] * camz;
ray.origin.x = camera2world[0][3] / camera2world[3][3];
ray.origin.y = camera2world[1][3] / camera2world[3][3];
ray.origin.z = camera2world[2][3] / camera2world[3][3];
}
__device__ static inline bool
Inside(float3 p, float3 pMin, float3 pMax) {
return (p.x >= pMin.x && p.x <= pMax.x &&
p.y >= pMin.y && p.y <= pMax.y &&
p.z >= pMin.z && p.z <= pMax.z);
}
__device__ static bool
IntersectP(Ray ray, float3 pMin, float3 pMax, float &hit0, float &hit1) {
float t0 = -1e30f, t1 = 1e30f;
float3 tNear = (pMin - ray.origin) / ray.dir;
float3 tFar = (pMax - ray.origin) / ray.dir;
if (tNear.x > tFar.x) {
float tmp = tNear.x;
tNear.x = tFar.x;
tFar.x = tmp;
}
t0 = max(tNear.x, t0);
t1 = min(tFar.x, t1);
if (tNear.y > tFar.y) {
float tmp = tNear.y;
tNear.y = tFar.y;
tFar.y = tmp;
}
t0 = max(tNear.y, t0);
t1 = min(tFar.y, t1);
if (tNear.z > tFar.z) {
float tmp = tNear.z;
tNear.z = tFar.z;
tFar.z = tmp;
}
t0 = max(tNear.z, t0);
t1 = min(tFar.z, t1);
if (t0 <= t1) {
hit0 = t0;
hit1 = t1;
return true;
}
else
return false;
}
__device__ static inline float Lerp(float t, float a, float b) {
return (1.f - t) * a + t * b;
}
__device__ static inline float D(int x, int y, int z, int nVoxels[3],
float density[]) {
x = clamp(x, 0, nVoxels[0]-1);
y = clamp(y, 0, nVoxels[1]-1);
z = clamp(z, 0, nVoxels[2]-1);
return density[z*nVoxels[0]*nVoxels[1] + y*nVoxels[0] + x];
}
__device__ static inline float3 Offset(float3 p, float3 pMin, float3 pMax) {
return (p - pMin) / (pMax - pMin);
}
__device__ static inline float Density(float3 Pobj, float3 pMin, float3 pMax,
float density[], int nVoxels[3]) {
if (!Inside(Pobj, pMin, pMax))
return 0;
// Compute voxel coordinates and offsets for _Pobj_
float3 vox = Offset(Pobj, pMin, pMax);
vox.x = vox.x * nVoxels[0] - .5f;
vox.y = vox.y * nVoxels[1] - .5f;
vox.z = vox.z * nVoxels[2] - .5f;
int vx = (int)(vox.x), vy = (int)(vox.y), vz = (int)(vox.z);
float dx = vox.x - vx, dy = vox.y - vy, dz = vox.z - vz;
// Trilinearly interpolate density values to compute local density
float d00 = Lerp(dx, D(vx, vy, vz, nVoxels, density),
D(vx+1, vy, vz, nVoxels, density));
float d10 = Lerp(dx, D(vx, vy+1, vz, nVoxels, density),
D(vx+1, vy+1, vz, nVoxels, density));
float d01 = Lerp(dx, D(vx, vy, vz+1, nVoxels, density),
D(vx+1, vy, vz+1, nVoxels, density));
float d11 = Lerp(dx, D(vx, vy+1, vz+1, nVoxels, density),
D(vx+1, vy+1, vz+1, nVoxels, density));
float d0 = Lerp(dy, d00, d10);
float d1 = Lerp(dy, d01, d11);
return Lerp(dz, d0, d1);
}
/* Returns the transmittance between two points p0 and p1, in a volume
with extent (pMin,pMax) with transmittance coefficient sigma_t,
defined by nVoxels[3] voxels in each dimension in the given density
array. */
__device__ static inline float
transmittance(float3 p0, float3 p1, float3 pMin,
float3 pMax, float sigma_t,
float density[], int nVoxels[3]) {
float rayT0, rayT1;
Ray ray;
ray.origin = p1;
ray.dir = p0 - p1;
// Find the parametric t range along the ray that is inside the volume.
if (!IntersectP(ray, pMin, pMax, rayT0, rayT1))
return 1.f;
rayT0 = max(rayT0, 0.f);
// Accumulate beam transmittance in tau
float tau = 0.0f;
float rayLength = sqrt(ray.dir.x * ray.dir.x + ray.dir.y * ray.dir.y +
ray.dir.z * ray.dir.z);
float stepDist = 0.2f;
float stepT = stepDist / rayLength;
float t = rayT0;
float3 pos = ray.origin + ray.dir * rayT0;
float3 dirStep = ray.dir * stepT;
while (t < rayT1) {
tau += stepDist * sigma_t * Density(pos, pMin, pMax, density, nVoxels);
pos = pos + dirStep;
t += stepT;
}
return exp(-tau);
}
__device__ static inline float
distanceSquared(float3 a, float3 b) {
float3 d = a-b;
return d.x*d.x + d.y*d.y + d.z*d.z;
}
__device__ static inline float
raymarch(float density[], int nVoxels[3], Ray ray) {
float rayT0, rayT1;
float3 pMin = {.3f, -.2f, .3f}, pMax = {1.8f, 2.3f, 1.8f};
float3 lightPos = { -1.f, 4., 1.5f };
if (!IntersectP(ray, pMin, pMax, rayT0, rayT1))
return 0.f;
rayT0 = max(rayT0, 0.f);
// Parameters that define the volume scattering characteristics and
// sampling rate for raymarching
float Le = .25f; // Emission coefficient
float sigma_a = 10.f; // Absorption coefficient
float sigma_s = 10.f; // Scattering coefficient
float stepDist = 0.025f; // Ray step amount
float lightIntensity = 40.0f; // Light source intensity
float tau = 0.f; // accumulated beam transmittance
float L = 0.f; // radiance along the ray
float rayLength = sqrt(ray.dir.x * ray.dir.x + ray.dir.y * ray.dir.y +
ray.dir.z * ray.dir.z);
float stepT = stepDist / rayLength;
float t = rayT0;
float3 pos = ray.origin + ray.dir * rayT0;
float3 dirStep = ray.dir * stepT;
while (t < rayT1)
{
float d = Density(pos, pMin, pMax, density, nVoxels);
// terminate once attenuation is high
float atten = exp(-tau);
if (atten < .005f)
break;
// direct lighting
float Li = lightIntensity / distanceSquared(lightPos, pos) *
transmittance(lightPos, pos, pMin, pMax, sigma_a + sigma_s,
density, nVoxels);
L += stepDist * atten * d * sigma_s * (Li + Le);
// update beam transmittance
tau += stepDist * (sigma_a + sigma_s) * d;
pos = pos + dirStep;
t += stepT;
}
// Gamma correction
return pow(L, 1.f / 2.2f);
}
/* Utility routine used by both the task-based and the single-core entrypoints.
Renders a tile of the image, covering [x0,x0) * [y0, y1), storing the
result into the image[] array.
*/
__device__ static void
volume_tile(int x0, int y0, int x1,
int y1, float density[], int nVoxels[3],
const float raster2camera[4][4],
const float camera2world[4][4],
int width, int height, float image[]) {
// Work on 8x8=64 pixel tiles of the image, split into four 4x4 pixel blocks
// and processed programCount pixels at a time; pixels that fall outside
// [x0,x1) x [y0,y1) are skipped by the bounds check before the image write.
for (int y = y0; y < y1; y += 8) {
for (int x = x0; x < x1; x += 8) {
for (int ob = 0; ob < 64; ob += programCount)
{
const int o = ob + programIndex;
// These two arrays encode the mapping from [0,15] to
// offsets within the 4x4 pixel block so that we render
// each pixel inside the block
const int xoffsets[16] = { 0, 1, 0, 1, 2, 3, 2, 3,
0, 1, 0, 1, 2, 3, 2, 3 };
const int yoffsets[16] = { 0, 0, 1, 1, 0, 0, 1, 1,
2, 2, 3, 3, 2, 2, 3, 3 };
const int xblock[4] = {0, 4, 0, 4};
const int yblock[4] = {0, 0, 4, 4};
// Figure out the pixel to render for this program instance
const int xo = x + xblock[o/16] + xoffsets[o&15];
const int yo = y + yblock[o/16] + yoffsets[o&15];
// Use viewing parameters to compute the corresponding ray
// for the pixel
Ray ray;
generateRay(raster2camera, camera2world, xo, yo, ray);
// And raymarch through the volume to compute the pixel's
// value
int offset = yo * width + xo;
if (xo < x1 && yo < y1)
image[offset] = raymarch(density, nVoxels, ray);
}
}
}
}
__global__ void
volume_task(float density[], int _nVoxels[3],
const float _raster2camera[4][4],
const float _camera2world[4][4],
int width, int height, float image[]) {
if (taskIndex0 >= taskCount0) return;
#if 0
int nVoxels[3];
nVoxels[0] = _nVoxels[0];
nVoxels[1] = _nVoxels[1];
nVoxels[2] = _nVoxels[2];
float raster2camera[4][4];
raster2camera[0][0] = _raster2camera[0][0];
raster2camera[0][1] = _raster2camera[0][1];
raster2camera[0][2] = _raster2camera[0][2];
raster2camera[0][3] = _raster2camera[0][3];
raster2camera[1][0] = _raster2camera[1][0];
raster2camera[1][1] = _raster2camera[1][1];
raster2camera[1][2] = _raster2camera[1][2];
raster2camera[1][3] = _raster2camera[1][3];
raster2camera[2][0] = _raster2camera[2][0];
raster2camera[2][1] = _raster2camera[2][1];
raster2camera[2][2] = _raster2camera[2][2];
raster2camera[2][3] = _raster2camera[2][3];
raster2camera[3][0] = _raster2camera[3][0];
raster2camera[3][1] = _raster2camera[3][1];
raster2camera[3][2] = _raster2camera[3][2];
raster2camera[3][3] = _raster2camera[3][3];
float camera2world[4][4];
camera2world[0][0] = _camera2world[0][0];
camera2world[0][1] = _camera2world[0][1];
camera2world[0][2] = _camera2world[0][2];
camera2world[0][3] = _camera2world[0][3];
camera2world[1][0] = _camera2world[1][0];
camera2world[1][1] = _camera2world[1][1];
camera2world[1][2] = _camera2world[1][2];
camera2world[1][3] = _camera2world[1][3];
camera2world[2][0] = _camera2world[2][0];
camera2world[2][1] = _camera2world[2][1];
camera2world[2][2] = _camera2world[2][2];
camera2world[2][3] = _camera2world[2][3];
camera2world[3][0] = _camera2world[3][0];
camera2world[3][1] = _camera2world[3][1];
camera2world[3][2] = _camera2world[3][2];
camera2world[3][3] = _camera2world[3][3];
#else
#define nVoxels _nVoxels
#define raster2camera _raster2camera
#define camera2world _camera2world
#endif
int dx = 8, dy = 8; // must match value in volume_ispc_tasks
int xbuckets = (width + (dx-1)) / dx;
int ybuckets = (height + (dy-1)) / dy;
int x0 = (taskIndex % xbuckets) * dx;
int y0 = (taskIndex / xbuckets) * dy;
int x1 = x0 + dx, y1 = y0 + dy;
x1 = min(x1, width);
y1 = min(y1, height);
volume_tile(x0, y0, x1, y1, density, nVoxels, raster2camera,
camera2world, width, height, image);
}
extern "C"
__global__ void
volume_ispc_tasks___export( float density[], int nVoxels[3],
const float raster2camera[4][4],
const float camera2world[4][4],
int width, int height, float image[]) {
// Launch tasks to work on (dx,dy)-sized tiles of the image
int dx = 8, dy = 8;
int nTasks = ((width+(dx-1))/dx) * ((height+(dy-1))/dy);
launch(nTasks,1,1,volume_task)
(density, nVoxels, raster2camera, camera2world,
width, height, image);
cudaDeviceSynchronize();
}
extern "C"
__host__ void
volume_ispc_tasks( float density[], int nVoxels[3],
const float raster2camera[4][4],
const float camera2world[4][4],
int width, int height, float image[]) {
volume_ispc_tasks___export<<<1,32>>>(density, nVoxels, raster2camera, camera2world, width, height,image);
cudaDeviceSynchronize();
}
|
8c9d6bd76652be495f62f348c74410ef27aad38a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
int main() {
int count = 0;
if (hipGetDeviceCount(&count) != hipSuccess) return -1;
if (count == 0) return -1;
for (int device = 0; device < count; ++device) {
hipDeviceProp_t prop;
if (hipGetDeviceProperties(&prop, device) == hipSuccess) {
printf("%d%d;", prop.major, prop.minor);
}
}
return 0;
}
| 8c9d6bd76652be495f62f348c74410ef27aad38a.cu | #include <stdio.h>
int main() {
int count = 0;
if (cudaGetDeviceCount(&count) != cudaSuccess) return -1;
if (count == 0) return -1;
for (int device = 0; device < count; ++device) {
cudaDeviceProp prop;
if (cudaGetDeviceProperties(&prop, device) == cudaSuccess) {
printf("%d%d;", prop.major, prop.minor);
}
}
return 0;
}
|
4e42d11b9f9e7658f853a886d4fef9dda0871fda.hip | // !!! This is a file automatically generated by hipify!!!
extern "C"
{
#include "completion.h"
#include "ciss.h"
#include "base.h"
#include "matrixprocess.h"
#include <stdio.h>
#include <sys/time.h>
#include <stdlib.h>
}
#include "sgd.cuh"
#include <hip/hip_runtime.h>
//#include "loss.h"
//the gpu kernel
__global__ void p_update_sgd_gpu(ciss_t * d_traina,
ordi_matrix * d_factora,
ordi_matrix * d_factorb,
ordi_matrix * d_factorc,
double learning_rate,
double regularization_index,
idx_t tilenum)
{
//get thread and block index
idx_t bid = blockIdx.x;
idx_t tid = threadIdx.x;
idx_t tileid = bid * DEFAULT_BLOCKSIZE + tid;
double * entries = d_traina->entries;
idx_t localtile = tileid*((DEFAULT_T_TILE_LENGTH + 1) * DEFAULT_T_TILE_WIDTH);
//buffer for matrices
double __align__(256) mabuffer[DEFAULT_NFACTORS];
double __align__(256) mbbuffer[DEFAULT_NFACTORS];
double __align__(256) mcbuffer[DEFAULT_NFACTORS];
double __align__(256) localtbuffer[6];
idx_t a,b,c, localcounter;
double localvalue;
if(tileid < tilenum)
{
//get the indices and value
idx_t f_id = (idx_t)(entries[localtile] * (-1));
idx_t l_id = (idx_t)(entries[localtile+1] * (-1));
idx_t bitmap = (idx_t)(entries[localtile+2]);
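// __brevll reverses the 64-bit bitmap so it can be scanned from its lowest bit;
// the leading zeros and the first set bit are discarded here, and in the loop
// below every cleared bit advances f_id, the index into d_traina->directory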
bitmap = __brevll(bitmap);
while((bitmap & 1) == 0) {bitmap = (bitmap >> 1);}
bitmap = (bitmap >> 1);
localtile += DEFAULT_T_TILE_WIDTH;
for(idx_t j = 0; j < DEFAULT_T_TILE_LENGTH/2; j++)
{
//unroll loop and load
localtbuffer[0] = entries[localtile];
localtbuffer[1] = entries[localtile + 1];
localtbuffer[2] = entries[localtile + 2];
if(localtbuffer[0] == -1 && localtbuffer[1] == -1) break;
//for the first
f_id += (!(bitmap & 1));
bitmap = bitmap >> 1;
a = d_traina->directory[f_id] - 1;
localcounter = d_traina->dcounter[f_id + 1] - d_traina->dcounter[f_id];
b = (idx_t)localtbuffer[0] - 1;
c = (idx_t)localtbuffer[1] - 1;
localvalue = localtbuffer[2];
#ifdef SGD_DEBUG
printf("now a b c in tile %ld are %ld %ld %ld\n", tileid, a, b, c);
#endif
//if(localtbuffer[0] == -1 && localtbuffer[1] == -1) break;
for(idx_t i = 0; i< DEFAULT_NFACTORS; i++)
{
//((double2*)mabuffer)[i] = ((double2*)d_factora->values)[a * DEFAULT_NFACTORS/2 + i];
//((double2*)mbbuffer)[i] = ((double2*)d_factorb->values)[b * DEFAULT_NFACTORS/2 + i];
//((double2*)mcbuffer)[i] = ((double2*)d_factorc->values)[c * DEFAULT_NFACTORS/2 + i];
mabuffer[i] = (d_factora->values)[a * DEFAULT_NFACTORS + i];
mbbuffer[i] = (d_factorb->values)[b * DEFAULT_NFACTORS + i];
mcbuffer[i] = (d_factorc->values)[c * DEFAULT_NFACTORS + i];
}
/* predict value */
double predicted = 0;
for(idx_t f=0; f < DEFAULT_NFACTORS; f++) {
predicted += mabuffer[f] * mbbuffer[f] * mcbuffer[f];
}
predicted = localvalue - predicted;
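/* 'predicted' now holds the residual e = value - <a,b,c>; each factor row below
moves by learning_rate * (e * (elementwise product of the other two rows)
- regularization_index * itself), scaled by the SGD_MODIFICATION* constants */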
/* update rows */
for(idx_t f=0; f < DEFAULT_NFACTORS; f++) {
double moda = (predicted * mbbuffer[f] * mcbuffer[f]) - (regularization_index * mabuffer[f]);
double modb = (predicted * mabuffer[f] * mcbuffer[f]) - (regularization_index * mbbuffer[f]);
double modc = (predicted * mbbuffer[f] * mabuffer[f]) - (regularization_index * mcbuffer[f]);
atomicAdd(&(d_factora->values[a * DEFAULT_NFACTORS + f]), learning_rate*moda * (double)SGD_MODIFICATIONA);
atomicAdd(&(d_factorb->values[b * DEFAULT_NFACTORS + f]), learning_rate*modb * (double)SGD_MODIFICATIONB);
atomicAdd(&(d_factorc->values[c * DEFAULT_NFACTORS + f]), learning_rate*modc * (double)SGD_MODIFICATIONC);
}
//for the second
localtbuffer[3] = entries[localtile + 3];
localtbuffer[4] = entries[localtile + 4];
localtbuffer[5] = entries[localtile + 5];
f_id += (!(bitmap & 1));
bitmap = bitmap >> 1;
a = d_traina->directory[f_id] - 1;
localcounter = d_traina->dcounter[f_id + 1] - d_traina->dcounter[f_id];
b = (idx_t)localtbuffer[3] - 1;
c = (idx_t)localtbuffer[4] - 1;
#ifdef SGD_DEBUG
printf("now a b c in tile %ld are %ld %ld %ld\n", tileid, a, b, c);
#endif
localvalue = localtbuffer[5];
if(localtbuffer[3] == -1 && localtbuffer[4] == -1) break;
for(idx_t i = 0; i< DEFAULT_NFACTORS; i++)
{
mabuffer[i] = (d_factora->values)[a * DEFAULT_NFACTORS + i];
mbbuffer[i] = (d_factorb->values)[b * DEFAULT_NFACTORS + i];
mcbuffer[i] = (d_factorc->values)[c * DEFAULT_NFACTORS + i];
}
/* predict value */
predicted = 0;
for(idx_t f=0; f < DEFAULT_NFACTORS; f++) {
predicted += mabuffer[f] * mbbuffer[f] * mcbuffer[f];
}
predicted = localvalue - predicted;
/* update rows */
for(idx_t f=0; f < DEFAULT_NFACTORS; f++) {
double moda = (predicted * mbbuffer[f] * mcbuffer[f]) - (regularization_index * mabuffer[f]);
double modb = (predicted * mabuffer[f] * mcbuffer[f]) - (regularization_index * mbbuffer[f]);
double modc = (predicted * mbbuffer[f] * mabuffer[f]) - (regularization_index * mcbuffer[f]);
atomicAdd(&(d_factora->values[a * DEFAULT_NFACTORS + f]), learning_rate*moda * (double)SGD_MODIFICATIONA);
        atomicAdd(&(d_factorb->values[b * DEFAULT_NFACTORS + f]), learning_rate*modb * (double)SGD_MODIFICATIONB);
        atomicAdd(&(d_factorc->values[c * DEFAULT_NFACTORS + f]), learning_rate*modc * (double)SGD_MODIFICATIONC);
}
localtile += 2 * DEFAULT_T_TILE_WIDTH;
}
}
}
/**
* @brief The main function for tensor completion in sgd
* @param train The tensor for generating factor matrices
* @param validation The tensor for validation(RMSE)
* @param test The tensor for testing the quality
* @param regularization_index Lambda
*/
extern "C"{
void tc_sgd(sptensor_t * traina,
sptensor_t * trainb,
sptensor_t * trainc,
sptensor_t * validation,
sptensor_t * test,
ordi_matrix ** mats,
ordi_matrix ** best_mats,
int algorithm_index,
double regularization_index,
double learning_rate,
double * best_rmse,
double * tolerance,
idx_t * nbadepochs,
idx_t * bestepochs,
idx_t * max_badepochs)
{
//only in sgd
idx_t steps_size = 1000;
idx_t nmodes = traina->nmodes;
//initialize the devices
int deviceCount;
hipGetDeviceCount(&deviceCount);
hipSetDevice(0);
//prepare the tensor in TB-COO
ciss_t * h_traina = ciss_alloc(traina, 1);
#ifdef CISS_DEBUG
ciss_display(h_traina);
#endif
//ciss_t * h_trainb = ciss_alloc(train, 1);
//ciss_t * h_trainc = ciss_alloc(train, 2);
struct timeval start;
struct timeval end;
idx_t diff;
//malloc and copy the tensors + matrices to gpu
ciss_t * d_traina;
idx_t * d_directory_a, * d_counter_a;
idx_t * d_dims_a;
idx_t * d_itemp1, *d_itemp2, *d_itemp3;
double * d_entries_a;
double * d_ftemp;
//copy tensor for mode-1
HANDLE_ERROR(hipMalloc((void**)&d_traina, sizeof(ciss_t)));
HANDLE_ERROR(hipMalloc((void**)&d_directory_a, h_traina->dlength * sizeof(idx_t)));
HANDLE_ERROR(hipMalloc((void**)&(d_counter_a), (h_traina->dlength + 1) * sizeof(idx_t)));
HANDLE_ERROR(hipMalloc((void**)&d_entries_a, h_traina->size * DEFAULT_T_TILE_WIDTH * sizeof(double)));
HANDLE_ERROR(hipMalloc((void**)&d_dims_a, nmodes * sizeof(idx_t)));
HANDLE_ERROR(hipMemcpy(d_directory_a, h_traina->directory, h_traina->dlength*sizeof(idx_t), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_counter_a, h_traina->dcounter, (h_traina->dlength + 1)*sizeof(idx_t), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_entries_a, h_traina->entries, h_traina->size * DEFAULT_T_TILE_WIDTH * sizeof(double), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_dims_a, h_traina->dims, nmodes*sizeof(idx_t), hipMemcpyHostToDevice));
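  // Pointer-swap idiom: the host struct's array pointers are temporarily replaced
  // with the device buffers so the copy into d_traina below carries device
  // addresses; the original host pointers are stashed in d_itemp1-3 / d_ftemp and
  // restored immediately afterwards.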
d_itemp1 = h_traina->directory;
d_itemp2 = h_traina->dims;
d_itemp3 = h_traina->dcounter;
d_ftemp = h_traina->entries;
h_traina->directory = d_directory_a;
h_traina->dcounter = d_counter_a;
h_traina->dims = d_dims_a;
h_traina->entries = d_entries_a;
HANDLE_ERROR(hipMemcpy(d_traina, h_traina, sizeof(ciss_t), hipMemcpyHostToDevice));
h_traina->directory = d_itemp1;
h_traina->dims = d_itemp2;
h_traina->dcounter = d_itemp3;
h_traina->entries = d_ftemp;
//buffer for HTH
//idx_t maxdlength = SS_MAX(SS_MAX(h_traina->dlength, h_trainb->dlength),h_trainc->dlength);
//double * h_hbuffer = (double *)malloc(DEFAULT_NFACTORS * DEFAULT_NFACTORS * maxdlength * sizeof(double));
//double * h_invbuffer = (double *)malloc(DEFAULT_NFACTORS * DEFAULT_NFACTORS * maxdlength * sizeof(double));
//HANDLE_ERROR(hipMalloc((void**)&d_hbuffer, DEFAULT_NFACTORS * DEFAULT_NFACTORS * maxdlength * sizeof(double)));
//double* d_invbuffer; //for inverse
//HANDLE_ERROR(hipMalloc((void**)&d_invbuffer, DEFAULT_NFACTORS * DEFAULT_NFACTORS * maxdlength * sizeof(double)));
//copy the factor matrices
ordi_matrix * d_factora, * d_factorb, * d_factorc;
double * d_value_a, * d_value_b, * d_value_c;
HANDLE_ERROR(hipMalloc((void**)&d_factora, sizeof(ordi_matrix)));
HANDLE_ERROR(hipMalloc((void**)&d_value_a, mats[0]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(hipMemcpy(d_value_a, mats[0]->values, mats[0]->I * DEFAULT_NFACTORS * sizeof(double), hipMemcpyHostToDevice));
d_ftemp = mats[0]->values;
mats[0]->values = d_value_a;
HANDLE_ERROR(hipMemcpy(d_factora, mats[0], sizeof(ordi_matrix), hipMemcpyHostToDevice));
mats[0]->values = d_ftemp;
HANDLE_ERROR(hipMalloc((void**)&d_factorb, sizeof(ordi_matrix)));
HANDLE_ERROR(hipMalloc((void**)&d_value_b, mats[1]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(hipMemcpy(d_value_b, mats[1]->values, mats[1]->I * DEFAULT_NFACTORS * sizeof(double), hipMemcpyHostToDevice));
d_ftemp = mats[1]->values;
mats[1]->values = d_value_b;
HANDLE_ERROR(hipMemcpy(d_factorb, mats[1], sizeof(ordi_matrix), hipMemcpyHostToDevice));
mats[1]->values = d_ftemp;
HANDLE_ERROR(hipMalloc((void**)&d_factorc, sizeof(ordi_matrix)));
HANDLE_ERROR(hipMalloc((void**)&d_value_c, mats[2]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(hipMemcpy(d_value_c, mats[2]->values, mats[2]->I * DEFAULT_NFACTORS * sizeof(double), hipMemcpyHostToDevice));
d_ftemp = mats[2]->values;
mats[2]->values = d_value_c;
HANDLE_ERROR(hipMemcpy(d_factorc, mats[2], sizeof(ordi_matrix), hipMemcpyHostToDevice));
mats[2]->values = d_ftemp;
#ifdef CUDA_LOSS
//to be done
#else
double loss = tc_loss_sq(traina, mats, algorithm_index);
double frobsq = tc_frob_sq(nmodes, regularization_index, mats);
tc_converge(traina, validation, mats, best_mats, algorithm_index, loss, frobsq, 0, nmodes, best_rmse, tolerance, nbadepochs, bestepochs, max_badepochs);
#endif
/* for bold driver */
double obj = loss + frobsq;
double prev_obj = obj;
//step into the kernel
idx_t nnz = traina->nnz;
idx_t tilenum = nnz/DEFAULT_T_TILE_LENGTH + 1;
idx_t blocknum_m = tilenum/DEFAULT_BLOCKSIZE + 1;
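  // Launch geometry: one thread per TB-COO tile, DEFAULT_BLOCKSIZE threads per
  // block, so blocknum_m blocks cover all tilenum tiles.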
#ifdef SGD_DEBUG
  printf("nnz %ld tilenum %ld\n", nnz, tilenum);
#endif
/* foreach epoch */
for(idx_t e=1; e < DEFAULT_MAX_ITERATE; ++e) {
/* update model from all training observations */
gettimeofday(&start,NULL);
HANDLE_ERROR(hipMemcpy(d_value_a, mats[0]->values, mats[0]->I * DEFAULT_NFACTORS * sizeof(double), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_value_b, mats[1]->values, mats[1]->I * DEFAULT_NFACTORS * sizeof(double), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_value_c, mats[2]->values, mats[2]->I * DEFAULT_NFACTORS * sizeof(double), hipMemcpyHostToDevice));
HANDLE_ERROR(hipDeviceSynchronize());
hipLaunchKernelGGL(( p_update_sgd_gpu), dim3(blocknum_m), dim3(DEFAULT_BLOCKSIZE), 0, 0, d_traina, d_factora, d_factorb, d_factorc, learning_rate, regularization_index, tilenum);
HANDLE_ERROR(hipDeviceSynchronize());
gettimeofday(&end,NULL);
diff = 1000000*(end.tv_sec-start.tv_sec) + end.tv_usec - start.tv_usec;
printf("this time cost %ld\n",diff);
HANDLE_ERROR(hipMemcpy(mats[0]->values, d_value_a, mats[0]->I * DEFAULT_NFACTORS * sizeof(double), hipMemcpyDeviceToHost));
HANDLE_ERROR(hipMemcpy(mats[1]->values, d_value_b, mats[1]->I * DEFAULT_NFACTORS * sizeof(double), hipMemcpyDeviceToHost));
HANDLE_ERROR(hipMemcpy(mats[2]->values, d_value_c, mats[2]->I * DEFAULT_NFACTORS * sizeof(double), hipMemcpyDeviceToHost));
HANDLE_ERROR(hipDeviceSynchronize());
#ifdef SGD_DEBUG
printf("start display matrices\n");
matrix_display(mats[0]);
matrix_display(mats[1]);
matrix_display(mats[2]);
#endif
gettimeofday(&end,NULL);
diff = 1000000*(end.tv_sec-start.tv_sec) + end.tv_usec - start.tv_usec;
printf("this time cost %ld\n",diff);
/* compute RMSE and adjust learning rate */
loss = tc_loss_sq(traina, mats, algorithm_index);
frobsq = tc_frob_sq(nmodes, regularization_index, mats);
obj = loss + frobsq;
if(tc_converge(traina, validation, mats, best_mats, algorithm_index, loss, frobsq, e, nmodes, best_rmse, tolerance, nbadepochs, bestepochs, max_badepochs)) {
break;
}
/* bold driver */
if(e > 1) {
if(obj < prev_obj) {
learning_rate *= 1.05;
} else {
learning_rate *= 0.50;
}
}
prev_obj = obj;
}
//free the cudabuffer
hipFree(d_directory_a);
hipFree(d_dims_a);
hipFree(d_entries_a);
//hipFree(d_hbuffer);
hipFree(d_value_a);
hipFree(d_value_b);
hipFree(d_value_c);
hipFree(d_traina);
hipFree(d_factora);
hipFree(d_factorb);
hipFree(d_factorc);
ciss_free(h_traina);
hipDeviceReset();
}
} | 4e42d11b9f9e7658f853a886d4fef9dda0871fda.cu |
extern "C"
{
#include "completion.h"
#include "ciss.h"
#include "base.h"
#include "matrixprocess.h"
#include <stdio.h>
#include <sys/time.h>
#include <stdlib.h>
}
#include "sgd.cuh"
#include <cuda.h>
#include <cuda_runtime.h>
//#include "loss.h"
//the gpu kernel
__global__ void p_update_sgd_gpu(ciss_t * d_traina,
ordi_matrix * d_factora,
ordi_matrix * d_factorb,
ordi_matrix * d_factorc,
double learning_rate,
double regularization_index,
idx_t tilenum)
{
//get thread and block index
idx_t bid = blockIdx.x;
idx_t tid = threadIdx.x;
idx_t tileid = bid * DEFAULT_BLOCKSIZE + tid;
double * entries = d_traina->entries;
idx_t localtile = tileid*((DEFAULT_T_TILE_LENGTH + 1) * DEFAULT_T_TILE_WIDTH);
//buffer for matrices
double __align__(256) mabuffer[DEFAULT_NFACTORS];
double __align__(256) mbbuffer[DEFAULT_NFACTORS];
double __align__(256) mcbuffer[DEFAULT_NFACTORS];
double __align__(256) localtbuffer[6];
idx_t a,b,c, localcounter;
double localvalue;
if(tileid < tilenum)
{
//get the indices and value
idx_t f_id = (idx_t)(entries[localtile] * (-1));
idx_t l_id = (idx_t)(entries[localtile+1] * (-1));
idx_t bitmap = (idx_t)(entries[localtile+2]);
bitmap = __brevll(bitmap);
while((bitmap & 1) == 0) {bitmap = (bitmap >> 1);}
bitmap = (bitmap >> 1);
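    // Tile header: the first two entries appear to hold a fiber id (f_id) and a
    // slice id (l_id) stored negated, and the third packs a bitmap. __brevll()
    // reverses the bit order and the loop above strips the leading sentinel bit;
    // below, each consumed 0 bit advances f_id to the next fiber while a 1 bit
    // keeps the current one.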
localtile += DEFAULT_T_TILE_WIDTH;
for(idx_t j = 0; j < DEFAULT_T_TILE_LENGTH/2; j++)
{
//unroll loop and load
localtbuffer[0] = entries[localtile];
localtbuffer[1] = entries[localtile + 1];
localtbuffer[2] = entries[localtile + 2];
if(localtbuffer[0] == -1 && localtbuffer[1] == -1) break;
//for the first
f_id += (!(bitmap & 1));
bitmap = bitmap >> 1;
a = d_traina->directory[f_id] - 1;
localcounter = d_traina->dcounter[f_id + 1] - d_traina->dcounter[f_id];
b = (idx_t)localtbuffer[0] - 1;
c = (idx_t)localtbuffer[1] - 1;
localvalue = localtbuffer[2];
#ifdef SGD_DEBUG
printf("now a b c in tile %ld are %ld %ld %ld\n", tileid, a, b, c);
#endif
//if(localtbuffer[0] == -1 && localtbuffer[1] == -1) break;
for(idx_t i = 0; i< DEFAULT_NFACTORS; i++)
{
//((double2*)mabuffer)[i] = ((double2*)d_factora->values)[a * DEFAULT_NFACTORS/2 + i];
//((double2*)mbbuffer)[i] = ((double2*)d_factorb->values)[b * DEFAULT_NFACTORS/2 + i];
//((double2*)mcbuffer)[i] = ((double2*)d_factorc->values)[c * DEFAULT_NFACTORS/2 + i];
mabuffer[i] = (d_factora->values)[a * DEFAULT_NFACTORS + i];
mbbuffer[i] = (d_factorb->values)[b * DEFAULT_NFACTORS + i];
mcbuffer[i] = (d_factorc->values)[c * DEFAULT_NFACTORS + i];
}
/* predict value */
double predicted = 0;
for(idx_t f=0; f < DEFAULT_NFACTORS; f++) {
predicted += mabuffer[f] * mbbuffer[f] * mcbuffer[f];
}
predicted = localvalue - predicted;
/* update rows */
for(idx_t f=0; f < DEFAULT_NFACTORS; f++) {
double moda = (predicted * mbbuffer[f] * mcbuffer[f]) - (regularization_index * mabuffer[f]);
double modb = (predicted * mabuffer[f] * mcbuffer[f]) - (regularization_index * mbbuffer[f]);
double modc = (predicted * mbbuffer[f] * mabuffer[f]) - (regularization_index * mcbuffer[f]);
atomicAdd(&(d_factora->values[a * DEFAULT_NFACTORS + f]), learning_rate*moda * (double)SGD_MODIFICATIONA);
atomicAdd(&(d_factorb->values[b * DEFAULT_NFACTORS + f]), learning_rate*modb * (double)SGD_MODIFICATIONB);
atomicAdd(&(d_factorc->values[c * DEFAULT_NFACTORS + f]), learning_rate*modc * (double)SGD_MODIFICATIONC);
}
//for the second
localtbuffer[3] = entries[localtile + 3];
localtbuffer[4] = entries[localtile + 4];
localtbuffer[5] = entries[localtile + 5];
f_id += (!(bitmap & 1));
bitmap = bitmap >> 1;
a = d_traina->directory[f_id] - 1;
localcounter = d_traina->dcounter[f_id + 1] - d_traina->dcounter[f_id];
b = (idx_t)localtbuffer[3] - 1;
c = (idx_t)localtbuffer[4] - 1;
#ifdef SGD_DEBUG
printf("now a b c in tile %ld are %ld %ld %ld\n", tileid, a, b, c);
#endif
localvalue = localtbuffer[5];
if(localtbuffer[3] == -1 && localtbuffer[4] == -1) break;
for(idx_t i = 0; i< DEFAULT_NFACTORS; i++)
{
mabuffer[i] = (d_factora->values)[a * DEFAULT_NFACTORS + i];
mbbuffer[i] = (d_factorb->values)[b * DEFAULT_NFACTORS + i];
mcbuffer[i] = (d_factorc->values)[c * DEFAULT_NFACTORS + i];
}
/* predict value */
predicted = 0;
for(idx_t f=0; f < DEFAULT_NFACTORS; f++) {
predicted += mabuffer[f] * mbbuffer[f] * mcbuffer[f];
}
predicted = localvalue - predicted;
/* update rows */
for(idx_t f=0; f < DEFAULT_NFACTORS; f++) {
double moda = (predicted * mbbuffer[f] * mcbuffer[f]) - (regularization_index * mabuffer[f]);
double modb = (predicted * mabuffer[f] * mcbuffer[f]) - (regularization_index * mbbuffer[f]);
double modc = (predicted * mbbuffer[f] * mabuffer[f]) - (regularization_index * mcbuffer[f]);
atomicAdd(&(d_factora->values[a * DEFAULT_NFACTORS + f]), learning_rate*moda * (double)SGD_MODIFICATIONA);
        atomicAdd(&(d_factorb->values[b * DEFAULT_NFACTORS + f]), learning_rate*modb * (double)SGD_MODIFICATIONB);
        atomicAdd(&(d_factorc->values[c * DEFAULT_NFACTORS + f]), learning_rate*modc * (double)SGD_MODIFICATIONC);
}
localtile += 2 * DEFAULT_T_TILE_WIDTH;
}
}
}
/**
* @brief The main function for tensor completion in sgd
* @param train The tensor for generating factor matrices
* @param validation The tensor for validation(RMSE)
* @param test The tensor for testing the quality
* @param regularization_index Lambda
*/
extern "C"{
void tc_sgd(sptensor_t * traina,
sptensor_t * trainb,
sptensor_t * trainc,
sptensor_t * validation,
sptensor_t * test,
ordi_matrix ** mats,
ordi_matrix ** best_mats,
int algorithm_index,
double regularization_index,
double learning_rate,
double * best_rmse,
double * tolerance,
idx_t * nbadepochs,
idx_t * bestepochs,
idx_t * max_badepochs)
{
//only in sgd
idx_t steps_size = 1000;
idx_t nmodes = traina->nmodes;
//initialize the devices
int deviceCount;
cudaGetDeviceCount(&deviceCount);
cudaSetDevice(0);
//prepare the tensor in TB-COO
ciss_t * h_traina = ciss_alloc(traina, 1);
#ifdef CISS_DEBUG
ciss_display(h_traina);
#endif
//ciss_t * h_trainb = ciss_alloc(train, 1);
//ciss_t * h_trainc = ciss_alloc(train, 2);
struct timeval start;
struct timeval end;
idx_t diff;
//malloc and copy the tensors + matrices to gpu
ciss_t * d_traina;
idx_t * d_directory_a, * d_counter_a;
idx_t * d_dims_a;
idx_t * d_itemp1, *d_itemp2, *d_itemp3;
double * d_entries_a;
double * d_ftemp;
//copy tensor for mode-1
HANDLE_ERROR(cudaMalloc((void**)&d_traina, sizeof(ciss_t)));
HANDLE_ERROR(cudaMalloc((void**)&d_directory_a, h_traina->dlength * sizeof(idx_t)));
HANDLE_ERROR(cudaMalloc((void**)&(d_counter_a), (h_traina->dlength + 1) * sizeof(idx_t)));
HANDLE_ERROR(cudaMalloc((void**)&d_entries_a, h_traina->size * DEFAULT_T_TILE_WIDTH * sizeof(double)));
HANDLE_ERROR(cudaMalloc((void**)&d_dims_a, nmodes * sizeof(idx_t)));
HANDLE_ERROR(cudaMemcpy(d_directory_a, h_traina->directory, h_traina->dlength*sizeof(idx_t), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_counter_a, h_traina->dcounter, (h_traina->dlength + 1)*sizeof(idx_t), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_entries_a, h_traina->entries, h_traina->size * DEFAULT_T_TILE_WIDTH * sizeof(double), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_dims_a, h_traina->dims, nmodes*sizeof(idx_t), cudaMemcpyHostToDevice));
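  // Pointer-swap idiom: the host struct's array pointers are temporarily replaced
  // with the device buffers so the copy into d_traina below carries device
  // addresses; the original host pointers are stashed in d_itemp1-3 / d_ftemp and
  // restored immediately afterwards.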
d_itemp1 = h_traina->directory;
d_itemp2 = h_traina->dims;
d_itemp3 = h_traina->dcounter;
d_ftemp = h_traina->entries;
h_traina->directory = d_directory_a;
h_traina->dcounter = d_counter_a;
h_traina->dims = d_dims_a;
h_traina->entries = d_entries_a;
HANDLE_ERROR(cudaMemcpy(d_traina, h_traina, sizeof(ciss_t), cudaMemcpyHostToDevice));
h_traina->directory = d_itemp1;
h_traina->dims = d_itemp2;
h_traina->dcounter = d_itemp3;
h_traina->entries = d_ftemp;
//buffer for HTH
//idx_t maxdlength = SS_MAX(SS_MAX(h_traina->dlength, h_trainb->dlength),h_trainc->dlength);
//double * h_hbuffer = (double *)malloc(DEFAULT_NFACTORS * DEFAULT_NFACTORS * maxdlength * sizeof(double));
//double * h_invbuffer = (double *)malloc(DEFAULT_NFACTORS * DEFAULT_NFACTORS * maxdlength * sizeof(double));
//HANDLE_ERROR(cudaMalloc((void**)&d_hbuffer, DEFAULT_NFACTORS * DEFAULT_NFACTORS * maxdlength * sizeof(double)));
//double* d_invbuffer; //for inverse
//HANDLE_ERROR(cudaMalloc((void**)&d_invbuffer, DEFAULT_NFACTORS * DEFAULT_NFACTORS * maxdlength * sizeof(double)));
//copy the factor matrices
ordi_matrix * d_factora, * d_factorb, * d_factorc;
double * d_value_a, * d_value_b, * d_value_c;
HANDLE_ERROR(cudaMalloc((void**)&d_factora, sizeof(ordi_matrix)));
HANDLE_ERROR(cudaMalloc((void**)&d_value_a, mats[0]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(cudaMemcpy(d_value_a, mats[0]->values, mats[0]->I * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyHostToDevice));
d_ftemp = mats[0]->values;
mats[0]->values = d_value_a;
HANDLE_ERROR(cudaMemcpy(d_factora, mats[0], sizeof(ordi_matrix), cudaMemcpyHostToDevice));
mats[0]->values = d_ftemp;
HANDLE_ERROR(cudaMalloc((void**)&d_factorb, sizeof(ordi_matrix)));
HANDLE_ERROR(cudaMalloc((void**)&d_value_b, mats[1]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(cudaMemcpy(d_value_b, mats[1]->values, mats[1]->I * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyHostToDevice));
d_ftemp = mats[1]->values;
mats[1]->values = d_value_b;
HANDLE_ERROR(cudaMemcpy(d_factorb, mats[1], sizeof(ordi_matrix), cudaMemcpyHostToDevice));
mats[1]->values = d_ftemp;
HANDLE_ERROR(cudaMalloc((void**)&d_factorc, sizeof(ordi_matrix)));
HANDLE_ERROR(cudaMalloc((void**)&d_value_c, mats[2]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(cudaMemcpy(d_value_c, mats[2]->values, mats[2]->I * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyHostToDevice));
d_ftemp = mats[2]->values;
mats[2]->values = d_value_c;
HANDLE_ERROR(cudaMemcpy(d_factorc, mats[2], sizeof(ordi_matrix), cudaMemcpyHostToDevice));
mats[2]->values = d_ftemp;
#ifdef CUDA_LOSS
//to be done
#else
double loss = tc_loss_sq(traina, mats, algorithm_index);
double frobsq = tc_frob_sq(nmodes, regularization_index, mats);
tc_converge(traina, validation, mats, best_mats, algorithm_index, loss, frobsq, 0, nmodes, best_rmse, tolerance, nbadepochs, bestepochs, max_badepochs);
#endif
/* for bold driver */
double obj = loss + frobsq;
double prev_obj = obj;
//step into the kernel
idx_t nnz = traina->nnz;
idx_t tilenum = nnz/DEFAULT_T_TILE_LENGTH + 1;
idx_t blocknum_m = tilenum/DEFAULT_BLOCKSIZE + 1;
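  // Launch geometry: one thread per TB-COO tile, DEFAULT_BLOCKSIZE threads per
  // block, so blocknum_m blocks cover all tilenum tiles.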
#ifdef SGD_DEBUG
  printf("nnz %ld tilenum %ld\n", nnz, tilenum);
#endif
/* foreach epoch */
for(idx_t e=1; e < DEFAULT_MAX_ITERATE; ++e) {
/* update model from all training observations */
gettimeofday(&start,NULL);
HANDLE_ERROR(cudaMemcpy(d_value_a, mats[0]->values, mats[0]->I * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_value_b, mats[1]->values, mats[1]->I * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_value_c, mats[2]->values, mats[2]->I * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaDeviceSynchronize());
p_update_sgd_gpu<<<blocknum_m, DEFAULT_BLOCKSIZE, 0>>>(d_traina, d_factora, d_factorb, d_factorc, learning_rate, regularization_index, tilenum);
HANDLE_ERROR(cudaDeviceSynchronize());
gettimeofday(&end,NULL);
diff = 1000000*(end.tv_sec-start.tv_sec) + end.tv_usec - start.tv_usec;
printf("this time cost %ld\n",diff);
HANDLE_ERROR(cudaMemcpy(mats[0]->values, d_value_a, mats[0]->I * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(mats[1]->values, d_value_b, mats[1]->I * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(mats[2]->values, d_value_c, mats[2]->I * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaDeviceSynchronize());
#ifdef SGD_DEBUG
printf("start display matrices\n");
matrix_display(mats[0]);
matrix_display(mats[1]);
matrix_display(mats[2]);
#endif
gettimeofday(&end,NULL);
diff = 1000000*(end.tv_sec-start.tv_sec) + end.tv_usec - start.tv_usec;
printf("this time cost %ld\n",diff);
/* compute RMSE and adjust learning rate */
loss = tc_loss_sq(traina, mats, algorithm_index);
frobsq = tc_frob_sq(nmodes, regularization_index, mats);
obj = loss + frobsq;
if(tc_converge(traina, validation, mats, best_mats, algorithm_index, loss, frobsq, e, nmodes, best_rmse, tolerance, nbadepochs, bestepochs, max_badepochs)) {
break;
}
/* bold driver */
if(e > 1) {
if(obj < prev_obj) {
learning_rate *= 1.05;
} else {
learning_rate *= 0.50;
}
}
prev_obj = obj;
}
//free the cudabuffer
cudaFree(d_directory_a);
cudaFree(d_dims_a);
cudaFree(d_entries_a);
//cudaFree(d_hbuffer);
cudaFree(d_value_a);
cudaFree(d_value_b);
cudaFree(d_value_c);
cudaFree(d_traina);
cudaFree(d_factora);
cudaFree(d_factorb);
cudaFree(d_factorc);
ciss_free(h_traina);
cudaDeviceReset();
}
} |
1e7508df15d9575687f54e775186e320686b3cf1.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <functional>
#include <limits>
#include <memory>
#include <tests/utilities/base_fixture.hpp>
#include <tests/utilities/column_wrapper.hpp>
#include <tests/utilities/cudf_gtest.hpp>
#include <tests/utilities/table_utilities.hpp>
#include <tests/utilities/type_lists.hpp>
#include <cudf/copying.hpp>
#include <cudf/scalar/scalar.hpp>
#include <type_traits>
using cudf::test::fixed_width_column_wrapper;
using TestTypes = cudf::test::Types<int32_t>;
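// Helper factories: the zero-argument make_scalar() builds an invalid (null)
// scalar, used below as a null fill value, while make_scalar(value) builds a
// valid scalar holding the given value.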
template<typename T,
typename ScalarType = cudf::experimental::scalar_type_t<T>>
std::unique_ptr<cudf::scalar>
make_scalar(
hipStream_t stream = 0,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource()) {
auto s = new ScalarType(0, false, stream, mr);
return std::unique_ptr<cudf::scalar>(s);
}
template<typename T,
typename ScalarType = cudf::experimental::scalar_type_t<T>>
std::unique_ptr<cudf::scalar>
make_scalar(
T value,
hipStream_t stream = 0,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource()) {
auto s = new ScalarType(value, true, stream, mr);
return std::unique_ptr<cudf::scalar>(s);
}
template<typename T>
auto lowest = std::numeric_limits<T>::lowest();
template<typename T>
auto highest = std::numeric_limits<T>::max();
template <typename T>
struct ShiftTest : public cudf::test::BaseFixture {};
TYPED_TEST_CASE(ShiftTest, cudf::test::FixedWidthTypes);
TYPED_TEST(ShiftTest, OneColumnEmpty)
{
using T = TypeParam;
fixed_width_column_wrapper<T> a{};
cudf::table_view input{ { a } };
auto fill = make_scalar<T>();
std::vector<std::reference_wrapper<cudf::scalar>> fills{ *fill };
cudf::experimental::shift(input, 5, fills);
}
TYPED_TEST(ShiftTest, TwoColumnsEmpty)
{
using T = TypeParam;
fixed_width_column_wrapper<T> input_a({}, {});
fixed_width_column_wrapper<T> input_b{};
cudf::table_view input{ { input_a, input_b } };
fixed_width_column_wrapper<T> expected_a({}, {});
fixed_width_column_wrapper<T> expected_b{};
cudf::table_view expected{ { input_a, input_b } };
auto fill_a = make_scalar<T>();
auto fill_b = make_scalar<T>();
std::vector<std::reference_wrapper<cudf::scalar>> fills{ *fill_a, *fill_b };
auto actual = cudf::experimental::shift(input, 5, fills);
cudf::test::expect_tables_equal(expected, *actual);
}
TYPED_TEST(ShiftTest, OneColumn)
{
using T = TypeParam;
auto input_a = fixed_width_column_wrapper<T>{ lowest<T>, 1, 2, 3, 4, 5, highest<T> };
auto input = cudf::table_view { { input_a } };
auto expected_a = fixed_width_column_wrapper<T>{ 7, 7, lowest<T>, 1, 2, 3, 4 };
auto expected = cudf::table_view{ { expected_a } };
auto fill = make_scalar<T>(7);
std::vector<std::reference_wrapper<cudf::scalar>> fills { *fill };
auto actual = cudf::experimental::shift(input, 2, fills);
cudf::test::expect_tables_equal(expected, *actual);
}
TYPED_TEST(ShiftTest, OneColumnNegativeShift)
{
using T = TypeParam;
auto input_a = fixed_width_column_wrapper<T>{ lowest<T>, 1, 2, 3, 4, 5, highest<T> };
auto input = cudf::table_view { { input_a } };
auto expected_a = fixed_width_column_wrapper<T>{ 4, 5, highest<T>, 7, 7, 7, 7 };
auto expected = cudf::table_view{ { expected_a } };
auto fill = make_scalar<T>(7);
std::vector<std::reference_wrapper<cudf::scalar>> fills { *fill };
auto actual = cudf::experimental::shift(input, -4, fills);
cudf::test::expect_tables_equal(expected, *actual);
}
TYPED_TEST(ShiftTest, OneColumnNullFill)
{
using T = TypeParam;
auto input_a = fixed_width_column_wrapper<T>{ lowest<T>, 5, 0, 3, 0, 1, highest<T> };
auto input = cudf::table_view { { input_a } };
auto expected_a = fixed_width_column_wrapper<T>({ 0, 0, lowest<T>, 5, 0, 3, 0 }, { 0, 0, 1, 1, 1, 1, 1 });
auto expected = cudf::table_view{ { expected_a } };
auto fill = make_scalar<T>();
std::vector<std::reference_wrapper<cudf::scalar>> fills { *fill };
auto actual = cudf::experimental::shift(input, 2, fills);
cudf::test::expect_tables_equal(expected, *actual);
}
TYPED_TEST(ShiftTest, TwoColumnsNullableInput)
{
using T = TypeParam;
auto input_a = fixed_width_column_wrapper<T>({ 1, 2, 3, 4, 5 }, { 0, 1, 1, 1, 0});
auto input_b = fixed_width_column_wrapper<T>({ 5, 4, 3, 2, 1 }, { 1, 0, 1, 1, 1});
auto input = cudf::table_view { { input_a, input_b } };
auto expected_a = fixed_width_column_wrapper<T>({ 7, 7, 1, 2, 3 }, { 1, 1, 0, 1, 1});
auto expected_b = fixed_width_column_wrapper<T>({ 7, 7, 5, 4, 3 }, { 0, 0, 1, 0, 1});
auto expected = cudf::table_view{ { expected_a, expected_b } };
auto fill_a = make_scalar<T>(7);
auto fill_b = make_scalar<T>();
std::vector<std::reference_wrapper<cudf::scalar>> fills { *fill_a, *fill_b };
auto actual = cudf::experimental::shift(input, 2, fills);
cudf::test::expect_tables_equal(expected, *actual);
}
TYPED_TEST(ShiftTest, MismatchFillValueCount)
{
using T = TypeParam;
fixed_width_column_wrapper<T> a{};
fixed_width_column_wrapper<T> b{};
cudf::table_view input{ { a, b } };
auto fill = make_scalar<T>();
std::vector<std::reference_wrapper<cudf::scalar>> fills{ *fill };
std::unique_ptr<cudf::experimental::table> output;
EXPECT_THROW(output = cudf::experimental::shift(input, 5, fills),
cudf::logic_error);
}
TYPED_TEST(ShiftTest, MismatchFillValueDtypes)
{
using T = TypeParam;
if (std::is_same<T, int>::value) {
return;
}
fixed_width_column_wrapper<T> a{};
cudf::table_view input{ { a } };
auto fill = make_scalar<int>();
std::vector<std::reference_wrapper<cudf::scalar>> fills{ *fill };
std::unique_ptr<cudf::experimental::table> output;
EXPECT_THROW(output = cudf::experimental::shift(input, 5, fills),
cudf::logic_error);
}
| 1e7508df15d9575687f54e775186e320686b3cf1.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <functional>
#include <limits>
#include <memory>
#include <tests/utilities/base_fixture.hpp>
#include <tests/utilities/column_wrapper.hpp>
#include <tests/utilities/cudf_gtest.hpp>
#include <tests/utilities/table_utilities.hpp>
#include <tests/utilities/type_lists.hpp>
#include <cudf/copying.hpp>
#include <cudf/scalar/scalar.hpp>
#include <type_traits>
using cudf::test::fixed_width_column_wrapper;
using TestTypes = cudf::test::Types<int32_t>;
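// Helper factories: the zero-argument make_scalar() builds an invalid (null)
// scalar, used below as a null fill value, while make_scalar(value) builds a
// valid scalar holding the given value.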
template<typename T,
typename ScalarType = cudf::experimental::scalar_type_t<T>>
std::unique_ptr<cudf::scalar>
make_scalar(
cudaStream_t stream = 0,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource()) {
auto s = new ScalarType(0, false, stream, mr);
return std::unique_ptr<cudf::scalar>(s);
}
template<typename T,
typename ScalarType = cudf::experimental::scalar_type_t<T>>
std::unique_ptr<cudf::scalar>
make_scalar(
T value,
cudaStream_t stream = 0,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource()) {
auto s = new ScalarType(value, true, stream, mr);
return std::unique_ptr<cudf::scalar>(s);
}
template<typename T>
auto lowest = std::numeric_limits<T>::lowest();
template<typename T>
auto highest = std::numeric_limits<T>::max();
template <typename T>
struct ShiftTest : public cudf::test::BaseFixture {};
TYPED_TEST_CASE(ShiftTest, cudf::test::FixedWidthTypes);
TYPED_TEST(ShiftTest, OneColumnEmpty)
{
using T = TypeParam;
fixed_width_column_wrapper<T> a{};
cudf::table_view input{ { a } };
auto fill = make_scalar<T>();
std::vector<std::reference_wrapper<cudf::scalar>> fills{ *fill };
cudf::experimental::shift(input, 5, fills);
}
TYPED_TEST(ShiftTest, TwoColumnsEmpty)
{
using T = TypeParam;
fixed_width_column_wrapper<T> input_a({}, {});
fixed_width_column_wrapper<T> input_b{};
cudf::table_view input{ { input_a, input_b } };
fixed_width_column_wrapper<T> expected_a({}, {});
fixed_width_column_wrapper<T> expected_b{};
cudf::table_view expected{ { input_a, input_b } };
auto fill_a = make_scalar<T>();
auto fill_b = make_scalar<T>();
std::vector<std::reference_wrapper<cudf::scalar>> fills{ *fill_a, *fill_b };
auto actual = cudf::experimental::shift(input, 5, fills);
cudf::test::expect_tables_equal(expected, *actual);
}
TYPED_TEST(ShiftTest, OneColumn)
{
using T = TypeParam;
auto input_a = fixed_width_column_wrapper<T>{ lowest<T>, 1, 2, 3, 4, 5, highest<T> };
auto input = cudf::table_view { { input_a } };
auto expected_a = fixed_width_column_wrapper<T>{ 7, 7, lowest<T>, 1, 2, 3, 4 };
auto expected = cudf::table_view{ { expected_a } };
auto fill = make_scalar<T>(7);
std::vector<std::reference_wrapper<cudf::scalar>> fills { *fill };
auto actual = cudf::experimental::shift(input, 2, fills);
cudf::test::expect_tables_equal(expected, *actual);
}
TYPED_TEST(ShiftTest, OneColumnNegativeShift)
{
using T = TypeParam;
auto input_a = fixed_width_column_wrapper<T>{ lowest<T>, 1, 2, 3, 4, 5, highest<T> };
auto input = cudf::table_view { { input_a } };
auto expected_a = fixed_width_column_wrapper<T>{ 4, 5, highest<T>, 7, 7, 7, 7 };
auto expected = cudf::table_view{ { expected_a } };
auto fill = make_scalar<T>(7);
std::vector<std::reference_wrapper<cudf::scalar>> fills { *fill };
auto actual = cudf::experimental::shift(input, -4, fills);
cudf::test::expect_tables_equal(expected, *actual);
}
TYPED_TEST(ShiftTest, OneColumnNullFill)
{
using T = TypeParam;
auto input_a = fixed_width_column_wrapper<T>{ lowest<T>, 5, 0, 3, 0, 1, highest<T> };
auto input = cudf::table_view { { input_a } };
auto expected_a = fixed_width_column_wrapper<T>({ 0, 0, lowest<T>, 5, 0, 3, 0 }, { 0, 0, 1, 1, 1, 1, 1 });
auto expected = cudf::table_view{ { expected_a } };
auto fill = make_scalar<T>();
std::vector<std::reference_wrapper<cudf::scalar>> fills { *fill };
auto actual = cudf::experimental::shift(input, 2, fills);
cudf::test::expect_tables_equal(expected, *actual);
}
TYPED_TEST(ShiftTest, TwoColumnsNullableInput)
{
using T = TypeParam;
auto input_a = fixed_width_column_wrapper<T>({ 1, 2, 3, 4, 5 }, { 0, 1, 1, 1, 0});
auto input_b = fixed_width_column_wrapper<T>({ 5, 4, 3, 2, 1 }, { 1, 0, 1, 1, 1});
auto input = cudf::table_view { { input_a, input_b } };
auto expected_a = fixed_width_column_wrapper<T>({ 7, 7, 1, 2, 3 }, { 1, 1, 0, 1, 1});
auto expected_b = fixed_width_column_wrapper<T>({ 7, 7, 5, 4, 3 }, { 0, 0, 1, 0, 1});
auto expected = cudf::table_view{ { expected_a, expected_b } };
auto fill_a = make_scalar<T>(7);
auto fill_b = make_scalar<T>();
std::vector<std::reference_wrapper<cudf::scalar>> fills { *fill_a, *fill_b };
auto actual = cudf::experimental::shift(input, 2, fills);
cudf::test::expect_tables_equal(expected, *actual);
}
TYPED_TEST(ShiftTest, MismatchFillValueCount)
{
using T = TypeParam;
fixed_width_column_wrapper<T> a{};
fixed_width_column_wrapper<T> b{};
cudf::table_view input{ { a, b } };
auto fill = make_scalar<T>();
std::vector<std::reference_wrapper<cudf::scalar>> fills{ *fill };
std::unique_ptr<cudf::experimental::table> output;
EXPECT_THROW(output = cudf::experimental::shift(input, 5, fills),
cudf::logic_error);
}
TYPED_TEST(ShiftTest, MismatchFillValueDtypes)
{
using T = TypeParam;
if (std::is_same<T, int>::value) {
return;
}
fixed_width_column_wrapper<T> a{};
cudf::table_view input{ { a } };
auto fill = make_scalar<int>();
std::vector<std::reference_wrapper<cudf::scalar>> fills{ *fill };
std::unique_ptr<cudf::experimental::table> output;
EXPECT_THROW(output = cudf::experimental::shift(input, 5, fills),
cudf::logic_error);
}
|
0fd8876fff9ffb67c7b4cb5427534aefb294711c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <math.h>
#include <ctime>
#include <time.h>
#include <hip/hip_runtime.h>
#include <rocblas.h>
using namespace std;
#define N 1024 // define n*n matrix
#define S (N/1)   // sparse matrix's nonzero elements per col (N/1 = fully dense here; change the divisor to adjust sparsity)
#define BLOCK_SIZE 32 // block size ( 256->16 / 1024->32 )
#define TEST_TIMES 100   // number of test iterations (to make the running time easier to record)
#define TEST_REPEAT 100
// simple cuda mul kernel
static void __global__ mul_kernel(int rowsize,int colsize,int colpitch,const float *d_a,const float *d_b,float *d_c)
{
uint index= threadIdx.x + blockIdx.x * blockDim.x;
if(rowsize <= index) return;
float temp=0.0f;
for(int i=0;i<rowsize;i++)
{
temp+=d_a[i*colpitch+index]*d_b[i];
}
d_c[index]=temp;
}
// cuda mul kernel with shared memory
static void __global__ mul_kernel_shared(int rowsize,int colsize,int colpitch,const float *d_a,const float *d_b,float *d_c,const int sharedsize)
{
__shared__ float s_b[N];
float temp=0.0f;
uint index= threadIdx.x + blockIdx.x * blockDim.x;
for(int start=0;start<rowsize;start+=sharedsize)
{
// load shared memory (vec)
__syncthreads();
for(int i=threadIdx.x;i<sharedsize&&(i+start)<rowsize;i+=blockDim.x)
s_b[i]=d_b[i+start];
__syncthreads();
if(rowsize <= index) continue;
int end=start+sharedsize > rowsize ? rowsize : start+sharedsize;
for(int i=start;i<end;i++)
{
temp+=d_a[i*colpitch+index]*s_b[i-start];
}
}
if(index<colsize)
d_c[index]=temp;
}
// cuda mul kernel with shared memory and csr
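// Storage note: despite the "csr" name, d_row/d_val hold exactly S nonzeros for
// every row of A in a transposed, ELLPACK-like layout, so entry i of row `index`
// sits at d_val[index + N*i] / d_row[index + N*i] and consecutive threads read
// consecutive addresses (coalesced).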
static void __global__ mul_kernel_shared_csr(int rowsize,int colsize,int colpitch,const int *d_row,const float *d_val,const float *d_b,float *d_c,const int sharedsize)
{
__shared__ float s_b[N];
float temp=0.0f;
uint index= threadIdx.x + blockIdx.x * blockDim.x;
if(index>=colsize) return;
// load shared memory (vec)
for(int start=0;start<rowsize;start+=sharedsize)
{
for(int i=threadIdx.x;i<sharedsize&&(i+start)<rowsize;i+=blockDim.x)
{
s_b[i]=d_b[i+start];
}
__syncthreads();
}
for(int i=0;i<S;i++)
{
temp+=d_val[index+N*i]*s_b[d_row[index+i*N]];
//printf("Thread %d: d_row=%d,d_val=%f\n",index,d_val[index+N*i],d_row[index+i*N]);
}
if(index<colsize)
d_c[index]=temp;
}
// use register cache row data
static void __global__ mul_kernel_shared_csr_reg(int rowsize,int colsize,int colpitch,const int *d_row,const float *d_val,const float *d_b,float *d_c,const int sharedsize)
{
__shared__ float s_b[N];
float temp=0.0f;
float val[S];
int row[S];
uint index= threadIdx.x + blockIdx.x * blockDim.x;
if(index>=colsize) return;
for(int i=0;i<S;i++)
{
val[i]=d_val[index+N*i];
row[i]=d_row[index+i*N];
}
// load shared memory (vec)
for(int start=0;start<rowsize;start+=sharedsize)
{
for(int i=threadIdx.x;i<sharedsize&&(i+start)<rowsize;i+=blockDim.x)
{
s_b[i]=d_b[i+start];
}
__syncthreads();
}
for(int i=0;i<S;i++)
{
temp+=val[i]*s_b[row[i]];
}
if(index<colsize)
d_c[index]=temp;
}
// cpu matrix mul
void mul_cpu(float *a,float *b,float *c)
{
for(int i=0;i<N;i++)
{
c[i]=0;
for(int j=0;j<N;j++)
c[i]+=(*(a+i*N+j)**(b+j));
}
}
// test cpu and gpu mul result
bool resultcompare(float *ref,float *test,float accu)
{
for(int i=0;i<N;i++)
{
if(fabs(*(ref+i)-*(test+i))>accu) return false;
}
return true;
}
int main()
{
srand(time(0));
    // Memory management
    int *sma_a_col=new int[N*S];   // column indices of the S nonzeros in each row (CSR-like)
    float *sma_a_val=new float[N*S];   // corresponding nonzero values
    int *d_row;   // column-index array (transposed layout for the GPU)
    float *d_val;   // value array (transposed layout for the GPU)
hipMallocManaged(&d_row, sizeof(int)*N*S);
hipMallocManaged(&d_val, sizeof(float)*N*S);
float *d_a,*d_b,*d_c; // Matrix A, Vector B, Result C
hipMallocManaged(&d_a, sizeof(float)*N*N);
hipMallocManaged(&d_b, sizeof(float)*N);
hipMallocManaged(&d_c, sizeof(float)*N);
float *c1,*c2,*c3,*c4,*c5; // Mul result C (on GPU : 1-4)
hipMallocManaged(&c1, sizeof(float)*N);
hipMallocManaged(&c2, sizeof(float)*N);
hipMallocManaged(&c3, sizeof(float)*N);
hipMallocManaged(&c4, sizeof(float)*N);
hipMallocManaged(&c5, sizeof(float)*N);
// Init setting
    bool a_row[N]; // nonzero element flag
int pos;
float temp;
int rand_temp;
// CPU timer
clock_t begin,end;
double timer;
double total_csr=0,total_trans=0,total_cpu=0; // Save total time of each work
// GPU timer
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float gpu_timer;
float total_gpu1=0,total_gpu2=0,total_gpu3=0,total_gpu4=0,total_gpu5=0;
// GPU set threads & blocks
uint threads=256;
int sharedsize=N;
int blocknum=(N+threads-1)/threads;
// * Test begin *
for(int tt=0; tt<TEST_TIMES; tt++)
{
//printf("Test %d: \n",tt);
// Random fill in matrix & vector
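    // Each row gets exactly S distinct nonzero columns: a random position is drawn
    // and advanced linearly (with wrap-around) until an unused column is found,
    // then filled with a value in 1..9.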
for (int i = 0; i < N; i++)
{
for(int j=0;j<N;j++)
a_row[j]=false;
for(int j=0;j<S;j++)
{
int temp_pos = rand() % N;
while(a_row[temp_pos])
{
temp_pos++;
if(temp_pos==N) temp_pos=0;
}
a_row[temp_pos]=true;
}
pos=S*i;
for(int k=0;k<N;k++)
{
*(d_a+i*N+k)=0;
if(a_row[k])
{
rand_temp=rand()%10;
while(rand_temp==0)
rand_temp=rand()%10;
*(d_a+i*N+k)=rand_temp;
//printf("row:%d val:%f \n",*(sma_a_col+pos),*(sma_a_val+pos));
pos++;
}
}
}
for (int i = 0; i < N; i++)
*(d_b+i) = rand() % 10;
// * Recording csr decoding time *
begin=clock();
for (int i = 0; i < N; i++)
{
pos=S*i;
for(int k=0;k<N;k++)
{
if(*(d_a+i*N+k)!=0)
{
*(sma_a_col+pos)=k;
*(sma_a_val+pos)=*(d_a+i*N+k);
//printf("row:%d val:%f \n",*(sma_a_col+pos),*(sma_a_val+pos));
pos++;
}
}
}
end=clock();
timer=(double)(end-begin)/CLOCKS_PER_SEC;
total_csr+=timer;
//printf("The csr decoding time is %f ms.\n",timer*1000);
// Cpu Mul reference
begin=clock();
for(int jj=0; jj<TEST_REPEAT; jj++)
mul_cpu(d_a,d_b,d_c);
end=clock();
timer=(double)(end-begin)/CLOCKS_PER_SEC;
total_cpu+=timer;
//printf("The total cpu run time is %f ms.\n",timer*1000);
    // Matrix transpose (for coalesced memory access)
begin=clock();
for (int i = 0; i < N; i++)
for(int j = i+1; j < N; j++)
{
temp = *(d_a+j*N+i);
*(d_a+j*N+i) = *(d_a+i*N+j);
*(d_a+i*N+j) = temp;
}
for (int i = 0; i < N; i++)
for(int j = 0; j < S; j++)
{
*(d_row+j*N+i)=*(sma_a_col+i*S+j);
*(d_val+j*N+i)=*(sma_a_val+i*S+j);
//printf("[%d,%d]d_row=%d,d_val=%f\n",i,j,*(d_row+j*N+i),*(d_val+j*N+i));
}
end=clock();
timer=(double)(end-begin)/CLOCKS_PER_SEC;
total_trans+=timer;
// * GPU Caculation Part *
// 1.Normal Matrix mul kernel
hipEventRecord(start,0);
for(int jj=0; jj<TEST_REPEAT; jj++)
hipLaunchKernelGGL(( mul_kernel), dim3(blocknum), dim3(threads), 0, 0, N,N,N,d_a,d_b,c1);
hipDeviceSynchronize();
hipEventRecord(stop,0);
hipEventSynchronize(start);
hipEventSynchronize(stop);
hipEventElapsedTime(&gpu_timer,start,stop);
total_gpu1+=gpu_timer;
//printf("The total gpu run time is %f ms.\n",gpu_timer);
// 2.Matrix mul using shared memory kernel
hipEventRecord(start,0);
for(int jj=0; jj<TEST_REPEAT; jj++)
hipLaunchKernelGGL(( mul_kernel_shared), dim3(blocknum), dim3(threads), 0, 0, N,N,N,d_a,d_b,c2,sharedsize);
hipDeviceSynchronize();
hipEventRecord(stop,0);
hipEventSynchronize(start);
hipEventSynchronize(stop);
hipEventElapsedTime(&gpu_timer,start,stop);
total_gpu2+=gpu_timer;
//printf("The total gpu (use shared memory) run time is %f ms.\n",gpu_timer);
// 3.Matrix mul using shared memory and csr kernel
hipEventRecord(start,0);
for(int jj=0; jj<TEST_REPEAT; jj++)
hipLaunchKernelGGL(( mul_kernel_shared_csr), dim3(blocknum), dim3(threads), 0, 0, N,N,N,d_row,d_val,d_b,c3,sharedsize);
hipDeviceSynchronize();
hipEventRecord(stop,0);
hipEventSynchronize(start);
hipEventSynchronize(stop);
hipEventElapsedTime(&gpu_timer,start,stop);
total_gpu3+=gpu_timer;
//printf("The total gpu (using csr and shared memory) run time is %f ms.\n",gpu_timer);
// 4.Use register
hipEventRecord(start,0);
for(int jj=0; jj<TEST_REPEAT; jj++)
hipLaunchKernelGGL(( mul_kernel_shared_csr_reg), dim3(blocknum), dim3(threads), 0, 0, N,N,N,d_row,d_val,d_b,c4,sharedsize);
hipDeviceSynchronize();
hipEventRecord(stop,0);
hipEventSynchronize(start);
hipEventSynchronize(stop);
hipEventElapsedTime(&gpu_timer,start,stop);
total_gpu4+=gpu_timer;
//printf("The total gpu (using csr by register and shared memory) run time is %f ms.\n",gpu_timer);
// 5.Matrix using cublas function call
float alpha = 1;
float beta = 0;
int M=1; // B->vector
hipblasHandle_t handle;
hipblasCreate(&handle);
hipEventRecord(start,0);
// matrix cublas call
for(int jj=0; jj<TEST_REPEAT; jj++)
hipblasSgemm(handle,
HIPBLAS_OP_T,
HIPBLAS_OP_N,
M, // row of B
N, // col of A
N, // row of B
&alpha,
d_b,
M,
d_a,
N,
&beta,
c5,
M);
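    // Note: hipBLAS expects column-major operands; with M = 1 the length-N vector
    // d_b is treated as the transposed 1 x N operand. The hipblasSgemm status is
    // not checked, and a new handle is created on every test iteration without a
    // matching hipblasDestroy.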
hipDeviceSynchronize();
hipEventRecord(stop,0);
hipEventSynchronize(start);
hipEventSynchronize(stop);
hipEventElapsedTime(&gpu_timer,start,stop);
total_gpu5+=gpu_timer;
//printf("The total gpu (using cublas) run time is %f ms.\n",gpu_timer);
    // Correctness check
printf("Test %d: ",tt);
bool res;
res=resultcompare(d_c,c1,1e-4f);
if(res) printf("1P! ");
else printf("1F! ");
res=resultcompare(d_c,c2,1e-4f);
if(res) printf("2P! ");
else printf("2F! ");
res=resultcompare(d_c,c3,1e-4f);
if(res) printf("3P! ");
else printf("3F! ");
res=resultcompare(d_c,c4,1e-4f);
if(res) printf("4P! ");
else printf("4F! ");
res=resultcompare(d_c,c5,1e-4f);
if(res) printf("5P!\n");
else printf("5F!\n");
// test diff
/*for(int i=0;i<TEST_TIMES*N;i++)
{
printf("c=%f\to1=%f\to2=%f\to3=%f\to4=%f\to5=%f\n",*(d_c+i),*(c1+i),*(c2+i),*(c3+i),*(c4+i),*(c5+i));
}*/
}
    // Report total times
    printf("Matrix: %d*%d, S: %d, Test Times: %d\n",N,N,S,TEST_TIMES);
printf("The csr decoding time is %.4lf ms.\n",total_csr*1000);
printf("The matrix trans time is %.4lf ms.\n",total_trans*1000);
printf("The total cpu run time is %.4lf ms.\n",total_cpu*1000);
printf("The total gpu run time is %f ms.\n",total_gpu1);
printf("The total gpu (use shared memory) run time is %f ms.\n",total_gpu2);
printf("The total gpu (using csr and shared memory) run time is %f ms.\n",total_gpu3);
printf("The total gpu (using csr by register and shared memory) run time is %f ms.\n",total_gpu4);
printf("The total gpu (using cublas) run time is %f ms.\n",total_gpu5);
// Free memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
hipFree(d_row);
hipFree(d_val);
hipFree(c1);
hipFree(c2);
hipFree(c3);
hipFree(c4);
hipFree(c5);
free(sma_a_col);
free(sma_a_val);
return 0;
} | 0fd8876fff9ffb67c7b4cb5427534aefb294711c.cu | #include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <math.h>
#include <ctime>
#include <time.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>
using namespace std;
#define N 1024 // define n*n matrix
#define S (N/1)   // sparse matrix's nonzero elements per col (N/1 = fully dense here; change the divisor to adjust sparsity)
#define BLOCK_SIZE 32 // block size ( 256->16 / 1024->32 )
#define TEST_TIMES 100   // number of test iterations (to make the running time easier to record)
#define TEST_REPEAT 100
// simple cuda mul kernel
static void __global__ mul_kernel(int rowsize,int colsize,int colpitch,const float *d_a,const float *d_b,float *d_c)
{
uint index= threadIdx.x + blockIdx.x * blockDim.x;
if(rowsize <= index) return;
float temp=0.0f;
for(int i=0;i<rowsize;i++)
{
temp+=d_a[i*colpitch+index]*d_b[i];
}
d_c[index]=temp;
}
// cuda mul kernel with shared memory
static void __global__ mul_kernel_shared(int rowsize,int colsize,int colpitch,const float *d_a,const float *d_b,float *d_c,const int sharedsize)
{
__shared__ float s_b[N];
float temp=0.0f;
uint index= threadIdx.x + blockIdx.x * blockDim.x;
for(int start=0;start<rowsize;start+=sharedsize)
{
// load shared memory (vec)
__syncthreads();
for(int i=threadIdx.x;i<sharedsize&&(i+start)<rowsize;i+=blockDim.x)
s_b[i]=d_b[i+start];
__syncthreads();
if(rowsize <= index) continue;
int end=start+sharedsize > rowsize ? rowsize : start+sharedsize;
for(int i=start;i<end;i++)
{
temp+=d_a[i*colpitch+index]*s_b[i-start];
}
}
if(index<colsize)
d_c[index]=temp;
}
// cuda mul kernel with shared memory and csr
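// Storage note: despite the "csr" name, d_row/d_val hold exactly S nonzeros for
// every row of A in a transposed, ELLPACK-like layout, so entry i of row `index`
// sits at d_val[index + N*i] / d_row[index + N*i] and consecutive threads read
// consecutive addresses (coalesced).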
static void __global__ mul_kernel_shared_csr(int rowsize,int colsize,int colpitch,const int *d_row,const float *d_val,const float *d_b,float *d_c,const int sharedsize)
{
__shared__ float s_b[N];
float temp=0.0f;
uint index= threadIdx.x + blockIdx.x * blockDim.x;
if(index>=colsize) return;
// load shared memory (vec)
for(int start=0;start<rowsize;start+=sharedsize)
{
for(int i=threadIdx.x;i<sharedsize&&(i+start)<rowsize;i+=blockDim.x)
{
s_b[i]=d_b[i+start];
}
__syncthreads();
}
for(int i=0;i<S;i++)
{
temp+=d_val[index+N*i]*s_b[d_row[index+i*N]];
//printf("Thread %d: d_row=%d,d_val=%f\n",index,d_val[index+N*i],d_row[index+i*N]);
}
if(index<colsize)
d_c[index]=temp;
}
// use register cache row data
static void __global__ mul_kernel_shared_csr_reg(int rowsize,int colsize,int colpitch,const int *d_row,const float *d_val,const float *d_b,float *d_c,const int sharedsize)
{
__shared__ float s_b[N];
float temp=0.0f;
float val[S];
int row[S];
uint index= threadIdx.x + blockIdx.x * blockDim.x;
if(index>=colsize) return;
for(int i=0;i<S;i++)
{
val[i]=d_val[index+N*i];
row[i]=d_row[index+i*N];
}
// load shared memory (vec)
for(int start=0;start<rowsize;start+=sharedsize)
{
for(int i=threadIdx.x;i<sharedsize&&(i+start)<rowsize;i+=blockDim.x)
{
s_b[i]=d_b[i+start];
}
__syncthreads();
}
for(int i=0;i<S;i++)
{
temp+=val[i]*s_b[row[i]];
}
if(index<colsize)
d_c[index]=temp;
}
// cpu matrix mul
void mul_cpu(float *a,float *b,float *c)
{
for(int i=0;i<N;i++)
{
c[i]=0;
for(int j=0;j<N;j++)
c[i]+=(*(a+i*N+j)**(b+j));
}
}
// test cpu and gpu mul result
bool resultcompare(float *ref,float *test,float accu)
{
for(int i=0;i<N;i++)
{
if(fabs(*(ref+i)-*(test+i))>accu) return false;
}
return true;
}
int main()
{
srand(time(0));
    // Memory management
    int *sma_a_col=new int[N*S];   // column indices of the S nonzeros in each row (CSR-like)
    float *sma_a_val=new float[N*S];   // corresponding nonzero values
    int *d_row;   // column-index array (transposed layout for the GPU)
    float *d_val;   // value array (transposed layout for the GPU)
cudaMallocManaged(&d_row, sizeof(int)*N*S);
cudaMallocManaged(&d_val, sizeof(float)*N*S);
float *d_a,*d_b,*d_c; // Matrix A, Vector B, Result C
cudaMallocManaged(&d_a, sizeof(float)*N*N);
cudaMallocManaged(&d_b, sizeof(float)*N);
cudaMallocManaged(&d_c, sizeof(float)*N);
float *c1,*c2,*c3,*c4,*c5; // Mul result C (on GPU : 1-4)
cudaMallocManaged(&c1, sizeof(float)*N);
cudaMallocManaged(&c2, sizeof(float)*N);
cudaMallocManaged(&c3, sizeof(float)*N);
cudaMallocManaged(&c4, sizeof(float)*N);
cudaMallocManaged(&c5, sizeof(float)*N);
// Init setting
    bool a_row[N]; // nonzero element flag
int pos;
float temp;
int rand_temp;
// CPU timer
clock_t begin,end;
double timer;
double total_csr=0,total_trans=0,total_cpu=0; // Save total time of each work
// GPU timer
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float gpu_timer;
float total_gpu1=0,total_gpu2=0,total_gpu3=0,total_gpu4=0,total_gpu5=0;
// GPU set threads & blocks
uint threads=256;
int sharedsize=N;
int blocknum=(N+threads-1)/threads;
// * Test begin *
for(int tt=0; tt<TEST_TIMES; tt++)
{
//printf("Test %d: \n",tt);
// Random fill in matrix & vector
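    // Each row gets exactly S distinct nonzero columns: a random position is drawn
    // and advanced linearly (with wrap-around) until an unused column is found,
    // then filled with a value in 1..9.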
for (int i = 0; i < N; i++)
{
for(int j=0;j<N;j++)
a_row[j]=false;
for(int j=0;j<S;j++)
{
int temp_pos = rand() % N;
while(a_row[temp_pos])
{
temp_pos++;
if(temp_pos==N) temp_pos=0;
}
a_row[temp_pos]=true;
}
pos=S*i;
for(int k=0;k<N;k++)
{
*(d_a+i*N+k)=0;
if(a_row[k])
{
rand_temp=rand()%10;
while(rand_temp==0)
rand_temp=rand()%10;
*(d_a+i*N+k)=rand_temp;
//printf("row:%d val:%f \n",*(sma_a_col+pos),*(sma_a_val+pos));
pos++;
}
}
}
for (int i = 0; i < N; i++)
*(d_b+i) = rand() % 10;
// * Recording csr decoding time *
begin=clock();
for (int i = 0; i < N; i++)
{
pos=S*i;
for(int k=0;k<N;k++)
{
if(*(d_a+i*N+k)!=0)
{
*(sma_a_col+pos)=k;
*(sma_a_val+pos)=*(d_a+i*N+k);
//printf("row:%d val:%f \n",*(sma_a_col+pos),*(sma_a_val+pos));
pos++;
}
}
}
end=clock();
timer=(double)(end-begin)/CLOCKS_PER_SEC;
total_csr+=timer;
//printf("The csr decoding time is %f ms.\n",timer*1000);
// Cpu Mul reference
begin=clock();
for(int jj=0; jj<TEST_REPEAT; jj++)
mul_cpu(d_a,d_b,d_c);
end=clock();
timer=(double)(end-begin)/CLOCKS_PER_SEC;
total_cpu+=timer;
//printf("The total cpu run time is %f ms.\n",timer*1000);
    // Matrix transpose (for coalesced memory access)
begin=clock();
for (int i = 0; i < N; i++)
for(int j = i+1; j < N; j++)
{
temp = *(d_a+j*N+i);
*(d_a+j*N+i) = *(d_a+i*N+j);
*(d_a+i*N+j) = temp;
}
for (int i = 0; i < N; i++)
for(int j = 0; j < S; j++)
{
*(d_row+j*N+i)=*(sma_a_col+i*S+j);
*(d_val+j*N+i)=*(sma_a_val+i*S+j);
//printf("[%d,%d]d_row=%d,d_val=%f\n",i,j,*(d_row+j*N+i),*(d_val+j*N+i));
}
end=clock();
timer=(double)(end-begin)/CLOCKS_PER_SEC;
total_trans+=timer;
// * GPU Caculation Part *
// 1.Normal Matrix mul kernel
cudaEventRecord(start,0);
for(int jj=0; jj<TEST_REPEAT; jj++)
mul_kernel<<<blocknum, threads>>>(N,N,N,d_a,d_b,c1);
cudaDeviceSynchronize();
cudaEventRecord(stop,0);
cudaEventSynchronize(start);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpu_timer,start,stop);
total_gpu1+=gpu_timer;
//printf("The total gpu run time is %f ms.\n",gpu_timer);
// 2.Matrix mul using shared memory kernel
cudaEventRecord(start,0);
for(int jj=0; jj<TEST_REPEAT; jj++)
mul_kernel_shared<<<blocknum, threads>>>(N,N,N,d_a,d_b,c2,sharedsize);
cudaDeviceSynchronize();
cudaEventRecord(stop,0);
cudaEventSynchronize(start);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpu_timer,start,stop);
total_gpu2+=gpu_timer;
//printf("The total gpu (use shared memory) run time is %f ms.\n",gpu_timer);
// 3.Matrix mul using shared memory and csr kernel
cudaEventRecord(start,0);
for(int jj=0; jj<TEST_REPEAT; jj++)
mul_kernel_shared_csr<<<blocknum, threads>>>(N,N,N,d_row,d_val,d_b,c3,sharedsize);
cudaDeviceSynchronize();
cudaEventRecord(stop,0);
cudaEventSynchronize(start);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpu_timer,start,stop);
total_gpu3+=gpu_timer;
//printf("The total gpu (using csr and shared memory) run time is %f ms.\n",gpu_timer);
// 4.Use register
cudaEventRecord(start,0);
for(int jj=0; jj<TEST_REPEAT; jj++)
mul_kernel_shared_csr_reg<<<blocknum, threads>>>(N,N,N,d_row,d_val,d_b,c4,sharedsize);
cudaDeviceSynchronize();
cudaEventRecord(stop,0);
cudaEventSynchronize(start);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpu_timer,start,stop);
total_gpu4+=gpu_timer;
//printf("The total gpu (using csr by register and shared memory) run time is %f ms.\n",gpu_timer);
// 5.Matrix using cublas function call
float alpha = 1;
float beta = 0;
int M=1; // B->vector
cublasHandle_t handle;
cublasCreate(&handle);
cudaEventRecord(start,0);
// matrix cublas call
for(int jj=0; jj<TEST_REPEAT; jj++)
cublasSgemm(handle,
CUBLAS_OP_T,
CUBLAS_OP_N,
M, // row of B
N, // col of A
N, // row of B
&alpha,
d_b,
M,
d_a,
N,
&beta,
c5,
M);
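    // Note: cuBLAS expects column-major operands; with M = 1 the length-N vector
    // d_b is treated as the transposed 1 x N operand. The cublasSgemm status is
    // not checked, and a new handle is created on every test iteration without a
    // matching cublasDestroy.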
cudaDeviceSynchronize();
cudaEventRecord(stop,0);
cudaEventSynchronize(start);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpu_timer,start,stop);
total_gpu5+=gpu_timer;
//printf("The total gpu (using cublas) run time is %f ms.\n",gpu_timer);
    // Correctness check
printf("Test %d: ",tt);
bool res;
res=resultcompare(d_c,c1,1e-4f);
if(res) printf("1P! ");
else printf("1F! ");
res=resultcompare(d_c,c2,1e-4f);
if(res) printf("2P! ");
else printf("2F! ");
res=resultcompare(d_c,c3,1e-4f);
if(res) printf("3P! ");
else printf("3F! ");
res=resultcompare(d_c,c4,1e-4f);
if(res) printf("4P! ");
else printf("4F! ");
res=resultcompare(d_c,c5,1e-4f);
if(res) printf("5P!\n");
else printf("5F!\n");
// test diff
/*for(int i=0;i<TEST_TIMES*N;i++)
{
printf("c=%f\to1=%f\to2=%f\to3=%f\to4=%f\to5=%f\n",*(d_c+i),*(c1+i),*(c2+i),*(c3+i),*(c4+i),*(c5+i));
}*/
}
    // Report total times
    printf("Matrix: %d*%d, S: %d, Test Times: %d\n",N,N,S,TEST_TIMES);
printf("The csr decoding time is %.4lf ms.\n",total_csr*1000);
printf("The matrix trans time is %.4lf ms.\n",total_trans*1000);
printf("The total cpu run time is %.4lf ms.\n",total_cpu*1000);
printf("The total gpu run time is %f ms.\n",total_gpu1);
printf("The total gpu (use shared memory) run time is %f ms.\n",total_gpu2);
printf("The total gpu (using csr and shared memory) run time is %f ms.\n",total_gpu3);
printf("The total gpu (using csr by register and shared memory) run time is %f ms.\n",total_gpu4);
printf("The total gpu (using cublas) run time is %f ms.\n",total_gpu5);
// Free memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cudaFree(d_row);
cudaFree(d_val);
cudaFree(c1);
cudaFree(c2);
cudaFree(c3);
cudaFree(c4);
cudaFree(c5);
free(sma_a_col);
free(sma_a_val);
return 0;
} |
255a0dc302ce52c8659c67ab3a8b5e5ccadecf99.hip | // !!! This is a file automatically generated by hipify!!!
$!MEASURE_DECLARE_TIMER!$
hipEvent_t start, stop;
float elapsedTime;
$!MEASURE_CREATE_TIMER!$
/* create the timers */
HANDLE_ERROR(hipEventCreate(&start));
HANDLE_ERROR(hipEventCreate(&stop));
/* start the timer */
HANDLE_ERROR(hipEventRecord(start, 0));
$!MEASURE_TERMINATE_TIMER!$
HANDLE_ERROR(hipEventRecord(stop, 0));
hipEventSynchronize(stop);
HANDLE_ERROR(hipEventElapsedTime(&elapsedTime, start, stop));
    printf(\"execution took %3.6f milliseconds\", elapsedTime);
$!MEASURE_FREE_TIMER!$
HANDLE_ERROR(hipEventDestroy(start));
HANDLE_ERROR(hipEventDestroy(stop));
| 255a0dc302ce52c8659c67ab3a8b5e5ccadecf99.cu | $!MEASURE_DECLARE_TIMER!$
cudaEvent_t start, stop;
float elapsedTime;
$!MEASURE_CREATE_TIMER!$
/* create the timers */
HANDLE_ERROR(cudaEventCreate(&start));
HANDLE_ERROR(cudaEventCreate(&stop));
/* start the timer */
HANDLE_ERROR(cudaEventRecord(start, 0));
$!MEASURE_TERMINATE_TIMER!$
HANDLE_ERROR(cudaEventRecord(stop, 0));
cudaEventSynchronize(stop);
HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime, start, stop));
    printf(\"execution took %3.6f milliseconds\", elapsedTime);
$!MEASURE_FREE_TIMER!$
HANDLE_ERROR(cudaEventDestroy(start));
HANDLE_ERROR(cudaEventDestroy(stop));
|
cead2bfe30cbc33a3300eeb850fc07691c63c7ae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/TensorUtils.h>
#include <ATen/hip/HIPContext.h>
#include <c10/util/Exception.h>
#include <c10/macros/Macros.h>
#include <THH/THHDeviceUtils.cuh>
#include <THH/THHTensorMathReduce.cuh>
#include <THH/THHTensorSort.cuh>
#include <THH/THHThrustAllocator.cuh>
#include <thrust/execution_policy.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/unique.h>
#include <ATen/native/hip/EmbeddingBackwardKernel.cuh>
namespace at { namespace native {
namespace {
#ifdef __HIP_PLATFORM_HCC__
static const int BLOCKDIMY = 16;
#else
static const int BLOCKDIMY = 32;
#endif
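// The feature kernel indexes shared memory as if blockDim.x == C10_WARP_SIZE, so
// BLOCKDIMY is presumably halved on HIP (warp size 64) to keep the block at
// warp_size * BLOCKDIMY = 1024 threads on both platforms.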
template
<typename scalar_t,
typename accscalar_t>
__global__ void embedding_backward_feature_kernel
(int64_t* indices,
const scalar_t* __restrict__ grad,
scalar_t* __restrict__ grad_weight,
int n, // OK to pass as int, we don't expect 2 billion+ samples in one shot
int64_t stride,
int padding_idx)
{
extern __shared__ char buf[];
accscalar_t* smem = (accscalar_t*)buf;
accscalar_t* my_s = smem + C10_WARP_SIZE*threadIdx.y;
int* indices_batch = (int*)(buf + sizeof(accscalar_t)*C10_WARP_SIZE*blockDim.y);
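  // Dynamic shared-memory layout: C10_WARP_SIZE accumulators per warp (my_s points
  // at this warp's row), followed by blockDim.x*blockDim.y cached indices for the
  // current batch.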
const int s = (int)stride; // OK to make int, we don't expect 2 billion+ embedding row size
const int f = threadIdx.x + blockIdx.x*blockDim.x; // feature_dim
for(int batch_start = 0; batch_start < n; batch_start += blockDim.x*blockDim.y)
{
// Entire block cooperates to load a batch of 1024 indices to process
int tid = threadIdx.x + threadIdx.y*blockDim.x;
if(batch_start + tid < n)
indices_batch[tid] = (int)indices[batch_start + tid];
int batch_end = batch_start + blockDim.x*blockDim.y < n ?
batch_start + blockDim.x*blockDim.y : n;
// Loop over the batch of <= 1024 loaded indices in chunks of blockDim.y = 32
for(int chunk_start = batch_start; chunk_start < batch_end; chunk_start += blockDim.y)
{
// This does double duty: it makes sure indices_batch is ready, and it makes sure match-group
// leaders are done with their accumulates before other warps start loading again.
__syncthreads();
int n_this_chunk = (batch_end - chunk_start) < blockDim.y ?
(batch_end - chunk_start) : blockDim.y;
int src_row = chunk_start + threadIdx.y;
int dst_row = indices_batch[src_row - batch_start]; // This warp's target row in grad_weight
// All warps load their smem segments with incoming grad data
if(src_row < n && f < s && dst_row != padding_idx)
my_s[threadIdx.x] = static_cast<accscalar_t>(grad[src_row*stride + f]);
__syncthreads();
// To ensure determinism, we can't just have each warp add its grad data to its dst_row.
// We need to check if any other warps pulled grad data targeting dst_row.
// If so, we elect the first warp in each matching group as the leader.
// Each leader warp serializes the accumulates targeting dst_row in shared memory,
// then finishes by adding the accumulated buffer to dst_row in grad_weight.
if(dst_row != padding_idx && src_row < n) // Per-warp exit condition, safe with ballot_sync
{
int match_found_this_thread =
(dst_row == indices_batch[chunk_start - batch_start + threadIdx.x]);
if(threadIdx.x >= n_this_chunk)
match_found_this_thread = 0;
#ifdef __HIP_PLATFORM_HCC__
unsigned long long int matchmask = WARP_BALLOT(match_found_this_thread);
int first_remaining_peer = __ffsll(matchmask) - 1;
#else
unsigned int matchmask = WARP_BALLOT(match_found_this_thread);
int first_remaining_peer = __ffs(matchmask) - 1;
#endif
if(threadIdx.y == first_remaining_peer) // Nominate lowest-indexed warp as the leader
{
matchmask ^= (1 << first_remaining_peer);
while(matchmask)
{
#ifdef __HIP_PLATFORM_HCC__
first_remaining_peer = __ffsll(matchmask) - 1;
#else
first_remaining_peer = __ffs(matchmask) - 1;
#endif
my_s[threadIdx.x] += smem[threadIdx.x + C10_WARP_SIZE*first_remaining_peer];
matchmask ^= (1 << first_remaining_peer);
}
if(f < s)
grad_weight[dst_row*stride + f] += static_cast<scalar_t>(my_s[threadIdx.x]);
}
}
}
}
}
template <typename scalar_t>
__global__ void embedding_backward_kernel(
int64_t* input, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight,
int64_t* count, int64_t numel, int64_t stride, int padding_idx) {
using accscalar_t = acc_type<scalar_t, true>;
int idx = blockIdx.x * 4 + threadIdx.y;
// Each warp is responsible for an input into the LookupTable.
// If the preceding input has the same value as this input, then the warp
// exits immediately. The warp also processes subsequent inputs with the
// same value.
//
// Input Warp
// 1 <warp 1>
// 1 <warp 1> (<warp 2> exits without doing any work)
// 5 <warp 3>
// 8 <warp 4>
// Number of values processed by each thread (grain size)
const int SZ = 4;
if (idx < numel
&& (idx == 0 || input[idx] != input[idx - 1])
&& input[idx] != padding_idx) {
do {
const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
const int weight_row = ((int) input[idx]) * stride;
const int grad_row = ((int) indices[idx]) * stride;
const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0;
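// when "count" is provided it holds how many times this index occurs, so duplicated indices receive an averaged (1/count) gradient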
accscalar_t gradient[SZ];
accscalar_t weight[SZ];
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int feature_dim = start_feature + ii * C10_WARP_SIZE;
if (feature_dim < stride) {
gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]);
weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]);
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
weight[ii] += gradient[ii] * scale;
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int feature_dim = start_feature + ii * C10_WARP_SIZE;
if (feature_dim < stride) {
grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]);
}
}
idx++;
} while (idx < numel && input[idx] == input[idx - 1]);
}
}
/* Calculate norms of the rows of weight_ptr given by idx_ptr and capture them in norms */
template <typename scalar_t, typename accscalar_t>
__global__ void renorm_kernel(
scalar_t* weights, int64_t* indices, accscalar_t max_norm,
accscalar_t norm_type, int64_t dim,
int64_t weights_stride0, int64_t weights_stride1) {
// Some casting hacks since dynamic shared memory and templates don't work together:
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
int tid = threadIdx.x;
int base_index = indices[blockIdx.x] * weights_stride0;
accscalar_t v = 0;
for (int i = tid; i < dim; i += blockDim.x) {
auto x = static_cast<accscalar_t>(weights[base_index + i * weights_stride1]);
if (norm_type == 1) {
v += std::abs(x);
} else if (norm_type == 2) {
v += x * x;
} else {
v += ::pow(x, norm_type);
}
}
using Op = ReduceAdd<accscalar_t>;
v = reduceBlock<accscalar_t>(sdata, blockDim.x, v, Op(), 0);
if (tid == 0) {
sdata[0] = ::pow(v, static_cast<accscalar_t>(1.0 / norm_type));
}
__syncthreads();
// now we renormalize the blocks that need it
if (sdata[0] > max_norm) {
auto factor = static_cast<scalar_t>(max_norm / (sdata[0] + 1e-7));
for (int i = tid; i < dim; i += blockDim.x) {
weights[base_index + i * weights_stride1] *= factor;
}
}
}
} // anonymous namespace
Tensor embedding_dense_backward_cuda(const Tensor & grad_, const Tensor & indices,
int64_t num_weights, int64_t padding_idx,
bool scale_grad_by_freq) {
auto grad_arg = TensorArg(grad_, "grad", 1);
auto indices_arg = TensorArg(indices, "indices", 1);
checkScalarType("embedding_backward", indices_arg, kLong);
checkSameGPU("embedding_backward", grad_arg, indices_arg);
auto num_indices = indices.numel();
auto grad = grad_.contiguous().view({num_indices, grad_.size(-1)});
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
if (num_indices <= 768 && !scale_grad_by_freq) {
auto indices_contig = indices.contiguous();
auto grad_weight = at::zeros({num_weights, grad_.size(-1)}, grad_.options());
int64_t stride = grad_weight.stride(0);
dim3 grid(THCCeilDiv(stride, (int64_t)C10_WARP_SIZE));
dim3 block(C10_WARP_SIZE, BLOCKDIMY);
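// Launch shape: grid.x tiles the embedding dimension (stride) in warp-sized chunks,
// and each block carries BLOCKDIMY warps that cooperatively walk the index list.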
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(grad.scalar_type(),
"embedding_backward",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
hipLaunchKernelGGL(( embedding_backward_feature_kernel<scalar_t, accscalar_t>)
, dim3(grid),
dim3(block),
sizeof(accscalar_t)*C10_WARP_SIZE*BLOCKDIMY + sizeof(int)*C10_WARP_SIZE*BLOCKDIMY,
stream,
indices_contig.data_ptr<int64_t>(),
grad.data_ptr<scalar_t>(),
grad_weight.data_ptr<scalar_t>(),
static_cast<int>(num_indices),
static_cast<int64_t>(stride),
static_cast<int>(padding_idx));
});
THCudaCheck(hipGetLastError());
return grad_weight;
}
auto sorted_indices = at::empty_like(indices, at::MemoryFormat::Contiguous);
auto orig_indices = at::empty_like(indices, at::MemoryFormat::Contiguous);
using device_ptr = thrust::device_ptr<int64_t>;
// Sort the inputs into sorted with the corresponding indices; we
// don't need a stable or multidimensional sort, so just use Thrust
// directly
{
sorted_indices.copy_(indices);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
// Fill sortedOrigIndices with sequential indices
auto count_iter = thrust::counting_iterator<int64_t>(0);
auto orig_data = device_ptr(orig_indices.data_ptr<int64_t>());
thrust::copy(policy, count_iter, count_iter + num_indices, orig_data);
// Sort; a stable sort is not required
auto sorted_data = device_ptr(sorted_indices.data_ptr<int64_t>());
thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data,
ThrustLTOp<int64_t>());
}
Tensor count;
if (scale_grad_by_freq) {
count = at::empty_like(indices, at::MemoryFormat::Contiguous);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
// Compute an increasing sequence per unique item in sortedIndices:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 1 2 3 1 2 1 1 2
auto sorted_data = device_ptr(sorted_indices.data_ptr<int64_t>());
auto count_data = device_ptr(count.data_ptr<int64_t>());
thrust::inclusive_scan_by_key(
policy,
sorted_data,
sorted_data + num_indices,
thrust::make_constant_iterator(1),
count_data
);
// Take the maximum of each count per unique key in reverse:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 3 3 3 2 2 1 2 2
thrust::inclusive_scan_by_key(
policy,
thrust::make_reverse_iterator(sorted_data + num_indices),
thrust::make_reverse_iterator(sorted_data),
thrust::make_reverse_iterator(count_data + num_indices),
thrust::make_reverse_iterator(count_data + num_indices),
thrust::equal_to<int64_t>(),
thrust::maximum<int64_t>()
);
}
return embedding_backward_cuda_kernel(grad, orig_indices,
sorted_indices, count, num_weights, padding_idx);
}
Tensor & embedding_renorm_cuda_(Tensor & self, const Tensor & indices,
double max_norm, double norm_type) {
auto self_arg = TensorArg(self, "self", 1);
auto indices_arg = TensorArg(indices, "indices", 1);
checkDim("embedding_renorm_", self_arg, 2);
checkSameGPU("embedding_renorm", self_arg, indices_arg);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
using device_ptr = thrust::device_ptr<int64_t>;
auto num_indices = indices.numel();
auto indices_contig = indices.contiguous();
auto indices_data = device_ptr(indices_contig.data_ptr<int64_t>());
// FIXME: thrust::unique only removes consecutive elements that are equal.
// We have race conditions when indices contain duplicates which are not
// adjacent
auto unique_indices = at::empty(indices.numel(), indices.options());
auto unique_data = device_ptr(unique_indices.data_ptr<int64_t>());
auto end = thrust::unique_copy(policy, indices_data, indices_data + num_indices, unique_data);
auto num_unique_indices = static_cast<int>(end - unique_data);
dim3 grid(num_unique_indices);
dim3 block(128);
int dim = self.stride(0);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "embedding_backward", [&] {
using accscalar_t = acc_type<scalar_t, true>;
hipLaunchKernelGGL(( renorm_kernel), dim3(grid), dim3(block), 128 * sizeof(accscalar_t), stream,
self.data_ptr<scalar_t>(),
unique_indices.data_ptr<int64_t>(),
static_cast<accscalar_t>(max_norm),
static_cast<accscalar_t>(norm_type),
dim, self.stride(0), self.stride(1));
});
THCudaCheck(hipGetLastError());
return self;
}
}} // namespace at::native
| cead2bfe30cbc33a3300eeb850fc07691c63c7ae.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/TensorUtils.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/util/Exception.h>
#include <c10/macros/Macros.h>
#include <THC/THCDeviceUtils.cuh>
#include <THC/THCTensorMathReduce.cuh>
#include <THC/THCTensorSort.cuh>
#include <THC/THCThrustAllocator.cuh>
#include <thrust/execution_policy.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/unique.h>
#include <ATen/native/cuda/EmbeddingBackwardKernel.cuh>
namespace at { namespace native {
namespace {
#ifdef __HIP_PLATFORM_HCC__
static const int BLOCKDIMY = 16;
#else
static const int BLOCKDIMY = 32;
#endif
template
<typename scalar_t,
typename accscalar_t>
__global__ void embedding_backward_feature_kernel
(int64_t* indices,
const scalar_t* __restrict__ grad,
scalar_t* __restrict__ grad_weight,
int n, // OK to pass as int, we don't expect 2 billion+ samples in one shot
int64_t stride,
int padding_idx)
{
extern __shared__ char buf[];
accscalar_t* smem = (accscalar_t*)buf;
accscalar_t* my_s = smem + C10_WARP_SIZE*threadIdx.y;
int* indices_batch = (int*)(buf + sizeof(accscalar_t)*C10_WARP_SIZE*blockDim.y);
const int s = (int)stride; // OK to make int, we don't expect 2 billion+ embedding row size
const int f = threadIdx.x + blockIdx.x*blockDim.x; // feature_dim
for(int batch_start = 0; batch_start < n; batch_start += blockDim.x*blockDim.y)
{
// Entire block cooperates to load a batch of 1024 indices to process
int tid = threadIdx.x + threadIdx.y*blockDim.x;
if(batch_start + tid < n)
indices_batch[tid] = (int)indices[batch_start + tid];
int batch_end = batch_start + blockDim.x*blockDim.y < n ?
batch_start + blockDim.x*blockDim.y : n;
// Loop over the batch of <= 1024 loaded indices in chunks of blockDim.y = 32
for(int chunk_start = batch_start; chunk_start < batch_end; chunk_start += blockDim.y)
{
// This does double duty: it makes sure indices_batch is ready, and it makes sure match-group
// leaders are done with their accumulates before other warps start loading again.
__syncthreads();
int n_this_chunk = (batch_end - chunk_start) < blockDim.y ?
(batch_end - chunk_start) : blockDim.y;
int src_row = chunk_start + threadIdx.y;
int dst_row = indices_batch[src_row - batch_start]; // This warp's target row in grad_weight
// All warps load their smem segments with incoming grad data
if(src_row < n && f < s && dst_row != padding_idx)
my_s[threadIdx.x] = static_cast<accscalar_t>(grad[src_row*stride + f]);
__syncthreads();
// To ensure determinism, we can't just have each warp add its grad data to its dst_row.
// We need to check if any other warps pulled grad data targeting dst_row.
// If so, we elect the first warp in each matching group as the leader.
// Each leader warp serializes the accumulates targeting dst_row in shared memory,
// then finishes by adding the accumulated buffer to dst_row in grad_weight.
if(dst_row != padding_idx && src_row < n) // Per-warp exit condition, safe with ballot_sync
{
int match_found_this_thread =
(dst_row == indices_batch[chunk_start - batch_start + threadIdx.x]);
if(threadIdx.x >= n_this_chunk)
match_found_this_thread = 0;
#ifdef __HIP_PLATFORM_HCC__
unsigned long long int matchmask = WARP_BALLOT(match_found_this_thread);
int first_remaining_peer = __ffsll(matchmask) - 1;
#else
unsigned int matchmask = WARP_BALLOT(match_found_this_thread);
int first_remaining_peer = __ffs(matchmask) - 1;
#endif
if(threadIdx.y == first_remaining_peer) // Nominate lowest-indexed warp as the leader
{
matchmask ^= (1 << first_remaining_peer);
while(matchmask)
{
#ifdef __HIP_PLATFORM_HCC__
first_remaining_peer = __ffsll(matchmask) - 1;
#else
first_remaining_peer = __ffs(matchmask) - 1;
#endif
my_s[threadIdx.x] += smem[threadIdx.x + C10_WARP_SIZE*first_remaining_peer];
matchmask ^= (1 << first_remaining_peer);
}
if(f < s)
grad_weight[dst_row*stride + f] += static_cast<scalar_t>(my_s[threadIdx.x]);
}
}
}
}
}
template <typename scalar_t>
__global__ void embedding_backward_kernel(
int64_t* input, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight,
int64_t* count, int64_t numel, int64_t stride, int padding_idx) {
using accscalar_t = acc_type<scalar_t, true>;
int idx = blockIdx.x * 4 + threadIdx.y;
// Each warp is responsible for an input into the LookupTable.
// If the preceding input has the same value as this input, then the warp
// exits immediately. The warp also processes subsequent inputs with the
// same value.
//
// Input Warp
// 1 <warp 1>
// 1 <warp 1> (<warp 2> exits without doing any work)
// 5 <warp 3>
// 8 <warp 4>
// Number of values processed by each thread (grain size)
const int SZ = 4;
if (idx < numel
&& (idx == 0 || input[idx] != input[idx - 1])
&& input[idx] != padding_idx) {
do {
const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
const int weight_row = ((int) input[idx]) * stride;
const int grad_row = ((int) indices[idx]) * stride;
const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0;
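// when "count" is provided it holds how many times this index occurs, so duplicated indices receive an averaged (1/count) gradient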
accscalar_t gradient[SZ];
accscalar_t weight[SZ];
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int feature_dim = start_feature + ii * C10_WARP_SIZE;
if (feature_dim < stride) {
gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]);
weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]);
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
weight[ii] += gradient[ii] * scale;
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int feature_dim = start_feature + ii * C10_WARP_SIZE;
if (feature_dim < stride) {
grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]);
}
}
idx++;
} while (idx < numel && input[idx] == input[idx - 1]);
}
}
/* Calculate norms of the rows of weight_ptr given by idx_ptr and capture them in norms */
template <typename scalar_t, typename accscalar_t>
__global__ void renorm_kernel(
scalar_t* weights, int64_t* indices, accscalar_t max_norm,
accscalar_t norm_type, int64_t dim,
int64_t weights_stride0, int64_t weights_stride1) {
// Some casting hacks since dynamic shared memory and templates don't work together:
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
int tid = threadIdx.x;
int base_index = indices[blockIdx.x] * weights_stride0;
accscalar_t v = 0;
for (int i = tid; i < dim; i += blockDim.x) {
auto x = static_cast<accscalar_t>(weights[base_index + i * weights_stride1]);
if (norm_type == 1) {
v += std::abs(x);
} else if (norm_type == 2) {
v += x * x;
} else {
v += std::pow(x, norm_type);
}
}
using Op = ReduceAdd<accscalar_t>;
v = reduceBlock<accscalar_t>(sdata, blockDim.x, v, Op(), 0);
if (tid == 0) {
sdata[0] = std::pow(v, static_cast<accscalar_t>(1.0 / norm_type));
}
__syncthreads();
// now we renormalize the blocks that need it
if (sdata[0] > max_norm) {
auto factor = static_cast<scalar_t>(max_norm / (sdata[0] + 1e-7));
for (int i = tid; i < dim; i += blockDim.x) {
weights[base_index + i * weights_stride1] *= factor;
}
}
}
} // anonymous namespace
Tensor embedding_dense_backward_cuda(const Tensor & grad_, const Tensor & indices,
int64_t num_weights, int64_t padding_idx,
bool scale_grad_by_freq) {
auto grad_arg = TensorArg(grad_, "grad", 1);
auto indices_arg = TensorArg(indices, "indices", 1);
checkScalarType("embedding_backward", indices_arg, kLong);
checkSameGPU("embedding_backward", grad_arg, indices_arg);
auto num_indices = indices.numel();
auto grad = grad_.contiguous().view({num_indices, grad_.size(-1)});
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
if (num_indices <= 768 && !scale_grad_by_freq) {
auto indices_contig = indices.contiguous();
auto grad_weight = at::zeros({num_weights, grad_.size(-1)}, grad_.options());
int64_t stride = grad_weight.stride(0);
dim3 grid(THCCeilDiv(stride, (int64_t)C10_WARP_SIZE));
dim3 block(C10_WARP_SIZE, BLOCKDIMY);
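// Launch shape: grid.x tiles the embedding dimension (stride) in warp-sized chunks,
// and each block carries BLOCKDIMY warps that cooperatively walk the index list.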
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(grad.scalar_type(),
"embedding_backward",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
embedding_backward_feature_kernel<scalar_t, accscalar_t>
<<<grid,
block,
sizeof(accscalar_t)*C10_WARP_SIZE*BLOCKDIMY + sizeof(int)*C10_WARP_SIZE*BLOCKDIMY,
stream>>>
(indices_contig.data_ptr<int64_t>(),
grad.data_ptr<scalar_t>(),
grad_weight.data_ptr<scalar_t>(),
static_cast<int>(num_indices),
static_cast<int64_t>(stride),
static_cast<int>(padding_idx));
});
THCudaCheck(cudaGetLastError());
return grad_weight;
}
auto sorted_indices = at::empty_like(indices, at::MemoryFormat::Contiguous);
auto orig_indices = at::empty_like(indices, at::MemoryFormat::Contiguous);
using device_ptr = thrust::device_ptr<int64_t>;
// Sort the inputs into sorted with the corresponding indices; we
// don't need a stable or multidimensional sort, so just use Thrust
// directly
{
sorted_indices.copy_(indices);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
// Fill sortedOrigIndices with sequential indices
auto count_iter = thrust::counting_iterator<int64_t>(0);
auto orig_data = device_ptr(orig_indices.data_ptr<int64_t>());
thrust::copy(policy, count_iter, count_iter + num_indices, orig_data);
// Sort; a stable sort is not required
auto sorted_data = device_ptr(sorted_indices.data_ptr<int64_t>());
thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data,
ThrustLTOp<int64_t>());
}
Tensor count;
if (scale_grad_by_freq) {
count = at::empty_like(indices, at::MemoryFormat::Contiguous);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
// Compute an increasing sequence per unique item in sortedIndices:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 1 2 3 1 2 1 1 2
auto sorted_data = device_ptr(sorted_indices.data_ptr<int64_t>());
auto count_data = device_ptr(count.data_ptr<int64_t>());
thrust::inclusive_scan_by_key(
policy,
sorted_data,
sorted_data + num_indices,
thrust::make_constant_iterator(1),
count_data
);
// Take the maximum of each count per unique key in reverse:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 3 3 3 2 2 1 2 2
thrust::inclusive_scan_by_key(
policy,
thrust::make_reverse_iterator(sorted_data + num_indices),
thrust::make_reverse_iterator(sorted_data),
thrust::make_reverse_iterator(count_data + num_indices),
thrust::make_reverse_iterator(count_data + num_indices),
thrust::equal_to<int64_t>(),
thrust::maximum<int64_t>()
);
}
return embedding_backward_cuda_kernel(grad, orig_indices,
sorted_indices, count, num_weights, padding_idx);
}
Tensor & embedding_renorm_cuda_(Tensor & self, const Tensor & indices,
double max_norm, double norm_type) {
auto self_arg = TensorArg(self, "self", 1);
auto indices_arg = TensorArg(indices, "indices", 1);
checkDim("embedding_renorm_", self_arg, 2);
checkSameGPU("embedding_renorm", self_arg, indices_arg);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
using device_ptr = thrust::device_ptr<int64_t>;
auto num_indices = indices.numel();
auto indices_contig = indices.contiguous();
auto indices_data = device_ptr(indices_contig.data_ptr<int64_t>());
// FIXME: thrust::unique only removes consecutive elements that are equal.
// We have race conditions when indices contain duplicates which are not
// adjacent
auto unique_indices = at::empty(indices.numel(), indices.options());
auto unique_data = device_ptr(unique_indices.data_ptr<int64_t>());
auto end = thrust::unique_copy(policy, indices_data, indices_data + num_indices, unique_data);
auto num_unique_indices = static_cast<int>(end - unique_data);
dim3 grid(num_unique_indices);
dim3 block(128);
int dim = self.stride(0);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "embedding_backward", [&] {
using accscalar_t = acc_type<scalar_t, true>;
renorm_kernel<<<grid, block, 128 * sizeof(accscalar_t), stream>>>(
self.data_ptr<scalar_t>(),
unique_indices.data_ptr<int64_t>(),
static_cast<accscalar_t>(max_norm),
static_cast<accscalar_t>(norm_type),
dim, self.stride(0), self.stride(1));
});
THCudaCheck(cudaGetLastError());
return self;
}
}} // namespace at::native
|
987dc2b11a93110751c99e15e1e76af7536dc0f2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
//-----------------------------------------------------------------------------------------
// Checks that a GPU call succeeded - code found thanks to the kindness of StackOverflow
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// __global__ - qualifier - tells the compiler that the function should be compiled for the device rather than for the host
//-----------------------------------------------------------------------------------------
// Fills the matrices
__global__ void uzupelnij(long nMacierzy, float *d_A, float *d_B)
{
long d_x = threadIdx.x + blockIdx.x * blockDim.x;
long d_y = threadIdx.y + blockIdx.y * blockDim.y;
long temp = d_x + nMacierzy * d_y;
if (d_x < nMacierzy && d_y < nMacierzy)
{
d_A[temp] = fmodf(sinf(d_x)*d_x*d_y, 10);
d_B[temp] = fmodf(cosf(d_y)*(d_x + d_y), 10);
//printf ("thread: %dx%d - [%f,%f]\n",threadIdx.x,threadIdx.y,d_A[temp],d_B[temp]);
}
}
//-----------------------------------------------------------------------------------------
// Computes matrix C
__global__ void obliczC(long nMacierzy, float *d_A, float *d_B, float *d_C)
{
long d_x = blockIdx.x * blockDim.x + threadIdx.x;
long d_y = blockIdx.y * blockDim.y + threadIdx.y;
if (d_x < nMacierzy && d_y < nMacierzy)
{
for (int i = 0; i < nMacierzy; i++)
{
d_C[d_x + nMacierzy * d_y] += d_A[d_x + i * nMacierzy] * d_B[i + nMacierzy * d_y];
}
}
}
int main(int argc, char** argv)
{
// 1. -----------------------------------------------------------------------
// Check that all required input arguments are present.
if (!argv[1])
{
printf("0 ms - missing matrix size \n"); //if not, print a message and exit the program
return 0;
}
// 2. -----------------------------------------------------------------------
// Store the argument in a variable - the matrix size
long rozmiarMac = atol(argv[1]);
//check that the argument values are not outside the expected range
if (rozmiarMac <= 1)
{
printf("0 ms - invalid matrix size \n"); //if not, print a message and exit the program
return 0;
}
// 3. -----------------------------------------------------------------------
// Define variables
// variables for measuring execution time
hipEvent_t czasStart, czasStop;
hipEventCreate(&czasStart);
hipEventCreate(&czasStop);
// declaration of the arrays, the grid size and the block size
float *d_A,*d_B,*d_C,*C;
int rozmiarGrid, rozmiarBlok;
// 4. -----------------------------------------------------------------------
// Declaration of matrices A, B and C - two-dimensional arrays
/*
hipMalloc - allocates memory on the graphics card
(void**) - pointer holding the address of the newly allocated memory
sizeof() - size of the memory to allocate
*/
gpuErrchk(hipMalloc((void**) &d_A, sizeof(float)*rozmiarMac*rozmiarMac));
gpuErrchk(hipMalloc((void**) &d_B, sizeof(float)*rozmiarMac*rozmiarMac));
gpuErrchk(hipMalloc((void**) &d_C, sizeof(float)*rozmiarMac*rozmiarMac));
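// obliczC accumulates into d_C with "+=", so zero the freshly allocated buffer first
gpuErrchk(hipMemset(d_C, 0, sizeof(float)*rozmiarMac*rozmiarMac));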
// 5. -----------------------------------------------------------------------
// Declaration of the grid size and the number of threads per block
/*
16 16 16 16
16 16 16 16
16 16 16 16
16 16 16 16
64 x 64 = 1024
*/
rozmiarGrid = 4;
rozmiarBlok = 16;
dim3 grids(rozmiarGrid, rozmiarGrid);
dim3 blocks(rozmiarBlok, rozmiarBlok);
// 6. -----------------------------------------------------------------------
// Call the "uzupelnij" kernel with the given grid size (grids) and number of threads (blocks)
hipLaunchKernelGGL(( uzupelnij) , dim3(grids), dim3(blocks), 0, 0, rozmiarMac, d_A, d_B);
hipDeviceSynchronize(); // blocks the current host thread until all pending computations on the graphics card have finished.
// 7. -----------------------------------------------------------------------
// Allocate memory for matrix C - without using CUDA
C = (float*)malloc(sizeof(float)*rozmiarMac*rozmiarMac);
// 8. -----------------------------------------------------------------------
// Start timing the computation
hipEventRecord(czasStart, 0);
hipLaunchKernelGGL(( obliczC) , dim3(grids), dim3(blocks), 0, 0, rozmiarMac, d_A, d_B, d_C); //
hipDeviceSynchronize(); // blocks the current host thread until all pending computations on the graphics card have finished.
//stop timing the computation
hipEventRecord(czasStop, 0);
hipEventSynchronize(czasStop);
/*
hipMemcpy() - copies data between the graphics card and RAM
C - pointer to the memory area being copied to
d_C - pointer to the memory area being copied from
sizeof(float)*rozmiarMac*rozmiarMac - number of bytes to copy
hipMemcpyDeviceToHost - the source memory belongs to the graphics card, while the destination memory belongs to the host (RAM)
*/
gpuErrchk(hipMemcpy(C, d_C , sizeof(float)*rozmiarMac*rozmiarMac, hipMemcpyDeviceToHost));
// 9. -----------------------------------------------------------------------
// Free the previously allocated memory on the graphics card
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
free(C); // free the memory in the usual way (without CUDA)
// 10. -----------------------------------------------------------------------
// Compute the elapsed computation time
float roznica;
gpuErrchk(hipEventElapsedTime(&roznica, czasStart, czasStop));
printf("Time: %f ms\n", roznica); // hipEventElapsedTime already reports milliseconds
return 0;
}
| 987dc2b11a93110751c99e15e1e76af7536dc0f2.cu | #include <stdlib.h>
#include <stdio.h>
#include <device_launch_parameters.h>
#include <cuda_runtime.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
//-----------------------------------------------------------------------------------------
// Checks that a GPU call succeeded - code found thanks to the kindness of StackOverflow
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// __global__ - qualifier - tells the compiler that the function should be compiled for the device rather than for the host
//-----------------------------------------------------------------------------------------
// Fills the matrices
__global__ void uzupelnij(long nMacierzy, float *d_A, float *d_B)
{
long d_x = threadIdx.x + blockIdx.x * blockDim.x;
long d_y = threadIdx.y + blockIdx.y * blockDim.y;
long temp = d_x + nMacierzy * d_y;
if (d_x < nMacierzy && d_y < nMacierzy)
{
d_A[temp] = fmodf(sinf(d_x)*d_x*d_y, 10);
d_B[temp] = fmodf(cosf(d_y)*(d_x + d_y), 10);
//printf ("thread: %dx%d - [%f,%f]\n",threadIdx.x,threadIdx.y,d_A[temp],d_B[temp]);
}
}
//-----------------------------------------------------------------------------------------
// Computes matrix C
__global__ void obliczC(long nMacierzy, float *d_A, float *d_B, float *d_C)
{
long d_x = blockIdx.x * blockDim.x + threadIdx.x;
long d_y = blockIdx.y * blockDim.y + threadIdx.y;
if (d_x < nMacierzy && d_y < nMacierzy)
{
for (int i = 0; i < nMacierzy; i++)
{
d_C[d_x + nMacierzy * d_y] += d_A[d_x + i * nMacierzy] * d_B[i + nMacierzy * d_y];
}
}
}
int main(int argc, char** argv)
{
// 1. -----------------------------------------------------------------------
// Check that all required input arguments are present.
if (!argv[1])
{
printf("0 ms - missing matrix size \n"); //if not, print a message and exit the program
return 0;
}
// 2. -----------------------------------------------------------------------
// Store the argument in a variable - the matrix size
long rozmiarMac = atol(argv[1]);
//check that the argument values are not outside the expected range
if (rozmiarMac <= 1)
{
printf("0 ms - invalid matrix size \n"); //if not, print a message and exit the program
return 0;
}
// 3. -----------------------------------------------------------------------
// Define variables
// variables for measuring execution time
cudaEvent_t czasStart, czasStop;
cudaEventCreate(&czasStart);
cudaEventCreate(&czasStop);
// declaration of the arrays, the grid size and the block size
float *d_A,*d_B,*d_C,*C;
int rozmiarGrid, rozmiarBlok;
// 4. -----------------------------------------------------------------------
// Declaration of matrices A, B and C - two-dimensional arrays
/*
cudaMalloc - allocates memory on the graphics card
(void**) - pointer holding the address of the newly allocated memory
sizeof() - size of the memory to allocate
*/
gpuErrchk(cudaMalloc((void**) &d_A, sizeof(float)*rozmiarMac*rozmiarMac));
gpuErrchk(cudaMalloc((void**) &d_B, sizeof(float)*rozmiarMac*rozmiarMac));
gpuErrchk(cudaMalloc((void**) &d_C, sizeof(float)*rozmiarMac*rozmiarMac));
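// obliczC accumulates into d_C with "+=", so zero the freshly allocated buffer first
gpuErrchk(cudaMemset(d_C, 0, sizeof(float)*rozmiarMac*rozmiarMac));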
// 5. -----------------------------------------------------------------------
// Declaration of the grid size and the number of threads per block
/*
16 16 16 16
16 16 16 16
16 16 16 16
16 16 16 16
64 x 64 = 1024
*/
rozmiarGrid = 4;
rozmiarBlok = 16;
dim3 grids(rozmiarGrid, rozmiarGrid);
dim3 blocks(rozmiarBlok, rozmiarBlok);
// 6. -----------------------------------------------------------------------
// Call the "uzupelnij" kernel with the given grid size (grids) and number of threads (blocks)
uzupelnij <<<grids, blocks>>> (rozmiarMac, d_A, d_B);
cudaDeviceSynchronize(); // blocks the current host thread until all pending computations on the graphics card have finished.
// 7. -----------------------------------------------------------------------
// Allocate memory for matrix C - without using CUDA
C = (float*)malloc(sizeof(float)*rozmiarMac*rozmiarMac);
// 8. -----------------------------------------------------------------------
// Start timing the computation
cudaEventRecord(czasStart, 0);
obliczC <<<grids, blocks>>> (rozmiarMac, d_A, d_B, d_C); //
cudaDeviceSynchronize(); // blocks the current host thread until all pending computations on the graphics card have finished.
//stop timing the computation
cudaEventRecord(czasStop, 0);
cudaEventSynchronize(czasStop);
/*
cudaMemcpy() - copies data between the graphics card and RAM
C - pointer to the memory area being copied to
d_C - pointer to the memory area being copied from
sizeof(float)*rozmiarMac*rozmiarMac - number of bytes to copy
cudaMemcpyDeviceToHost - the source memory belongs to the graphics card, while the destination memory belongs to the host (RAM)
*/
gpuErrchk(cudaMemcpy(C, d_C , sizeof(float)*rozmiarMac*rozmiarMac, cudaMemcpyDeviceToHost));
// 9. -----------------------------------------------------------------------
// Free the previously allocated memory on the graphics card
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
free(C); // free the memory in the usual way (without CUDA)
// 10. -----------------------------------------------------------------------
// Compute the elapsed computation time
float roznica;
gpuErrchk(cudaEventElapsedTime(&roznica, czasStart, czasStop));
printf("Time: %f ms\n", roznica); // cudaEventElapsedTime already reports milliseconds
return 0;
}
|
18295d359448af3493f0a4c2c50bdf72dc4cf529.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "decode.h"
#include "utils.h"
#include <algorithm>
#include <cstdint>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/execution_policy.h>
#include <thrust/gather.h>
#include <thrust/tabulate.h>
#include <thrust/count.h>
#include <thrust/find.h>
#include <thrust/system/hip/detail/hipcub/hipcub.hpp>
#include <thrust/system/hip/detail/cub/iterator/counting_input_iterator.cuh>
namespace retinanet {
namespace cuda {
int decode(int batch_size,
const void *const *inputs, void **outputs,
size_t height, size_t width, size_t scale,
size_t num_anchors, size_t num_classes,
const std::vector<float> &anchors, float score_thresh, int top_n,
void *workspace, size_t workspace_size, hipStream_t stream) {
int scores_size = num_anchors * num_classes * height * width;
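// Per-image score count; the gather lambda below decodes a flat index i as
// [anchor][class][y][x] over this num_anchors * num_classes * height * width layout.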
if (!workspace || !workspace_size) {
// Return required scratch space size cub style
workspace_size = get_size_aligned<float>(anchors.size()); // anchors
workspace_size += get_size_aligned<bool>(scores_size); // flags
workspace_size += get_size_aligned<int>(scores_size); // indices
workspace_size += get_size_aligned<int>(scores_size); // indices_sorted
workspace_size += get_size_aligned<float>(scores_size); // scores
workspace_size += get_size_aligned<float>(scores_size); // scores_sorted
size_t temp_size_flag = 0;
thrust::cuda_cub::hipcub::DeviceSelect::Flagged((void *)nullptr, temp_size_flag,
thrust::cuda_cub::hipcub::CountingInputIterator<int>(scores_size),
(bool *)nullptr, (int *)nullptr, (int *)nullptr, scores_size);
size_t temp_size_sort = 0;
thrust::cuda_cub::hipcub::DeviceRadixSort::SortPairsDescending((void *)nullptr, temp_size_sort,
(float *)nullptr, (float *)nullptr, (int *)nullptr, (int *)nullptr, scores_size);
workspace_size += ::max(temp_size_flag, temp_size_sort);
return workspace_size;
}
auto anchors_d = get_next_ptr<float>(anchors.size(), workspace, workspace_size);
hipMemcpy(anchors_d, anchors.data(), anchors.size() * sizeof *anchors_d, hipMemcpyHostToDevice);
auto flags = get_next_ptr<bool>(scores_size, workspace, workspace_size);
auto indices = get_next_ptr<int>(scores_size, workspace, workspace_size);
auto indices_sorted = get_next_ptr<int>(scores_size, workspace, workspace_size);
auto scores = get_next_ptr<float>(scores_size, workspace, workspace_size);
auto scores_sorted = get_next_ptr<float>(scores_size, workspace, workspace_size);
for (int batch = 0; batch < batch_size; batch++) {
auto in_scores = static_cast<const float *>(inputs[0]) + batch * scores_size;
auto in_boxes = static_cast<const float *>(inputs[1]) + batch * (scores_size / num_classes) * 4;
auto out_scores = static_cast<float *>(outputs[0]) + batch * top_n;
auto out_boxes = static_cast<float4 *>(outputs[1]) + batch * top_n;
auto out_classes = static_cast<float *>(outputs[2]) + batch * top_n;
// Discard scores below threshold
thrust::transform(thrust::device, in_scores, in_scores + scores_size,
flags, thrust::placeholders::_1 > score_thresh);
int *num_selected = reinterpret_cast<int *>(scores);
thrust::cuda_cub::hipcub::DeviceSelect::Flagged(workspace, workspace_size,
thrust::cuda_cub::hipcub::CountingInputIterator<int>(0),
flags, indices, num_selected, scores_size);
int num_detections = *thrust::device_pointer_cast(num_selected);
// Only keep top n scores
if (num_detections > top_n) {
thrust::gather(thrust::device, indices, indices + num_detections,
in_scores, scores);
thrust::cuda_cub::hipcub::DeviceRadixSort::SortPairsDescending(workspace, workspace_size,
scores, scores_sorted, indices, indices_sorted, num_detections);
indices = indices_sorted;
num_detections = top_n;
}
// Gather boxes
bool has_anchors = !anchors.empty();
thrust::transform(thrust::device, indices, indices + num_detections,
thrust::make_zip_iterator(thrust::make_tuple(out_scores, out_boxes, out_classes)),
[=] __device__ (int i) {
int x = i % width;
int y = (i / width) % height;
int a = (i / num_classes / height / width) % num_anchors;
int cls = (i / height / width) % num_classes;
float4 box = float4{
in_boxes[((a * 4 + 0) * height + y) * width + x],
in_boxes[((a * 4 + 1) * height + y) * width + x],
in_boxes[((a * 4 + 2) * height + y) * width + x],
in_boxes[((a * 4 + 3) * height + y) * width + x]
};
if (has_anchors) {
// Add anchors offsets to deltas
float x = (i % width) * scale;
float y = ((i / width) % height) * scale;
float *d = anchors_d + 4*a;
float x1 = x + d[0];
float y1 = y + d[1];
float x2 = x + d[2];
float y2 = y + d[3];
float w = x2 - x1 + 1.0f;
float h = y2 - y1 + 1.0f;
float pred_ctr_x = box.x * w + x1 + 0.5f * w;
float pred_ctr_y = box.y * h + y1 + 0.5f * h;
float pred_w = exp(box.z) * w;
float pred_h = exp(box.w) * h;
box = float4{
max(0.0f, pred_ctr_x - 0.5f * pred_w),
max(0.0f, pred_ctr_y - 0.5f * pred_h),
min(pred_ctr_x + 0.5f * pred_w - 1.0f, width * scale - 1.0f),
min(pred_ctr_y + 0.5f * pred_h - 1.0f, height * scale - 1.0f)
};
}
return thrust::make_tuple(in_scores[i], box, cls);
});
// Zero-out unused scores
if (num_detections < top_n) {
thrust::fill(thrust::device, out_scores + num_detections,
out_scores + top_n, 0.0f);
thrust::fill(thrust::device, out_classes + num_detections,
out_classes + top_n, 0.0f);
}
}
return 0;
}
}
} | 18295d359448af3493f0a4c2c50bdf72dc4cf529.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "decode.h"
#include "utils.h"
#include <algorithm>
#include <cstdint>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/execution_policy.h>
#include <thrust/gather.h>
#include <thrust/tabulate.h>
#include <thrust/count.h>
#include <thrust/find.h>
#include <thrust/system/cuda/detail/cub/device/device_radix_sort.cuh>
#include <thrust/system/cuda/detail/cub/iterator/counting_input_iterator.cuh>
namespace retinanet {
namespace cuda {
int decode(int batch_size,
const void *const *inputs, void **outputs,
size_t height, size_t width, size_t scale,
size_t num_anchors, size_t num_classes,
const std::vector<float> &anchors, float score_thresh, int top_n,
void *workspace, size_t workspace_size, cudaStream_t stream) {
int scores_size = num_anchors * num_classes * height * width;
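// Per-image score count; the gather lambda below decodes a flat index i as
// [anchor][class][y][x] over this num_anchors * num_classes * height * width layout.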
if (!workspace || !workspace_size) {
// Return required scratch space size cub style
workspace_size = get_size_aligned<float>(anchors.size()); // anchors
workspace_size += get_size_aligned<bool>(scores_size); // flags
workspace_size += get_size_aligned<int>(scores_size); // indices
workspace_size += get_size_aligned<int>(scores_size); // indices_sorted
workspace_size += get_size_aligned<float>(scores_size); // scores
workspace_size += get_size_aligned<float>(scores_size); // scores_sorted
size_t temp_size_flag = 0;
thrust::cuda_cub::cub::DeviceSelect::Flagged((void *)nullptr, temp_size_flag,
thrust::cuda_cub::cub::CountingInputIterator<int>(scores_size),
(bool *)nullptr, (int *)nullptr, (int *)nullptr, scores_size);
size_t temp_size_sort = 0;
thrust::cuda_cub::cub::DeviceRadixSort::SortPairsDescending((void *)nullptr, temp_size_sort,
(float *)nullptr, (float *)nullptr, (int *)nullptr, (int *)nullptr, scores_size);
workspace_size += std::max(temp_size_flag, temp_size_sort);
return workspace_size;
}
auto anchors_d = get_next_ptr<float>(anchors.size(), workspace, workspace_size);
cudaMemcpy(anchors_d, anchors.data(), anchors.size() * sizeof *anchors_d, cudaMemcpyHostToDevice);
auto flags = get_next_ptr<bool>(scores_size, workspace, workspace_size);
auto indices = get_next_ptr<int>(scores_size, workspace, workspace_size);
auto indices_sorted = get_next_ptr<int>(scores_size, workspace, workspace_size);
auto scores = get_next_ptr<float>(scores_size, workspace, workspace_size);
auto scores_sorted = get_next_ptr<float>(scores_size, workspace, workspace_size);
for (int batch = 0; batch < batch_size; batch++) {
auto in_scores = static_cast<const float *>(inputs[0]) + batch * scores_size;
auto in_boxes = static_cast<const float *>(inputs[1]) + batch * (scores_size / num_classes) * 4;
auto out_scores = static_cast<float *>(outputs[0]) + batch * top_n;
auto out_boxes = static_cast<float4 *>(outputs[1]) + batch * top_n;
auto out_classes = static_cast<float *>(outputs[2]) + batch * top_n;
// Discard scores below threshold
thrust::transform(thrust::device, in_scores, in_scores + scores_size,
flags, thrust::placeholders::_1 > score_thresh);
int *num_selected = reinterpret_cast<int *>(scores);
thrust::cuda_cub::cub::DeviceSelect::Flagged(workspace, workspace_size,
thrust::cuda_cub::cub::CountingInputIterator<int>(0),
flags, indices, num_selected, scores_size);
int num_detections = *thrust::device_pointer_cast(num_selected);
// Only keep top n scores
if (num_detections > top_n) {
thrust::gather(thrust::device, indices, indices + num_detections,
in_scores, scores);
thrust::cuda_cub::cub::DeviceRadixSort::SortPairsDescending(workspace, workspace_size,
scores, scores_sorted, indices, indices_sorted, num_detections);
indices = indices_sorted;
num_detections = top_n;
}
// Gather boxes
bool has_anchors = !anchors.empty();
thrust::transform(thrust::device, indices, indices + num_detections,
thrust::make_zip_iterator(thrust::make_tuple(out_scores, out_boxes, out_classes)),
[=] __device__ (int i) {
int x = i % width;
int y = (i / width) % height;
int a = (i / num_classes / height / width) % num_anchors;
int cls = (i / height / width) % num_classes;
float4 box = float4{
in_boxes[((a * 4 + 0) * height + y) * width + x],
in_boxes[((a * 4 + 1) * height + y) * width + x],
in_boxes[((a * 4 + 2) * height + y) * width + x],
in_boxes[((a * 4 + 3) * height + y) * width + x]
};
if (has_anchors) {
// Add anchors offsets to deltas
float x = (i % width) * scale;
float y = ((i / width) % height) * scale;
float *d = anchors_d + 4*a;
float x1 = x + d[0];
float y1 = y + d[1];
float x2 = x + d[2];
float y2 = y + d[3];
float w = x2 - x1 + 1.0f;
float h = y2 - y1 + 1.0f;
float pred_ctr_x = box.x * w + x1 + 0.5f * w;
float pred_ctr_y = box.y * h + y1 + 0.5f * h;
float pred_w = exp(box.z) * w;
float pred_h = exp(box.w) * h;
box = float4{
max(0.0f, pred_ctr_x - 0.5f * pred_w),
max(0.0f, pred_ctr_y - 0.5f * pred_h),
min(pred_ctr_x + 0.5f * pred_w - 1.0f, width * scale - 1.0f),
min(pred_ctr_y + 0.5f * pred_h - 1.0f, height * scale - 1.0f)
};
}
return thrust::make_tuple(in_scores[i], box, cls);
});
// Zero-out unused scores
if (num_detections < top_n) {
thrust::fill(thrust::device, out_scores + num_detections,
out_scores + top_n, 0.0f);
thrust::fill(thrust::device, out_classes + num_detections,
out_classes + top_n, 0.0f);
}
}
return 0;
}
}
} |
aa9626e22bdf7b1afcfd6dcfad1ef2d503d7233a.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// std::system includes
#include <cstdio>
// CUDA-C includes
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <helper_cuda.h>
#define TOTAL_SIZE 256*1024*1024
// # threadblocks
#define TBLOCKS 1024
#define THREADS 512
// throw error on equality
#define ERR_EQ(X,Y) do { if ((X) == (Y)) { \
fprintf(stderr,"Error in %s at %s:%d\n",__func__,__FILE__,__LINE__); \
exit(-1);}} while(0)
// throw error on difference
#define ERR_NE(X,Y) do { if ((X) != (Y)) { \
fprintf(stderr,"Error in %s at %s:%d\n",__func__,__FILE__,__LINE__); \
exit(-1);}} while(0)
// read the source array, do some dummy compute work, and write the results to the destination array
__global__ void slow_kernel(int *dst, int *src, size_t n)
{
int num = gridDim.x * blockDim.x;
int id = blockDim.x * blockIdx.x + threadIdx.x;
int ret = 0; // accumulate garbage computation in ret and write it to dst[i]
for (int i = id; i < n / sizeof(int); i += num) {
for (int j = 0; j < 1000; j++) {
ret += src[i] * j;
}
dst[i] = ret;
}
}
// initialise memory
void mem_init(int *buf, size_t n) {
for (int i = 0; i < n / sizeof(int); i++) {
buf[i] = i;
}
}
// Forward declaration
int preempt_stream(int, int);
int main(int argc, char **argv)
{
hipDeviceProp_t device_prop;
int dev_id;
printf("Starting [%s]...\n", argv[0]);
// set device
dev_id = findCudaDevice(argc, (const char **) argv);
checkCudaErrors(hipGetDeviceProperties(&device_prop, dev_id));
if ((device_prop.major << 4) + device_prop.minor < 0x35)
{
fprintf(stderr, "%s requires Compute Capability of SM 3.5 or higher to run.\nexiting...\n", argv[0]);
exit(EXIT_WAIVED);
}
// get the range of priorities available
// [ lowest_priority, greatest_priority ] - numerically smaller values mean higher priority
int priority_low;
int priority_hi;
checkCudaErrors(hipDeviceGetStreamPriorityRange(&priority_low, &priority_hi));
printf("CUDA stream priority range: LOW: %d to HIGH: %d\n", priority_low, priority_hi);
preempt_stream(priority_low, priority_hi);
exit(EXIT_SUCCESS);
}
/**
* Creates a stream with low priority and starts a long-running kernel on it.
* Creates a stream with high priority and runs a short-running kernel on it,
* after the low-priority kernel has begun.
* -- If preemption works, the run time of the low priority kernel should
* be extended by the runtime of the high priority kernel which preempts it.
*/
int preempt_stream(int priority_low, int priority_hi) {
// Create streams
size_t n_streams = 2; // Two streams (low and high)
// let index 0 hold low and 1 hold high
hipStream_t streams[n_streams];
checkCudaErrors(hipStreamCreateWithPriority(&streams[0],
hipStreamNonBlocking, priority_low));
checkCudaErrors(hipStreamCreateWithPriority(&streams[1],
hipStreamNonBlocking, priority_hi));
size_t size;
size = TOTAL_SIZE;
// initialise host data
int *h_src[n_streams];
for (int i = 0; i < n_streams; i++) {
ERR_EQ(h_src[i] = (int *) malloc(size), NULL);
mem_init(h_src[i], size);
}
// initialise device data
int *h_dst[n_streams];
for (int i = 0; i < n_streams; i++) {
ERR_EQ(h_dst[i] = (int *) malloc(size), NULL);
memset(h_dst[i], 0, size);
}
// copy source data -> device
int *d_src[n_streams];
for (int i = 0; i < n_streams; i++) {
checkCudaErrors(hipMalloc(&d_src[i], size));
checkCudaErrors(hipMemcpy(d_src[i], h_src[i], size, hipMemcpyHostToDevice));
}
// allocate memory for memcopy destination
int *d_dst[n_streams];
for (int i = 0; i < n_streams; i++) {
checkCudaErrors(hipMalloc(&d_dst[i], size));
}
/* */
// Begin profiling
checkCudaErrors(hipProfilerStart());
// Time low priority on its own
{
hipEvent_t start, end;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&end));
checkCudaErrors(hipEventRecord(start, streams[0]));
hipLaunchKernelGGL(( slow_kernel), dim3(TBLOCKS), dim3(THREADS), 0, streams[0], d_dst[0], d_src[0], TOTAL_SIZE);
checkCudaErrors(hipEventRecord(end, streams[0]));
checkCudaErrors(hipEventSynchronize(end));
float ms;
checkCudaErrors(hipEventElapsedTime(&ms, start, end));
printf("Low priority solo elapsed time %0.6f ms\n", ms);
}
// Time high priority on its own
{
hipEvent_t start, end;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&end));
checkCudaErrors(hipEventRecord(start, streams[1]));
hipLaunchKernelGGL(( slow_kernel), dim3(TBLOCKS), dim3(THREADS), 0, streams[1], d_dst[1], d_src[1], TOTAL_SIZE);
checkCudaErrors(hipEventRecord(end, streams[1]));
checkCudaErrors(hipEventSynchronize(end));
float ms;
checkCudaErrors(hipEventElapsedTime(&ms, start, end));
printf("High priority solo elapsed time %0.6f ms\n", ms);
}
// Start low priority then interrupt it with high priority
{
// create some events
hipEvent_t ev_start[n_streams];
hipEvent_t ev_end[n_streams];
for (int i = 0; i < n_streams; i++) {
checkCudaErrors(hipEventCreate(&ev_start[i]));
checkCudaErrors(hipEventCreate(&ev_end[i]));
}
checkCudaErrors(hipEventRecord(ev_start[0], streams[0]));
hipLaunchKernelGGL(( slow_kernel), dim3(TBLOCKS), dim3(THREADS), 0, streams[0], d_dst[0], d_src[0], TOTAL_SIZE);
checkCudaErrors(hipEventRecord(ev_end[0], streams[0]));
// synchronize on the start, so we launch this after the low priority kernel has started
checkCudaErrors(hipEventSynchronize(ev_start[0]));
checkCudaErrors(hipEventRecord(ev_start[1], streams[1]));
hipLaunchKernelGGL(( slow_kernel), dim3(TBLOCKS), dim3(THREADS), 0, streams[1], d_dst[1], d_src[1], TOTAL_SIZE);
checkCudaErrors(hipEventRecord(ev_end[1], streams[1]));
checkCudaErrors(hipEventSynchronize(ev_end[1]));
checkCudaErrors(hipEventSynchronize(ev_end[0]));
float ms[n_streams];
for (int i = 0; i < n_streams; i++) {
checkCudaErrors(hipEventElapsedTime(&ms[i], ev_start[i], ev_end[i]));
}
printf("Low priority preempted by high priority test\n");
printf("Low priority elapsed time %0.6f ms\n", ms[0]);
printf("High priority elapsed time %0.6f ms\n", ms[1]);
}
// Stop profiling
checkCudaErrors(hipProfilerStop());
/* */
size = TOTAL_SIZE;
for (int i = 0; i < n_streams; i++) {
checkCudaErrors(hipMemcpy(h_dst[i], d_dst[i], size, hipMemcpyDeviceToHost));
}
// check results of kernels
/*
// If we were doing some easily checkable computation, we could
// verify that the result is correct here
for (int i = 0; i < n_streams; i++) {
ERR_NE(memcmp(h_dst[i], h_src[i], size), 0);
}
*/
// Clean up
for (int i = 0; i < n_streams; i++) {
checkCudaErrors(hipFree(d_src[i]));
checkCudaErrors(hipFree(d_dst[i]));
}
return 0;
}
| aa9626e22bdf7b1afcfd6dcfad1ef2d503d7233a.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// std::system includes
#include <cstdio>
// CUDA-C includes
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_profiler_api.h>
#include <helper_cuda.h>
#define TOTAL_SIZE 256*1024*1024
// # threadblocks
#define TBLOCKS 1024
#define THREADS 512
// throw error on equality
#define ERR_EQ(X,Y) do { if ((X) == (Y)) { \
fprintf(stderr,"Error in %s at %s:%d\n",__func__,__FILE__,__LINE__); \
exit(-1);}} while(0)
// throw error on difference
#define ERR_NE(X,Y) do { if ((X) != (Y)) { \
fprintf(stderr,"Error in %s at %s:%d\n",__func__,__FILE__,__LINE__); \
exit(-1);}} while(0)
// read the source array, do some dummy compute work, and write the results to the destination array
__global__ void slow_kernel(int *dst, int *src, size_t n)
{
int num = gridDim.x * blockDim.x;
int id = blockDim.x * blockIdx.x + threadIdx.x;
int ret = 0; // accumulate garbage computation in ret and write it to dst[i]
for (int i = id; i < n / sizeof(int); i += num) {
for (int j = 0; j < 1000; j++) {
ret += src[i] * j;
}
dst[i] = ret;
}
}
// initialise memory
void mem_init(int *buf, size_t n) {
for (int i = 0; i < n / sizeof(int); i++) {
buf[i] = i;
}
}
// Forward declaration
int preempt_stream(int, int);
int main(int argc, char **argv)
{
cudaDeviceProp device_prop;
int dev_id;
printf("Starting [%s]...\n", argv[0]);
// set device
dev_id = findCudaDevice(argc, (const char **) argv);
checkCudaErrors(cudaGetDeviceProperties(&device_prop, dev_id));
if ((device_prop.major << 4) + device_prop.minor < 0x35)
{
fprintf(stderr, "%s requires Compute Capability of SM 3.5 or higher to run.\nexiting...\n", argv[0]);
exit(EXIT_WAIVED);
}
// get the range of priorities available
// [ lowest_priority, greatest_priority ] - numerically smaller values mean higher priority
int priority_low;
int priority_hi;
checkCudaErrors(cudaDeviceGetStreamPriorityRange(&priority_low, &priority_hi));
printf("CUDA stream priority range: LOW: %d to HIGH: %d\n", priority_low, priority_hi);
preempt_stream(priority_low, priority_hi);
exit(EXIT_SUCCESS);
}
/**
* Creates a stream with low priority and starts a long-running kernel on it.
* Creates a stream with high priority and runs a short-running kernel on it,
* after the low-priority kernel has begun.
* -- If preemption works, the run time of the low priority kernel should
* be extended by the runtime of the high priority kernel which preempts it.
*/
int preempt_stream(int priority_low, int priority_hi) {
// Create streams
size_t n_streams = 2; // Two streams (low and high)
// let index 0 hold low and 1 hold high
cudaStream_t streams[n_streams];
checkCudaErrors(cudaStreamCreateWithPriority(&streams[0],
cudaStreamNonBlocking, priority_low));
checkCudaErrors(cudaStreamCreateWithPriority(&streams[1],
cudaStreamNonBlocking, priority_hi));
size_t size;
size = TOTAL_SIZE;
// initialise host data
int *h_src[n_streams];
for (int i = 0; i < n_streams; i++) {
ERR_EQ(h_src[i] = (int *) malloc(size), NULL);
mem_init(h_src[i], size);
}
// initialise device data
int *h_dst[n_streams];
for (int i = 0; i < n_streams; i++) {
ERR_EQ(h_dst[i] = (int *) malloc(size), NULL);
memset(h_dst[i], 0, size);
}
// copy source data -> device
int *d_src[n_streams];
for (int i = 0; i < n_streams; i++) {
checkCudaErrors(cudaMalloc(&d_src[i], size));
checkCudaErrors(cudaMemcpy(d_src[i], h_src[i], size, cudaMemcpyHostToDevice));
}
// allocate memory for memcopy destination
int *d_dst[n_streams];
for (int i = 0; i < n_streams; i++) {
checkCudaErrors(cudaMalloc(&d_dst[i], size));
}
/* */
  // Begin profiling
checkCudaErrors(cudaProfilerStart());
// Time low priority on its own
{
cudaEvent_t start, end;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&end));
checkCudaErrors(cudaEventRecord(start, streams[0]));
slow_kernel<<<TBLOCKS, THREADS, 0, streams[0]>>>(d_dst[0], d_src[0], TOTAL_SIZE);
checkCudaErrors(cudaEventRecord(end, streams[0]));
checkCudaErrors(cudaEventSynchronize(end));
float ms;
checkCudaErrors(cudaEventElapsedTime(&ms, start, end));
printf("Low priority solo elapsed time %0.6f ms\n", ms);
}
// Time high priority on its own
{
cudaEvent_t start, end;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&end));
checkCudaErrors(cudaEventRecord(start, streams[1]));
slow_kernel<<<TBLOCKS, THREADS, 0, streams[1]>>>(d_dst[1], d_src[1], TOTAL_SIZE);
checkCudaErrors(cudaEventRecord(end, streams[1]));
checkCudaErrors(cudaEventSynchronize(end));
float ms;
checkCudaErrors(cudaEventElapsedTime(&ms, start, end));
printf("High priority solo elapsed time %0.6f ms\n", ms);
}
// Start low priority then interrupt it with high priority
{
// create some events
cudaEvent_t ev_start[n_streams];
cudaEvent_t ev_end[n_streams];
for (int i = 0; i < n_streams; i++) {
checkCudaErrors(cudaEventCreate(&ev_start[i]));
checkCudaErrors(cudaEventCreate(&ev_end[i]));
}
checkCudaErrors(cudaEventRecord(ev_start[0], streams[0]));
slow_kernel<<<TBLOCKS, THREADS, 0, streams[0]>>>(d_dst[0], d_src[0], TOTAL_SIZE);
checkCudaErrors(cudaEventRecord(ev_end[0], streams[0]));
// synchronize on the start, so we launch this after the low priority kernel has started
checkCudaErrors(cudaEventSynchronize(ev_start[0]));
checkCudaErrors(cudaEventRecord(ev_start[1], streams[1]));
slow_kernel<<<TBLOCKS, THREADS, 0, streams[1]>>>(d_dst[1], d_src[1], TOTAL_SIZE);
checkCudaErrors(cudaEventRecord(ev_end[1], streams[1]));
checkCudaErrors(cudaEventSynchronize(ev_end[1]));
checkCudaErrors(cudaEventSynchronize(ev_end[0]));
float ms[n_streams];
for (int i = 0; i < n_streams; i++) {
checkCudaErrors(cudaEventElapsedTime(&ms[i], ev_start[i], ev_end[i]));
}
printf("Low priority preempted by high priority test\n");
printf("Low priority elapsed time %0.6f ms\n", ms[0]);
printf("High priority elapsed time %0.6f ms\n", ms[1]);
}
// Stop profiling
checkCudaErrors(cudaProfilerStop());
/* */
size = TOTAL_SIZE;
for (int i = 0; i < n_streams; i++) {
checkCudaErrors(cudaMemcpy(h_dst[i], d_dst[i], size, cudaMemcpyDeviceToHost));
}
// check results of kernels
/*
// If we were doing some easily checkable computation, we could
// verify that the result is correct here
for (int i = 0; i < n_streams; i++) {
ERR_NE(memcmp(h_dst[i], h_src[i], size), 0);
}
*/
// Clean up
for (int i = 0; i < n_streams; i++) {
checkCudaErrors(cudaFree(d_src[i]));
checkCudaErrors(cudaFree(d_dst[i]));
}
return 0;
}
|
ac8fbb5e2149a0ac86ad74fd5e50a85041ecdced.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#include <device_launch_parameters.h>
#include <opencv2/gpu/device/saturate_cast.hpp>
#include "MyBlender.h"
using namespace std;
namespace cv {
namespace gpu {
namespace device {
template<typename T>
__global__ void kernelFeed(int height, int width, T *dst, const T *src, const uchar *mask, int dStep, int sStep, int mStep)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
//int offset = x + y * mStep; // offset for mask
if (x < width && y < height && mask[x + y * mStep])
{
int dOffset = x * 3 + y * dStep; // offset for dst
int offset = x * 3 + y * sStep; // offset for src
dst[dOffset] = src[offset];
dst[dOffset + 1] = src[offset + 1];
dst[dOffset + 2] = src[offset + 2];
}
}
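// Worked offset example (assumed image geometry, only to illustrate the indexing
// above): for an 8UC3 image 640 pixels wide with step = 1920 bytes per row
// (1 byte per element, 3 interleaved channels), the pixel at (x = 10, y = 2)
// starts at offset = 10 * 3 + 2 * 1920 = 3870, so src[3870], src[3871] and
// src[3872] hold its three channel values; dOffset is formed the same way
// against dst's step.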
void cudaFeed(const gpu::GpuMat &image, const gpu::GpuMat &mask, gpu::GpuMat &dst, int dx, int dy)
{
dim3 threads(16, 16); // 256 threads yields better performance
dim3 blocks(image.cols / threads.x, image.rows / threads.y);
switch (image.type())
{
case CV_8UC3:
hipLaunchKernelGGL(( kernelFeed), dim3(blocks), dim3(threads), 0, 0, image.rows, image.cols,
dst.ptr<uchar>(dy) + dx * 3, image.ptr<uchar>(), mask.ptr<uchar>(),
dst.step, image.step, mask.step);
hipDeviceSynchronize();
break;
case CV_16SC3:
hipLaunchKernelGGL(( kernelFeed), dim3(blocks), dim3(threads), 0, 0, image.rows, image.cols,
dst.ptr<short>(dy) + dx * 3, image.ptr<short>(), mask.ptr<uchar>(),
dst.step, image.step, mask.step);
hipDeviceSynchronize();
break;
}
}
void cudaFeed(const Mat &image, const Mat &mask, gpu::GpuMat &dst, int dx, int dy)
{
gpu::GpuMat gpuImg;
gpu::GpuMat gpuMask;
gpuImg.upload(image);
gpuMask.upload(mask);
cudaFeed(gpuImg, gpuMask, dst, dx, dy);
}
void cudaFeed(const Mat &image, const Mat &mask, Mat &dst, int dx, int dy)
{
dim3 threads(8, 8); // 64 threads yields better performance
dim3 blocks(image.cols / threads.x, image.rows / threads.y);
switch (image.type())
{
case CV_8UC3:
hipLaunchKernelGGL(( kernelFeed), dim3(blocks), dim3(threads), 0, 0, image.rows, image.cols,
dst.ptr<uchar>(dy) + dx * 3, image.ptr<uchar>(), mask.ptr<uchar>(),
dst.step, image.step, mask.step);
hipDeviceSynchronize();
break;
case CV_16SC3:
hipLaunchKernelGGL(( kernelFeed), dim3(blocks), dim3(threads), 0, 0, image.rows, image.cols,
dst.ptr<short>(dy) + dx * 3, image.ptr<short>(), mask.ptr<uchar>(),
dst.step, image.step, mask.step);
hipDeviceSynchronize();
break;
}
}
} // namespace device
} // namespace gpu
} // namespace cv;
| ac8fbb5e2149a0ac86ad74fd5e50a85041ecdced.cu | //#include <device_launch_parameters.h>
#include <opencv2/gpu/device/saturate_cast.hpp>
#include "MyBlender.h"
using namespace std;
namespace cv {
namespace gpu {
namespace device {
template<typename T>
__global__ void kernelFeed(int height, int width, T *dst, const T *src, const uchar *mask, int dStep, int sStep, int mStep)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
//int offset = x + y * mStep; // offset for mask
if (x < width && y < height && mask[x + y * mStep])
{
int dOffset = x * 3 + y * dStep; // offset for dst
int offset = x * 3 + y * sStep; // offset for src
dst[dOffset] = src[offset];
dst[dOffset + 1] = src[offset + 1];
dst[dOffset + 2] = src[offset + 2];
}
}
void cudaFeed(const gpu::GpuMat &image, const gpu::GpuMat &mask, gpu::GpuMat &dst, int dx, int dy)
{
dim3 threads(16, 16); // 256 threads yealds better performance
dim3 blocks(image.cols / threads.x, image.rows / threads.y);
switch (image.type())
{
case CV_8UC3:
kernelFeed<<<blocks, threads>>>(image.rows, image.cols,
dst.ptr<uchar>(dy) + dx * 3, image.ptr<uchar>(), mask.ptr<uchar>(),
dst.step, image.step, mask.step);
cudaDeviceSynchronize();
break;
case CV_16SC3:
kernelFeed<<<blocks, threads>>>(image.rows, image.cols,
dst.ptr<short>(dy) + dx * 3, image.ptr<short>(), mask.ptr<uchar>(),
dst.step, image.step, mask.step);
cudaDeviceSynchronize();
break;
}
}
void cudaFeed(const Mat &image, const Mat &mask, gpu::GpuMat &dst, int dx, int dy)
{
gpu::GpuMat gpuImg;
gpu::GpuMat gpuMask;
gpuImg.upload(image);
gpuMask.upload(mask);
cudaFeed(gpuImg, gpuMask, dst, dx, dy);
}
void cudaFeed(const Mat &image, const Mat &mask, Mat &dst, int dx, int dy)
{
dim3 threads(8, 8); // 64 threads yields better performance
dim3 blocks(image.cols / threads.x, image.rows / threads.y);
switch (image.type())
{
case CV_8UC3:
kernelFeed<<<blocks, threads>>>(image.rows, image.cols,
dst.ptr<uchar>(dy) + dx * 3, image.ptr<uchar>(), mask.ptr<uchar>(),
dst.step, image.step, mask.step);
cudaDeviceSynchronize();
break;
case CV_16SC3:
kernelFeed<<<blocks, threads>>>(image.rows, image.cols,
dst.ptr<short>(dy) + dx * 3, image.ptr<short>(), mask.ptr<uchar>(),
dst.step, image.step, mask.step);
cudaDeviceSynchronize();
break;
}
}
} // namespace device
} // namespace gpu
} // namespace cv;
|
033c1733ce6059b5e5bce36a7e73f5ea85241068.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
// Kernel function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
// threadIdx.x contains the index of the current thread within its block,
// blockDim.x contains the number of threads in the block
int index = threadIdx.x;
int stride = blockDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
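// Illustration of the grid-stride loop above, using the launch configuration in
// main() below: with a single block of 256 threads, stride = 256, so thread 5
// processes elements 5, 261, 517, ... until the index passes n; together the
// 256 threads cover all N = 1<<20 elements even though N far exceeds the
// thread count.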
int main(void)
{
int N = 1<<20;
float *x, *y;
// Allocate Unified Memory accessible from CPU or GPU
hipMallocManaged(&x, N*sizeof(float));
hipMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
// Execution configuration
// CUDA GPUs run kernels using blocks of threads that are a multiple of 32 in size
hipLaunchKernelGGL(( add), dim3(1), dim3(256), 0, 0, N, x, y);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
hipFree(x);
hipFree(y);
return 0;
} | 033c1733ce6059b5e5bce36a7e73f5ea85241068.cu | #include <iostream>
#include <math.h>
// Kernel function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
// threadIdx.x contains the index of the current thread within its block,
// blockDim.x contains the number of threads in the block
int index = threadIdx.x;
int stride = blockDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y;
// Allocate Unified Memory – accessible from CPU or GPU
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
// Execution configuration
// CUDA GPUs run kernels using blocks of threads that are a multiple of 32 in size
add<<<1, 256>>>(N, x, y);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
} |
ca5c40bee46feeb1a6480bb292d9d33f4e654229.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2017-2018 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <thrust/iterator/zip_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/gather.h>
#include <thrust/transform.h>
#include <cstring>
#include <algorithm>
#include <exception>
#include "query/algorithm.hpp"
#include "query/iterator.hpp"
#include "query/time_series_aggregate.h"
#include "memory.hpp"
CGoCallResHandle Sort(DimensionColumnVector keys,
int length,
void *cudaStream,
int device) {
CGoCallResHandle resHandle = {nullptr, nullptr};
try {
#ifdef RUN_ON_DEVICE
hipSetDevice(device);
#endif
ares::sort(keys, length,
reinterpret_cast<hipStream_t>(cudaStream));
CheckCUDAError("Sort");
}
catch (std::exception &e) {
std::cerr << "Exception happend when doing Sort:" << e.what()
<< std::endl;
resHandle.pStrErr = strdup(e.what());
}
return resHandle;
}
CGoCallResHandle Reduce(DimensionColumnVector inputKeys,
uint8_t *inputValues,
DimensionColumnVector outputKeys,
uint8_t *outputValues,
int valueBytes,
int length,
AggregateFunction aggFunc,
void *stream,
int device) {
CGoCallResHandle resHandle = {nullptr, nullptr};
try {
#ifdef RUN_ON_DEVICE
hipSetDevice(device);
#endif
hipStream_t cudaStream = reinterpret_cast<hipStream_t>(stream);
resHandle.res = reinterpret_cast<void *>(ares::reduce(inputKeys,
inputValues,
outputKeys,
outputValues,
valueBytes,
length,
aggFunc,
cudaStream));
CheckCUDAError("Reduce");
return resHandle;
}
catch (std::exception &e) {
std::cerr << "Exception happend when doing Reduce:" << e.what()
<< std::endl;
resHandle.pStrErr = strdup(e.what());
}
return resHandle;
}
CGoCallResHandle Expand(DimensionColumnVector inputKeys,
DimensionColumnVector outputKeys,
uint32_t *baseCounts,
uint32_t *indexVector,
int indexVectorLen,
int outputOccupiedLen,
void *stream,
int device) {
CGoCallResHandle resHandle = {nullptr, nullptr};
try {
SET_DEVICE(device);
hipStream_t cudaStream = reinterpret_cast<hipStream_t>(stream);
resHandle.res = reinterpret_cast<void *>(ares::expand(inputKeys,
outputKeys,
baseCounts,
indexVector,
indexVectorLen,
outputOccupiedLen,
cudaStream));
CheckCUDAError("Expand");
return resHandle;
}
catch (std::exception &e) {
std::cerr << "Exception happend when doing Expand:" << e.what()
<< std::endl;
resHandle.pStrErr = strdup(e.what());
}
return resHandle;
}
namespace ares {
// sort based on DimensionColumnVector
void sort(DimensionColumnVector keys,
int length,
hipStream_t cudaStream) {
DimensionHashIterator hashIter(keys.DimValues,
keys.IndexVector,
keys.NumDimsPerDimWidth,
keys.VectorCapacity);
thrust::copy(GET_EXECUTION_POLICY(cudaStream),
hashIter,
hashIter + length,
keys.HashValues);
thrust::stable_sort_by_key(GET_EXECUTION_POLICY(cudaStream),
keys.HashValues,
keys.HashValues + length,
keys.IndexVector);
}
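// Small example of what sort() does (made-up hash values): with
// IndexVector = [0, 1, 2, 3] and per-row dimension hashes [9, 3, 9, 3], the
// stable sort by hash leaves HashValues = [3, 3, 9, 9] and permutes
// IndexVector to [1, 3, 0, 2]; only the index vector is reordered, the
// dimension values themselves stay in place.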
template<typename Value, typename AggFunc>
int reduceInternal(uint64_t *inputHashValues, uint32_t *inputIndexVector,
uint8_t *inputValues, uint64_t *outputHashValues,
uint32_t *outputIndexVector, uint8_t *outputValues,
int length, hipStream_t cudaStream) {
thrust::equal_to<uint64_t> binaryPred;
AggFunc aggFunc;
ReduceByHashFunctor<AggFunc> reduceFunc(aggFunc);
auto zippedInputIter = thrust::make_zip_iterator(thrust::make_tuple(
inputIndexVector,
thrust::make_permutation_iterator(reinterpret_cast<Value *>(inputValues),
inputIndexVector)));
auto zippedOutputIter = thrust::make_zip_iterator(thrust::make_tuple(
outputIndexVector, reinterpret_cast<Value *>(outputValues)));
auto resEnd = thrust::reduce_by_key(GET_EXECUTION_POLICY(cudaStream),
inputHashValues,
inputHashValues + length,
zippedInputIter,
thrust::make_discard_iterator(),
zippedOutputIter,
binaryPred,
reduceFunc);
return thrust::get<1>(resEnd) - zippedOutputIter;
}
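// Example of the reduction above (hypothetical data, and assuming
// ReduceByHashFunctor keeps the index of the first row in each hash group):
// sorted hashes [5, 5, 9], index vector [2, 0, 1], and values-at-index
// [10, 4, 7] with AggFunc = thrust::plus produce the output rows
// (index 2, 14) and (index 1, 7), and the function returns 2.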
struct rolling_avg {
typedef uint64_t first_argument_type;
typedef uint64_t second_argument_type;
typedef uint64_t result_type;
__host__ __device__ uint64_t operator()(
uint64_t lhs, uint64_t rhs) const {
uint32_t lCount = lhs >> 32;
uint32_t rCount = rhs >> 32;
uint32_t totalCount = lCount + rCount;
if (totalCount == 0) {
return 0;
}
uint64_t res = 0;
*(reinterpret_cast<uint32_t *>(&res) + 1) = totalCount;
// do division first to avoid overflow.
*reinterpret_cast<float_t*>(&res) =
*reinterpret_cast<float_t*>(&lhs) / totalCount * lCount +
*reinterpret_cast<float_t*>(&rhs) / totalCount * rCount;
return res;
}
};
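// Worked example of the packed rolling average (illustrative numbers): if lhs
// packs count = 3 in its high 32 bits and avg = 2.0f in its low 32 bits, and
// rhs packs count = 1 with avg = 4.0f, the result packs count = 4 with
// avg = 2.0 * 3/4 + 4.0 * 1/4 = 2.5; dividing before multiplying keeps the
// intermediate values small and avoids overflow.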
int bindValueAndAggFunc(uint64_t *inputHashValues,
uint32_t *inputIndexVector,
uint8_t *inputValues,
uint64_t *outputHashValues,
uint32_t *outputIndexVector,
uint8_t *outputValues,
int valueBytes,
int length,
AggregateFunction aggFunc,
hipStream_t cudaStream) {
switch (aggFunc) {
#define REDUCE_INTERNAL(ValueType, AggFunc) \
return reduceInternal< ValueType, AggFunc >( \
inputHashValues, \
inputIndexVector, \
inputValues, \
outputHashValues, \
outputIndexVector, \
outputValues, \
length, \
cudaStream);
case AGGR_SUM_UNSIGNED:
if (valueBytes == 4) {
REDUCE_INTERNAL(uint32_t, thrust::plus<uint32_t>)
} else {
REDUCE_INTERNAL(uint64_t, thrust::plus<uint64_t>)
}
case AGGR_SUM_SIGNED:
if (valueBytes == 4) {
REDUCE_INTERNAL(int32_t, thrust::plus<int32_t>)
} else {
REDUCE_INTERNAL(int64_t, thrust::plus<int64_t>)
}
case AGGR_SUM_FLOAT:
if (valueBytes == 4) {
REDUCE_INTERNAL(float_t, thrust::plus<float_t>)
} else {
REDUCE_INTERNAL(double_t, thrust::plus<double_t>)
}
case AGGR_MIN_UNSIGNED:
REDUCE_INTERNAL(uint32_t, thrust::minimum<uint32_t>)
case AGGR_MIN_SIGNED:
REDUCE_INTERNAL(int32_t, thrust::minimum<int32_t>)
case AGGR_MIN_FLOAT:
REDUCE_INTERNAL(float_t, thrust::minimum<float_t>)
case AGGR_MAX_UNSIGNED:
REDUCE_INTERNAL(uint32_t, thrust::maximum<uint32_t>)
case AGGR_MAX_SIGNED:
REDUCE_INTERNAL(int32_t, thrust::maximum<int32_t>)
case AGGR_MAX_FLOAT:
REDUCE_INTERNAL(float_t, thrust::maximum<float_t>)
case AGGR_AVG_FLOAT:
REDUCE_INTERNAL(uint64_t, rolling_avg)
default:
throw std::invalid_argument("Unsupported aggregation function type");
}
}
int reduce(DimensionColumnVector inputKeys, uint8_t *inputValues,
DimensionColumnVector outputKeys, uint8_t *outputValues,
int valueBytes, int length, AggregateFunction aggFunc,
hipStream_t cudaStream) {
int outputLength = bindValueAndAggFunc(
inputKeys.HashValues,
inputKeys.IndexVector,
inputValues,
outputKeys.HashValues,
outputKeys.IndexVector,
outputValues,
valueBytes,
length,
aggFunc,
cudaStream);
DimensionColumnPermutateIterator iterIn(
inputKeys.DimValues, outputKeys.IndexVector, inputKeys.VectorCapacity,
outputLength, inputKeys.NumDimsPerDimWidth);
DimensionColumnOutputIterator iterOut(outputKeys.DimValues,
inputKeys.VectorCapacity, outputLength,
inputKeys.NumDimsPerDimWidth, 0);
int numDims = 0;
for (int i = 0; i < NUM_DIM_WIDTH; i++) {
numDims += inputKeys.NumDimsPerDimWidth[i];
}
// copy dim values into output
thrust::copy(GET_EXECUTION_POLICY(cudaStream),
iterIn, iterIn + numDims * 2 * outputLength, iterOut);
return outputLength;
}
int expand(DimensionColumnVector inputKeys,
DimensionColumnVector outputKeys,
uint32_t *baseCounts,
uint32_t *indexVector,
int indexVectorLen,
int outputOccupiedLen,
hipStream_t cudaStream) {
  // create count iterator from baseCounts and indexVector
IndexCountIterator countIter = IndexCountIterator(baseCounts, indexVector);
  // total item count, computed by adding the counts together
uint32_t totalCount = thrust::reduce(GET_EXECUTION_POLICY(cudaStream),
countIter,
countIter+indexVectorLen);
// scan the counts to obtain output offsets for each input element
ares::device_vector<uint32_t> offsets(indexVectorLen);
thrust::exclusive_scan(GET_EXECUTION_POLICY(cudaStream),
countIter,
countIter+indexVectorLen,
offsets.begin());
// scatter the nonzero counts into their corresponding output positions
ares::device_vector<uint32_t> indices(totalCount);
thrust::scatter_if(GET_EXECUTION_POLICY(cudaStream),
thrust::counting_iterator<uint32_t>(0),
thrust::counting_iterator<uint32_t>(indexVectorLen),
offsets.begin(),
countIter,
indices.begin());
// compute max-scan over the indices, filling in the holes
thrust::inclusive_scan(GET_EXECUTION_POLICY(cudaStream),
indices.begin(),
indices.end(),
indices.begin(),
thrust::maximum<uint32_t>());
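  // End-to-end example of the expansion above (assumed counts): if countIter
  // yields [2, 0, 3], then offsets = [0, 2, 2] after the exclusive scan and
  // totalCount = 5; scatter_if writes each nonzero element's id at its offset,
  // giving indices = [0, 0, 2, 0, 0] (zero-initialised holes), and the max-scan
  // fills the holes to [0, 0, 2, 2, 2], i.e. input row 0 is repeated twice and
  // input row 2 three times.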
// get the raw pointer from device/host vector
uint32_t * newIndexVector = thrust::raw_pointer_cast(&indices[0]);
int outputLen = min(totalCount, outputKeys.VectorCapacity
- outputOccupiedLen);
// start the real copy operation
DimensionColumnPermutateIterator iterIn(
inputKeys.DimValues, newIndexVector, inputKeys.VectorCapacity,
outputLen, inputKeys.NumDimsPerDimWidth);
DimensionColumnOutputIterator iterOut(outputKeys.DimValues,
outputKeys.VectorCapacity, outputLen,
inputKeys.NumDimsPerDimWidth,
outputOccupiedLen);
int numDims = 0;
for (int i = 0; i < NUM_DIM_WIDTH; i++) {
numDims += inputKeys.NumDimsPerDimWidth[i];
}
// copy dim values into output
thrust::copy(GET_EXECUTION_POLICY(cudaStream), iterIn,
iterIn + numDims * 2 * outputLen, iterOut);
// return total count in the output dimensionVector
return outputLen + outputOccupiedLen;
}
} // namespace ares
| ca5c40bee46feeb1a6480bb292d9d33f4e654229.cu | // Copyright (c) 2017-2018 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <thrust/iterator/zip_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/gather.h>
#include <thrust/transform.h>
#include <cstring>
#include <algorithm>
#include <exception>
#include "query/algorithm.hpp"
#include "query/iterator.hpp"
#include "query/time_series_aggregate.h"
#include "memory.hpp"
CGoCallResHandle Sort(DimensionColumnVector keys,
int length,
void *cudaStream,
int device) {
CGoCallResHandle resHandle = {nullptr, nullptr};
try {
#ifdef RUN_ON_DEVICE
cudaSetDevice(device);
#endif
ares::sort(keys, length,
reinterpret_cast<cudaStream_t>(cudaStream));
CheckCUDAError("Sort");
}
catch (std::exception &e) {
std::cerr << "Exception happend when doing Sort:" << e.what()
<< std::endl;
resHandle.pStrErr = strdup(e.what());
}
return resHandle;
}
CGoCallResHandle Reduce(DimensionColumnVector inputKeys,
uint8_t *inputValues,
DimensionColumnVector outputKeys,
uint8_t *outputValues,
int valueBytes,
int length,
AggregateFunction aggFunc,
void *stream,
int device) {
CGoCallResHandle resHandle = {nullptr, nullptr};
try {
#ifdef RUN_ON_DEVICE
cudaSetDevice(device);
#endif
cudaStream_t cudaStream = reinterpret_cast<cudaStream_t>(stream);
resHandle.res = reinterpret_cast<void *>(ares::reduce(inputKeys,
inputValues,
outputKeys,
outputValues,
valueBytes,
length,
aggFunc,
cudaStream));
CheckCUDAError("Reduce");
return resHandle;
}
catch (std::exception &e) {
std::cerr << "Exception happend when doing Reduce:" << e.what()
<< std::endl;
resHandle.pStrErr = strdup(e.what());
}
return resHandle;
}
CGoCallResHandle Expand(DimensionColumnVector inputKeys,
DimensionColumnVector outputKeys,
uint32_t *baseCounts,
uint32_t *indexVector,
int indexVectorLen,
int outputOccupiedLen,
void *stream,
int device) {
CGoCallResHandle resHandle = {nullptr, nullptr};
try {
SET_DEVICE(device);
cudaStream_t cudaStream = reinterpret_cast<cudaStream_t>(stream);
resHandle.res = reinterpret_cast<void *>(ares::expand(inputKeys,
outputKeys,
baseCounts,
indexVector,
indexVectorLen,
outputOccupiedLen,
cudaStream));
CheckCUDAError("Expand");
return resHandle;
}
catch (std::exception &e) {
std::cerr << "Exception happend when doing Expand:" << e.what()
<< std::endl;
resHandle.pStrErr = strdup(e.what());
}
return resHandle;
}
namespace ares {
// sort based on DimensionColumnVector
void sort(DimensionColumnVector keys,
int length,
cudaStream_t cudaStream) {
DimensionHashIterator hashIter(keys.DimValues,
keys.IndexVector,
keys.NumDimsPerDimWidth,
keys.VectorCapacity);
thrust::copy(GET_EXECUTION_POLICY(cudaStream),
hashIter,
hashIter + length,
keys.HashValues);
thrust::stable_sort_by_key(GET_EXECUTION_POLICY(cudaStream),
keys.HashValues,
keys.HashValues + length,
keys.IndexVector);
}
template<typename Value, typename AggFunc>
int reduceInternal(uint64_t *inputHashValues, uint32_t *inputIndexVector,
uint8_t *inputValues, uint64_t *outputHashValues,
uint32_t *outputIndexVector, uint8_t *outputValues,
int length, cudaStream_t cudaStream) {
thrust::equal_to<uint64_t> binaryPred;
AggFunc aggFunc;
ReduceByHashFunctor<AggFunc> reduceFunc(aggFunc);
auto zippedInputIter = thrust::make_zip_iterator(thrust::make_tuple(
inputIndexVector,
thrust::make_permutation_iterator(reinterpret_cast<Value *>(inputValues),
inputIndexVector)));
auto zippedOutputIter = thrust::make_zip_iterator(thrust::make_tuple(
outputIndexVector, reinterpret_cast<Value *>(outputValues)));
auto resEnd = thrust::reduce_by_key(GET_EXECUTION_POLICY(cudaStream),
inputHashValues,
inputHashValues + length,
zippedInputIter,
thrust::make_discard_iterator(),
zippedOutputIter,
binaryPred,
reduceFunc);
return thrust::get<1>(resEnd) - zippedOutputIter;
}
struct rolling_avg {
typedef uint64_t first_argument_type;
typedef uint64_t second_argument_type;
typedef uint64_t result_type;
__host__ __device__ uint64_t operator()(
uint64_t lhs, uint64_t rhs) const {
uint32_t lCount = lhs >> 32;
uint32_t rCount = rhs >> 32;
uint32_t totalCount = lCount + rCount;
if (totalCount == 0) {
return 0;
}
uint64_t res = 0;
*(reinterpret_cast<uint32_t *>(&res) + 1) = totalCount;
// do division first to avoid overflow.
*reinterpret_cast<float_t*>(&res) =
*reinterpret_cast<float_t*>(&lhs) / totalCount * lCount +
*reinterpret_cast<float_t*>(&rhs) / totalCount * rCount;
return res;
}
};
int bindValueAndAggFunc(uint64_t *inputHashValues,
uint32_t *inputIndexVector,
uint8_t *inputValues,
uint64_t *outputHashValues,
uint32_t *outputIndexVector,
uint8_t *outputValues,
int valueBytes,
int length,
AggregateFunction aggFunc,
cudaStream_t cudaStream) {
switch (aggFunc) {
#define REDUCE_INTERNAL(ValueType, AggFunc) \
return reduceInternal< ValueType, AggFunc >( \
inputHashValues, \
inputIndexVector, \
inputValues, \
outputHashValues, \
outputIndexVector, \
outputValues, \
length, \
cudaStream);
case AGGR_SUM_UNSIGNED:
if (valueBytes == 4) {
REDUCE_INTERNAL(uint32_t, thrust::plus<uint32_t>)
} else {
REDUCE_INTERNAL(uint64_t, thrust::plus<uint64_t>)
}
case AGGR_SUM_SIGNED:
if (valueBytes == 4) {
REDUCE_INTERNAL(int32_t, thrust::plus<int32_t>)
} else {
REDUCE_INTERNAL(int64_t, thrust::plus<int64_t>)
}
case AGGR_SUM_FLOAT:
if (valueBytes == 4) {
REDUCE_INTERNAL(float_t, thrust::plus<float_t>)
} else {
REDUCE_INTERNAL(double_t, thrust::plus<double_t>)
}
case AGGR_MIN_UNSIGNED:
REDUCE_INTERNAL(uint32_t, thrust::minimum<uint32_t>)
case AGGR_MIN_SIGNED:
REDUCE_INTERNAL(int32_t, thrust::minimum<int32_t>)
case AGGR_MIN_FLOAT:
REDUCE_INTERNAL(float_t, thrust::minimum<float_t>)
case AGGR_MAX_UNSIGNED:
REDUCE_INTERNAL(uint32_t, thrust::maximum<uint32_t>)
case AGGR_MAX_SIGNED:
REDUCE_INTERNAL(int32_t, thrust::maximum<int32_t>)
case AGGR_MAX_FLOAT:
REDUCE_INTERNAL(float_t, thrust::maximum<float_t>)
case AGGR_AVG_FLOAT:
REDUCE_INTERNAL(uint64_t, rolling_avg)
default:
throw std::invalid_argument("Unsupported aggregation function type");
}
}
int reduce(DimensionColumnVector inputKeys, uint8_t *inputValues,
DimensionColumnVector outputKeys, uint8_t *outputValues,
int valueBytes, int length, AggregateFunction aggFunc,
cudaStream_t cudaStream) {
int outputLength = bindValueAndAggFunc(
inputKeys.HashValues,
inputKeys.IndexVector,
inputValues,
outputKeys.HashValues,
outputKeys.IndexVector,
outputValues,
valueBytes,
length,
aggFunc,
cudaStream);
DimensionColumnPermutateIterator iterIn(
inputKeys.DimValues, outputKeys.IndexVector, inputKeys.VectorCapacity,
outputLength, inputKeys.NumDimsPerDimWidth);
DimensionColumnOutputIterator iterOut(outputKeys.DimValues,
inputKeys.VectorCapacity, outputLength,
inputKeys.NumDimsPerDimWidth, 0);
int numDims = 0;
for (int i = 0; i < NUM_DIM_WIDTH; i++) {
numDims += inputKeys.NumDimsPerDimWidth[i];
}
// copy dim values into output
thrust::copy(GET_EXECUTION_POLICY(cudaStream),
iterIn, iterIn + numDims * 2 * outputLength, iterOut);
return outputLength;
}
int expand(DimensionColumnVector inputKeys,
DimensionColumnVector outputKeys,
uint32_t *baseCounts,
uint32_t *indexVector,
int indexVectorLen,
int outputOccupiedLen,
cudaStream_t cudaStream) {
// create count interator from baseCount and indexVector
IndexCountIterator countIter = IndexCountIterator(baseCounts, indexVector);
// total item counts by adding counts together
uint32_t totalCount = thrust::reduce(GET_EXECUTION_POLICY(cudaStream),
countIter,
countIter+indexVectorLen);
// scan the counts to obtain output offsets for each input element
ares::device_vector<uint32_t> offsets(indexVectorLen);
thrust::exclusive_scan(GET_EXECUTION_POLICY(cudaStream),
countIter,
countIter+indexVectorLen,
offsets.begin());
// scatter the nonzero counts into their corresponding output positions
ares::device_vector<uint32_t> indices(totalCount);
thrust::scatter_if(GET_EXECUTION_POLICY(cudaStream),
thrust::counting_iterator<uint32_t>(0),
thrust::counting_iterator<uint32_t>(indexVectorLen),
offsets.begin(),
countIter,
indices.begin());
// compute max-scan over the indices, filling in the holes
thrust::inclusive_scan(GET_EXECUTION_POLICY(cudaStream),
indices.begin(),
indices.end(),
indices.begin(),
thrust::maximum<uint32_t>());
// get the raw pointer from device/host vector
uint32_t * newIndexVector = thrust::raw_pointer_cast(&indices[0]);
int outputLen = min(totalCount, outputKeys.VectorCapacity
- outputOccupiedLen);
// start the real copy operation
DimensionColumnPermutateIterator iterIn(
inputKeys.DimValues, newIndexVector, inputKeys.VectorCapacity,
outputLen, inputKeys.NumDimsPerDimWidth);
DimensionColumnOutputIterator iterOut(outputKeys.DimValues,
outputKeys.VectorCapacity, outputLen,
inputKeys.NumDimsPerDimWidth,
outputOccupiedLen);
int numDims = 0;
for (int i = 0; i < NUM_DIM_WIDTH; i++) {
numDims += inputKeys.NumDimsPerDimWidth[i];
}
// copy dim values into output
thrust::copy(GET_EXECUTION_POLICY(cudaStream), iterIn,
iterIn + numDims * 2 * outputLen, iterOut);
// return total count in the output dimensionVector
return outputLen + outputOccupiedLen;
}
} // namespace ares
|